2 * This file is part of the coreboot project.
4 * Copyright (C) 2007 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
32 #include <cpu/x86/lapic.h>
34 #if CONFIG_LOGICAL_CPUS==1
35 #include <cpu/amd/multicore.h>
36 #include <pc80/mc146818rtc.h>
40 #include "root_complex/chip.h"
41 #include "northbridge.h"
45 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
46 #include <cpu/amd/model_10xxx_rev.h>
49 #include <cpu/amd/amdfam10_sysconf.h>
/* Global system configuration shared across the Fam10 northbridge code. */
struct amdfam10_sysconf_t sysconf;

/* Cached PCI devices for each CPU node's config-space functions.
 * Node i lives at devfn (CONFIG_CDB+i).0/.1/.2/.4; populated once by
 * get_fx_devs(). */
#define FX_DEVS NODE_NUMS
static device_t __f0_dev[FX_DEVS];
static device_t __f1_dev[FX_DEVS];
static device_t __f2_dev[FX_DEVS];
static device_t __f4_dev[FX_DEVS];
/* Count of nodes with both F0 and F1 present; the update to this counter
 * is elided in this extract — presumably set inside get_fx_devs(). */
static unsigned fx_devs=0;
/* Return the PCI device for CPU node <nodeid>, function <fn>.
 * NOTE(review): the surrounding #if/#else lines selecting between these
 * three returns are elided in this extract; the middle return handles
 * nodes >= 32, which live on bus CONFIG_CBB-1 — confirm against full file. */
device_t get_node_pci(u32 nodeid, u32 fn)
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
	return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
/* Populate the __f0/__f1/__f2/__f4 device caches for every node and
 * die if node 0's F0/F1 cannot be found (system is unusable without them). */
static void get_fx_devs(void)
	for(i = 0; i < FX_DEVS; i++) {
		__f0_dev[i] = get_node_pci(i, 0);
		__f1_dev[i] = get_node_pci(i, 1);
		__f2_dev[i] = get_node_pci(i, 2);
		__f4_dev[i] = get_node_pci(i, 4);
		/* NOTE(review): the statement executed when both F0 and F1 are
		 * present (presumably fx_devs = i+1) is elided in this extract. */
		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
		die("Cannot find 0:0x18.[0|1]\n");
/* Read a function-1 config register from node 0 (F1 registers are
 * mirrored across nodes, so node 0 is authoritative for reads). */
static u32 f1_read_config32(unsigned reg)
	return pci_read_config32(__f1_dev[0], reg);
/* Broadcast a function-1 config register write to every enabled node,
 * keeping the mirrored F1 address-map registers consistent. */
static void f1_write_config32(unsigned reg, u32 value)
	for(i = 0; i < fx_devs; i++) {
		/* NOTE(review): the assignment of dev (= __f1_dev[i]) is elided here. */
		if (dev && dev->enabled) {
			pci_write_config32(dev, reg, value);
/* Derive the CPU node id from a northbridge PCI device: device number
 * minus CONFIG_CDB, offset by 32 when the device sits on the secondary
 * (CONFIG_CBB-1) bus used for nodes 32..63.
 * NOTE(review): #if/#else structure around the returns is elided. */
static u32 amdfam10_nodeid(device_t dev)
	busn = dev->bus->secondary;
	if(busn != CONFIG_CBB) {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
127 #include "amdfam10_conf.c"
/* Program F1 register 0xF4 (VGA Enable) so legacy VGA accesses are routed
 * to the given node/link: bit 0 enables, bits [7:4] = node, [15:12] = link. */
static void set_vga_enable_reg(u32 nodeid, u32 linkn)
	val = 1 | (nodeid<<4) | (linkn<<12);
	/* it will routing (1)mmio 0xa0000:0xbffff (2) io 0x3b0:0x3bb,
	 * (remainder of this routing comment elided in this extract) */
	f1_write_config32(0xf4, val);
/* Scan one HyperTransport chain hanging off <link_num> of node <nodeid>.
 *
 * Waits for the link to connect and finish init, allocates a config-map
 * register (ht_c_index), assigns secondary/subordinate bus numbers,
 * scans the chain, then shrinks the subordinate range to the real value
 * and records unit-id/bus info in sysconf. Returns the updated <max> bus.
 *
 * NOTE(review): many lines (braces, #else/#endif, several assignments)
 * are elided in this extract; the control flow shown here is partial. */
static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, struct bus *link, u32 link_num, u32 sblink,
				u32 max, u32 offset_unitid)
	// I want to put sb chain in bus 0 can I?
	u32 ht_unitid_base[4]; // here assume only 4 HT device on chain
	u32 is_sublink1 = (link_num>3);
#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
#if CONFIG_HT3_SUPPORT==1
	/* Sublink1 handling: check the gang register; a ganged link has no
	 * independent sublink1 to scan. */
	regpos = 0x170 + 4 * (link_num&3); // it is only on sublink0
	reg = pci_read_config32(dev, regpos);
	if(reg & 1) return max; // already ganged no sblink1
	devx = get_node_pci(nodeid, 4);
	link->cap = 0x80 + ((link_num&3) *0x20);
	/* Spin until the link leaves the ConnectionPending state. */
	link_type = pci_read_config32(devx, link->cap + 0x18);
	} while(link_type & ConnectionPending);
	if (!(link_type & LinkConnected)) {
	/* Spin until link initialization completes. */
	link_type = pci_read_config32(devx, link->cap + 0x18);
	} while(!(link_type & InitComplete));
	/* Coherent links connect CPUs, not devices — nothing to scan. */
	if (!(link_type & NonCoherent)) {
	/* See if there is an available configuration space mapping
	 * register in function 1.
	 */
	ht_c_index = get_ht_c_index(nodeid, link_num, &sysconf);
#if CONFIG_EXT_CONF_SUPPORT == 0
	if(ht_c_index>=4) return max;
	/* Set up the primary, secondary and subordinate bus numbers.
	 * We have no idea how many busses are behind this bridge yet,
	 * so we set the subordinate bus number to 0xff for the moment.
	 */
#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
	// first chain will on bus 0
	if((nodeid == 0) && (sblink==link_num)) { // actually max is 0 here
#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
	// second chain will be on 0x40, third 0x80, forth 0xc0
	// i would refined that to 2, 3, 4 ==> 0, 0x, 40, 0x80, 0xc0
	// >4 will use more segments, We can have 16 segmment and every segment have 256 bus, For that case need the kernel support mmio pci config.
	min_bus = ((busn>>3) + 1) << 3; // one node can have 8 link and segn is the same
	max = min_bus | (segn<<8);
	max_bus = 0xfc | (segn<<8);
	link->secondary = min_bus;
	link->subordinate = max_bus;
	/* Read the existing primary/secondary/subordinate bus
	 * number configuration.
	 */
	busses = pci_read_config32(devx, link->cap + 0x14);
	/* Configure the bus numbers for this bridge: the configuration
	 * transactions will not be propagates by the bridge if it is
	 * not correctly configured
	 */
	busses &= 0xffff00ff;
	busses |= ((u32)(link->secondary) << 8);
	pci_write_config32(devx, link->cap + 0x14, busses);
	/* set the config map space */
	set_config_map_reg(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, sysconf.segbit, sysconf.nodes);
	/* Now we can scan all of the subordinate busses i.e. the
	 * chain on the hypertranport link
	 */
	ht_unitid_base[i] = 0x20;
	//if ext conf is enabled, only need use 0x1f
	max_devfn = (0x17<<3) | 7;
	max_devfn = (0x1f<<3) | 7;
	max = hypertransport_scan_chain(link, 0, max_devfn, max, ht_unitid_base, offset_unitid);
	/* We know the number of busses behind this bridge. Set the
	 * subordinate bus number to it's real value
	 */
	if(ht_c_index>3) { // clear the extend reg
		clear_config_map_reg(nodeid, link_num, ht_c_index, (max+1)>>sysconf.segbit, (link->subordinate)>>sysconf.segbit, sysconf.nodes);
	link->subordinate = max;
	set_config_map_reg(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, sysconf.segbit, sysconf.nodes);
	// use ht_unitid_base to update hcdn_reg
	temp |= (ht_unitid_base[i] & 0xff) << (i*8);
	sysconf.hcdn_reg[ht_c_index] = temp;
	store_ht_c_conf_bus(nodeid, link_num, ht_c_index, link->secondary, link->subordinate, &sysconf);
/* Scan every HT chain on this northbridge device.  The southbridge chain
 * (sblink on node 0) is scanned first so it lands on bus 0, then the
 * remaining links are scanned in list order.
 * NOTE(review): several #endif/brace lines are elided in this extract. */
static unsigned amdfam10_scan_chains(device_t dev, unsigned max)
	unsigned sblink = sysconf.sblk;
	unsigned offset_unitid = 0;
	nodeid = amdfam10_nodeid(dev);
	// Put sb chain in bus 0
#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
#if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
	for (link = dev->link_list; link; link = link->next)
		if (link->link_num == sblink)
			max = amdfam10_scan_chain(dev, nodeid, link, sblink, sblink, max, offset_unitid ); // do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0
#if CONFIG_PCI_BUS_SEGN_BITS
	max = check_segn(dev, max, sysconf.nodes, &sysconf);
	for(link = dev->link_list; link; link = link->next) {
#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 0
		if( (nodeid == 0) && (sblink == link->link_num) ) continue; //already done
#if ((CONFIG_HT_CHAIN_UNITID_BASE != 1) || (CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20))
#if CONFIG_SB_HT_CHAIN_UNITID_OFFSET_ONLY == 1
		if((nodeid == 0) && (sblink == link->link_num))
		max = amdfam10_scan_chain(dev, nodeid, link, link->link_num, sblink, max, offset_unitid);
/* Check whether F1 address-pair register <reg> can be used for
 * (goal_dev, goal_nodeid, goal_link): walks every node/link looking for an
 * existing resource with that register index.
 * NOTE(review): the return statements and part of the comparison logic are
 * elided in this extract — presumably returns 0 (in use elsewhere),
 * 1 (already ours) or >1 (free), judging from the callers below. */
static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
	struct resource *res;
	unsigned nodeid, link = 0;
	for(nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		dev = __f0_dev[nodeid];
		for(link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
	/* loop indices are post-incremented when a match breaks the loops,
	 * hence the -1 adjustments here */
	if ( (goal_link == (link - 1)) &&
		(goal_nodeid == (nodeid - 1)) &&
/* Find (or allocate) an F1 I/O base/limit register pair for this
 * node/link and return a new resource keyed by IOINDEX(0x1000+reg, link).
 * I/O pairs live at F1 regs 0xC0..0xD8 in steps of 8. */
static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsigned link)
	struct resource *resource;
	for(reg = 0xc0; reg <= 0xd8; reg += 0x8) {
		result = reg_useable(reg, dev, nodeid, link);
		/* I have been allocated this one */
		else if (result > 1) {
			/* I have a free register pair */
	reg = free_reg; // if no free, the free_reg still be 0
	//because of Extend conf space, we will never run out of reg, but we need one index to differ them. so same node and same link can have multi range
	u32 index = get_io_addr_index(nodeid, link);
	reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Find (or allocate) an F1 MMIO base/limit register pair for this
 * node/link and return a new resource keyed by IOINDEX(0x1000+reg, link).
 * MMIO pairs live at F1 regs 0x80..0xB8 in steps of 8. */
static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
	struct resource *resource;
	for(reg = 0x80; reg <= 0xb8; reg += 0x8) {
		result = reg_useable(reg, dev, nodeid, link);
		/* I have been allocated this one */
		else if (result > 1) {
			/* I have a free register pair */
	//because of Extend conf space, we will never run out of reg,
	// but we need one index to differ them. so same node and
	// same link can have multi range
	u32 index = get_mmio_addr_index(nodeid, link);
	reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
/* Create the three bridge resources (I/O, prefetchable MMIO, MMIO) that
 * constrain allocation for devices behind <link> of node <nodeid>. */
static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
	struct resource *resource;
	/* Initialize the io space constraints on the current bus */
	resource = amdfam10_find_iopair(dev, nodeid, link);
#if CONFIG_EXT_CONF_SUPPORT == 1
	if((resource->index & 0x1fff) == 0x1110) { // ext
	align = log2(HT_IO_HOST_ALIGN);
	resource->align = align;
	resource->gran = align;
	resource->limit = 0xffffUL;
	resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
	/* Initialize the prefetchable memory constraints on the current bus */
	resource = amdfam10_find_mempair(dev, nodeid, link);
	resource->align = log2(HT_MEM_HOST_ALIGN);
	resource->gran = log2(HT_MEM_HOST_ALIGN);
	/* 40-bit physical address limit */
	resource->limit = 0xffffffffffULL;
	resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
	resource->flags |= IORESOURCE_BRIDGE;
#if CONFIG_EXT_CONF_SUPPORT == 1
	if((resource->index & 0x1fff) == 0x1110) { // ext
		normalize_resource(resource);
	/* Initialize the memory constraints on the current bus */
	resource = amdfam10_find_mempair(dev, nodeid, link);
	resource->align = log2(HT_MEM_HOST_ALIGN);
	resource->gran = log2(HT_MEM_HOST_ALIGN);
	resource->limit = 0xffffffffffULL;
	resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
#if CONFIG_EXT_CONF_SUPPORT == 1
	if((resource->index & 0x1fff) == 0x1110) { // ext
		normalize_resource(resource);
/* read_resources hook: create bridge constraints for every link that has
 * children (links without devices need no address windows). */
static void amdfam10_read_resources(device_t dev)
	nodeid = amdfam10_nodeid(dev);
	for(link = dev->link_list; link; link = link->next) {
		if (link->children) {
			amdfam10_link_read_bases(dev, nodeid, link->link_num);
/* Program one previously-allocated F1 address-pair resource into the
 * hardware (I/O or MMIO base/limit registers) and mark it stored. */
static void amdfam10_set_resource(device_t dev, struct resource *resource,
	resource_t rbase, rend;
	unsigned reg, link_num;
	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
	/* Get the base address */
	rbase = resource->base;
	/* Get the limit (rounded up) */
	rend = resource_end(resource);
	/* Get the register and link */
	reg = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);
	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
		store_conf_io_addr(nodeid, link_num, reg, (resource->index >> 24), rbase>>8, rend>>8);
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
		store_conf_mmio_addr(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8);
	resource->flags |= IORESOURCE_STORED;
	sprintf(buf, " <node %x link %x>",
	report_resource_stored(dev, resource, buf);
/*
 * I tried to reuse the resource allocation code in amdfam10_set_resource()
 * but it is too diffcult to deal with the resource allocation magic.
 */
#if CONFIG_CONSOLE_VGA_MULTI == 1
extern device_t vga_pri; // the primary vga device, defined in device.c

/* Route legacy VGA to the link behind which the (primary) VGA card sits,
 * by programming the F1 VGA Enable register for that node/link. */
static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
	/* find out which link the VGA card is connected,
	 * we only deal with the 'first' vga card */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if CONFIG_CONSOLE_VGA_MULTI == 1
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary,link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if((vga_pri->bus->secondary >= link->secondary ) &&
				(vga_pri->bus->secondary <= link->subordinate )
	/* no VGA card installed */
	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link->link_num);
	set_vga_enable_reg(nodeid, link->link_num);
/* set_resources hook: route VGA, program every allocated F1 address pair,
 * then recurse into the child buses. */
static void amdfam10_set_resources(device_t dev)
	struct resource *res;
	/* Find the nodeid */
	nodeid = amdfam10_nodeid(dev);
	amdfam10_create_vga_resource(dev, nodeid);
	/* Set each resource we have found */
	for(res = dev->resource_list; res; res = res->next) {
		amdfam10_set_resource(dev, res, nodeid);
	for(bus = dev->link_list; bus; bus = bus->next) {
		assign_resources(bus);
/* init hook for the node F0 device.
 * NOTE(review): the function body is elided in this extract. */
static void mcf0_control_init(struct device *dev)

/* Device operations for the per-node northbridge function 0. */
static struct device_operations northbridge_operations = {
	.read_resources	  = amdfam10_read_resources,
	.set_resources	  = amdfam10_set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init		  = mcf0_control_init,
	.scan_bus	  = amdfam10_scan_chains,

/* PCI driver binding; the .device id line is elided in this extract. */
static const struct pci_driver mcf0_driver __pci_driver = {
	.ops	= &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,

struct chip_operations northbridge_amd_amdfam10_ops = {
	CHIP_NAME("AMD FAM10 Northbridge")
/* Domain read_resources hook: reserve the F1 address pairs already
 * programmed by romstage, then create the system-wide I/O and memory
 * constraint resources for each link. */
static void amdfam10_domain_read_resources(device_t dev)
	/* Find the already assigned resource pairs */
	for(reg = 0x80; reg <= 0xd8; reg+= 0x08) {
		base = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned nodeid, reg_link;
			/* Destination node encoding differs between MMIO
			 * (regs < 0xC0) and I/O pairs. */
			if(reg<0xc0) { // mmio
				nodeid = (limit & 0xf) + (base&0x30);
				nodeid = (limit & 0xf) + ((base>>4)&0x30);
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			/* Reserve the resource */
			struct resource *res;
			res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
	/* FIXME: do we need to check extend conf space?
	   I don't believe that much preset value */
#if CONFIG_PCI_64BIT_PREF_MEM == 0
	pci_domain_read_resources(dev);
	struct resource *resource;
	for(link=dev->link_list; link; link = link->next) {
		/* Initialize the system wide io space constraints */
		resource = new_resource(dev, 0|(link->link_num<<2));
		resource->base = 0x400;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO;
		/* Initialize the system wide prefetchable memory resources constraints */
		resource = new_resource(dev, 1|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		/* Initialize the system wide memory resources constraints */
		resource = new_resource(dev, 2|(link->link_num<<2));
		resource->limit = 0xfcffffffffULL;
		resource->flags = IORESOURCE_MEM;
/* Report one DRAM range (in KiB units) as a fixed, cacheable, already
 * assigned memory resource on the domain device. */
static void ram_resource(device_t dev, unsigned long index,
	resource_t basek, resource_t sizek)
	struct resource *resource;
	resource = new_resource(dev, index);
	/* inputs are KiB; resources are byte addresses */
	resource->base	= basek << 10;
	resource->size	= sizek << 10;
	resource->flags = IORESOURCE_MEM | IORESOURCE_CACHEABLE | \
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* search_bus_resources() callback: remember the lowest-based memory
 * resource seen, via the struct resource ** passed in <gp>. */
static void tolm_test(void *gp, struct device *dev, struct resource *new)
	struct resource **best_p = gp;
	struct resource *best;
	if (!best || (best->base > new->base)) {
/* Compute the top of low memory for PCI: lower <tolm> to the base of the
 * lowest PCI memory resource found on <bus>, if any lies below it. */
static u32 find_pci_tolm(struct bus *bus, u32 tolm)
	struct resource *min;
	search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
	if (min && tolm > min->base) {
#if CONFIG_HW_MEM_HOLE_SIZEK != 0

/* Describes the hardware memory hole below 4G: start (KiB) and the node
 * that carries it (-1 when no hole was found). */
struct hw_mem_hole_info {
	unsigned hole_startk;

/* Discover the memory hole programmed by raminit: first look for a node
 * with the DRAM Hole Address Register (F1 0xF0) enabled, then fall back
 * to detecting a discontinuity between consecutive nodes' base/limit. */
static struct hw_mem_hole_info get_hw_mem_hole_info(void)
	struct hw_mem_hole_info mem_hole;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < sysconf.nodes; i++) {
		struct dram_base_mask_t d;
		d = get_dram_base_mask(i);
		if(!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if(hole & 1) { // we find the hole
			mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
			mem_hole.node_id = i; // record the node No with hole
			break; // only one hole
	//We need to double check if there is speical set on base reg and limit reg are not continous instead of hole, it will find out it's hole_startk
	if(mem_hole.node_id==-1) {
		resource_t limitk_pri = 0;
		for(i=0; i<sysconf.nodes; i++) {
			struct dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if(!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
			if(base_k > 4 *1024 * 1024) break; // don't need to go to check
			if(limitk_pri != base_k) { // we find the hole
				mem_hole.hole_startk = (unsigned)limitk_pri; // must beblow 4G
				mem_hole.node_id = i;
				break; //only one hole
			limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
// WHY this check? CONFIG_AMDMCT is enabled on all Fam10 boards.
// Does it make sense not to?
#if CONFIG_AMDMCT == 0
/* Undo memory hoisting: shrink the DRAM map of every node above the hole
 * (and the hole node itself) by the hole size and clear F1 0xF0, so the
 * hole can later be re-established at a different address. */
static void disable_hoist_memory(unsigned long hole_startk, int node_id)
	struct dram_base_mask_t d;
	/* sysinfo saved by romstage lives just below CONFIG_RAMTOP */
	struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
	struct mem_info *meminfo;
	meminfo = &sysinfox->meminfo[node_id];
	one_DCT = get_one_DCT(meminfo);
	// 1. find which node has hole
	// 2. change limit in that node.
	// 3. change base and limit in later node
	// 4. clear that node f0
	// if there is not mem hole enabled, we need to change it's base instead
	hole_sizek = (4*1024*1024) - hole_startk;
	/* Shift every node above the hole node down by the hole size.
	 * base/mask registers are in 256KB-ish units, hence >>9 of KiB. */
	for(i=NODE_NUMS-1;i>node_id;i--) {
		d = get_dram_base_mask(i);
		if(!(d.mask & 1)) continue;
		d.base -= (hole_sizek>>9);
		d.mask -= (hole_sizek>>9);
		set_dram_base_mask(i, d, sysconf.nodes);
		if(get_DctSelHiEn(i) & 1) {
			sel_m = get_DctSelBaseAddr(i);
			sel_m -= hole_startk>>10;
			set_DctSelBaseAddr(i, sel_m);
	/* Now fix up the hole node itself. */
	d = get_dram_base_mask(node_id);
	dev = __f1_dev[node_id];
	sel_hi_en = get_DctSelHiEn(node_id);
	sel_m = get_DctSelBaseAddr(node_id);
	hoist = pci_read_config32(dev, 0xf0);
	pci_write_config32(dev, 0xf0, 0);
	d.mask -= (hole_sizek>>9);
	set_dram_base_mask(node_id, d, sysconf.nodes);
	if(one_DCT || (sel_m >= (hole_startk>>10))) {
		sel_m -= hole_startk>>10;
		set_DctSelBaseAddr(node_id, sel_m);
	set_DctSelBaseOffset(node_id, 0);
	d.base -= (hole_sizek>>9);
	d.mask -= (hole_sizek>>9);
	set_dram_base_mask(node_id, d, sysconf.nodes);
	sel_m -= hole_startk>>10;
	set_DctSelBaseAddr(node_id, sel_m);
#if CONFIG_WRITE_HIGH_TABLES==1
#define HIGH_TABLES_SIZE 64	// maximum size of high tables in KB
extern uint64_t high_tables_base, high_tables_size;

#if CONFIG_GFXUMA == 1
extern uint64_t uma_memory_base, uma_memory_size;

/* Reserve the integrated-graphics UMA framebuffer as a fixed memory
 * resource so the allocator never hands it to devices. */
static void add_uma_resource(struct device *dev, int index)
	struct resource *resource;
	printk(BIOS_DEBUG, "Adding UMA memory area\n");
	resource = new_resource(dev, index);
	resource->base = (resource_t) uma_memory_base;
	resource->size = (resource_t) uma_memory_size;
	resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
		IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
886 static void amdfam10_domain_set_resources(device_t dev)
888 #if CONFIG_PCI_64BIT_PREF_MEM == 1
889 struct resource *io, *mem1, *mem2;
890 struct resource *res;
892 unsigned long mmio_basek;
896 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
897 struct hw_mem_hole_info mem_hole;
898 u32 reset_memhole = 1;
901 #if CONFIG_PCI_64BIT_PREF_MEM == 1
903 for(link = dev->link_list; link; link = link->next) {
904 /* Now reallocate the pci resources memory with the
905 * highest addresses I can manage.
907 mem1 = find_resource(dev, 1|(link->link_num<<2));
908 mem2 = find_resource(dev, 2|(link->link_num<<2));
910 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
911 mem1->base, mem1->limit, mem1->size, mem1->align);
912 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
913 mem2->base, mem2->limit, mem2->size, mem2->align);
915 /* See if both resources have roughly the same limits */
916 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
917 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
919 /* If so place the one with the most stringent alignment first
921 if (mem2->align > mem1->align) {
922 struct resource *tmp;
927 /* Now place the memory as high up as it will go */
928 mem2->base = resource_max(mem2);
929 mem1->limit = mem2->base - 1;
930 mem1->base = resource_max(mem1);
933 /* Place the resources as high up as they will go */
934 mem2->base = resource_max(mem2);
935 mem1->base = resource_max(mem1);
938 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
939 mem1->base, mem1->limit, mem1->size, mem1->align);
940 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
941 mem2->base, mem2->limit, mem2->size, mem2->align);
944 for(res = &dev->resource_list; res; res = res->next)
946 res->flags |= IORESOURCE_ASSIGNED;
947 res->flags |= IORESOURCE_STORED;
948 report_resource_stored(dev, res, "");
952 pci_tolm = 0xffffffffUL;
953 for(link = dev->link_list; link; link = link->next) {
954 pci_tolm = find_pci_tolm(link, pci_tolm);
957 // FIXME handle interleaved nodes. If you fix this here, please fix
959 mmio_basek = pci_tolm >> 10;
960 /* Round mmio_basek to something the processor can support */
961 mmio_basek &= ~((1 << 6) -1);
963 // FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
964 // MMIO hole. If you fix this here, please fix amdk8, too.
965 /* Round the mmio hole to 64M */
966 mmio_basek &= ~((64*1024) - 1);
968 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
969 /* if the hw mem hole is already set in raminit stage, here we will compare
970 * mmio_basek and hole_basek. if mmio_basek is bigger that hole_basek and will
971 * use hole_basek as mmio_basek and we don't need to reset hole.
972 * otherwise We reset the hole to the mmio_basek
975 mem_hole = get_hw_mem_hole_info();
977 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
978 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
979 mmio_basek = mem_hole.hole_startk;
983 #if CONFIG_AMDMCT == 0
984 //mmio_basek = 3*1024*1024; // for debug to meet boundary
987 if(mem_hole.node_id!=-1) {
988 /* We need to select CONFIG_HW_MEM_HOLE_SIZEK for raminit, it can not
989 make hole_startk to some basek too!
990 We need to reset our Mem Hole, because We want more big HOLE
992 Before that We need to disable mem hole at first, becase
993 memhole could already be set on i+1 instead
995 disable_hoist_memory(mem_hole.hole_startk, mem_hole.node_id);
998 #if CONFIG_HW_MEM_HOLE_SIZE_AUTO_INC == 1
999 // We need to double check if the mmio_basek is valid for hole
1000 // setting, if it is equal to basek, we need to decrease it some
1001 resource_t basek_pri;
1002 for (i = 0; i < sysconf.nodes; i++) {
1003 struct dram_base_mask_t d;
1005 d = get_dram_base_mask(i);
1007 if(!(d.mask &1)) continue;
1009 basek = ((resource_t)(d.base & 0x1fffff00)) << 9;
1010 if(mmio_basek == (u32)basek) {
1011 mmio_basek -= (uin32_t)(basek - basek_pri); // increase mem hole size to make sure it is on middle of pri node
1024 for(i = 0; i < sysconf.nodes; i++) {
1025 struct dram_base_mask_t d;
1026 resource_t basek, limitk, sizek; // 4 1T
1027 d = get_dram_base_mask(i);
1029 if(!(d.mask & 1)) continue;
1030 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lost 6 bit here
1031 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1032 sizek = limitk - basek;
1034 /* see if we need a hole from 0xa0000 to 0xbffff */
1035 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1036 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1038 basek = (8*64)+(16*16);
1039 sizek = limitk - ((8*64)+(16*16));
1043 // printk(BIOS_DEBUG, "node %d : mmio_basek=%08x, basek=%08x, limitk=%08x\n", i, mmio_basek, basek, limitk);
1045 /* split the region to accomodate pci memory space */
1046 if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
1047 if (basek <= mmio_basek) {
1049 pre_sizek = mmio_basek - basek;
1051 ram_resource(dev, (idx | i), basek, pre_sizek);
1054 #if CONFIG_WRITE_HIGH_TABLES==1
1055 if (high_tables_base==0) {
1056 /* Leave some space for ACPI, PIRQ and MP tables */
1057 #if CONFIG_GFXUMA == 1
1058 high_tables_base = uma_memory_base - (HIGH_TABLES_SIZE * 1024);
1060 high_tables_base = (mmio_basek - HIGH_TABLES_SIZE) * 1024;
1062 high_tables_size = HIGH_TABLES_SIZE * 1024;
1063 printk(BIOS_DEBUG, " split: %dK table at =%08llx\n", HIGH_TABLES_SIZE,
1068 #if CONFIG_AMDMCT == 0
1069 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1071 struct sys_info *sysinfox = (struct sys_info *)((CONFIG_RAMTOP) - CONFIG_DCACHE_RAM_GLOBAL_VAR_SIZE); // in RAM
1072 struct mem_info *meminfo;
1073 meminfo = &sysinfox->meminfo[i];
1074 sizek += hoist_memory(mmio_basek,i, get_one_DCT(meminfo), sysconf.nodes);
1081 if ((basek + sizek) <= 4*1024*1024) {
1085 basek = 4*1024*1024;
1086 sizek -= (4*1024*1024 - mmio_basek);
1089 ram_resource(dev, (idx | i), basek, sizek);
1091 #if CONFIG_WRITE_HIGH_TABLES==1
1092 printk(BIOS_DEBUG, "%d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
1093 i, mmio_basek, basek, limitk);
1094 if (high_tables_base==0) {
1095 /* Leave some space for ACPI, PIRQ and MP tables */
1096 #if CONFIG_GFXUMA == 1
1097 high_tables_base = uma_memory_base - (HIGH_TABLES_SIZE * 1024);
1099 high_tables_base = (limitk - HIGH_TABLES_SIZE) * 1024;
1101 high_tables_size = HIGH_TABLES_SIZE * 1024;
1106 #if CONFIG_GFXUMA == 1
1107 add_uma_resource(dev, 7);
1110 for(link = dev->link_list; link; link = link->next) {
1111 if (link->children) {
1112 assign_resources(link);
/* Domain scan_bus hook: unmap all HT config-map registers (and extended
 * config maps), scan each link from the CPU device, then tune HT
 * transaction control (response PassPW) for best performance. */
static u32 amdfam10_domain_scan_bus(device_t dev, u32 max)
	/* Unmap all of the HT chains */
	for(reg = 0xe0; reg <= 0xec; reg += 4) {
		f1_write_config32(reg, 0);
#if CONFIG_EXT_CONF_SUPPORT == 1
	/* Clear all 64 extended configuration map entries on every node
	 * via the F1 0x110/0x114 index/data pair. */
	for(i = 0; i< sysconf.nodes; i++) {
		for(index = 0; index < 64; index++) {
			pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
			pci_write_config32(__f1_dev[i], 0x114, 0);
	for(link = dev->link_list; link; link = link->next) {
		max = pci_scan_bus(link, PCI_DEVFN(CONFIG_CDB, 0), 0xff, max);
	/* Tune the hypertransport transaction for best performance.
	 * Including enabling relaxed ordering if it is safe.
	 */
	for(i = 0; i < fx_devs; i++) {
		f0_dev = __f0_dev[i];
		if (f0_dev && f0_dev->enabled) {
			httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
			httc &= ~HTTC_RSP_PASS_PW;
			if (!dev->link_list->disable_relaxed_ordering) {
				httc |= HTTC_RSP_PASS_PW;
			printk(BIOS_SPEW, "%s passpw: %s\n",
				(!dev->link_list->disable_relaxed_ordering)?
				"enabled":"disabled");
			pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
/* Device operations for the PCI domain; config access method depends on
 * whether MMCONF is the default mechanism. */
static struct device_operations pci_domain_ops = {
	.read_resources	  = amdfam10_domain_read_resources,
	.set_resources	  = amdfam10_domain_set_resources,
	.enable_resources = NULL,
	.scan_bus	  = amdfam10_domain_scan_bus,
#if CONFIG_MMCONF_SUPPORT_DEFAULT
	.ops_pci_bus	  = &pci_ops_mmconf,
	.ops_pci_bus	  = &pci_cf8_conf1,
/* Initialize the global sysconf from node 0's registers: southbridge link,
 * node count, BSP APIC id and the extended-APIC-id / apicid-offset
 * policy.  <dev> must be the first node's F0 device. */
static void sysconf_init(device_t dev) // first node
	sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
	sysconf.ht_c_num = 0;
	unsigned ht_c_index;
	for(ht_c_index=0; ht_c_index<32; ht_c_index++) {
		sysconf.ht_c_conf_bus[ht_c_index] = 0;
	sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1;
#if CONFIG_MAX_PHYSICAL_CPUS > 8
	/* high node-count bits live in reg 0x160 */
	sysconf.nodes += (((pci_read_config32(dev, 0x160)>>4) & 7)<<3);
	sysconf.enabled_apic_ext_id = 0;
	sysconf.lift_bsp_apicid = 0;
	/* Find the bootstrap processors apicid */
	sysconf.bsp_apicid = lapicid();
	sysconf.apicid_offset = sysconf.bsp_apicid;
#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
	if (pci_read_config32(dev, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
		sysconf.enabled_apic_ext_id = 1;
#if (CONFIG_APIC_ID_OFFSET>0)
	if(sysconf.enabled_apic_ext_id) {
		if(sysconf.bsp_apicid == 0) {
			/* bsp apic id is not changed */
			sysconf.apicid_offset = CONFIG_APIC_ID_OFFSET;
			sysconf.lift_bsp_apicid = 1;
/* Grow <dev>'s link list to <total_links> entries, allocating and chaining
 * the extra struct bus objects after the existing tail (or creating the
 * whole list if the device had none).
 * NOTE(review): several lines (tail search, list splice) are elided. */
static void add_more_links(device_t dev, unsigned total_links)
	struct bus *link, *last = NULL;
	for (link = dev->link_list; link; link = link->next)
	int links = total_links - last->link_num;
	link_num = last->link_num;
	link = malloc(links*sizeof(*link));
	die("Couldn't allocate more links!\n");
	memset(link, 0, links*sizeof(*link));
	/* device had no links at all: allocate the full list */
	link = malloc(total_links*sizeof(*link));
	memset(link, 0, total_links*sizeof(*link));
	dev->link_list = link;
	/* number the fresh links consecutively; entries are contiguous,
	 * so each points at the next array element */
	for (link_num = link_num + 1; link_num < total_links; link_num++) {
		link->link_num = link_num;
		link->next = link + 1;
1258 static u32 cpu_bus_scan(device_t dev, u32 max)
1260 struct bus *cpu_bus;
1263 device_t pci_domain;
1270 int disable_siblings;
1271 unsigned ApicIdCoreIdSize;
1274 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
1275 if(ApicIdCoreIdSize) {
1276 siblings = (1<<ApicIdCoreIdSize)-1;
1278 siblings = 3; //quad core
1281 disable_siblings = !CONFIG_LOGICAL_CPUS;
1282 #if CONFIG_LOGICAL_CPUS == 1
1283 get_option(&disable_siblings, "multi_core");
1286 // How can I get the nb_cfg_54 of every node's nb_cfg_54 in bsp???
1287 nb_cfg_54 = read_nb_cfg_54();
1290 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
1291 if(dev_mc && dev_mc->bus) {
1292 printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
1293 pci_domain = dev_mc->bus->dev;
1294 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1295 printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
1296 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1297 printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
1300 printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
1302 printk(BIOS_DEBUG, "\n");
1304 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1306 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1307 if (dev_mc && dev_mc->bus) {
1308 printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
1309 pci_domain = dev_mc->bus->dev;
1310 if(pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1311 if((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
1312 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1313 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1314 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1316 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1317 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1318 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1319 dev_mc = dev_mc->sibling;
1328 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1330 printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
1334 sysconf_init(dev_mc);
1336 nodes = sysconf.nodes;
1338 #if CONFIG_CBB && (NODE_NUMS > 32)
1339 if(nodes>32) { // need to put node 32 to node 63 to bus 0xfe
1340 if(pci_domain->link_list && !pci_domain->link_list->next) {
1341 struct bus *new_link = new_link(pci_domain);
1342 pci_domain->link_list->next = new_link;
1343 new_link->link_num = 1;
1344 new_link->dev = pci_domain;
1345 new_link->children = 0;
1346 printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
1348 pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
1351 /* Find which cpus are present */
1352 cpu_bus = dev->link_list;
1353 for(i = 0; i < nodes; i++) {
1354 device_t cdb_dev, cpu;
1355 struct device_path cpu_path;
1356 unsigned busn, devn;
1360 devn = CONFIG_CDB+i;
1362 #if CONFIG_CBB && (NODE_NUMS > 32)
1366 pbus = pci_domain->link_list->next);
1370 /* Find the cpu's pci device */
1371 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1373 /* If I am probing things in a weird order
1374 * ensure all of the cpu's pci devices are found.
1377 for(fn = 0; fn <= 5; fn++) { //FBDIMM?
1378 cdb_dev = pci_probe_dev(NULL, pbus,
1379 PCI_DEVFN(devn, fn));
1381 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1384 /* Ok, We need to set the links for that device.
1385 * otherwise the device under it will not be scanned
1388 #if CONFIG_HT3_SUPPORT==1
1393 add_more_links(cdb_dev, linknum);
1396 cores_found = 0; // one core
1397 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1398 if (cdb_dev && cdb_dev->enabled) {
1399 j = pci_read_config32(cdb_dev, 0xe8);
1400 cores_found = (j >> 12) & 3; // dev is func 3
1402 cores_found |= (j >> 13) & 4;
1403 printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
1407 if(disable_siblings) {
1414 for (j = 0; j <=jj; j++ ) {
1416 /* Build the cpu device path */
1417 cpu_path.type = DEVICE_PATH_APIC;
1418 cpu_path.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
1420 /* See if I can find the cpu */
1421 cpu = find_dev_path(cpu_bus, &cpu_path);
1423 /* Enable the cpu if I have the processor */
1424 if (cdb_dev && cdb_dev->enabled) {
1426 cpu = alloc_dev(cpu_bus, &cpu_path);
1433 /* Disable the cpu if I don't have the processor */
1434 if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
1438 /* Report what I have done */
1440 cpu->path.apic.node_id = i;
1441 cpu->path.apic.core_id = j;
1442 #if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET>0)
1443 if(sysconf.enabled_apic_ext_id) {
1444 if(sysconf.lift_bsp_apicid) {
1445 cpu->path.apic.apic_id += sysconf.apicid_offset;
1448 if (cpu->path.apic.apic_id != 0)
1449 cpu->path.apic.apic_id += sysconf.apicid_offset;
1453 printk(BIOS_DEBUG, "CPU: %s %s\n",
1454 dev_path(cpu), cpu->enabled?"enabled":"disabled");
/* init hook for the APIC cluster: bring up all CPUs on its link. */
1462 static void cpu_bus_init(device_t dev)
1464 initialize_cpus(dev->link_list);
/* Empty hook, used where device_operations needs a callback but no
 * work is required (enable_resources in cpu_bus_ops). Body elided in
 * this excerpt. */
1467 static void cpu_bus_noop(device_t dev)
/* read_resources hook: when MMCONF is enabled, reserve the PCIe MMIO
 * configuration window so nothing else is placed over it. */
1471 static void cpu_bus_read_resources(device_t dev)
1473 #if CONFIG_MMCONF_SUPPORT
/* Resource index 0xc0010058 presumably mirrors the AMD MMIO Config
 * Base Address MSR (MSR C001_0058) -- confirm against the BKDG. */
1474 struct resource *resource = new_resource(dev, 0xc0010058);
1475 resource->base = CONFIG_MMCONF_BASE_ADDRESS;
/* 4 KiB of config space per device * 256 devfns = 1 MiB per bus. */
1476 resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
1477 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
1478 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
/* set_resources hook: log the reserved MMCONF window registered in
 * cpu_bus_read_resources(), then fall through to the generic PCI
 * resource setter. (The NULL-check / #if guards between these lines
 * are elided in this excerpt.) */
1482 static void cpu_bus_set_resources(device_t dev)
1484 struct resource *resource = find_resource(dev, 0xc0010058);
1486 report_resource_stored(dev, resource, " <mmconfig>");
1488 pci_dev_set_resources(dev);
/* Operations attached to the APIC cluster device by
 * root_complex_enable_dev() below. */
1491 static struct device_operations cpu_bus_ops = {
1492 .read_resources = cpu_bus_read_resources,
1493 .set_resources = cpu_bus_set_resources,
1494 .enable_resources = cpu_bus_noop,
1495 .init = cpu_bus_init,
1496 .scan_bus = cpu_bus_scan,
/* enable_dev hook for the root-complex chip: route the two special
 * bus devices to their operation tables (PCI domain vs. APIC
 * cluster); all other device types are left untouched. */
1499 static void root_complex_enable_dev(struct device *dev)
1501 /* Set the operations if it is a special bus type */
1502 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1503 dev->ops = &pci_domain_ops;
1505 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1506 dev->ops = &cpu_bus_ops;
/* Chip driver entry point referenced from the devicetree; closing
 * brace of this struct falls outside the visible excerpt. */
1510 struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
1511 CHIP_NAME("AMD FAM10 Root Complex")
1512 .enable_dev = root_complex_enable_dev,