2 * This file is part of the coreboot project.
4 * It was originally based on the Linux kernel (arch/i386/kernel/pci-pc.c).
7 * Copyright (C) 2003 Eric Biederman <ebiederm@xmission.com>
8 * Copyright (C) 2003-2004 Linux Networx
9 * (Written by Eric Biederman <ebiederman@lnxi.com> for Linux Networx)
10 * Copyright (C) 2003 Ronald G. Minnich <rminnich@gmail.com>
11 * Copyright (C) 2004-2005 Li-Ta Lo <ollie@lanl.gov>
12 * Copyright (C) 2005-2006 Tyan
13 * (Written by Yinghai Lu <yhlu@tyan.com> for Tyan)
14 * Copyright (C) 2005-2006 Stefan Reinauer <stepan@openbios.org>
15 * Copyright (C) 2009 Myles Watson <mylesgw@gmail.com>
19 * (c) 1999--2000 Martin Mares <mj@suse.cz>
21 /* lots of mods by ron minnich (rminnich@lanl.gov), with
22 * the final architecture guidance from Tom Merritt (tjm@codegen.com)
23 * In particular, we changed from the one-pass original version to
24 * Tom's recommended multiple-pass version. I wasn't sure about doing
25 * it with multiple passes, until I actually started doing it and saw
26 * the wisdom of Tom's recommendations ...
28 * Lots of cleanups by Eric Biederman to handle bridges, and to
29 * handle resource allocation for non-pci devices.
32 #include <console/console.h>
35 #include <device/device.h>
36 #include <device/pci.h>
37 #include <device/pci_ids.h>
40 #include <smp/spinlock.h>
/** Linked list of ALL devices */
struct device *all_devices = &dev_root;
/** Pointer to the last device (tail pointer used to append to the list above). */
extern struct device **last_dev_p;
/**
 * @brief Allocate a new device structure.
 *
 * Allocate a new device structure and attach it to the device tree as a
 * child of the parent bus.
 *
 * @param parent parent bus the newly created device is attached to.
 * @param path path to the device to be created.
 *
 * @return pointer to the newly created device structure.
 */

/* Serializes device-tree and global-list mutation in alloc_dev(). */
static spinlock_t dev_lock = SPIN_LOCK_UNLOCKED;

device_t alloc_dev(struct bus *parent, struct device_path *path)
	/* NOTE(review): several statements of this function (declarations,
	 * the matching spin_lock, some braces and the return) are elided in
	 * this view — verify against the full source before editing. */
	/* Find the last child of our parent. */
	for (child = parent->children; child && child->sibling; /* */ ) {
		child = child->sibling;

	dev = malloc(sizeof(*dev));
		/* Allocation failure is fatal; die() does not return. */
		die("DEV: out of memory.\n");

	/* Zero the structure, then copy in the caller-supplied path. */
	memset(dev, 0, sizeof(*dev));
	memcpy(&dev->path, path, sizeof(*path));

	/* Initialize the back pointers in the link fields. */
	for (link = 0; link < MAX_LINKS; link++) {
		dev->link[link].dev = dev;
		dev->link[link].link = link;

	/* By default devices are enabled. */

	/* Add the new device to the list of children of the bus. */
	parent->children = dev;

	/* Append a new device to the global device list.
	 * The list is used to find devices once everything is set up.
	 */
	last_dev_p = &dev->next;

	spin_unlock(&dev_lock);
/**
 * @brief Round a number up to an alignment.
 * @param val the starting value
 * @param pow alignment expressed as a power of two (round up to 1 << pow)
 * @returns rounded up number
 */
static resource_t round(resource_t val, unsigned long pow)
	/* Low bits that must be clear in an aligned value. */
	mask = (1ULL << pow) - 1ULL;
/** Read the resources on all devices of a given bus.
 * @param bus bus to read the resources on.
 */
static void read_resources(struct bus *bus)
	struct device *curdev;

	printk_spew("%s %s bus %x link: %d\n", dev_path(bus->dev), __func__,
		    bus->secondary, bus->link);

	/* Walk through all devices and find which resources they need. */
	for (curdev = bus->children; curdev; curdev = curdev->sibling) {
		/* Skip devices that are not enabled. */
		if (!curdev->enabled) {
		/* Complain about devices lacking a read_resources() op. */
		if (!curdev->ops || !curdev->ops->read_resources) {
			printk_err("%s missing read_resources\n",
		curdev->ops->read_resources(curdev);

		/* Read in the resources behind the current device's links. */
		for (i = 0; i < curdev->links; i++)
			read_resources(&curdev->link[i]);
	printk_spew("%s read_resources bus %d link: %d done\n",
		    dev_path(bus->dev), bus->secondary, bus->link);
/* State threaded through search_bus_resources() by largest_resource();
 * see pick_largest_resource() for how the fields are updated. */
struct pick_largest_state {
	struct resource *last;		/* Resource picked on the previous pass. */
	struct device *result_dev;	/* Device owning the current best pick. */
	struct resource *result;	/* Current best pick itself. */
/* Visitor for search_bus_resources(): track the largest resource that
 * sorts strictly after state->last in (alignment, size) order, so repeated
 * calls enumerate resources from largest/most-aligned downwards. */
static void pick_largest_resource(void *gp, struct device *dev,
				  struct resource *resource)
	struct pick_largest_state *state = gp;
	struct resource *last;

	/* Be certain to pick the successor to last. */
	if (resource == last) {
		state->seen_last = 1;

	/* Fixed resources are placed elsewhere; never pick them here. */
	if (resource->flags & IORESOURCE_FIXED)

	/* Skip any resource that sorts at or before last in
	 * (align, size, identity) order. */
	if (last && ((last->align < resource->align) ||
		     ((last->align == resource->align) &&
		      (last->size < resource->size)) ||
		     ((last->align == resource->align) &&
		      (last->size == resource->size) && (!state->seen_last)))) {

	/* Keep the best candidate: highest alignment, then largest size. */
	if (!state->result ||
	    (state->result->align < resource->align) ||
	    ((state->result->align == resource->align) &&
	     (state->result->size < resource->size))) {
		state->result_dev = dev;
		state->result = resource;
/* Find the largest (type_mask, type)-matching resource on @bus that sorts
 * after *result_res; update *result_res and return the owning device, or
 * NULL when the bus is exhausted. */
static struct device *largest_resource(struct bus *bus,
				       struct resource **result_res,
				       unsigned long type_mask,
	struct pick_largest_state state;

	state.last = *result_res;
	state.result_dev = NULL;

	search_bus_resources(bus, type_mask, type, pick_largest_resource,

	*result_res = state.result;
	return state.result_dev;
/* compute_resources() is the guts of the resource allocator.
 *
 * The problem:
 *  - Allocate resource locations for every device.
 *  - Don't overlap, and follow the rules of bridges.
 *  - Don't overlap with resources in fixed locations.
 *  - Be efficient so we don't have ugly strategies.
 *
 * The strategy:
 * - Devices that have fixed addresses are the minority so don't
 *   worry about them too much. Instead only use part of the address
 *   space for devices with programmable addresses. This easily handles
 *   everything except bridges.
 *
 * - PCI devices are required to have their sizes and their alignments
 *   equal. In this case an optimal solution to the packing problem
 *   exists. Allocate all devices from highest alignment to least
 *   alignment or vice versa. Use this.
 *
 * - So we can handle more than PCI run two allocation passes on bridges. The
 *   first to see how large the resources are behind the bridge, and what
 *   their alignment requirements are. The second to assign a safe address to
 *   the devices behind the bridge. This allows us to treat a bridge as just
 *   a device with a couple of resources, and not need to special case it in
 *   the allocator. Also this allows handling of other types of bridges.
 */
static void compute_resources(struct bus *bus, struct resource *bridge,
			      unsigned long type_mask, unsigned long type)
	struct resource *resource;

	/* Start allocating at the bridge window base, suitably aligned. */
	base = round(bridge->base, bridge->align);

	printk_spew( "%s %s_%s: base: %llx size: %llx align: %d gran: %d limit: %llx\n",
		    dev_path(bus->dev), __func__,
		    (type & IORESOURCE_IO) ? "io" : (type & IORESOURCE_PREFETCH) ?
		    base, bridge->size, bridge->align, bridge->gran, bridge->limit);

	/* For each child which is a bridge, compute_resource_needs. */
	for (dev = bus->children; dev; dev = dev->sibling) {
		struct resource *child_bridge;

		/* Find the resources with matching type flags. */
		for (i = 0; i < dev->resources; i++) {
			child_bridge = &dev->resource[i];

			if (!(child_bridge->flags & IORESOURCE_BRIDGE) ||
			    (child_bridge->flags & type_mask) != type)

			/* Split prefetchable memory if combined. Many domains
			 * use the same address space for prefetchable memory
			 * and non-prefetchable memory. Bridges below them
			 * need it separated. Add the PREFETCH flag to the
			 * type_mask and type.
			 */
			link = IOINDEX_LINK(child_bridge->index);
			compute_resources(&dev->link[link], child_bridge,
					  type_mask | IORESOURCE_PREFETCH,
					  type | (child_bridge->flags &
						  IORESOURCE_PREFETCH));

	/* Remember we haven't found anything yet. */

	/* Walk through all the resources on the current bus and compute the
	 * amount of address space taken by them. Take granularity and
	 * alignment into account.
	 */
	while ((dev = largest_resource(bus, &resource, type_mask, type))) {

		/* Size 0 resources can be skipped. */
		if (!resource->size) {

		/* Propagate the resource alignment to the bridge resource. */
		if (resource->align > bridge->align) {
			bridge->align = resource->align;

		/* Propagate the resource limit to the bridge register. */
		if (bridge->limit > resource->limit) {
			bridge->limit = resource->limit;

		/* Warn if it looks like APICs aren't declared. */
		if ((resource->limit == 0xffffffff) &&
		    (resource->flags & IORESOURCE_ASSIGNED)) {
			printk_err("Resource limit looks wrong! (no APIC?)\n");
			printk_err("%s %02lx limit %08Lx\n", dev_path(dev),
				   resource->index, resource->limit);

		if (resource->flags & IORESOURCE_IO) {
			/* Don't allow potential aliases over the legacy PCI
			 * expansion card addresses. The legacy PCI decodes
			 * only 10 bits, uses 0x100 - 0x3ff. Therefore, only
			 * 0x00 - 0xff can be used out of each 0x400 block of
			 * I/O space.
			 */
			if ((base & 0x300) != 0) {
				base = (base & ~0x3ff) + 0x400;
			/* Don't allow allocations in the VGA I/O range.
			 * PCI has special cases for that.
			 */
			else if ((base >= 0x3b0) && (base <= 0x3df)) {

		/* Base must be aligned. */
		base = round(base, resource->align);
		resource->base = base;
		base += resource->size;

		printk_spew("%s %02lx * [0x%llx - 0x%llx] %s\n",
			    dev_path(dev), resource->index,
			    resource->base + resource->size - 1,
			    (resource->flags & IORESOURCE_IO) ? "io" :
			    (resource->flags & IORESOURCE_PREFETCH) ?

	/* A pci bridge resource does not need to be a power
	 * of two size, but it does have a minimum granularity.
	 * Round the size up to that minimum granularity so we
	 * know not to place something else at an address positively
	 * decoded by the bridge.
	 */
	bridge->size = round(base, bridge->gran) -
		       round(bridge->base, bridge->align);

	printk_spew("%s %s_%s: base: %llx size: %llx align: %d gran: %d limit: %llx done\n",
		    dev_path(bus->dev), __func__,
		    (bridge->flags & IORESOURCE_IO) ? "io" :
		    (bridge->flags & IORESOURCE_PREFETCH) ? "prefmem" : "mem",
		    base, bridge->size, bridge->align, bridge->gran, bridge->limit);
/**
 * This function is the second part of the resource allocator.
 *
 * The problem:
 *  - Allocate resource locations for every device.
 *  - Don't overlap, and follow the rules of bridges.
 *  - Don't overlap with resources in fixed locations.
 *  - Be efficient so we don't have ugly strategies.
 *
 * The strategy:
 * - Devices that have fixed addresses are the minority so don't
 *   worry about them too much. Instead only use part of the address
 *   space for devices with programmable addresses. This easily handles
 *   everything except bridges.
 *
 * - PCI devices are required to have their sizes and their alignments
 *   equal. In this case an optimal solution to the packing problem
 *   exists. Allocate all devices from highest alignment to least
 *   alignment or vice versa. Use this.
 *
 * - So we can handle more than PCI run two allocation passes on bridges. The
 *   first to see how large the resources are behind the bridge, and what
 *   their alignment requirements are. The second to assign a safe address to
 *   the devices behind the bridge. This allows us to treat a bridge as just
 *   a device with a couple of resources, and not need to special case it in
 *   the allocator. Also this allows handling of other types of bridges.
 *
 * - This function assigns the resources a value.
 *
 * @param bus The bus we are traversing.
 * @param bridge The bridge resource which must contain the bus' resources.
 * @param type_mask This value gets anded with the resource type.
 * @param type This value must match the result of the and.
 */
static void allocate_resources(struct bus *bus, struct resource *bridge,
			       unsigned long type_mask, unsigned long type)
	struct resource *resource;

	printk_spew("%s %s_%s: base:%llx size:%llx align:%d gran:%d limit:%llx\n",
		    dev_path(bus->dev), __func__,
		    (type & IORESOURCE_IO) ? "io" : (type & IORESOURCE_PREFETCH) ?
		    base, bridge->size, bridge->align, bridge->gran, bridge->limit);

	/* Remember we haven't found anything yet. */

	/* Walk through all the resources on the current bus and allocate them
	 */
	while ((dev = largest_resource(bus, &resource, type_mask, type))) {

		/* Propagate the bridge limit to the resource register. */
		if (resource->limit > bridge->limit) {
			resource->limit = bridge->limit;

		/* Size 0 resources can be skipped. */
		if (!resource->size) {
			/* Set the base to limit so it doesn't confuse tolm. */
			resource->base = resource->limit;
			resource->flags |= IORESOURCE_ASSIGNED;

		if (resource->flags & IORESOURCE_IO) {
			/* Don't allow potential aliases over the legacy PCI
			 * expansion card addresses. The legacy PCI decodes
			 * only 10 bits, uses 0x100 - 0x3ff. Therefore, only
			 * 0x00 - 0xff can be used out of each 0x400 block of
			 * I/O space.
			 */
			if ((base & 0x300) != 0) {
				base = (base & ~0x3ff) + 0x400;
			/* Don't allow allocations in the VGA I/O range.
			 * PCI has special cases for that.
			 */
			else if ((base >= 0x3b0) && (base <= 0x3df)) {

		/* Place the resource only if it fits below its limit;
		 * otherwise log a loud error and leave it unassigned. */
		if ((round(base, resource->align) + resource->size - 1) <=
			/* Base must be aligned. */
			base = round(base, resource->align);
			resource->base = base;
			resource->flags |= IORESOURCE_ASSIGNED;
			resource->flags &= ~IORESOURCE_STORED;
			base += resource->size;
			printk_err("!! Resource didn't fit !!\n");
			printk_err(" aligned base %llx size %llx limit %llx\n",
				   round(base, resource->align), resource->size,
			printk_err(" %llx needs to be <= %llx (limit)\n",
				   (round(base, resource->align) +
				    resource->size) - 1, resource->limit);
			printk_err(" %s%s %02lx * [0x%llx - 0x%llx] %s\n",
				   flags & IORESOURCE_ASSIGNED) ? "Assigned: " :
				   "", dev_path(dev), resource->index,
				   resource->base + resource->size - 1,
				   flags & IORESOURCE_IO) ? "io" : (resource->
				   ? "prefmem" : "mem");

		printk_spew("%s%s %02lx * [0x%llx - 0x%llx] %s\n",
			    (resource->flags & IORESOURCE_ASSIGNED) ? "Assigned: "
			    dev_path(dev), resource->index, resource->base,
			    resource->size ? resource->base + resource->size - 1 :
			    (resource->flags & IORESOURCE_IO) ? "io" :
			    (resource->flags & IORESOURCE_PREFETCH) ? "prefmem" :

	/* A PCI bridge resource does not need to be a power of two size, but
	 * it does have a minimum granularity. Round the size up to that
	 * minimum granularity so we know not to place something else at an
	 * address positively decoded by the bridge.
	 */
	bridge->flags |= IORESOURCE_ASSIGNED;

	printk_spew("%s %s_%s: next_base: %llx size: %llx align: %d gran: %d done\n",
		    dev_path(bus->dev), __func__,
		    (type & IORESOURCE_IO) ? "io" : (type & IORESOURCE_PREFETCH) ?
		    base, bridge->size, bridge->align, bridge->gran);

	/* For each child which is a bridge, allocate_resources. */
	for (dev = bus->children; dev; dev = dev->sibling) {
		struct resource *child_bridge;

		/* Find the resources with matching type flags. */
		for (i = 0; i < dev->resources; i++) {
			child_bridge = &dev->resource[i];

			if (!(child_bridge->flags & IORESOURCE_BRIDGE) ||
			    (child_bridge->flags & type_mask) != type)

			/* Split prefetchable memory if combined. Many domains
			 * use the same address space for prefetchable memory
			 * and non-prefetchable memory. Bridges below them
			 * need it separated. Add the PREFETCH flag to the
			 * type_mask and type.
			 */
			link = IOINDEX_LINK(child_bridge->index);
			allocate_resources(&dev->link[link], child_bridge,
					   type_mask | IORESOURCE_PREFETCH,
					   type | (child_bridge->flags &
						   IORESOURCE_PREFETCH));
/* Resource type masks/types used when walking the device tree.
 * NOTE(review): the #else/#endif of this conditional are elided in this
 * view; with 64-bit prefetchable memory support, MEM_MASK includes the
 * PREFETCH flag so prefetchable and non-prefetchable memory are kept in
 * the same pool — verify against the full source. */
#if CONFIG_PCI_64BIT_PREF_MEM == 1
#define MEM_MASK (IORESOURCE_PREFETCH | IORESOURCE_MEM)
#define MEM_MASK (IORESOURCE_MEM)
#define IO_MASK (IORESOURCE_IO)
#define PREF_TYPE (IORESOURCE_PREFETCH | IORESOURCE_MEM)
#define MEM_TYPE (IORESOURCE_MEM)
#define IO_TYPE (IORESOURCE_IO)
	/* Allowed address windows, one per resource class (prefetchable
	 * memory, I/O, memory); member of the enclosing constraints
	 * structure whose opening declaration is elided in this view. */
	struct resource pref, io, mem;
/* Walk dev and its enabled descendants, shrinking the per-type windows in
 * @limits so they avoid every fixed resource found along the way. */
static void constrain_resources(struct device *dev, struct constraints* limits)
	struct device *child;
	struct resource *res;
	struct resource *lim;

	printk_spew("%s: %s\n", __func__, dev_path(dev));

	/* Constrain limits based on the fixed resources of this device. */
	for (i = 0; i < dev->resources; i++) {
		res = &dev->resource[i];
		if (!(res->flags & IORESOURCE_FIXED))

		/* It makes no sense to have 0-sized, fixed resources. */
		printk_err("skipping %s@%lx fixed resource, size=0!\n",
			   dev_path(dev), res->index);

		/* PREFETCH, MEM, or I/O - skip any others. */
		if ((res->flags & MEM_MASK) == PREF_TYPE)
		else if ((res->flags & MEM_MASK) == MEM_TYPE)
		else if ((res->flags & IO_MASK) == IO_TYPE)

		/* Is it already outside the limits? */
		if (((res->base + res->size -1) < lim->base) || (res->base > lim->limit))

		/* Choose to be above or below fixed resources. This
		 * check is signed so that "negative" amounts of space
		 * are handled correctly.
		 */
		if ((signed long long)(lim->limit - (res->base + res->size -1)) >
		    (signed long long)(res->base - lim->base))
			lim->base = res->base + res->size;
			lim->limit = res->base -1;

	/* Descend into every enabled child and look for fixed resources. */
	for (i = 0; i < dev->links; i++)
		for (child = dev->link[i].children; child;
		     child = child->sibling)
			constrain_resources(child, limits);
/* Clamp dev's allocatable resource windows so they steer clear of all
 * fixed resources found anywhere below dev in the tree. */
static void avoid_fixed_resources(struct device *dev)
	struct constraints limits;
	struct resource *res;

	printk_spew("%s: %s\n", __func__, dev_path(dev));

	/* Initialize constraints to maximum size. */
	limits.pref.base = 0;
	limits.pref.limit = 0xffffffffffffffffULL;
	limits.io.limit = 0xffffffffffffffffULL;
	limits.mem.limit = 0xffffffffffffffffULL;

	/* Constrain the limits to dev's initial resources. */
	for (i = 0; i < dev->resources; i++) {
		res = &dev->resource[i];
		if ((res->flags & IORESOURCE_FIXED))
		printk_spew("%s:@%s %02lx limit %08Lx\n", __func__,
			    dev_path(dev), res->index, res->limit);
		/* Tighten each per-type limit to the smallest matching
		 * resource limit on dev itself. */
		if ((res->flags & MEM_MASK) == PREF_TYPE &&
		    (res->limit < limits.pref.limit))
			limits.pref.limit = res->limit;
		if ((res->flags & MEM_MASK) == MEM_TYPE &&
		    (res->limit < limits.mem.limit))
			limits.mem.limit = res->limit;
		if ((res->flags & IO_MASK) == IO_TYPE &&
		    (res->limit < limits.io.limit))
			limits.io.limit = res->limit;

	/* Look through the tree for fixed resources and update the limits. */
	constrain_resources(dev, &limits);

	/* Update dev's resources with new limits. */
	for (i = 0; i < dev->resources; i++) {
		struct resource *lim;
		res = &dev->resource[i];

		if ((res->flags & IORESOURCE_FIXED))

		/* PREFETCH, MEM, or I/O - skip any others. */
		if ((res->flags & MEM_MASK) == PREF_TYPE)
		else if ((res->flags & MEM_MASK) == MEM_TYPE)
		else if ((res->flags & IO_MASK) == IO_TYPE)

		printk_spew("%s2: %s@%02lx limit %08Lx\n", __func__,
			    dev_path(dev), res->index, res->limit);
		printk_spew("\tlim->base %08Lx lim->limit %08Lx\n",
			    lim->base, lim->limit);

		/* Is the resource outside the limits? */
		if (lim->base > res->base)
			res->base = lim->base;
		if (res->limit > lim->limit)
			res->limit = lim->limit;
#if CONFIG_VGA_BRIDGE_SETUP == 1

/* The device chosen as the primary VGA adapter, if any. */
device_t vga_pri = 0;

static void set_vga_bridge_bits(void)
	/*
	 * FIXME: Modify set_vga_bridge so it is less PCI centric!
	 * This function knows too much about PCI stuff, it should be just
	 * an iterator/visitor.
	 */

	/* FIXME: Handle the VGA palette snooping. */
	struct device *dev, *vga, *vga_onboard, *vga_first, *vga_last;

	/* Walk the global device list looking for display-class devices. */
	for (dev = all_devices; dev; dev = dev->next) {
		if (((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) &&
		    ((dev->class >> 8) != PCI_CLASS_DISPLAY_OTHER)) {
			if (dev->on_mainboard) {
			if (dev->on_mainboard) {
			/* It isn't safe to enable other VGA cards. */
			dev->command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO);

#if CONFIG_CONSOLE_VGA_ONBOARD_AT_FIRST == 1
	if (vga_onboard) // Will use on-board VGA as primary.
	if (!vga) // Will use last add-on adapter as primary.

	/* VGA is first add on card or the only onboard VGA. */
	printk_debug("Setting up VGA for %s\n", dev_path(vga));
	/* All legacy VGA cards have MEM & I/O space registers. */
	vga->command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_IO);

	/* Now walk up the bridges setting the VGA enable. */
		printk_debug("Setting PCI_BRIDGE_CTL_VGA for bridge %s\n",
		bus->bridge_ctrl |= PCI_BRIDGE_CTL_VGA;
		/* Stop when we reach the root (a bus that is its own parent). */
		bus = (bus == bus->dev->bus) ? 0 : bus->dev->bus;
/**
 * @brief Assign the computed resources to the devices on the bus.
 *
 * @param bus Pointer to the structure for this bus.
 *
 * Use the device specific set_resources() method to store the computed
 * resources to hardware. For bridge devices, the set_resources() method
 * has to recurse into every downstream bus.
 *
 * Indirect mutual recursion:
 *	assign_resources() -> device_operation::set_resources()
 *	device_operation::set_resources() -> assign_resources()
 */
void assign_resources(struct bus *bus)
	struct device *curdev;

	printk_spew("%s assign_resources, bus %d link: %d\n",
		    dev_path(bus->dev), bus->secondary, bus->link);

	for (curdev = bus->children; curdev; curdev = curdev->sibling) {
		/* Skip disabled devices and devices with no resources. */
		if (!curdev->enabled || !curdev->resources) {
		if (!curdev->ops || !curdev->ops->set_resources) {
			printk_err("%s missing set_resources\n",
		curdev->ops->set_resources(curdev);
	printk_spew("%s assign_resources, bus %d link: %d\n",
		    dev_path(bus->dev), bus->secondary, bus->link);
/**
 * @brief Enable the resources for a specific device.
 *
 * @param dev the device whose resources are to be enabled.
 *
 * Enable resources of the device by calling the device specific
 * enable_resources() method.
 *
 * The parent's resources should be enabled first to avoid an enabling
 * order problem. This is done by calling the parent's enable_resources()
 * method and letting that method call its children's enable_resources()
 * method via the (global) enable_childrens_resources().
 *
 * Indirect mutual recursion:
 *	enable_resources() -> device_operations::enable_resource()
 *	device_operations::enable_resource() -> enable_children_resources()
 *	enable_children_resources() -> enable_resources()
 */
void enable_resources(struct device *dev)
	/* Complain, but carry on, when a device lacks the op. */
	if (!dev->ops || !dev->ops->enable_resources) {
		printk_err("%s missing enable_resources\n", dev_path(dev));
	dev->ops->enable_resources(dev);
/**
 * @brief Reset all of the devices on a bus.
 *
 * Reset all of the devices on a bus and clear the bus's reset_needed flag.
 *
 * @param bus pointer to the bus structure.
 *
 * @return 1 if the bus was successfully reset, 0 otherwise.
 */
int reset_bus(struct bus *bus)
	/* Only buses whose device implements reset_bus() can be reset. */
	if (bus && bus->dev && bus->dev->ops && bus->dev->ops->reset_bus) {
		bus->dev->ops->reset_bus(bus);
		bus->reset_needed = 0;
/**
 * @brief Scan for devices on a bus.
 *
 * If there are bridges on the bus, recursively scan the buses behind the
 * bridges. If the setting up and tuning of the bus causes a reset to be
 * required, reset the bus and scan it again.
 *
 * @param busdev Pointer to the bus device.
 * @param max Current bus number.
 * @return The maximum bus number found, after scanning all subordinate buses.
 */
unsigned int scan_bus(struct device *busdev, unsigned int max)
	unsigned int new_max;

	/* Devices without a scan_bus() op have nothing behind them to scan. */
	if (!busdev || !busdev->enabled || !busdev->ops ||
	    !busdev->ops->scan_bus) {

	/* Rescan while a reset is pending. */
	while (do_scan_bus) {
		new_max = busdev->ops->scan_bus(busdev, max);

		for (link = 0; link < busdev->links; link++) {
			if (busdev->link[link].reset_needed) {
				if (reset_bus(&busdev->link[link])) {
					/* NOTE(review): branch lines are
					 * elided here — verify which path
					 * escalates reset_needed upward. */
					busdev->bus->reset_needed = 1;
/**
 * @brief Determine the existence of devices and extend the device tree.
 *
 * Most of the devices in the system are listed in the mainboard Config.lb
 * file. The device structures for these devices are generated at compile
 * time by the config tool and are organized into the device tree. This
 * function determines if the devices created at compile time actually exist
 * in the physical system.
 *
 * For devices in the physical system but not listed in the Config.lb file,
 * the device structures have to be created at run time and attached to the
 * device tree.
 *
 * This function starts from the root device 'dev_root', scans the buses in
 * the system recursively, and modifies the device tree according to the
 * result of the probing.
 *
 * This function has no idea how to scan and probe buses and devices at all.
 * It depends on the bus/device specific scan_bus() method to do it. The
 * scan_bus() method also has to create the device structure and attach
 * it to the device tree.
 */
void dev_enumerate(void)
	printk_info("Enumerating buses...\n");

	show_all_devs(BIOS_SPEW, "Before Device Enumeration.");
	printk_spew("Compare with tree...\n");
	show_devs_tree(root, BIOS_SPEW, 0, 0);

	/* Give the root chip a chance to enable devices before scanning. */
	if (root->chip_ops && root->chip_ops->enable_dev) {
		root->chip_ops->enable_dev(root);

	if (!root->ops || !root->ops->scan_bus) {
		printk_err("dev_root missing scan_bus operation");

	printk_info("done\n");
/**
 * @brief Configure devices on the device tree.
 *
 * Starting at the root of the device tree, travel it recursively in two
 * passes. In the first pass, we compute and allocate resources (ranges)
 * required by each device. In the second pass, the resource ranges are
 * relocated to their final position and stored to the hardware.
 *
 * I/O resources grow upward. MEM resources grow downward.
 *
 * Since the assignment is hierarchical we set the values into the dev_root
 */
void dev_configure(void)
	struct resource *res;
	struct device *child;

#if CONFIG_VGA_BRIDGE_SETUP == 1
	set_vga_bridge_bits();

	printk_info("Allocating resources...\n");

	/* Each domain should create resources which contain the entire address
	 * space for IO, MEM, and PREFMEM resources in the domain. The
	 * allocation of device resources will be done from this address space.
	 */

	/* Read the resources for the entire tree. */

	printk_info("Reading resources...\n");
	read_resources(&root->link[0]);
	printk_info("Done reading resources.\n");

	print_resource_tree(root, BIOS_SPEW, "After reading.");

	/* Compute resources for all domains. */
	for (child = root->link[0].children; child; child = child->sibling) {
		if (!(child->path.type == DEVICE_PATH_PCI_DOMAIN))
		for (i = 0; i < child->resources; i++) {
			res = &child->resource[i];
			if (res->flags & IORESOURCE_FIXED)
			if (res->flags & IORESOURCE_PREFETCH) {
				compute_resources(&child->link[0],
						  res, MEM_MASK, PREF_TYPE);
			if (res->flags & IORESOURCE_MEM) {
				compute_resources(&child->link[0],
						  res, MEM_MASK, MEM_TYPE);
			if (res->flags & IORESOURCE_IO) {
				compute_resources(&child->link[0],
						  res, IO_MASK, IO_TYPE);

	/* For all domains. */
	for (child = root->link[0].children; child; child=child->sibling)
		if (child->path.type == DEVICE_PATH_PCI_DOMAIN)
			avoid_fixed_resources(child);

	/* Now we need to adjust the resources. MEM resources need to start at
	 * the highest address manageable.
	 */
	for (child = root->link[0].children; child; child = child->sibling) {
		if (child->path.type != DEVICE_PATH_PCI_DOMAIN)
		for (i = 0; i < child->resources; i++) {
			res = &child->resource[i];
			if (!(res->flags & IORESOURCE_MEM) ||
			    res->flags & IORESOURCE_FIXED)
			res->base = resource_max(res);

	/* Store the computed resource allocations into device registers ... */
	printk_info("Setting resources...\n");
	for (child = root->link[0].children; child; child = child->sibling) {
		if (!(child->path.type == DEVICE_PATH_PCI_DOMAIN))
		for (i = 0; i < child->resources; i++) {
			res = &child->resource[i];
			if (res->flags & IORESOURCE_FIXED)
			if (res->flags & IORESOURCE_PREFETCH) {
				allocate_resources(&child->link[0],
						   res, MEM_MASK, PREF_TYPE);
			if (res->flags & IORESOURCE_MEM) {
				allocate_resources(&child->link[0],
						   res, MEM_MASK, MEM_TYPE);
			if (res->flags & IORESOURCE_IO) {
				allocate_resources(&child->link[0],
						   res, IO_MASK, IO_TYPE);

	assign_resources(&root->link[0]);
	printk_info("Done setting resources.\n");
	print_resource_tree(root, BIOS_SPEW, "After assigning values.");

	printk_info("Done allocating resources.\n");
/**
 * @brief Enable devices on the device tree.
 *
 * Starting at the root, walk the tree and enable all devices/bridges by
 * calling the device's enable_resources() method.
 */
void dev_enable(void)
	printk_info("Enabling resources...\n");

	/* Now enable everything. */
	enable_resources(&dev_root);
	printk_info("done.\n");
/**
 * @brief Initialize all devices in the global device list.
 *
 * Starting at the first device on the global device link list,
 * walk the list and call the device's init() method to do device
 * initialization.
 */
void dev_initialize(void)
	printk_info("Initializing devices...\n");
	for (dev = all_devices; dev; dev = dev->next) {
		/* Only init enabled, not-yet-initialized devices that
		 * provide an init() method. */
		if (dev->enabled && !dev->initialized &&
		    dev->ops && dev->ops->init) {
			if (dev->path.type == DEVICE_PATH_I2C) {
				/* Log the parent SMBus controller for I2C
				 * devices. */
				printk_debug("smbus: %s[%d]->",
					     dev_path(dev->bus->dev),
			printk_debug("%s init\n", dev_path(dev));
			/* Mark initialized before invoking init(). */
			dev->initialized = 1;
			dev->ops->init(dev);
	printk_info("Devices initialized\n");
	show_all_devs(BIOS_SPEW, "After init.");