/*
 * Low-Level PCI Support for PC
 *
 * (c) 1999--2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/sched.h>
|
|
|
|
#include <linux/pci.h>
|
2013-04-12 13:44:23 +08:00
|
|
|
#include <linux/pci-acpi.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/init.h>
|
2006-02-18 17:36:55 +08:00
|
|
|
#include <linux/dmi.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#include <asm/acpi.h>
|
|
|
|
#include <asm/segment.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/smp.h>
|
2008-12-27 21:02:28 +08:00
|
|
|
#include <asm/pci_x86.h>
|
2012-12-06 05:33:27 +08:00
|
|
|
#include <asm/setup.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
|
|
|
|
PCI_PROBE_MMCONF;
|
|
|
|
|
2008-05-23 05:35:11 +08:00
|
|
|
unsigned int pci_early_dump_regs;
|
2006-11-16 20:16:23 +08:00
|
|
|
static int pci_bf_sort;
|
2005-04-17 06:20:36 +08:00
|
|
|
int pci_routeirq;
|
2008-06-11 22:35:14 +08:00
|
|
|
int noioapicquirk;
|
2008-07-15 19:48:55 +08:00
|
|
|
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
|
|
|
|
int noioapicreroute = 0;
|
|
|
|
#else
|
2008-06-11 22:35:15 +08:00
|
|
|
int noioapicreroute = 1;
|
2008-07-15 19:48:55 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
int pcibios_last_bus = -1;
|
2005-03-22 12:20:42 +08:00
|
|
|
unsigned long pirq_table_addr;
|
2011-09-15 15:58:51 +08:00
|
|
|
const struct pci_raw_ops *__read_mostly raw_pci_ops;
|
|
|
|
const struct pci_raw_ops *__read_mostly raw_pci_ext_ops;
|
2008-02-10 22:45:28 +08:00
|
|
|
|
|
|
|
/*
 * Read a PCI config value, dispatching between the legacy and extended
 * accessors.  Domain 0 accesses below offset 256 use raw_pci_ops when
 * available; everything else requires raw_pci_ext_ops.
 *
 * Returns the accessor's result, or -EINVAL if no suitable accessor is
 * registered.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 *val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}
|
|
|
|
|
|
|
|
/*
 * Write a PCI config value; dispatch mirrors raw_pci_read() above.
 *
 * Returns the accessor's result, or -EINVAL if no suitable accessor is
 * registered.
 */
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* struct pci_ops ->read hook: forward to the raw accessor dispatcher. */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}
|
|
|
|
|
|
|
|
/* struct pci_ops ->write hook: forward to the raw accessor dispatcher. */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}
|
|
|
|
|
|
|
|
struct pci_ops pci_root_ops = {
|
|
|
|
.read = pci_read,
|
|
|
|
.write = pci_write,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * This interrupt-safe spinlock protects all accesses to PCI configuration
 * space, except for the mmconfig (ECAM) based operations.
 */
DEFINE_RAW_SPINLOCK(pci_config_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
/*
 * DMI callback: mark this platform as one where ISA alignment of PCI I/O
 * resources can be skipped.  Always returns 0 so dmi_check_system() keeps
 * scanning the table.
 */
static int __init can_skip_ioresource_align(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
	printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
	return 0;
}
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __initconst = {
	/*
	 * Systems where PCI IO resource ISA alignment can be skipped
	 * when the ISA enable bit in the bridge control is not set
	 */
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3850",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
		},
	},
	{}	/* terminator */
};
|
|
|
|
|
|
|
|
/* Apply the ISA-alignment-skip DMI quirk table above at init time. */
void __init dmi_check_skip_isa_align(void)
{
	dmi_check_system(can_skip_pciprobe_dmi_table);
}
|
|
|
|
|
2012-12-22 06:02:53 +08:00
|
|
|
static void pcibios_fixup_device_resources(struct pci_dev *dev)
|
2008-05-13 04:57:46 +08:00
|
|
|
{
|
|
|
|
struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
|
2010-05-13 02:14:32 +08:00
|
|
|
struct resource *bar_r;
|
|
|
|
int bar;
|
|
|
|
|
|
|
|
if (pci_probe & PCI_NOASSIGN_BARS) {
|
|
|
|
/*
|
|
|
|
* If the BIOS did not assign the BAR, zero out the
|
2016-06-11 08:05:09 +08:00
|
|
|
* resource so the kernel doesn't attempt to assign
|
2010-05-13 02:14:32 +08:00
|
|
|
* it later on in pci_assign_unassigned_resources
|
|
|
|
*/
|
|
|
|
for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++) {
|
|
|
|
bar_r = &dev->resource[bar];
|
|
|
|
if (bar_r->start == 0 && bar_r->end != 0) {
|
|
|
|
bar_r->flags = 0;
|
|
|
|
bar_r->end = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-05-13 04:57:46 +08:00
|
|
|
|
|
|
|
if (pci_probe & PCI_NOASSIGN_ROMS) {
|
|
|
|
if (rom_r->parent)
|
|
|
|
return;
|
|
|
|
if (rom_r->start) {
|
|
|
|
/* we deal with BIOS assigned ROM later */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
rom_r->start = rom_r->end = rom_r->flags = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Called after each bus is probed, but before its children
|
|
|
|
* are examined.
|
|
|
|
*/
|
|
|
|
|
2012-12-22 06:02:53 +08:00
|
|
|
void pcibios_fixup_bus(struct pci_bus *b)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2008-05-13 04:57:46 +08:00
|
|
|
struct pci_dev *dev;
|
|
|
|
|
PCI: Revert "PCI: Call pci_read_bridge_bases() from core instead of arch code"
Revert dff22d2054b5 ("PCI: Call pci_read_bridge_bases() from core instead
of arch code").
Reading PCI bridge windows is not arch-specific in itself, but there is PCI
core code that doesn't work correctly if we read them too early. For
example, Hannes found this case on an ARM Freescale i.mx6 board:
pci_bus 0000:00: root bus resource [mem 0x01000000-0x01efffff]
pci 0000:00:00.0: PCI bridge to [bus 01-ff]
pci 0000:00:00.0: BAR 8: no space for [mem size 0x01000000] (mem window)
pci 0000:01:00.0: BAR 2: failed to assign [mem size 0x00200000]
pci 0000:01:00.0: BAR 1: failed to assign [mem size 0x00004000]
pci 0000:01:00.0: BAR 0: failed to assign [mem size 0x00000100]
The 00:00.0 mem window needs to be at least 3MB: the 01:00.0 device needs
0x204100 of space, and mem windows are megabyte-aligned.
Bus sizing can increase a bridge window size, but never *decrease* it (see
d65245c3297a ("PCI: don't shrink bridge resources")). Prior to
dff22d2054b5, ARM didn't read bridge windows at all, so the "original size"
was zero, and we assigned a 3MB window.
After dff22d2054b5, we read the bridge windows before sizing the bus. The
firmware programmed a 16MB window (size 0x01000000) in 00:00.0, and since
we never decrease the size, we kept 16MB even though we only needed 3MB.
But 16MB doesn't fit in the host bridge aperture, so we failed to assign
space for the window and the downstream devices.
I think this is a defect in the PCI core: we shouldn't rely on the firmware
to assign sensible windows.
Ray reported a similar problem, also on ARM, with Broadcom iProc.
Issues like this are too hard to fix right now, so revert dff22d2054b5.
Reported-by: Hannes <oe5hpm@gmail.com>
Reported-by: Ray Jui <rjui@broadcom.com>
Link: http://lkml.kernel.org/r/CAAa04yFQEUJm7Jj1qMT57-LG7ZGtnhNDBe=PpSRa70Mj+XhW-A@mail.gmail.com
Link: http://lkml.kernel.org/r/55F75BB8.4070405@broadcom.com
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
2015-09-16 02:18:04 +08:00
|
|
|
pci_read_bridge_bases(b);
|
2008-05-13 04:57:46 +08:00
|
|
|
list_for_each_entry(dev, &b->devices, bus_list)
|
|
|
|
pcibios_fixup_device_resources(dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2013-04-12 13:44:23 +08:00
|
|
|
/* Arch hook: notify ACPI when a PCI bus is added. */
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}
|
|
|
|
|
|
|
|
/* Arch hook: notify ACPI when a PCI bus is removed. */
void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
|
|
|
|
|
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are
labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and
in the printed documentation. Assuming no other add-in ethernet ports
in the system, Linux 2.4 kernels name these eth0 and eth1
respectively. Many people have come to expect this naming. Linux 2.6
kernels name these eth1 and eth0 respectively (backwards from
expectations). I also have reports that various Sun and HP servers
have similar behavior.
Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be
sorted in breadth-first order (or pcbios_find_device order on i386,
which most often is breadth-first also). 2.6 kernels have both the
pci_devices list and the pci_bus_type.klist_devices list, the latter
is what is walked at driver load time to match the pci_id tables; this
klist happens to be in depth-first order.
On systems where, for physical routing reasons, NIC1 appears on a
lower bus number than NIC2, but NIC2's bridge is discovered first in
the depth-first ordering, NIC2 will be discovered before NIC1. If the
list were sorted breadth-first, NIC1 would be discovered before NIC2.
A PowerEdge 1955 system has the following topology which easily
exhibits the difference between depth-first and breadth-first device
lists.
-[0000:00]-+-00.0 Intel Corporation 5000P Chipset Memory Controller Hub
+-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
+-1c.0-[0000:01-02]----00.0-[0000:02]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)
Other factors, such as device driver load order and the presence of
PCI slots at various points in the bus hierarchy further complicate
this problem; I'm not trying to solve those here, just restore the
device order, and thus basic behavior, that 2.4 kernels had.
Solution:
The solution can come in multiple steps.
Suggested fix #1: kernel
Patch below optionally sorts the two device lists into breadth-first
ordering to maintain compatibility with 2.4 kernels. It adds two new
command line options:
pci=bfsort
pci=nobfsort
to force the sort order, or not, as you wish. It also adds DMI checks
for the specific Dell systems which exhibit "backwards" ordering, to
make them "right".
Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always
discovered before add-in NICs (which this patch does not try to do).
Using the PCI IRQ Routing Table provided by system BIOS, it's easy to
determine which PCI devices are embedded, or if add-in, which PCI slot
they're in. I'm working on a tool that would allow udev to name
ethernet devices in ascending embedded, slot 1 .. slot N order,
subsort by PCI bus/dev/fn breadth-first. It'll be possible to use it
independent of udev as well for those distributions that don't use
udev in their installers.
Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2
regardless of breadth-first or depth-first discovery order. This adds
a significant level of complexity to board routing, and may not be
possible in all instances (witness the above systems from several
major manufacturers). I don't want to encourage this particular train
of thought too far, at the expense of not doing #1 or #2 above.
Feedback appreciated. Patch tested on a Dell PowerEdge 1955 blade
with 2.6.18.
You'll also note I took some liberty and temporarily break the klist
abstraction to simplify and speed up the sort algorithm. I think
that's both safe and appropriate in this instance.
Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-09-30 04:23:23 +08:00
|
|
|
/*
|
|
|
|
* Only use DMI information to set this if nothing was passed
|
|
|
|
* on the kernel command line (which was parsed earlier).
|
|
|
|
*/
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
static int __init set_bf_sort(const struct dmi_system_id *d)
|
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are
labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and
in the printed documentation. Assuming no other add-in ethernet ports
in the system, Linux 2.4 kernels name these eth0 and eth1
respectively. Many people have come to expect this naming. Linux 2.6
kernels name these eth1 and eth0 respectively (backwards from
expectations). I also have reports that various Sun and HP servers
have similar behavior.
Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be
sorted in breadth-first order (or pcbios_find_device order on i386,
which most often is breadth-first also). 2.6 kernels have both the
pci_devices list and the pci_bus_type.klist_devices list, the latter
is what is walked at driver load time to match the pci_id tables; this
klist happens to be in depth-first order.
On systems where, for physical routing reasons, NIC1 appears on a
lower bus number than NIC2, but NIC2's bridge is discovered first in
the depth-first ordering, NIC2 will be discovered before NIC1. If the
list were sorted breadth-first, NIC1 would be discovered before NIC2.
A PowerEdge 1955 system has the following topology which easily
exhibits the difference between depth-first and breadth-first device
lists.
-[0000:00]-+-00.0 Intel Corporation 5000P Chipset Memory Controller Hub
+-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
+-1c.0-[0000:01-02]----00.0-[0000:02]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)
Other factors, such as device driver load order and the presence of
PCI slots at various points in the bus hierarchy further complicate
this problem; I'm not trying to solve those here, just restore the
device order, and thus basic behavior, that 2.4 kernels had.
Solution:
The solution can come in multiple steps.
Suggested fix #1: kernel
Patch below optionally sorts the two device lists into breadth-first
ordering to maintain compatibility with 2.4 kernels. It adds two new
command line options:
pci=bfsort
pci=nobfsort
to force the sort order, or not, as you wish. It also adds DMI checks
for the specific Dell systems which exhibit "backwards" ordering, to
make them "right".
Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always
discovered before add-in NICs (which this patch does not try to do).
Using the PCI IRQ Routing Table provided by system BIOS, it's easy to
determine which PCI devices are embedded, or if add-in, which PCI slot
they're in. I'm working on a tool that would allow udev to name
ethernet devices in ascending embedded, slot 1 .. slot N order,
subsort by PCI bus/dev/fn breadth-first. It'll be possible to use it
independent of udev as well for those distributions that don't use
udev in their installers.
Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2
regardless of breadth-first or depth-first discovery order. This adds
a significant level of complexity to board routing, and may not be
possible in all instances (witness the above systems from several
major manufacturers). I don't want to encourage this particular train
of thought too far, at the expense of not doing #1 or #2 above.
Feedback appreciated. Patch tested on a Dell PowerEdge 1955 blade
with 2.6.18.
You'll also note I took some liberty and temporarily break the klist
abstraction to simplify and speed up the sort algorithm. I think
that's both safe and appropriate in this instance.
Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-09-30 04:23:23 +08:00
|
|
|
{
|
|
|
|
if (pci_bf_sort == pci_bf_sort_default) {
|
|
|
|
pci_bf_sort = pci_dmi_bf;
|
|
|
|
printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
/*
 * dmi_walk() callback: inspect OEM-specific DMI record type 0xB1 and
 * enable breadth-first sorting when its flag field requests it.
 * NOTE(review): the bit layout (bits 9-10 == 0x01) is vendor-specific —
 * confirm against the platform's DMI specification.
 */
static void __init read_dmi_type_b1(const struct dmi_header *dm,
				    void *private_data)
{
	u8 *data = (u8 *)dm + 4;	/* skip the 4-byte DMI record header */

	if (dm->type != 0xB1)
		return;
	if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
		set_bf_sort((const struct dmi_system_id *)private_data);
}
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
/*
 * DMI callback: walk all DMI records looking for the OEM type-0xB1 entry
 * that selects breadth-first sorting.  Always returns 0 so the table scan
 * continues.
 */
static int __init find_sort_method(const struct dmi_system_id *d)
{
	dmi_walk(read_dmi_type_b1, (void *)d);
	return 0;
}
|
|
|
|
|
2006-02-18 17:36:55 +08:00
|
|
|
/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __init assign_all_busses(const struct dmi_system_id *d)
{
	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
			" (pci=assign-busses)\n", d->ident);
	return 0;
}
#endif
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
/*
 * DMI callback: force scanning of all PCIe device functions (equivalent
 * to booting with pci=pcie_scan_all).
 */
static int __init set_scan_all(const struct dmi_system_id *d)
{
	printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
	       d->ident);
	pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
	return 0;
}
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
|
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are
labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and
in the printed documentation. Assuming no other add-in ethernet ports
in the system, Linux 2.4 kernels name these eth0 and eth1
respectively. Many people have come to expect this naming. Linux 2.6
kernels name these eth1 and eth0 respectively (backwards from
expectations). I also have reports that various Sun and HP servers
have similar behavior.
Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be
sorted in breadth-first order (or pcbios_find_device order on i386,
which most often is breadth-first also). 2.6 kernels have both the
pci_devices list and the pci_bus_type.klist_devices list, the latter
is what is walked at driver load time to match the pci_id tables; this
klist happens to be in depth-first order.
On systems where, for physical routing reasons, NIC1 appears on a
lower bus number than NIC2, but NIC2's bridge is discovered first in
the depth-first ordering, NIC2 will be discovered before NIC1. If the
list were sorted breadth-first, NIC1 would be discovered before NIC2.
A PowerEdge 1955 system has the following topology which easily
exhibits the difference between depth-first and breadth-first device
lists.
-[0000:00]-+-00.0 Intel Corporation 5000P Chipset Memory Controller Hub
+-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
+-1c.0-[0000:01-02]----00.0-[0000:02]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)
Other factors, such as device driver load order and the presence of
PCI slots at various points in the bus hierarchy further complicate
this problem; I'm not trying to solve those here, just restore the
device order, and thus basic behavior, that 2.4 kernels had.
Solution:
The solution can come in multiple steps.
Suggested fix #1: kernel
Patch below optionally sorts the two device lists into breadth-first
ordering to maintain compatibility with 2.4 kernels. It adds two new
command line options:
pci=bfsort
pci=nobfsort
to force the sort order, or not, as you wish. It also adds DMI checks
for the specific Dell systems which exhibit "backwards" ordering, to
make them "right".
Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always
discovered before add-in NICs (which this patch does not try to do).
Using the PCI IRQ Routing Table provided by system BIOS, it's easy to
determine which PCI devices are embedded, or if add-in, which PCI slot
they're in. I'm working on a tool that would allow udev to name
ethernet devices in ascending embedded, slot 1 .. slot N order,
subsort by PCI bus/dev/fn breadth-first. It'll be possible to use it
independent of udev as well for those distributions that don't use
udev in their installers.
Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2
regardless of breadth-first or depth-first discovery order. This adds
a significant level of complexity to board routing, and may not be
possible in all instances (witness the above systems from several
major manufacturers). I don't want to encourage this particular train
of thought too far, at the expense of not doing #1 or #2 above.
Feedback appreciated. Patch tested on a Dell PowerEdge 1955 blade
with 2.6.18.
You'll also note I took some liberty and temporarily break the klist
abstraction to simplify and speed up the sort algorithm. I think
that's both safe and appropriate in this instance.
Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-09-30 04:23:23 +08:00
|
|
|
#ifdef __i386__
|
2006-02-18 17:36:55 +08:00
|
|
|
/*
|
|
|
|
* Laptops which need pci=assign-busses to see Cardbus cards
|
|
|
|
*/
|
|
|
|
{
|
|
|
|
.callback = assign_all_busses,
|
|
|
|
.ident = "Samsung X20 Laptop",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
#endif /* __i386__ */
|
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are
labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and
in the printed documentation. Assuming no other add-in ethernet ports
in the system, Linux 2.4 kernels name these eth0 and eth1
respectively. Many people have come to expect this naming. Linux 2.6
kernels name these eth1 and eth0 respectively (backwards from
expectations). I also have reports that various Sun and HP servers
have similar behavior.
Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be
sorted in breadth-first order (or pcbios_find_device order on i386,
which most often is breadth-first also). 2.6 kernels have both the
pci_devices list and the pci_bus_type.klist_devices list, the latter
is what is walked at driver load time to match the pci_id tables; this
klist happens to be in depth-first order.
On systems where, for physical routing reasons, NIC1 appears on a
lower bus number than NIC2, but NIC2's bridge is discovered first in
the depth-first ordering, NIC2 will be discovered before NIC1. If the
list were sorted breadth-first, NIC1 would be discovered before NIC2.
A PowerEdge 1955 system has the following topology which easily
exhibits the difference between depth-first and breadth-first device
lists.
-[0000:00]-+-00.0 Intel Corporation 5000P Chipset Memory Controller Hub
+-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
+-1c.0-[0000:01-02]----00.0-[0000:02]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)
Other factors, such as device driver load order and the presence of
PCI slots at various points in the bus hierarchy further complicate
this problem; I'm not trying to solve those here, just restore the
device order, and thus basic behavior, that 2.4 kernels had.
Solution:
The solution can come in multiple steps.
Suggested fix #1: kernel
Patch below optionally sorts the two device lists into breadth-first
ordering to maintain compatibility with 2.4 kernels. It adds two new
command line options:
pci=bfsort
pci=nobfsort
to force the sort order, or not, as you wish. It also adds DMI checks
for the specific Dell systems which exhibit "backwards" ordering, to
make them "right".
Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always
discovered before add-in NICs (which this patch does not try to do).
Using the PCI IRQ Routing Table provided by system BIOS, it's easy to
determine which PCI devices are embedded, or if add-in, which PCI slot
they're in. I'm working on a tool that would allow udev to name
ethernet devices in ascending embedded, slot 1 .. slot N order,
subsort by PCI bus/dev/fn breadth-first. It'll be possible to use it
independent of udev as well for those distributions that don't use
udev in their installers.
Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2
regardless of breadth-first or depth-first discovery order. This adds
a significant level of complexity to board routing, and may not be
possible in all instances (witness the above systems from several
major manufacturers). I don't want to encourage this particular train
of thought too far, at the expense of not doing #1 or #2 above.
Feedback appreciated. Patch tested on a Dell PowerEdge 1955 blade
with 2.6.18.
You'll also note I took some liberty and temporarily break the klist
abstraction to simplify and speed up the sort algorithm. I think
that's both safe and appropriate in this instance.
Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-09-30 04:23:23 +08:00
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "Dell PowerEdge 1950",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "Dell PowerEdge 1955",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "Dell PowerEdge 2900",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "Dell PowerEdge 2950",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
|
|
|
|
},
|
|
|
|
},
|
2007-03-24 12:58:07 +08:00
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "Dell PowerEdge R900",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
|
|
|
|
},
|
|
|
|
},
|
2011-03-19 01:22:14 +08:00
|
|
|
{
|
|
|
|
.callback = find_sort_method,
|
|
|
|
.ident = "Dell System",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
|
|
|
|
},
|
|
|
|
},
|
2007-02-06 08:36:10 +08:00
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL20p G3",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL20p G4",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL30p G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL25p G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL35p G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL45p G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL45p G2",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL460c G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL465c G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL480c G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
|
|
|
.ident = "HP ProLiant BL685c G1",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
|
|
|
|
},
|
|
|
|
},
|
2007-10-18 00:04:35 +08:00
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
2008-05-16 02:40:14 +08:00
|
|
|
.ident = "HP ProLiant DL360",
|
2007-10-18 00:04:35 +08:00
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
2008-05-16 02:40:14 +08:00
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
|
2007-10-18 00:04:35 +08:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
2008-05-16 02:40:14 +08:00
|
|
|
.ident = "HP ProLiant DL380",
|
2007-10-18 00:04:35 +08:00
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
2008-05-16 02:40:14 +08:00
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
|
2007-10-18 00:04:35 +08:00
|
|
|
},
|
|
|
|
},
|
2007-09-14 02:21:34 +08:00
|
|
|
#ifdef __i386__
|
|
|
|
{
|
|
|
|
.callback = assign_all_busses,
|
|
|
|
.ident = "Compaq EVO N800c",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
#endif
|
2007-11-27 03:42:19 +08:00
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
2008-07-08 00:55:26 +08:00
|
|
|
.ident = "HP ProLiant DL385 G2",
|
2007-11-27 03:42:19 +08:00
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
2008-07-08 00:55:26 +08:00
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
|
2007-11-27 03:42:19 +08:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_bf_sort,
|
2008-07-08 00:55:26 +08:00
|
|
|
.ident = "HP ProLiant DL585 G2",
|
2007-11-27 03:42:19 +08:00
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
|
2008-07-08 00:55:26 +08:00
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
|
2007-11-27 03:42:19 +08:00
|
|
|
},
|
|
|
|
},
|
2012-05-01 05:21:02 +08:00
|
|
|
{
|
|
|
|
.callback = set_scan_all,
|
|
|
|
.ident = "Stratus/NEC ftServer",
|
|
|
|
.matches = {
|
2012-12-27 01:39:23 +08:00
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "Stratus"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
|
2012-05-01 05:21:02 +08:00
|
|
|
},
|
|
|
|
},
|
2015-02-02 23:36:23 +08:00
|
|
|
{
|
|
|
|
.callback = set_scan_all,
|
|
|
|
.ident = "Stratus/NEC ftServer",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.callback = set_scan_all,
|
|
|
|
.ident = "Stratus/NEC ftServer",
|
|
|
|
.matches = {
|
|
|
|
DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
|
|
|
|
DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
|
|
|
|
},
|
|
|
|
},
|
2006-02-18 17:36:55 +08:00
|
|
|
{}
|
|
|
|
};
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-04-15 06:40:37 +08:00
|
|
|
void __init dmi_check_pciprobe(void)
|
|
|
|
{
|
|
|
|
dmi_check_system(pciprobe_dmi_table);
|
|
|
|
}
|
|
|
|
|
2014-01-29 07:40:36 +08:00
|
|
|
void pcibios_scan_root(int busnum)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2014-01-25 02:52:25 +08:00
|
|
|
struct pci_bus *bus;
|
|
|
|
struct pci_sysdata *sd;
|
|
|
|
LIST_HEAD(resources);
|
|
|
|
|
|
|
|
sd = kzalloc(sizeof(*sd), GFP_KERNEL);
|
|
|
|
if (!sd) {
|
|
|
|
printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busnum);
|
2014-01-29 07:40:36 +08:00
|
|
|
return;
|
2014-01-25 02:52:25 +08:00
|
|
|
}
|
2014-01-25 02:54:51 +08:00
|
|
|
sd->node = x86_pci_root_bus_node(busnum);
|
2014-01-25 02:52:25 +08:00
|
|
|
x86_pci_root_bus_resources(busnum, &resources);
|
|
|
|
printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
|
|
|
|
bus = pci_scan_root_bus(NULL, busnum, &pci_root_ops, sd, &resources);
|
|
|
|
if (!bus) {
|
|
|
|
pci_free_resource_list(&resources);
|
|
|
|
kfree(sd);
|
PCI: Assign resources before drivers claim devices (pci_scan_root_bus())
Previously, pci_scan_root_bus() created a root PCI bus, enumerated the
devices on it, and called pci_bus_add_devices(), which made the devices
available for drivers to claim them.
Most callers assigned resources to devices after pci_scan_root_bus()
returns, which may be after drivers have claimed the devices. This is
incorrect; the PCI core should not change device resources while a driver
is managing the device.
Remove pci_bus_add_devices() from pci_scan_root_bus() and do it after any
resource assignment in the callers.
Note that ARM's pci_common_init_dev() already called pci_bus_add_devices()
after pci_scan_root_bus(), so we only need to remove the first call:
pci_common_init_dev
pcibios_init_hw
pci_scan_root_bus
pci_bus_add_devices # first call
pci_bus_assign_resources
pci_bus_add_devices # second call
[bhelgaas: changelog, drop "root_bus" var in alpha common_init_pci(),
return failure earlier in mn10300, add "return" in x86 pcibios_scan_root(),
return early if xtensa platform_pcibios_fixup() fails]
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
CC: Richard Henderson <rth@twiddle.net>
CC: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
CC: Matt Turner <mattst88@gmail.com>
CC: David Howells <dhowells@redhat.com>
CC: Tony Luck <tony.luck@intel.com>
CC: Michal Simek <monstr@monstr.eu>
CC: Ralf Baechle <ralf@linux-mips.org>
CC: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
CC: Sebastian Ott <sebott@linux.vnet.ibm.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Chris Metcalf <cmetcalf@ezchip.com>
CC: Chris Zankel <chris@zankel.net>
CC: Max Filippov <jcmvbkbc@gmail.com>
CC: Thomas Gleixner <tglx@linutronix.de>
2015-03-16 11:18:56 +08:00
|
|
|
return;
|
2014-01-25 02:52:25 +08:00
|
|
|
}
|
PCI: Assign resources before drivers claim devices (pci_scan_root_bus())
Previously, pci_scan_root_bus() created a root PCI bus, enumerated the
devices on it, and called pci_bus_add_devices(), which made the devices
available for drivers to claim them.
Most callers assigned resources to devices after pci_scan_root_bus()
returns, which may be after drivers have claimed the devices. This is
incorrect; the PCI core should not change device resources while a driver
is managing the device.
Remove pci_bus_add_devices() from pci_scan_root_bus() and do it after any
resource assignment in the callers.
Note that ARM's pci_common_init_dev() already called pci_bus_add_devices()
after pci_scan_root_bus(), so we only need to remove the first call:
pci_common_init_dev
pcibios_init_hw
pci_scan_root_bus
pci_bus_add_devices # first call
pci_bus_assign_resources
pci_bus_add_devices # second call
[bhelgaas: changelog, drop "root_bus" var in alpha common_init_pci(),
return failure earlier in mn10300, add "return" in x86 pcibios_scan_root(),
return early if xtensa platform_pcibios_fixup() fails]
Signed-off-by: Yijing Wang <wangyijing@huawei.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
CC: Richard Henderson <rth@twiddle.net>
CC: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
CC: Matt Turner <mattst88@gmail.com>
CC: David Howells <dhowells@redhat.com>
CC: Tony Luck <tony.luck@intel.com>
CC: Michal Simek <monstr@monstr.eu>
CC: Ralf Baechle <ralf@linux-mips.org>
CC: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
CC: Sebastian Ott <sebott@linux.vnet.ibm.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Chris Metcalf <cmetcalf@ezchip.com>
CC: Chris Zankel <chris@zankel.net>
CC: Max Filippov <jcmvbkbc@gmail.com>
CC: Thomas Gleixner <tglx@linutronix.de>
2015-03-16 11:18:56 +08:00
|
|
|
pci_bus_add_devices(bus);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2012-04-03 09:31:54 +08:00
|
|
|
|
2010-03-19 02:28:12 +08:00
|
|
|
void __init pcibios_set_cache_line_size(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct cpuinfo_x86 *c = &boot_cpu_data;
|
|
|
|
|
|
|
|
/*
|
2009-10-15 04:31:39 +08:00
|
|
|
* Set PCI cacheline size to that of the CPU if the CPU has reported it.
|
|
|
|
* (For older CPUs that don't support cpuid, we se it to 32 bytes
|
|
|
|
* It's also good for 386/486s (which actually have 16)
|
2005-04-17 06:20:36 +08:00
|
|
|
* as quite a few PCI devices do not support smaller values.
|
|
|
|
*/
|
2009-10-15 04:31:39 +08:00
|
|
|
if (c->x86_clflush_size > 0) {
|
|
|
|
pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
|
|
|
|
printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
|
|
|
|
pci_dfl_cache_line_size << 2);
|
|
|
|
} else {
|
|
|
|
pci_dfl_cache_line_size = 32 >> 2;
|
|
|
|
printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
|
|
|
|
}
|
2010-03-19 02:28:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int __init pcibios_init(void)
|
|
|
|
{
|
2016-03-23 18:34:29 +08:00
|
|
|
if (!raw_pci_ops && !raw_pci_ext_ops) {
|
2010-03-19 02:28:12 +08:00
|
|
|
printk(KERN_WARNING "PCI: System does not support PCI\n");
|
|
|
|
return 0;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-03-19 02:28:12 +08:00
|
|
|
pcibios_set_cache_line_size();
|
2005-04-17 06:20:36 +08:00
|
|
|
pcibios_resource_survey();
|
|
|
|
|
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are
labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and
in the printed documentation. Assuming no other add-in ethernet ports
in the system, Linux 2.4 kernels name these eth0 and eth1
respectively. Many people have come to expect this naming. Linux 2.6
kernels name these eth1 and eth0 respectively (backwards from
expectations). I also have reports that various Sun and HP servers
have similar behavior.
Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be
sorted in breadth-first order (or pcbios_find_device order on i386,
which most often is breadth-first also). 2.6 kernels have both the
pci_devices list and the pci_bus_type.klist_devices list, the latter
is what is walked at driver load time to match the pci_id tables; this
klist happens to be in depth-first order.
On systems where, for physical routing reasons, NIC1 appears on a
lower bus number than NIC2, but NIC2's bridge is discovered first in
the depth-first ordering, NIC2 will be discovered before NIC1. If the
list were sorted breadth-first, NIC1 would be discovered before NIC2.
A PowerEdge 1955 system has the following topology which easily
exhibits the difference between depth-first and breadth-first device
lists.
-[0000:00]-+-00.0 Intel Corporation 5000P Chipset Memory Controller Hub
+-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
+-1c.0-[0000:01-02]----00.0-[0000:02]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)
Other factors, such as device driver load order and the presence of
PCI slots at various points in the bus hierarchy further complicate
this problem; I'm not trying to solve those here, just restore the
device order, and thus basic behavior, that 2.4 kernels had.
Solution:
The solution can come in multiple steps.
Suggested fix #1: kernel
Patch below optionally sorts the two device lists into breadth-first
ordering to maintain compatibility with 2.4 kernels. It adds two new
command line options:
pci=bfsort
pci=nobfsort
to force the sort order, or not, as you wish. It also adds DMI checks
for the specific Dell systems which exhibit "backwards" ordering, to
make them "right".
Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always
discovered before add-in NICs (which this patch does not try to do).
Using the PCI IRQ Routing Table provided by system BIOS, it's easy to
determine which PCI devices are embedded, or if add-in, which PCI slot
they're in. I'm working on a tool that would allow udev to name
ethernet devices in ascending embedded, slot 1 .. slot N order,
subsort by PCI bus/dev/fn breadth-first. It'll be possible to use it
independent of udev as well for those distributions that don't use
udev in their installers.
Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2
regardless of breadth-first or depth-first discovery order. This adds
a significant level of complexity to board routing, and may not be
possible in all instances (witness the above systems from several
major manufacturers). I don't want to encourage this particular train
of thought too far, at the expense of not doing #1 or #2 above.
Feedback appreciated. Patch tested on a Dell PowerEdge 1955 blade
with 2.6.18.
You'll also note I took some liberty and temporarily break the klist
abstraction to simplify and speed up the sort algorithm. I think
that's both safe and appropriate in this instance.
Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-09-30 04:23:23 +08:00
|
|
|
if (pci_bf_sort >= pci_force_bf)
|
|
|
|
pci_sort_breadthfirst();
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-08-26 05:26:35 +08:00
|
|
|
char *__init pcibios_setup(char *str)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if (!strcmp(str, "off")) {
|
|
|
|
pci_probe = 0;
|
|
|
|
return NULL;
|
PCI: optionally sort device lists breadth-first
Problem:
New Dell PowerEdge servers have 2 embedded ethernet ports, which are
labeled NIC1 and NIC2 on the chassis, in the BIOS setup screens, and
in the printed documentation. Assuming no other add-in ethernet ports
in the system, Linux 2.4 kernels name these eth0 and eth1
respectively. Many people have come to expect this naming. Linux 2.6
kernels name these eth1 and eth0 respectively (backwards from
expectations). I also have reports that various Sun and HP servers
have similar behavior.
Root cause:
Linux 2.4 kernels walk the pci_devices list, which happens to be
sorted in breadth-first order (or pcbios_find_device order on i386,
which most often is breadth-first also). 2.6 kernels have both the
pci_devices list and the pci_bus_type.klist_devices list, the latter
is what is walked at driver load time to match the pci_id tables; this
klist happens to be in depth-first order.
On systems where, for physical routing reasons, NIC1 appears on a
lower bus number than NIC2, but NIC2's bridge is discovered first in
the depth-first ordering, NIC2 will be discovered before NIC1. If the
list were sorted breadth-first, NIC1 would be discovered before NIC2.
A PowerEdge 1955 system has the following topology which easily
exhibits the difference between depth-first and breadth-first device
lists.
-[0000:00]-+-00.0 Intel Corporation 5000P Chipset Memory Controller Hub
+-02.0-[0000:03-08]--+-00.0-[0000:04-07]--+-00.0-[0000:05-06]----00.0-[0000:06]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC2, 2.4 kernel name eth1, 2.6 kernel name eth0)
+-1c.0-[0000:01-02]----00.0-[0000:02]----00.0 Broadcom Corporation NetXtreme II BCM5708S Gigabit Ethernet (labeled NIC1, 2.4 kernel name eth0, 2.6 kernel name eth1)
Other factors, such as device driver load order and the presence of
PCI slots at various points in the bus hierarchy further complicate
this problem; I'm not trying to solve those here, just restore the
device order, and thus basic behavior, that 2.4 kernels had.
Solution:
The solution can come in multiple steps.
Suggested fix #1: kernel
Patch below optionally sorts the two device lists into breadth-first
ordering to maintain compatibility with 2.4 kernels. It adds two new
command line options:
pci=bfsort
pci=nobfsort
to force the sort order, or not, as you wish. It also adds DMI checks
for the specific Dell systems which exhibit "backwards" ordering, to
make them "right".
Suggested fix #2: udev rules from userland
Many people also have the expectation that embedded NICs are always
discovered before add-in NICs (which this patch does not try to do).
Using the PCI IRQ Routing Table provided by system BIOS, it's easy to
determine which PCI devices are embedded, or if add-in, which PCI slot
they're in. I'm working on a tool that would allow udev to name
ethernet devices in ascending embedded, slot 1 .. slot N order,
subsort by PCI bus/dev/fn breadth-first. It'll be possible to use it
independent of udev as well for those distributions that don't use
udev in their installers.
Suggested fix #3: system board routing rules
One can constrain the system board layout to put NIC1 ahead of NIC2
regardless of breadth-first or depth-first discovery order. This adds
a significant level of complexity to board routing, and may not be
possible in all instances (witness the above systems from several
major manufacturers). I don't want to encourage this particular train
of thought too far, at the expense of not doing #1 or #2 above.
Feedback appreciated. Patch tested on a Dell PowerEdge 1955 blade
with 2.6.18.
You'll also note I took some liberty and temporarily break the klist
abstraction to simplify and speed up the sort algorithm. I think
that's both safe and appropriate in this instance.
Signed-off-by: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
2006-09-30 04:23:23 +08:00
|
|
|
} else if (!strcmp(str, "bfsort")) {
|
|
|
|
pci_bf_sort = pci_force_bf;
|
|
|
|
return NULL;
|
|
|
|
} else if (!strcmp(str, "nobfsort")) {
|
|
|
|
pci_bf_sort = pci_force_nobf;
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
#ifdef CONFIG_PCI_BIOS
|
|
|
|
else if (!strcmp(str, "bios")) {
|
|
|
|
pci_probe = PCI_PROBE_BIOS;
|
|
|
|
return NULL;
|
|
|
|
} else if (!strcmp(str, "nobios")) {
|
|
|
|
pci_probe &= ~PCI_PROBE_BIOS;
|
|
|
|
return NULL;
|
|
|
|
} else if (!strcmp(str, "biosirq")) {
|
|
|
|
pci_probe |= PCI_BIOS_IRQ_SCAN;
|
|
|
|
return NULL;
|
2005-03-22 12:20:42 +08:00
|
|
|
} else if (!strncmp(str, "pirqaddr=", 9)) {
|
|
|
|
pirq_table_addr = simple_strtoul(str+9, NULL, 0);
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PCI_DIRECT
|
|
|
|
else if (!strcmp(str, "conf1")) {
|
|
|
|
pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
else if (!strcmp(str, "conf2")) {
|
|
|
|
pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PCI_MMCONFIG
|
|
|
|
else if (!strcmp(str, "nommconf")) {
|
|
|
|
pci_probe &= ~PCI_PROBE_MMCONF;
|
|
|
|
return NULL;
|
|
|
|
}
|
2008-04-15 07:08:25 +08:00
|
|
|
else if (!strcmp(str, "check_enable_amd_mmconf")) {
|
|
|
|
pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
#endif
|
|
|
|
else if (!strcmp(str, "noacpi")) {
|
|
|
|
acpi_noirq_set();
|
|
|
|
return NULL;
|
|
|
|
}
|
2006-09-26 16:52:41 +08:00
|
|
|
else if (!strcmp(str, "noearly")) {
|
|
|
|
pci_probe |= PCI_PROBE_NOEARLY;
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
else if (!strcmp(str, "usepirqmask")) {
|
|
|
|
pci_probe |= PCI_USE_PIRQ_MASK;
|
|
|
|
return NULL;
|
|
|
|
} else if (!strncmp(str, "irqmask=", 8)) {
|
|
|
|
pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
|
|
|
|
return NULL;
|
|
|
|
} else if (!strncmp(str, "lastbus=", 8)) {
|
|
|
|
pcibios_last_bus = simple_strtol(str+8, NULL, 0);
|
|
|
|
return NULL;
|
2014-02-26 04:05:34 +08:00
|
|
|
} else if (!strcmp(str, "rom")) {
|
2005-04-17 06:20:36 +08:00
|
|
|
pci_probe |= PCI_ASSIGN_ROMS;
|
|
|
|
return NULL;
|
2008-05-13 04:57:46 +08:00
|
|
|
} else if (!strcmp(str, "norom")) {
|
|
|
|
pci_probe |= PCI_NOASSIGN_ROMS;
|
|
|
|
return NULL;
|
2010-05-13 02:14:32 +08:00
|
|
|
} else if (!strcmp(str, "nobar")) {
|
|
|
|
pci_probe |= PCI_NOASSIGN_BARS;
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else if (!strcmp(str, "assign-busses")) {
|
|
|
|
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
|
|
|
|
return NULL;
|
2009-06-25 07:23:03 +08:00
|
|
|
} else if (!strcmp(str, "use_crs")) {
|
|
|
|
pci_probe |= PCI_USE__CRS;
|
2007-10-04 06:56:51 +08:00
|
|
|
return NULL;
|
2010-02-24 01:24:41 +08:00
|
|
|
} else if (!strcmp(str, "nocrs")) {
|
|
|
|
pci_probe |= PCI_ROOT_NO_CRS;
|
|
|
|
return NULL;
|
2008-05-23 05:35:11 +08:00
|
|
|
} else if (!strcmp(str, "earlydump")) {
|
|
|
|
pci_early_dump_regs = 1;
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
} else if (!strcmp(str, "routeirq")) {
|
|
|
|
pci_routeirq = 1;
|
|
|
|
return NULL;
|
2008-03-27 16:31:18 +08:00
|
|
|
} else if (!strcmp(str, "skip_isa_align")) {
|
|
|
|
pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
|
|
|
|
return NULL;
|
2008-06-11 22:35:14 +08:00
|
|
|
} else if (!strcmp(str, "noioapicquirk")) {
|
|
|
|
noioapicquirk = 1;
|
|
|
|
return NULL;
|
2008-06-11 22:35:15 +08:00
|
|
|
} else if (!strcmp(str, "ioapicreroute")) {
|
|
|
|
if (noioapicreroute != -1)
|
|
|
|
noioapicreroute = 0;
|
|
|
|
return NULL;
|
2008-07-15 19:48:55 +08:00
|
|
|
} else if (!strcmp(str, "noioapicreroute")) {
|
|
|
|
if (noioapicreroute != -1)
|
|
|
|
noioapicreroute = 1;
|
|
|
|
return NULL;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
return str;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned int pcibios_assign_all_busses(void)
|
|
|
|
{
|
|
|
|
return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
|
|
|
|
}
|
|
|
|
|
2016-01-13 04:18:08 +08:00
|
|
|
#if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
|
|
|
|
static LIST_HEAD(dma_domain_list);
|
|
|
|
static DEFINE_SPINLOCK(dma_domain_list_lock);
|
|
|
|
|
|
|
|
void add_dma_domain(struct dma_domain *domain)
|
|
|
|
{
|
|
|
|
spin_lock(&dma_domain_list_lock);
|
|
|
|
list_add(&domain->node, &dma_domain_list);
|
|
|
|
spin_unlock(&dma_domain_list_lock);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(add_dma_domain);
|
|
|
|
|
|
|
|
void del_dma_domain(struct dma_domain *domain)
|
|
|
|
{
|
|
|
|
spin_lock(&dma_domain_list_lock);
|
|
|
|
list_del(&domain->node);
|
|
|
|
spin_unlock(&dma_domain_list_lock);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(del_dma_domain);
|
|
|
|
|
|
|
|
static void set_dma_domain_ops(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
struct dma_domain *domain;
|
|
|
|
|
|
|
|
spin_lock(&dma_domain_list_lock);
|
|
|
|
list_for_each_entry(domain, &dma_domain_list, node) {
|
|
|
|
if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
|
2017-01-21 05:04:02 +08:00
|
|
|
pdev->dev.dma_ops = domain->dma_ops;
|
2016-01-13 04:18:08 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
spin_unlock(&dma_domain_list_lock);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static void set_dma_domain_ops(struct pci_dev *pdev) {}
|
|
|
|
#endif
|
|
|
|
|
2016-09-13 23:05:40 +08:00
|
|
|
static void set_dev_domain_options(struct pci_dev *pdev)
|
|
|
|
{
|
|
|
|
if (is_vmd(pdev->bus))
|
|
|
|
pdev->hotplug_user_indicators = 1;
|
|
|
|
}
|
|
|
|
|
2012-12-06 05:33:27 +08:00
|
|
|
int pcibios_add_device(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
struct setup_data *data;
|
|
|
|
struct pci_setup_rom *rom;
|
|
|
|
u64 pa_data;
|
|
|
|
|
|
|
|
pa_data = boot_params.hdr.setup_data;
|
|
|
|
while (pa_data) {
|
2017-07-18 05:10:00 +08:00
|
|
|
data = memremap(pa_data, sizeof(*rom), MEMREMAP_WB);
|
2013-06-05 22:15:41 +08:00
|
|
|
if (!data)
|
|
|
|
return -ENOMEM;
|
2012-12-06 05:33:27 +08:00
|
|
|
|
|
|
|
if (data->type == SETUP_PCI) {
|
|
|
|
rom = (struct pci_setup_rom *)data;
|
|
|
|
|
|
|
|
if ((pci_domain_nr(dev->bus) == rom->segment) &&
|
|
|
|
(dev->bus->number == rom->bus) &&
|
|
|
|
(PCI_SLOT(dev->devfn) == rom->device) &&
|
|
|
|
(PCI_FUNC(dev->devfn) == rom->function) &&
|
|
|
|
(dev->vendor == rom->vendor) &&
|
|
|
|
(dev->device == rom->devid)) {
|
2012-12-11 02:24:42 +08:00
|
|
|
dev->rom = pa_data +
|
|
|
|
offsetof(struct pci_setup_rom, romdata);
|
2012-12-06 05:33:27 +08:00
|
|
|
dev->romlen = rom->pcilen;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pa_data = data->next;
|
2017-07-18 05:10:00 +08:00
|
|
|
memunmap(data);
|
2012-12-06 05:33:27 +08:00
|
|
|
}
|
2016-01-13 04:18:08 +08:00
|
|
|
set_dma_domain_ops(dev);
|
2016-09-13 23:05:40 +08:00
|
|
|
set_dev_domain_options(dev);
|
2012-12-06 05:33:27 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-02-18 02:26:42 +08:00
|
|
|
int pcibios_enable_device(struct pci_dev *dev, int mask)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2016-02-18 02:26:42 +08:00
|
|
|
int err;
|
2005-07-28 11:02:00 +08:00
|
|
|
|
2016-02-18 02:26:42 +08:00
|
|
|
if ((err = pci_enable_resources(dev, mask)) < 0)
|
|
|
|
return err;
|
|
|
|
|
|
|
|
if (!pci_dev_msi_enabled(dev))
|
|
|
|
return pcibios_enable_irq(dev);
|
|
|
|
return 0;
|
2015-03-20 21:56:19 +08:00
|
|
|
}
|
|
|
|
|
2016-02-18 02:26:42 +08:00
|
|
|
void pcibios_disable_device (struct pci_dev *dev)
|
2015-06-10 16:54:59 +08:00
|
|
|
{
|
2016-02-18 02:26:42 +08:00
|
|
|
if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
|
|
|
|
pcibios_disable_irq(dev);
|
2015-06-10 16:54:59 +08:00
|
|
|
}
|
|
|
|
|
2017-02-28 21:34:28 +08:00
|
|
|
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
|
|
|
|
void pcibios_release_device(struct pci_dev *dev)
|
|
|
|
{
|
|
|
|
if (atomic_dec_return(&dev->enable_cnt) >= 0)
|
|
|
|
pcibios_disable_device(dev);
|
|
|
|
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2012-10-30 14:26:18 +08:00
|
|
|
int pci_ext_cfg_avail(void)
|
2008-11-11 06:30:50 +08:00
|
|
|
{
|
|
|
|
if (raw_pci_ext_ops)
|
|
|
|
return 1;
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
}
|