Merge branch 'pci/enumeration' into next

* pci/enumeration:
  PCI: Warn periodically while waiting for non-CRS ("device ready") status
  PCI: Wait up to 60 seconds for device to become ready after FLR
  PCI: Factor out pci_bus_wait_crs()
  PCI: Add pci_bus_crs_vendor_id() to detect CRS response data
  PCI: Always check for non-CRS response before timeout
  PCI: Avoid race while enabling upstream bridges
  PCI: Mark Broadcom HT2100 Root Port Extended Tags as broken
commit c5efc22095
Author: Bjorn Helgaas
Date:   2017-09-07 13:24:14 -05:00

5 changed files with 154 additions and 49 deletions

@@ -52,6 +52,7 @@ static void pci_pme_list_scan(struct work_struct *work);
 static LIST_HEAD(pci_pme_list);
 static DEFINE_MUTEX(pci_pme_list_mutex);
 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
+static DEFINE_MUTEX(pci_bridge_mutex);
 
 struct pci_pme_device {
 	struct list_head list;
@@ -1348,10 +1349,16 @@ static void pci_enable_bridge(struct pci_dev *dev)
 	if (bridge)
 		pci_enable_bridge(bridge);
 
+	/*
+	 * Hold pci_bridge_mutex to prevent a race when enabling two
+	 * devices below the bridge simultaneously. The race may cause a
+	 * PCI_COMMAND_MEMORY update to be lost (see changelog).
+	 */
+	mutex_lock(&pci_bridge_mutex);
 	if (pci_is_enabled(dev)) {
 		if (!dev->is_busmaster)
 			pci_set_master(dev);
-		return;
+		goto end;
 	}
 
 	retval = pci_enable_device(dev);
@@ -1359,6 +1366,8 @@ static void pci_enable_bridge(struct pci_dev *dev)
 		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
 			retval);
 	pci_set_master(dev);
+end:
+	mutex_unlock(&pci_bridge_mutex);
 }
 
 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
@@ -1383,7 +1392,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
 		return 0;	/* already enabled */
 
 	bridge = pci_upstream_bridge(dev);
-	if (bridge)
+	if (bridge && !pci_is_enabled(bridge))
 		pci_enable_bridge(bridge);
 
 	/* only skip sriov related */
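
The race that pci_bridge_mutex closes is a lost update on the bridge's Command register: two callers enabling devices under the same bridge can both read the register, OR in their own bit, and the later write silently drops the earlier one's bit. A rough user-space replay of that interleaving (illustrative only, not part of this series; the real paths go through pci_enable_device() and pci_set_master() rather than direct config accesses):

/*
 * Deterministic replay of the lost-update interleaving on a mock
 * bridge Command register; illustrative only, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_IO		0x1	/* same bit values as the kernel uses */
#define PCI_COMMAND_MEMORY	0x2

static uint16_t bridge_command;		/* stands in for the bridge's PCI_COMMAND */

int main(void)
{
	uint16_t a = bridge_command;	/* CPU A reads 0x0000 */
	uint16_t b = bridge_command;	/* CPU B reads 0x0000 before A writes back */

	bridge_command = a | PCI_COMMAND_MEMORY;	/* A enables memory decode */
	bridge_command = b | PCI_COMMAND_IO;		/* B's write drops A's bit */

	printf("final command = %#06x, MEMORY bit lost: %s\n", bridge_command,
	       (bridge_command & PCI_COMMAND_MEMORY) ? "no" : "yes");
	return 0;
}

With the mutex held across the enabled check in pci_enable_bridge(), the second caller serializes behind the first, sees the bridge already enabled, and takes the goto end path instead of repeating the enable.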
@@ -3811,27 +3820,49 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
-/*
- * We should only need to wait 100ms after FLR, but some devices take longer.
- * Wait for up to 1000ms for config space to return something other than -1.
- * Intel IGD requires this when an LCD panel is attached. We read the 2nd
- * dword because VFs don't implement the 1st dword.
- */
 static void pci_flr_wait(struct pci_dev *dev)
 {
-	int i = 0;
+	int delay = 1, timeout = 60000;
 	u32 id;
 
-	do {
-		msleep(100);
-		pci_read_config_dword(dev, PCI_COMMAND, &id);
-	} while (i++ < 10 && id == ~0);
+	/*
+	 * Per PCIe r3.1, sec 6.6.2, a device must complete an FLR within
+	 * 100ms, but may silently discard requests while the FLR is in
+	 * progress. Wait 100ms before trying to access the device.
+	 */
+	msleep(100);
 
-	if (id == ~0)
-		dev_warn(&dev->dev, "Failed to return from FLR\n");
-	else if (i > 1)
-		dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
-			 (i - 1) * 100);
+	/*
+	 * After 100ms, the device should not silently discard config
+	 * requests, but it may still indicate that it needs more time by
+	 * responding to them with CRS completions. The Root Port will
+	 * generally synthesize ~0 data to complete the read (except when
+	 * CRS SV is enabled and the read was for the Vendor ID; in that
+	 * case it synthesizes 0x0001 data).
+	 *
+	 * Wait for the device to return a non-CRS completion. Read the
+	 * Command register instead of Vendor ID so we don't have to
+	 * contend with the CRS SV value.
+	 */
+	pci_read_config_dword(dev, PCI_COMMAND, &id);
+	while (id == ~0) {
+		if (delay > timeout) {
+			dev_warn(&dev->dev, "not ready %dms after FLR; giving up\n",
+				 100 + delay - 1);
+			return;
+		}
+
+		if (delay > 1000)
+			dev_info(&dev->dev, "not ready %dms after FLR; waiting\n",
+				 100 + delay - 1);
+
+		msleep(delay);
+		delay *= 2;
+		pci_read_config_dword(dev, PCI_COMMAND, &id);
+	}
+
+	if (delay > 1000)
+		dev_info(&dev->dev, "ready %dms after FLR\n", 100 + delay - 1);
 }
 
 /**
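
The delay doubles from 1 ms, so by the time the loop checks a given delay it has already slept 1 + 2 + ... + delay/2 = delay - 1 ms on top of the initial 100 ms, which is exactly the "100 + delay - 1" the messages report. A stand-alone sketch of that accounting (pure arithmetic, nothing kernel-specific); the first delay above the 60000 ms timeout is 65536 ms, so the worst case gives up after roughly 65.6 s of accumulated sleep:

/* Prints the retry schedule behind the new pci_flr_wait() backoff. */
#include <stdio.h>

int main(void)
{
	int delay = 1, timeout = 60000;
	int slept = 100;			/* the unconditional post-FLR msleep(100) */

	for (;;) {
		/* matches the "%dms after FLR" figures: 100 + delay - 1 */
		printf("check with delay %5d ms, elapsed %5d ms\n", delay, slept);
		if (delay > timeout)
			break;			/* pci_flr_wait() warns and gives up here */
		slept += delay;
		delay *= 2;
	}
	return 0;
}

The same doubling pattern, without the leading 100 ms, is what pci_bus_wait_crs() below uses against the caller-supplied timeout.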

@@ -235,6 +235,7 @@ enum pci_bar_type {
 	pci_bar_mem64,		/* A 64-bit memory BAR */
 };
 
+int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
 				int crs_timeout);
 int pci_setup_device(struct pci_dev *dev);

@@ -1745,21 +1745,50 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
 	 */
 }
 
-static void pci_configure_extended_tags(struct pci_dev *dev)
+int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
 {
-	u32 dev_cap;
+	struct pci_host_bridge *host;
+	u32 cap;
+	u16 ctl;
 	int ret;
 
 	if (!pci_is_pcie(dev))
-		return;
+		return 0;
 
-	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
+	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
 	if (ret)
-		return;
+		return 0;
 
-	if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
+	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
+		return 0;
+
+	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
+	if (ret)
+		return 0;
+
+	host = pci_find_host_bridge(dev->bus);
+	if (!host)
+		return 0;
+
+	/*
+	 * If some device in the hierarchy doesn't handle Extended Tags
+	 * correctly, make sure they're disabled.
+	 */
+	if (host->no_ext_tags) {
+		if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
+			dev_info(&dev->dev, "disabling Extended Tags\n");
+			pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+						   PCI_EXP_DEVCTL_EXT_TAG);
+		}
+		return 0;
+	}
+
+	if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
+		dev_info(&dev->dev, "enabling Extended Tags\n");
 		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
 					 PCI_EXP_DEVCTL_EXT_TAG);
+	}
+	return 0;
 }
 
 static void pci_configure_device(struct pci_dev *dev)
@@ -1768,7 +1797,7 @@ static void pci_configure_device(struct pci_dev *dev)
 	int ret;
 
 	pci_configure_mps(dev);
-	pci_configure_extended_tags(dev);
+	pci_configure_extended_tags(dev, NULL);
 
 	memset(&hpp, 0, sizeof(hpp));
 	ret = pci_get_hp_params(dev, &hpp);
@@ -1824,11 +1853,58 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_alloc_dev);
 
-bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
-				int crs_timeout)
+static bool pci_bus_crs_vendor_id(u32 l)
+{
+	return (l & 0xffff) == 0x0001;
+}
+
+static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+			     int timeout)
 {
 	int delay = 1;
 
+	if (!pci_bus_crs_vendor_id(*l))
+		return true;	/* not a CRS completion */
+
+	if (!timeout)
+		return false;	/* CRS, but caller doesn't want to wait */
+
+	/*
+	 * We got the reserved Vendor ID that indicates a completion with
+	 * Configuration Request Retry Status (CRS). Retry until we get a
+	 * valid Vendor ID or we time out.
+	 */
+	while (pci_bus_crs_vendor_id(*l)) {
+		if (delay > timeout) {
+			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
+				pci_domain_nr(bus), bus->number,
+				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
+
+			return false;
+		}
+		if (delay >= 1000)
+			pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
+				pci_domain_nr(bus), bus->number,
+				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
+
+		msleep(delay);
+		delay *= 2;
+
+		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
+			return false;
+	}
+
+	if (delay >= 1000)
+		pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
+			pci_domain_nr(bus), bus->number,
+			PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
+
+	return true;
+}
+
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+				int timeout)
+{
 	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
 		return false;
 
@@ -1837,28 +1913,8 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
 	    *l == 0x0000ffff || *l == 0xffff0000)
 		return false;
 
-	/*
-	 * Configuration Request Retry Status. Some root ports return the
-	 * actual device ID instead of the synthetic ID (0xFFFF) required
-	 * by the PCIe spec. Ignore the device ID and only check for
-	 * (vendor id == 1).
-	 */
-	while ((*l & 0xffff) == 0x0001) {
-		if (!crs_timeout)
-			return false;
-
-		msleep(delay);
-		delay *= 2;
-		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
-			return false;
-		/* Card hasn't responded in 60 seconds? Must be stuck. */
-		if (delay > crs_timeout) {
-			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
-			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
-			       PCI_FUNC(devfn));
-			return false;
-		}
-	}
+	if (pci_bus_crs_vendor_id(*l))
+		return pci_bus_wait_crs(bus, devfn, l, timeout);
 
 	return true;
 }
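
The 0x0001 test keys off the reserved Vendor ID a Root Port synthesizes when CRS Software Visibility is enabled: a config read of the Vendor/Device ID dword during CRS completes as 0xffff0001 (Vendor ID 0x0001, Device ID 0xffff per spec), so only the low 16 bits matter and the Device ID is ignored. A small user-space mirror of that masking against a few sample completions (the helper name and sample values are illustrative, not kernel code):

#include <stdio.h>
#include <stdbool.h>

/* Mirrors pci_bus_crs_vendor_id(): reserved Vendor ID 0x0001 means CRS. */
static bool crs_vendor_id(unsigned int l)
{
	return (l & 0xffff) == 0x0001;
}

int main(void)
{
	/* CRS SV completion, a plausible real Vendor/Device ID, no device at all */
	unsigned int samples[] = { 0xffff0001, 0x168214e4, 0xffffffff };

	for (int i = 0; i < 3; i++)
		printf("0x%08x -> %s\n", samples[i],
		       crs_vendor_id(samples[i]) ? "CRS, keep retrying" : "not CRS");
	return 0;
}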

@@ -4664,3 +4664,19 @@ static void quirk_intel_no_flr(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
+
+static void quirk_no_ext_tags(struct pci_dev *pdev)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+
+	if (!bridge)
+		return;
+
+	bridge->no_ext_tags = 1;
+	dev_info(&pdev->dev, "disabling Extended Tags (this device can't handle them)\n");
+
+	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
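
pci_configure_extended_tags() picks up the int return value and the unused void * argument above so this quirk can hand it straight to pci_walk_bus(); a non-zero return from a walk callback stops the walk, which is why it always returns 0. A minimal user-space mock of that callback contract (walk_bus() and this struct pci_dev are stand-ins, not the kernel's):

#include <stdio.h>

struct pci_dev { const char *name; };		/* stand-in, not the kernel struct */

/* Same shape as a pci_walk_bus() callback: int (*)(struct pci_dev *, void *) */
static int configure_ext_tags(struct pci_dev *dev, void *ign)
{
	printf("reconfiguring Extended Tags on %s\n", dev->name);
	return 0;				/* non-zero would stop the walk */
}

static void walk_bus(struct pci_dev *devs, int n,
		     int (*cb)(struct pci_dev *, void *), void *data)
{
	for (int i = 0; i < n; i++)
		if (cb(&devs[i], data))
			break;
}

int main(void)
{
	struct pci_dev devs[] = { { "0000:00:01.0" }, { "0000:01:00.0" } };

	walk_bus(devs, 2, configure_ext_tags, NULL);
	return 0;
}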

@@ -473,6 +473,7 @@ struct pci_host_bridge {
 	void		*release_data;
 	struct msi_controller *msi;
 	unsigned int	ignore_reset_delay:1;	/* for entire hierarchy */
+	unsigned int	no_ext_tags:1;		/* no Extended Tags */
 	/* Resource alignment requirements */
 	resource_size_t (*align_resource)(struct pci_dev *dev,
 			const struct resource *res,