Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r-- | drivers/pci/pci.c | 573 |
1 file changed, 496 insertions, 77 deletions
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e37fea6e178..b821a62958f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
+#include <linux/pci_hotplug.h>
 #include <asm-generic/pci-bridge.h>
 #include <asm/setup.h>
 #include "pci.h"
@@ -1145,6 +1146,24 @@ int pci_reenable_device(struct pci_dev *dev)
         return 0;
 }
 
+static void pci_enable_bridge(struct pci_dev *dev)
+{
+        int retval;
+
+        if (!dev)
+                return;
+
+        pci_enable_bridge(dev->bus->self);
+
+        if (pci_is_enabled(dev))
+                return;
+        retval = pci_enable_device(dev);
+        if (retval)
+                dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
+                        retval);
+        pci_set_master(dev);
+}
+
 static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
 {
         int err;
@@ -1165,6 +1184,8 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
         if (atomic_inc_return(&dev->enable_cnt) > 1)
                 return 0;               /* already enabled */
 
+        pci_enable_bridge(dev->bus->self);
+
         /* only skip sriov related */
         for (i = 0; i <= PCI_ROM_RESOURCE; i++)
                 if (dev->resource[i].flags & flags)
@@ -1992,7 +2013,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
 }
 
 /**
- * pci_add_save_buffer - allocate buffer for saving given capability registers
+ * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
  * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
@@ -2095,9 +2116,9 @@ void pci_enable_ido(struct pci_dev *dev, unsigned long type)
         u16 ctrl = 0;
 
         if (type & PCI_EXP_IDO_REQUEST)
-                ctrl |= PCI_EXP_IDO_REQ_EN;
+                ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
         if (type & PCI_EXP_IDO_COMPLETION)
-                ctrl |= PCI_EXP_IDO_CMP_EN;
+                ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
         if (ctrl)
                 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
 }
@@ -2113,9 +2134,9 @@ void pci_disable_ido(struct pci_dev *dev, unsigned long type)
         u16 ctrl = 0;
 
         if (type & PCI_EXP_IDO_REQUEST)
-                ctrl |= PCI_EXP_IDO_REQ_EN;
+                ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
         if (type & PCI_EXP_IDO_COMPLETION)
-                ctrl |= PCI_EXP_IDO_CMP_EN;
+                ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
         if (ctrl)
                 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
 }
@@ -2147,7 +2168,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
         int ret;
 
         pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
-        if (!(cap & PCI_EXP_OBFF_MASK))
+        if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK))
                 return -ENOTSUPP; /* no OBFF support at all */
 
         /* Make sure the topology supports OBFF as well */
@@ -2158,17 +2179,17 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
         }
 
         pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
-        if (cap & PCI_EXP_OBFF_WAKE)
-                ctrl |= PCI_EXP_OBFF_WAKE_EN;
+        if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE)
+                ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
         else {
                 switch (type) {
                 case PCI_EXP_OBFF_SIGNAL_L0:
-                        if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
-                                ctrl |= PCI_EXP_OBFF_MSGA_EN;
+                        if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN))
+                                ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN;
                         break;
                 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
-                        ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
-                        ctrl |= PCI_EXP_OBFF_MSGB_EN;
+                        ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
+                        ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN;
                         break;
                 default:
                         WARN(1, "bad OBFF signal type\n");
@@ -2189,7 +2210,8 @@ EXPORT_SYMBOL(pci_enable_obff);
  */
 void pci_disable_obff(struct pci_dev *dev)
 {
-        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
+        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
+                                   PCI_EXP_DEVCTL2_OBFF_WAKE_EN);
 }
 EXPORT_SYMBOL(pci_disable_obff);
 
@@ -2237,7 +2259,8 @@ int pci_enable_ltr(struct pci_dev *dev)
                 return ret;
         }
 
-        return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
+        return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+                                        PCI_EXP_DEVCTL2_LTR_EN);
 }
 EXPORT_SYMBOL(pci_enable_ltr);
 
@@ -2254,7 +2277,8 @@ void pci_disable_ltr(struct pci_dev *dev)
         if (!pci_ltr_supported(dev))
                 return;
 
-        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
+        pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
+                                   PCI_EXP_DEVCTL2_LTR_EN);
 }
 EXPORT_SYMBOL(pci_disable_ltr);
 
@@ -2359,6 +2383,27 @@ void pci_enable_acs(struct pci_dev *dev)
         pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
 }
 
+static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+        int pos;
+        u16 cap, ctrl;
+
+        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+        if (!pos)
+                return false;
+
+        /*
+         * Except for egress control, capabilities are either required
+         * or only required if controllable.  Features missing from the
+         * capability field can therefore be assumed as hard-wired enabled.
+         */
+        pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
+        acs_flags &= (cap | PCI_ACS_EC);
+
+        pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+        return (ctrl & acs_flags) == acs_flags;
+}
+
 /**
  * pci_acs_enabled - test ACS against required flags for a given device
  * @pdev: device to test
@@ -2366,36 +2411,76 @@ void pci_enable_acs(struct pci_dev *dev)
  *
  * Return true if the device supports the provided flags.  Automatically
  * filters out flags that are not implemented on multifunction devices.
+ *
+ * Note that this interface checks the effective ACS capabilities of the
+ * device rather than the actual capabilities.  For instance, most single
+ * function endpoints are not required to support ACS because they have no
+ * opportunity for peer-to-peer access.  We therefore return 'true'
+ * regardless of whether the device exposes an ACS capability.  This makes
+ * it much easier for callers of this function to ignore the actual type
+ * or topology of the device when testing ACS support.
  */
 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
 {
-        int pos, ret;
-        u16 ctrl;
+        int ret;
 
         ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
         if (ret >= 0)
                 return ret > 0;
 
+        /*
+         * Conventional PCI and PCI-X devices never support ACS, either
+         * effectively or actually.  The shared bus topology implies that
+         * any device on the bus can receive or snoop DMA.
+         */
         if (!pci_is_pcie(pdev))
                 return false;
 
-        /* Filter out flags not applicable to multifunction */
-        if (pdev->multifunction)
-                acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
-                              PCI_ACS_EC | PCI_ACS_DT);
-
-        if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
-            pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
-            pdev->multifunction) {
-                pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
-                if (!pos)
-                        return false;
+        switch (pci_pcie_type(pdev)) {
+        /*
+         * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
+         * but since their primary interface is PCI/X, we conservatively
+         * handle them as we would a non-PCIe device.
+         */
+        case PCI_EXP_TYPE_PCIE_BRIDGE:
+        /*
+         * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
+         * applicable... must never implement an ACS Extended Capability...".
+         * This seems arbitrary, but we take a conservative interpretation
+         * of this statement.
+         */
+        case PCI_EXP_TYPE_PCI_BRIDGE:
+        case PCI_EXP_TYPE_RC_EC:
+                return false;
+        /*
+         * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
+         * implement ACS in order to indicate their peer-to-peer capabilities,
+         * regardless of whether they are single- or multi-function devices.
+         */
+        case PCI_EXP_TYPE_DOWNSTREAM:
+        case PCI_EXP_TYPE_ROOT_PORT:
+                return pci_acs_flags_enabled(pdev, acs_flags);
+        /*
+         * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
+         * implemented by the remaining PCIe types to indicate peer-to-peer
+         * capabilities, but only when they are part of a multifunction
+         * device.  The footnote for section 6.12 indicates the specific
+         * PCIe types included here.
+         */
+        case PCI_EXP_TYPE_ENDPOINT:
+        case PCI_EXP_TYPE_UPSTREAM:
+        case PCI_EXP_TYPE_LEG_END:
+        case PCI_EXP_TYPE_RC_END:
+                if (!pdev->multifunction)
+                        break;
 
-                pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
-                if ((ctrl & acs_flags) != acs_flags)
-                        return false;
+                return pci_acs_flags_enabled(pdev, acs_flags);
         }
 
+        /*
+         * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
+         * to single function devices with the exception of downstream ports.
+         */
         return true;
 }
 
@@ -3059,18 +3144,23 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
 
 /**
- * pci_msi_off - disables any msi or msix capabilities
+ * pci_msi_off - disables any MSI or MSI-X capabilities
  * @dev: the PCI device to operate on
  *
- * If you want to use msi see pci_enable_msi and friends.
- * This is a lower level primitive that allows us to disable
- * msi operation at the device level.
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
  */
 void pci_msi_off(struct pci_dev *dev)
 {
         int pos;
         u16 control;
 
+        /*
+         * This looks like it could go in msi.c, but we need it even when
+         * CONFIG_PCI_MSI=n.  For the same reason, we can't use
+         * dev->msi_cap or dev->msix_cap here.
+         */
         pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
         if (pos) {
                 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -3098,19 +3188,17 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
 }
 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
 
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pci_wait_for_pending_transaction - waits for pending transaction
+ * @dev: the PCI device to operate on
+ *
+ * Return 0 if transaction is pending, 1 otherwise.
+ */
+int pci_wait_for_pending_transaction(struct pci_dev *dev)
 {
         int i;
-        u32 cap;
         u16 status;
 
-        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
-        if (!(cap & PCI_EXP_DEVCAP_FLR))
-                return -ENOTTY;
-
-        if (probe)
-                return 0;
-
         /* Wait for Transaction Pending bit clean */
         for (i = 0; i < 4; i++) {
                 if (i)
@@ -3118,13 +3206,27 @@ static int pcie_flr(struct pci_dev *dev, int probe)
 
                 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
                 if (!(status & PCI_EXP_DEVSTA_TRPND))
-                        goto clear;
+                        return 1;
         }
 
-        dev_err(&dev->dev, "transaction is not cleared; "
-                        "proceeding with reset anyway\n");
+        return 0;
+}
+EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+
+static int pcie_flr(struct pci_dev *dev, int probe)
+{
+        u32 cap;
+
+        pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+        if (!(cap & PCI_EXP_DEVCAP_FLR))
+                return -ENOTTY;
+
+        if (probe)
+                return 0;
+
+        if (!pci_wait_for_pending_transaction(dev))
+                dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
 
-clear:
         pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
 
         msleep(100);
@@ -3215,9 +3317,42 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
         return 0;
 }
 
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
 {
         u16 ctrl;
+
+        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+        /*
+         * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
+         * this to 2ms to ensure that we meet the minimum requirement.
+         */
+        msleep(2);
+
+        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+        /*
+         * Trhfa for conventional PCI is 2^25 clock cycles.
+         * Assuming a minimum 33MHz clock this results in a 1s
+         * delay before we can consider subordinate devices to
+         * be re-initialized.  PCIe has some ways to shorten this,
+         * but we don't make use of them yet.
+         */
+        ssleep(1);
+}
+EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
         struct pci_dev *pdev;
 
         if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
@@ -3230,18 +3365,40 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
         if (probe)
                 return 0;
 
-        pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
-        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
-        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
-        msleep(100);
-
-        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-        pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
-        msleep(100);
+        pci_reset_bridge_secondary_bus(dev->bus->self);
 
         return 0;
 }
 
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+{
+        int rc = -ENOTTY;
+
+        if (!hotplug || !try_module_get(hotplug->ops->owner))
+                return rc;
+
+        if (hotplug->ops->reset_slot)
+                rc = hotplug->ops->reset_slot(hotplug, probe);
+
+        module_put(hotplug->ops->owner);
+
+        return rc;
+}
+
+static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+{
+        struct pci_dev *pdev;
+
+        if (dev->subordinate || !dev->slot)
+                return -ENOTTY;
+
+        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+                if (pdev != dev && pdev->slot == dev->slot)
+                        return -ENOTTY;
+
+        return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+}
+
 static int __pci_dev_reset(struct pci_dev *dev, int probe)
 {
         int rc;
@@ -3264,27 +3421,65 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
         if (rc != -ENOTTY)
                 goto done;
 
+        rc = pci_dev_reset_slot_function(dev, probe);
+        if (rc != -ENOTTY)
+                goto done;
+
         rc = pci_parent_bus_reset(dev, probe);
 done:
         return rc;
 }
 
+static void pci_dev_lock(struct pci_dev *dev)
+{
+        pci_cfg_access_lock(dev);
+        /* block PM suspend, driver probe, etc. */
+        device_lock(&dev->dev);
+}
+
+static void pci_dev_unlock(struct pci_dev *dev)
+{
+        device_unlock(&dev->dev);
+        pci_cfg_access_unlock(dev);
+}
+
+static void pci_dev_save_and_disable(struct pci_dev *dev)
+{
+        /*
+         * Wake-up device prior to save.  PM registers default to D0 after
+         * reset and a simple register restore doesn't reliably return
+         * to a non-D0 state anyway.
+         */
+        pci_set_power_state(dev, PCI_D0);
+
+        pci_save_state(dev);
+        /*
+         * Disable the device by clearing the Command register, except for
+         * INTx-disable which is set.  This not only disables MMIO and I/O port
+         * BARs, but also prevents the device from being Bus Master, preventing
+         * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
+         * compliant devices, INTx-disable prevents legacy interrupts.
+         */
+        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+}
+
+static void pci_dev_restore(struct pci_dev *dev)
+{
+        pci_restore_state(dev);
+}
+
 static int pci_dev_reset(struct pci_dev *dev, int probe)
 {
         int rc;
 
-        if (!probe) {
-                pci_cfg_access_lock(dev);
-                /* block PM suspend, driver probe, etc. */
-                device_lock(&dev->dev);
-        }
+        if (!probe)
+                pci_dev_lock(dev);
 
         rc = __pci_dev_reset(dev, probe);
 
-        if (!probe) {
-                device_unlock(&dev->dev);
-                pci_cfg_access_unlock(dev);
-        }
+        if (!probe)
+                pci_dev_unlock(dev);
+
         return rc;
 }
 /**
@@ -3375,22 +3570,249 @@ int pci_reset_function(struct pci_dev *dev)
         if (rc)
                 return rc;
 
-        pci_save_state(dev);
-
-        /*
-         * both INTx and MSI are disabled after the Interrupt Disable bit
-         * is set and the Bus Master bit is cleared.
-         */
-        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+        pci_dev_save_and_disable(dev);
 
         rc = pci_dev_reset(dev, 0);
 
-        pci_restore_state(dev);
+        pci_dev_restore(dev);
 
         return rc;
 }
 EXPORT_SYMBOL_GPL(pci_reset_function);
 
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &bus->devices, bus_list) {
+                pci_dev_lock(dev);
+                if (dev->subordinate)
+                        pci_bus_lock(dev->subordinate);
+        }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &bus->devices, bus_list) {
+                if (dev->subordinate)
+                        pci_bus_unlock(dev->subordinate);
+                pci_dev_unlock(dev);
+        }
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_slot_lock(struct pci_slot *slot)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+                if (!dev->slot || dev->slot != slot)
+                        continue;
+                pci_dev_lock(dev);
+                if (dev->subordinate)
+                        pci_bus_lock(dev->subordinate);
+        }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_slot_unlock(struct pci_slot *slot)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+                if (!dev->slot || dev->slot != slot)
+                        continue;
+                if (dev->subordinate)
+                        pci_bus_unlock(dev->subordinate);
+                pci_dev_unlock(dev);
+        }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_bus_save_and_disable(struct pci_bus *bus)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &bus->devices, bus_list) {
+                pci_dev_save_and_disable(dev);
+                if (dev->subordinate)
+                        pci_bus_save_and_disable(dev->subordinate);
+        }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_bus_restore(struct pci_bus *bus)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &bus->devices, bus_list) {
+                pci_dev_restore(dev);
+                if (dev->subordinate)
+                        pci_bus_restore(dev->subordinate);
+        }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_slot_save_and_disable(struct pci_slot *slot)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+                if (!dev->slot || dev->slot != slot)
+                        continue;
+                pci_dev_save_and_disable(dev);
+                if (dev->subordinate)
+                        pci_bus_save_and_disable(dev->subordinate);
+        }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_slot_restore(struct pci_slot *slot)
+{
+        struct pci_dev *dev;
+
+        list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+                if (!dev->slot || dev->slot != slot)
+                        continue;
+                pci_dev_restore(dev);
+                if (dev->subordinate)
+                        pci_bus_restore(dev->subordinate);
+        }
+}
+
+static int pci_slot_reset(struct pci_slot *slot, int probe)
+{
+        int rc;
+
+        if (!slot)
+                return -ENOTTY;
+
+        if (!probe)
+                pci_slot_lock(slot);
+
+        might_sleep();
+
+        rc = pci_reset_hotplug_slot(slot->hotplug, probe);
+
+        if (!probe)
+                pci_slot_unlock(slot);
+
+        return rc;
+}
+
+/**
+ * pci_probe_reset_slot - probe whether a PCI slot can be reset
+ * @slot: PCI slot to probe
+ *
+ * Return 0 if slot can be reset, negative if a slot reset is not supported.
+ */
+int pci_probe_reset_slot(struct pci_slot *slot)
+{
+        return pci_slot_reset(slot, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
+
+/**
+ * pci_reset_slot - reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * A PCI bus may host multiple slots, each slot may support a reset mechanism
+ * independent of other slots.  For instance, some slots may support slot power
+ * control.  In the case of a 1:1 bus to slot architecture, this function may
+ * wrap the bus reset to avoid spurious slot related events such as hotplug.
+ * Generally a slot reset should be attempted before a bus reset.  All of the
+ * functions of the slot and any subordinate buses behind the slot are reset
+ * through this function.  PCI config space of all devices in the slot and
+ * behind the slot is saved before and restored after reset.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_slot(struct pci_slot *slot)
+{
+        int rc;
+
+        rc = pci_slot_reset(slot, 1);
+        if (rc)
+                return rc;
+
+        pci_slot_save_and_disable(slot);
+
+        rc = pci_slot_reset(slot, 0);
+
+        pci_slot_restore(slot);
+
+        return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_slot);
+
+static int pci_bus_reset(struct pci_bus *bus, int probe)
+{
+        if (!bus->self)
+                return -ENOTTY;
+
+        if (probe)
+                return 0;
+
+        pci_bus_lock(bus);
+
+        might_sleep();
+
+        pci_reset_bridge_secondary_bus(bus->self);
+
+        pci_bus_unlock(bus);
+
+        return 0;
+}
+
+/**
+ * pci_probe_reset_bus - probe whether a PCI bus can be reset
+ * @bus: PCI bus to probe
+ *
+ * Return 0 if bus can be reset, negative if a bus reset is not supported.
+ */
+int pci_probe_reset_bus(struct pci_bus *bus)
+{
+        return pci_bus_reset(bus, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+
+/**
+ * pci_reset_bus - reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Do a bus reset on the given bus and any subordinate buses, saving
+ * and restoring state of all devices.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_bus(struct pci_bus *bus)
+{
+        int rc;
+
+        rc = pci_bus_reset(bus, 1);
+        if (rc)
+                return rc;
+
+        pci_bus_save_and_disable(bus);
+
+        rc = pci_bus_reset(bus, 0);
+
+        pci_bus_restore(bus);
+
+        return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_bus);
+
 /**
  * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
  * @dev: PCI device to query
@@ -3525,8 +3947,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
         if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
                 int mps = pcie_get_mps(dev);
 
-                if (mps < 0)
-                        return mps;
                 if (mps < rq)
                         rq = mps;
         }
@@ -3543,7 +3963,6 @@ EXPORT_SYMBOL(pcie_set_readrq);
  * @dev: PCI device to query
 *
 * Returns maximum payload size in bytes
- * or appropriate error value.
 */
 int pcie_get_mps(struct pci_dev *dev)
 {
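
Usage illustration (not part of the diff above): a minimal sketch of how a driver's error-recovery path could use the reset interfaces exported by this change, preferring the narrower slot reset over a secondary bus reset and falling back to a per-function reset. pci_probe_reset_slot(), pci_reset_slot(), pci_probe_reset_bus(), pci_reset_bus() and pci_reset_function() are the exported functions shown in the patch; the helper name my_reset_pci_device() is hypothetical.

#include <linux/pci.h>

/* Hypothetical helper: reset @dev using the least disruptive mechanism. */
static int my_reset_pci_device(struct pci_dev *dev)
{
        /* Prefer a slot reset; it avoids disturbing devices in other slots. */
        if (dev->slot && !pci_probe_reset_slot(dev->slot))
                return pci_reset_slot(dev->slot);

        /* Otherwise try resetting the whole bus below the upstream bridge. */
        if (!pci_probe_reset_bus(dev->bus))
                return pci_reset_bus(dev->bus);

        /* Fall back to a per-function reset (FLR, PM reset, etc.). */
        return pci_reset_function(dev);
}

Both pci_reset_slot() and pci_reset_bus() save and restore the config space of every affected device, so the caller only has to re-establish driver-specific state afterwards.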