Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--   drivers/pci/pci.c   371
1 file changed, 304 insertions, 67 deletions
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e4c295a049..5b548aee9cb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -19,8 +19,8 @@
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
-#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"
@@ -29,7 +29,23 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
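The new pci_dev_d3_sleep() helper sleeps for the larger of the device's own d3_delay and the global pci_pm_d3_delay, so a fix-up can lengthen the D0->D3/D3->D0 transition delay for one device without slowing down every other device. A minimal sketch of such a quirk, using placeholder vendor/device IDs rather than a real entry:

#include <linux/pci.h>

/* Hypothetical quirk: this device needs more than the default 10 ms. */
static void quirk_example_slow_d3(struct pci_dev *dev)
{
	if (dev->d3_delay < 120)
		dev->d3_delay = 120;	/* milliseconds */
}
DECLARE_PCI_FIXUP_ENABLE(0x1234, 0x5678, quirk_example_slow_d3);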
@@ -47,6 +63,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * The default CLS is used if arch didn't set CLS explicitly and not
+ * all pci devices agree on the same value. Arch can override either
+ * the dfl or actual value as it sees fit. Don't forget this is
+ * measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
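pci_dfl_cache_line_size starts out at L1_CACHE_BYTES >> 2 (i.e. in 32-bit words), and architecture code may overwrite either the default or the actual pci_cache_line_size before devices are set up. A hedged sketch of such an override, with a hypothetical hook name:

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical arch hook: use 64-byte cache lines for CLS programming. */
static void __init example_arch_set_cls(void)
{
	pci_dfl_cache_line_size = 64 >> 2;	/* value is in 32-bit words */
}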
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -361,10 +386,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
const struct pci_bus *bus = dev->bus;
int i;
- struct resource *best = NULL;
+ struct resource *best = NULL, *r;
- for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
- struct resource *r = bus->resource[i];
+ pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
if (res->start && !(res->start >= r->start && res->end <= r->end))
@@ -373,8 +397,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
continue; /* Wrong type */
if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
return r; /* Exact match */
- if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
- best = r; /* Approximating prefetchable by non-prefetchable */
+ /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
+ if (r->flags & IORESOURCE_PREFETCH)
+ continue;
+ /* .. but we can put a prefetchable resource inside a non-prefetchable one */
+ if (!best)
+ best = r;
}
return best;
}
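pci_bus_for_each_resource() replaces the open-coded walk over bus->resource[] and avoids hard-coding the array bounds. A small illustrative use outside this file, not taken from the patch:

#include <linux/pci.h>

/* Print every window of a bus; purely illustrative. */
static void example_show_bus_windows(struct pci_bus *bus)
{
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (r)
			dev_printk(KERN_DEBUG, &bus->dev, "window %d: %pR\n", i, r);
	}
}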
@@ -434,6 +462,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
+static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+}
+
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@@ -509,7 +543,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
@@ -728,8 +762,8 @@ static int pci_save_pcie_state(struct pci_dev *dev)
u16 *cap;
u16 flags;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (pos <= 0)
+ pos = pci_pcie_cap(dev);
+ if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -837,7 +871,7 @@ pci_save_state(struct pci_dev *dev)
int i;
/* XXX: 100% dword access ok here? */
for (i = 0; i < 16; i++)
- pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+ pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
dev->state_saved = true;
if ((i = pci_save_pcie_state(dev)) != 0)
return i;
@@ -1140,11 +1174,11 @@ pci_disable_device(struct pci_dev *dev)
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1155,7 +1189,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
/**
* pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
@@ -1167,6 +1201,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
}
/**
+ * pci_check_pme_status - Check if given device has generated PME.
+ * @dev: Device to check.
+ *
+ * Check the PME status of the device and if set, clear it and clear PME enable
+ * (if set). Return 'true' if PME status and PME enable were both set or
+ * 'false' otherwise.
+ */
+bool pci_check_pme_status(struct pci_dev *dev)
+{
+ int pmcsr_pos;
+ u16 pmcsr;
+ bool ret = false;
+
+ if (!dev->pm_cap)
+ return false;
+
+ pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
+ pci_read_config_word(dev, pmcsr_pos, &pmcsr);
+ if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
+ return false;
+
+ /* Clear PME status. */
+ pmcsr |= PCI_PM_CTRL_PME_STATUS;
+ if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
+ /* Disable PME to avoid interrupt flood. */
+ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+ ret = true;
+ }
+
+ pci_write_config_word(dev, pmcsr_pos, pmcsr);
+
+ return ret;
+}
+
+/**
+ * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
+ * @dev: Device to handle.
+ * @ign: Ignored.
+ *
+ * Check if @dev has generated PME and queue a resume request for it in that
+ * case.
+ */
+static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+{
+ if (pci_check_pme_status(dev))
+ pm_request_resume(&dev->dev);
+ return 0;
+}
+
+/**
+ * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_pme_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_pme_wakeup, NULL);
+}
+
+/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
* @state: PCI state from which device will issue PME#.
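pci_check_pme_status() clears (and, if needed, disables) PME for one device, while pci_pme_wakeup_bus() resumes anything below a bus that has signalled PME. A hedged sketch of how a root-port PME interrupt handler might combine them; the handler name and the way the port is passed in are illustrative only:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static irqreturn_t example_pme_irq(int irq, void *context)
{
	struct pci_dev *port = context;	/* assumed: the signalling root port */

	/* Resume the port itself if its PME Status bit is set ... */
	if (pci_check_pme_status(port))
		pm_request_resume(&port->dev);

	/* ... and check every device in the hierarchy below it. */
	pci_pme_wakeup_bus(port->subordinate);

	return IRQ_HANDLED;
}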
@@ -1202,14 +1296,15 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+ dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
/**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected
* @state: PCI state from which device will issue wakeup events
+ * @runtime: True if the events are to be generated at run time
* @enable: True to enable event generation; false to disable
*
* This enables the device as a wakeup event source, or disables it.
@@ -1225,11 +1320,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ bool runtime, bool enable)
{
int ret = 0;
- if (enable && !device_may_wakeup(&dev->dev))
+ if (enable && !runtime && !device_may_wakeup(&dev->dev))
return -EINVAL;
/* Don't do the same thing twice in a row for one device. */
@@ -1249,19 +1345,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
pci_pme_active(dev, true);
else
ret = 1;
- error = platform_pci_sleep_wake(dev, true);
+ error = runtime ? platform_pci_run_wake(dev, true) :
+ platform_pci_sleep_wake(dev, true);
if (ret)
ret = error;
if (!ret)
dev->wakeup_prepared = true;
} else {
- platform_pci_sleep_wake(dev, false);
+ if (runtime)
+ platform_pci_run_wake(dev, false);
+ else
+ platform_pci_sleep_wake(dev, false);
pci_pme_active(dev, false);
dev->wakeup_prepared = false;
}
return ret;
}
+EXPORT_SYMBOL(__pci_enable_wake);
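Existing callers keep the old entry points; they are expected to become thin wrappers that pass runtime = false, while runtime PM code calls __pci_enable_wake() with runtime = true directly. A sketch of such a wrapper, assuming it lives in include/linux/pci.h:

static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
				  bool enable)
{
	return __pci_enable_wake(dev, state, false, enable);
}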
/**
* pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
@@ -1371,6 +1472,66 @@ int pci_back_from_sleep(struct pci_dev *dev)
}
/**
+ * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
+ * @dev: PCI device being suspended.
+ *
+ * Prepare @dev to generate wake-up events at run time and put it into a low
+ * power state.
+ */
+int pci_finish_runtime_suspend(struct pci_dev *dev)
+{
+ pci_power_t target_state = pci_target_state(dev);
+ int error;
+
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+ __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ __pci_enable_wake(dev, target_state, true, false);
+
+ return error;
+}
+
+/**
+ * pci_dev_run_wake - Check if device can generate run-time wake-up events.
+ * @dev: Device to check.
+ *
+ * Return true if the device itself is capable of generating wake-up events

+ * (through the platform or using the native PCIe PME) or if the device supports
+ * PME and one of its upstream bridges can generate wake-up events.
+ */
+bool pci_dev_run_wake(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+
+ if (device_run_wake(&dev->dev))
+ return true;
+
+ if (!dev->pme_support)
+ return false;
+
+ while (bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (device_run_wake(&bridge->dev))
+ return true;
+
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ return device_run_wake(bus->bridge);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+
+/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
*/
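pci_finish_runtime_suspend() is meant to run after the driver's own runtime-suspend callback: it arms run-time wake-up if pci_dev_run_wake() says the device can use it, then moves the device to its target low-power state. A hedged sketch of a caller; the function name and the saved-state step are illustrative:

#include <linux/pci.h>

static int example_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	/* The driver's runtime_suspend callback would have run already. */
	pci_save_state(pci_dev);

	/* Enable run-time wake-up if possible and enter the target state. */
	return pci_finish_runtime_suspend(pci_dev);
}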
@@ -1379,6 +1540,7 @@ void pci_pm_init(struct pci_dev *dev)
int pm;
u16 pmc;
+ device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
dev->pm_cap = 0;
@@ -1396,6 +1558,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
+ dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
@@ -1413,7 +1576,8 @@ void pci_pm_init(struct pci_dev *dev)
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
- dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1510,7 +1674,7 @@ void pci_enable_ari(struct pci_dev *dev)
u16 ctrl;
struct pci_dev *bridge;
- if (!dev->is_pcie || dev->devfn)
+ if (!pci_is_pcie(dev) || dev->devfn)
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1518,10 +1682,10 @@ void pci_enable_ari(struct pci_dev *dev)
return;
bridge = dev->bus->self;
- if (!bridge || !bridge->is_pcie)
+ if (!bridge || !pci_is_pcie(bridge))
return;
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(bridge);
if (!pos)
return;
@@ -1536,6 +1700,54 @@ void pci_enable_ari(struct pci_dev *dev)
bridge->ari_enabled = 1;
}
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+ pci_acs_enable = 1;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+ u16 ctrl;
+
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return;
+
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* Source Validation */
+ ctrl |= (cap & PCI_ACS_SV);
+
+ /* P2P Request Redirect */
+ ctrl |= (cap & PCI_ACS_RR);
+
+ /* P2P Completion Redirect */
+ ctrl |= (cap & PCI_ACS_CR);
+
+ /* Upstream Forwarding */
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+}
+
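pci_request_acs() only sets a flag; pci_enable_acs() is then expected to be called for each device during enumeration, turning on whatever subset of Source Validation, Request/Completion Redirect and Upstream Forwarding the hardware advertises. A sketch of the intended caller, assuming an IOMMU driver's init path with a hypothetical name:

#include <linux/init.h>
#include <linux/pci.h>

static int __init example_iommu_init(void)
{
	/*
	 * Ask the PCI core to enable ACS so peer-to-peer traffic is
	 * redirected upstream and cannot bypass the IOMMU.
	 */
	pci_request_acs();
	return 0;
}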
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
@@ -1669,9 +1881,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
return 0;
err_out:
- dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
- bar,
- pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+ dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
&pdev->resource[bar]);
return -EBUSY;
}
@@ -1866,31 +2076,6 @@ void pci_clear_master(struct pci_dev *dev)
__pci_set_master(dev, false);
}
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
-#ifndef PCI_CACHE_LINE_BYTES
-#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
-#endif
-
-/* This can be overridden by arch code. */
-/* Don't forget this is measured in 32-bit words, not bytes */
-u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
-
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
* @dev: the PCI device for which MWI is to be enabled
@@ -1901,13 +2086,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-static int
-pci_set_cacheline_size(struct pci_dev *dev)
+int pci_set_cacheline_size(struct pci_dev *dev)
{
u8 cacheline_size;
if (!pci_cache_line_size)
- return -EINVAL; /* The system doesn't support MWI. */
+ return -EINVAL;
/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
equal to or multiple of the right value. */
@@ -1928,6 +2112,24 @@ pci_set_cacheline_size(struct pci_dev *dev)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
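With pci_set_cacheline_size() exported, code outside the MWI path can program the CACHE_LINE_SIZE register directly, for instance from a driver probe. A hedged sketch with a hypothetical driver function:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	/* Returns -EINVAL if the register cannot be programmed. */
	if (pci_set_cacheline_size(pdev))
		dev_dbg(&pdev->dev, "cache line size left unprogrammed\n");

	return 0;
}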
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2062,6 +2264,7 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
dev->dma_mask = mask;
+ dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));
return 0;
}
@@ -2073,6 +2276,7 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
dev->dev.coherent_dma_mask = mask;
+ dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));
return 0;
}
@@ -2099,9 +2303,9 @@ static int pcie_flr(struct pci_dev *dev, int probe)
int i;
int pos;
u32 cap;
- u16 status;
+ u16 status, control;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTTY;
@@ -2126,8 +2330,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
"proceeding with reset anyway\n");
clear:
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+
msleep(100);
return 0;
@@ -2191,12 +2397,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
return 0;
}
@@ -2240,6 +2446,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
down(&dev->dev.sem);
}
+ rc = pci_dev_specific_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pcie_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
@@ -2450,7 +2660,7 @@ int pcie_get_readrq(struct pci_dev *dev)
int ret, cap;
u16 ctl;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
return -EINVAL;
@@ -2480,7 +2690,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
v = (ffs(rq) - 8) << 12;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
goto out;
@@ -2540,7 +2750,24 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return reg;
}
- dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+ dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
+ return 0;
+}
+
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+ arch_set_vga_state = func; /* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ if (arch_set_vga_state)
+ return arch_set_vga_state(dev, decode, command_bits,
+ change_bridge);
return 0;
}
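Architectures that need extra chipset programming to route legacy VGA can register a handler, which pci_set_vga_state() calls before touching the command registers. A sketch of the registration, with hypothetical names:

#include <linux/init.h>
#include <linux/pci.h>

static int example_arch_set_vga_state(struct pci_dev *dev, bool decode,
				      unsigned int command_bits,
				      bool change_bridge)
{
	/* Program platform-specific VGA routing here; return 0 on success. */
	return 0;
}

static int __init example_arch_pci_init(void)
{
	pci_register_set_vga_state(example_arch_set_vga_state);
	return 0;
}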
@@ -2557,9 +2784,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
+ int rc;
WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ /* ARCH specific VGA enables */
+ rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+ if (rc)
+ return rc;
+
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (decode == true)
cmd |= command_bits;
@@ -2590,7 +2823,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
-spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
@@ -2723,6 +2956,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
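pci_fixup_cardbus() is a weak, empty default, so a platform that needs board-specific CardBus setup can simply provide its own definition. A hedged sketch of such an override; the per-device work is illustrative:

#include <linux/pci.h>

void pci_fixup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Apply board-specific fix-ups to each device behind
		 * the CardBus bridge here. */
	}
}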
static int __init pci_setup(char *str)
{
while (str) {
@@ -2769,6 +3007,7 @@ EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
+EXPORT_SYMBOL(pci_register_set_vga_state);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -2795,10 +3034,8 @@ EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
-