author      Linus Torvalds <torvalds@linux-foundation.org>    2009-09-16 07:49:54 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>    2009-09-16 07:49:54 -0700
commit      4406c56d0a4da7a37b9180abeaece6cd00bcc874 (patch)
tree        65a85fa73a25d24cbed6d163fdcf8df1b934a0be /drivers
parent      6b7b352f2102e21f9d8f38e932f01d9c5705c073 (diff)
parent      5e3573db2bd5db6925159279d99576a4635bdb66 (diff)
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (75 commits)
  PCI hotplug: clean up acpi_run_hpp()
  PCI hotplug: acpiphp: use generic pci_configure_slot()
  PCI hotplug: shpchp: use generic pci_configure_slot()
  PCI hotplug: pciehp: use generic pci_configure_slot()
  PCI hotplug: add pci_configure_slot()
  PCI hotplug: clean up acpi_get_hp_params_from_firmware() interface
  PCI hotplug: acpiphp: don't cache hotplug_params in acpiphp_bridge
  PCI hotplug: acpiphp: remove superfluous _HPP/_HPX evaluation
  PCI: Clear saved_state after the state has been restored
  PCI PM: Return error codes from pci_pm_resume()
  PCI: use dev_printk in quirk messages
  PCI / PCIe portdrv: Fix pcie_portdrv_slot_reset()
  PCI Hotplug: convert acpi_pci_detect_ejectable() to take an acpi_handle
  PCI Hotplug: acpiphp: find bridges the easy way
  PCI: pcie portdrv: remove unused variable
  PCI / ACPI PM: Propagate wake-up enable for devices w/o ACPI support
  ACPI PM: Replace wakeup.prepared with reference counter
  PCI PM: Introduce device flag wakeup_prepared
  PCI / ACPI PM: Rework some debug messages
  PCI PM: Simplify PCI wake-up code
  ...

Fixed up conflict in arch/powerpc/kernel/pci_64.c due to OF device tree
scanning having been moved and merged for the 32- and 64-bit cases.  The
'needs_freset' initialization added in 6e19314cc ("PCI/powerpc: support
PCIe fundamental reset") is now in arch/powerpc/kernel/pci_of_scan.c.
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c | 17
-rw-r--r--  drivers/acpi/power.c | 58
-rw-r--r--  drivers/acpi/scan.c | 1
-rw-r--r--  drivers/acpi/sleep.c | 12
-rw-r--r--  drivers/acpi/wakeup.c | 4
-rw-r--r--  drivers/gpu/Makefile | 2
-rw-r--r--  drivers/gpu/vga/Kconfig | 10
-rw-r--r--  drivers/gpu/vga/Makefile | 1
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 1205
-rw-r--r--  drivers/pci/Makefile | 3
-rw-r--r--  drivers/pci/hotplug/Makefile | 2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 117
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 3
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 187
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 3
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 9
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 7
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 10
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 137
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 187
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 9
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 62
-rw-r--r--  drivers/pci/legacy.c | 34
-rw-r--r--  drivers/pci/msi.c | 283
-rw-r--r--  drivers/pci/pci-acpi.c | 29
-rw-r--r--  drivers/pci/pci-driver.c | 132
-rw-r--r--  drivers/pci/pci-stub.c | 45
-rw-r--r--  drivers/pci/pci-sysfs.c | 37
-rw-r--r--  drivers/pci/pci.c | 106
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 25
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 22
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 34
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 107
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 190
-rw-r--r--  drivers/pci/pcie/aspm.c | 495
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 6
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 1
-rw-r--r--  drivers/pci/probe.c | 33
-rw-r--r--  drivers/pci/quirks.c | 36
-rw-r--r--  drivers/pci/search.c | 31
-rw-r--r--  drivers/pci/setup-bus.c | 22
-rw-r--r--  drivers/pci/setup-res.c | 1
-rw-r--r--  drivers/pcmcia/yenta_socket.c | 16
-rw-r--r--  drivers/video/Kconfig | 2
46 files changed, 2497 insertions, 1243 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 55b5b90c2a4..31b961c2f22 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -61,20 +61,6 @@ static struct acpi_driver acpi_pci_root_driver = {
},
};
-struct acpi_pci_root {
- struct list_head node;
- struct acpi_device *device;
- struct pci_bus *bus;
- u16 segment;
- u8 bus_nr;
-
- u32 osc_support_set; /* _OSC state of support bits */
- u32 osc_control_set; /* _OSC state of control bits */
- u32 osc_control_qry; /* the latest _OSC query result */
-
- u32 osc_queried:1; /* has _OSC control been queried? */
-};
-
static LIST_HEAD(acpi_pci_roots);
static struct acpi_pci_driver *sub_driver;
@@ -317,7 +303,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
return status;
}
-static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
+struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_pci_root *root;
@@ -327,6 +313,7 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
}
return NULL;
}
+EXPORT_SYMBOL_GPL(acpi_pci_find_root);
struct acpi_handle_node {
struct list_head node;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index d74365d4a6e..5a09bf392ec 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -44,6 +44,8 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#include "sleep.h"
+
#define _COMPONENT ACPI_POWER_COMPONENT
ACPI_MODULE_NAME("power");
#define ACPI_POWER_CLASS "power_resource"
@@ -361,17 +363,15 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
- int i, err;
+ int i, err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
- /*
- * Do not execute the code below twice in a row without calling
- * acpi_disable_wakeup_device_power() in between for the same device
- */
- if (dev->wakeup.flags.prepared)
- return 0;
+ mutex_lock(&acpi_device_lock);
+
+ if (dev->wakeup.prepare_count++)
+ goto out;
/* Open power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
@@ -379,7 +379,8 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
- return -ENODEV;
+ err = -ENODEV;
+ goto err_out;
}
}
@@ -388,9 +389,13 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
* in arbitrary power state afterwards.
*/
err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
- if (!err)
- dev->wakeup.flags.prepared = 1;
+ err_out:
+ if (err)
+ dev->wakeup.prepare_count = 0;
+
+ out:
+ mutex_unlock(&acpi_device_lock);
return err;
}
@@ -402,35 +407,42 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
*/
int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{
- int i, ret;
+ int i, err = 0;
if (!dev || !dev->wakeup.flags.valid)
return -EINVAL;
+ mutex_lock(&acpi_device_lock);
+
+ if (--dev->wakeup.prepare_count > 0)
+ goto out;
+
/*
- * Do not execute the code below twice in a row without calling
- * acpi_enable_wakeup_device_power() in between for the same device
+ * Executing the code below even if prepare_count is already zero when
+ * the function is called may be useful, for example for initialisation.
*/
- if (!dev->wakeup.flags.prepared)
- return 0;
+ if (dev->wakeup.prepare_count < 0)
+ dev->wakeup.prepare_count = 0;
- dev->wakeup.flags.prepared = 0;
-
- ret = acpi_device_sleep_wake(dev, 0, 0, 0);
- if (ret)
- return ret;
+ err = acpi_device_sleep_wake(dev, 0, 0, 0);
+ if (err)
+ goto out;
/* Close power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) {
- ret = acpi_power_off_device(dev->wakeup.resources.handles[i], dev);
+ int ret = acpi_power_off_device(
+ dev->wakeup.resources.handles[i], dev);
if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0;
- return -ENODEV;
+ err = -ENODEV;
+ goto out;
}
}
- return ret;
+ out:
+ mutex_unlock(&acpi_device_lock);
+ return err;
}
/* --------------------------------------------------------------------------
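The power.c hunks above replace the single wakeup.flags.prepared flag with a prepare_count reference counter taken under acpi_device_lock, so nested enable/disable calls balance out and only the first enable and the last disable actually touch the wakeup power resources. A minimal sketch of that counting discipline, using made-up names (struct wakeup_ctx and the two helpers are illustrative, not this patch's code):

#include <linux/mutex.h>

/* Hedged sketch of the prepare_count idea; initialise ctx->lock with
 * mutex_init() before use.  Names here are illustrative only. */
struct wakeup_ctx {
        struct mutex lock;              /* plays the role of acpi_device_lock */
        int prepare_count;              /* outstanding enable requests */
};

static int wakeup_enable(struct wakeup_ctx *ctx)
{
        int err = 0;

        mutex_lock(&ctx->lock);
        if (ctx->prepare_count++ == 0) {
                /* Only the 0 -> 1 transition does real work: open the power
                 * resources and arm the wake mechanism here. */
                err = 0;        /* placeholder for the real enable path */
        }
        if (err)
                ctx->prepare_count = 0; /* as in the patch: reset on failure */
        mutex_unlock(&ctx->lock);
        return err;
}

static void wakeup_disable(struct wakeup_ctx *ctx)
{
        mutex_lock(&ctx->lock);
        if (--ctx->prepare_count > 0)   /* other users remain, nothing to do */
                goto out;
        if (ctx->prepare_count < 0)     /* tolerate unbalanced calls, as above */
                ctx->prepare_count = 0;
        /* 1 -> 0 transition: disarm wake and close the power resources here. */
out:
        mutex_unlock(&ctx->lock);
}

Resetting the counter to zero on both the failure path and an unbalanced disable is what lets acpi_disable_wakeup_device_power() be called during initialisation, before any enable, without wedging the counter.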
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 781435d7e36..318b1ea7a5b 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -781,6 +781,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
kfree(buffer.pointer);
device->wakeup.flags.valid = 1;
+ device->wakeup.prepare_count = 0;
/* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
* The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 42159a28f43..feece693d77 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -689,19 +689,25 @@ int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
acpi_handle handle;
struct acpi_device *adev;
+ int error;
- if (!device_may_wakeup(dev))
+ if (!device_can_wakeup(dev))
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(dev);
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
- printk(KERN_DEBUG "ACPI handle has no context!\n");
+ dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__);
return -ENODEV;
}
- return enable ?
+ error = enable ?
acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
acpi_disable_wakeup_device_power(adev);
+ if (!error)
+ dev_info(dev, "wake-up capability %s by ACPI\n",
+ enable ? "enabled" : "disabled");
+
+ return error;
}
#endif
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 88725dcdf8b..e0ee0c036f5 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -68,7 +68,7 @@ void acpi_enable_wakeup_device(u8 sleep_state)
/* If users want to disable run-wake GPE,
* we only disable it for wake and leave it for runtime
*/
- if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+ if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
|| sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) {
/* set_gpe_type will disable GPE, leave it like that */
@@ -100,7 +100,7 @@ void acpi_disable_wakeup_device(u8 sleep_state)
if (!dev->wakeup.flags.valid)
continue;
- if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+ if ((!dev->wakeup.state.enabled && !dev->wakeup.prepare_count)
|| sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) {
acpi_set_gpe_type(dev->wakeup.gpe_device,
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index de566cf0414..30879df3dae 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
new file mode 100644
index 00000000000..790e675b13e
--- /dev/null
+++ b/drivers/gpu/vga/Kconfig
@@ -0,0 +1,10 @@
+config VGA_ARB
+ bool "VGA Arbitration" if EMBEDDED
+ default y
+ depends on PCI
+ help
+ Some "legacy" VGA devices implemented on PCI typically have the same
+ hard-decoded addresses as they did on ISA. When multiple PCI devices
+ are accessed at the same time they need some kind of coordination. Please
+ see Documentation/vgaarbiter.txt for more details. Select this to
+ enable the VGA arbiter.
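The mechanism the help text refers to is added by drivers/gpu/vga/vgaarb.c later in this diff. As a hedged sketch of how a GPU driver that can stop claiming the legacy VGA ranges would take part in the arbitration (the mydrv_* names and the callback body are invented; only the vga_client_register() signature comes from this patch):

#include <linux/pci.h>
#include <linux/vgaarb.h>

/* Illustrative client; a real driver would reprogram its hardware here and
 * then report which resources it still decodes. */
static unsigned int mydrv_vga_set_decode(void *cookie, bool enable_decode)
{
        if (enable_decode)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int mydrv_register_with_arbiter(struct pci_dev *pdev)
{
        /* The irq_set_state callback may be NULL when the driver does not
         * need its interrupts gated while it loses the legacy resources. */
        return vga_client_register(pdev, pdev, NULL, mydrv_vga_set_decode);
}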
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
new file mode 100644
index 00000000000..7cc8c1ed645
--- /dev/null
+++ b/drivers/gpu/vga/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VGA_ARB) += vgaarb.o
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
new file mode 100644
index 00000000000..1ac0c93603c
--- /dev/null
+++ b/drivers/gpu/vga/vgaarb.c
@@ -0,0 +1,1205 @@
+/*
+ * vgaarb.c
+ *
+ * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
+ * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
+ *
+ * Implements the VGA arbitration. For details refer to
+ * Documentation/vgaarbiter.txt
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/vgaarb.h>
+
+static void vga_arbiter_notify_clients(void);
+/*
+ * We keep a list of all vga devices in the system to speed
+ * up the various operations of the arbiter
+ */
+struct vga_device {
+ struct list_head list;
+ struct pci_dev *pdev;
+ unsigned int decodes; /* what it decodes */
+ unsigned int owns; /* what it owns */
+ unsigned int locks; /* what it locks */
+ unsigned int io_lock_cnt; /* legacy IO lock count */
+ unsigned int mem_lock_cnt; /* legacy MEM lock count */
+ unsigned int io_norm_cnt; /* normal IO count */
+ unsigned int mem_norm_cnt; /* normal MEM count */
+
+ /* allow IRQ enable/disable hook */
+ void *cookie;
+ void (*irq_set_state)(void *cookie, bool enable);
+ unsigned int (*set_vga_decode)(void *cookie, bool decode);
+};
+
+static LIST_HEAD(vga_list);
+static int vga_count, vga_decode_count;
+static bool vga_arbiter_used;
+static DEFINE_SPINLOCK(vga_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);
+
+
+static const char *vga_iostate_to_str(unsigned int iostate)
+{
+ /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
+ iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ switch (iostate) {
+ case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM:
+ return "io+mem";
+ case VGA_RSRC_LEGACY_IO:
+ return "io";
+ case VGA_RSRC_LEGACY_MEM:
+ return "mem";
+ }
+ return "none";
+}
+
+static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
+{
+ /* we could in theory hand out locks on IO and mem
+ * separately to userspace but it can cause deadlocks */
+ if (strncmp(buf, "none", 4) == 0) {
+ *io_state = VGA_RSRC_NONE;
+ return 1;
+ }
+
+ /* XXX We're not checking the str_size! */
+ if (strncmp(buf, "io+mem", 6) == 0)
+ goto both;
+ else if (strncmp(buf, "io", 2) == 0)
+ goto both;
+ else if (strncmp(buf, "mem", 3) == 0)
+ goto both;
+ return 0;
+both:
+ *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ return 1;
+}
+
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+/* this is only used as a cookie - it should not be dereferenced */
+static struct pci_dev *vga_default;
+#endif
+
+static void vga_arb_device_card_gone(struct pci_dev *pdev);
+
+/* Find somebody in our list */
+static struct vga_device *vgadev_find(struct pci_dev *pdev)
+{
+ struct vga_device *vgadev;
+
+ list_for_each_entry(vgadev, &vga_list, list)
+ if (pdev == vgadev->pdev)
+ return vgadev;
+ return NULL;
+}
+
+/* Returns the default VGA device (vgacon's babe) */
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+struct pci_dev *vga_default_device(void)
+{
+ return vga_default;
+}
+#endif
+
+static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
+{
+ if (vgadev->irq_set_state)
+ vgadev->irq_set_state(vgadev->cookie, state);
+}
+
+
+/* If we don't ever use VGA arb we should avoid
+ turning off anything anywhere due to old X servers getting
+ confused about the boot device not being VGA */
+static void vga_check_first_use(void)
+{
+ /* we should inform all GPUs in the system that
+ * VGA arb has occurred and to try and disable resources
+ * if they can */
+ if (!vga_arbiter_used) {
+ vga_arbiter_used = true;
+ vga_arbiter_notify_clients();
+ }
+}
+
+static struct vga_device *__vga_tryget(struct vga_device *vgadev,
+ unsigned int rsrc)
+{
+ unsigned int wants, legacy_wants, match;
+ struct vga_device *conflict;
+ unsigned int pci_bits;
+ /* Account for "normal" resources to lock. If we decode the legacy
+ * counterpart, we need to request it as well
+ */
+ if ((rsrc & VGA_RSRC_NORMAL_IO) &&
+ (vgadev->decodes & VGA_RSRC_LEGACY_IO))
+ rsrc |= VGA_RSRC_LEGACY_IO;
+ if ((rsrc & VGA_RSRC_NORMAL_MEM) &&
+ (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
+ rsrc |= VGA_RSRC_LEGACY_MEM;
+
+ pr_devel("%s: %d\n", __func__, rsrc);
+ pr_devel("%s: owns: %d\n", __func__, vgadev->owns);
+
+ /* Check what resources we need to acquire */
+ wants = rsrc & ~vgadev->owns;
+
+ /* We already own everything, just mark locked & bye bye */
+ if (wants == 0)
+ goto lock_them;
+
+ /* We don't need to request a legacy resource, we just enable
+ * appropriate decoding and go
+ */
+ legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
+ if (legacy_wants == 0)
+ goto enable_them;
+
+ /* Ok, we don't, let's find out how we need to kick off */
+ list_for_each_entry(conflict, &vga_list, list) {
+ unsigned int lwants = legacy_wants;
+ unsigned int change_bridge = 0;
+
+ /* Don't conflict with myself */
+ if (vgadev == conflict)
+ continue;
+
+ /* Check if the architecture allows a conflict between those
+ * 2 devices or if they are on separate domains
+ */
+ if (!vga_conflicts(vgadev->pdev, conflict->pdev))
+ continue;
+
+ /* We have a possible conflict. before we go further, we must
+ * check if we sit on the same bus as the conflicting device.
+ * if we don't, then we must tie both IO and MEM resources
+ * together since there is only a single bit controlling
+ * VGA forwarding on P2P bridges
+ */
+ if (vgadev->pdev->bus != conflict->pdev->bus) {
+ change_bridge = 1;
+ lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ }
+
+ /* Check if the guy has a lock on the resource. If he does,
+ * return the conflicting entry
+ */
+ if (conflict->locks & lwants)
+ return conflict;
+
+ /* Ok, now check if he owns the resource we want. We don't need
+ * to check "decodes" since it should be impossible to own
+ * legacy resources you don't decode unless I have a bug
+ * in this code...
+ */
+ WARN_ON(conflict->owns & ~conflict->decodes);
+ match = lwants & conflict->owns;
+ if (!match)
+ continue;
+
+ /* looks like he doesn't have a lock, we can steal
+ * them from him
+ */
+ vga_irq_set_state(conflict, false);
+
+ pci_bits = 0;
+ if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ pci_bits |= PCI_COMMAND_MEMORY;
+ if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ pci_bits |= PCI_COMMAND_IO;
+
+ pci_set_vga_state(conflict->pdev, false, pci_bits,
+ change_bridge);
+ conflict->owns &= ~lwants;
+ /* If he also owned non-legacy, that is no longer the case */
+ if (lwants & VGA_RSRC_LEGACY_MEM)
+ conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
+ if (lwants & VGA_RSRC_LEGACY_IO)
+ conflict->owns &= ~VGA_RSRC_NORMAL_IO;
+ }
+
+enable_them:
+ /* ok dude, we got it, everybody conflicting has been disabled, let's
+ * enable us. Make sure we don't mark a bit in "owns" that we don't
+ * also have in "decodes". We can lock resources we don't decode but
+ * not own them.
+ */
+ pci_bits = 0;
+ if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ pci_bits |= PCI_COMMAND_MEMORY;
+ if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ pci_bits |= PCI_COMMAND_IO;
+ pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK));
+
+ vga_irq_set_state(vgadev, true);
+ vgadev->owns |= (wants & vgadev->decodes);
+lock_them:
+ vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
+ if (rsrc & VGA_RSRC_LEGACY_IO)
+ vgadev->io_lock_cnt++;
+ if (rsrc & VGA_RSRC_LEGACY_MEM)
+ vgadev->mem_lock_cnt++;
+ if (rsrc & VGA_RSRC_NORMAL_IO)
+ vgadev->io_norm_cnt++;
+ if (rsrc & VGA_RSRC_NORMAL_MEM)
+ vgadev->mem_norm_cnt++;
+
+ return NULL;
+}
+
+static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
+{
+ unsigned int old_locks = vgadev->locks;
+
+ pr_devel("%s\n", __func__);
+
+ /* Update our counters, and account for equivalent legacy resources
+ * if we decode them
+ */
+ if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
+ vgadev->io_norm_cnt--;
+ if (vgadev->decodes & VGA_RSRC_LEGACY_IO)
+ rsrc |= VGA_RSRC_LEGACY_IO;
+ }
+ if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) {
+ vgadev->mem_norm_cnt--;
+ if (vgadev->decodes & VGA_RSRC_LEGACY_MEM)
+ rsrc |= VGA_RSRC_LEGACY_MEM;
+ }
+ if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0)
+ vgadev->io_lock_cnt--;
+ if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
+ vgadev->mem_lock_cnt--;
+
+ /* Just clear lock bits, we do lazy operations so we don't really
+ * have to bother about anything else at this point
+ */
+ if (vgadev->io_lock_cnt == 0)
+ vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
+ if (vgadev->mem_lock_cnt == 0)
+ vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;
+
+ /* Kick the wait queue in case somebody was waiting if we actually
+ * released something
+ */
+ if (old_locks != vgadev->locks)
+ wake_up_all(&vga_wait_queue);
+}
+
+int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
+{
+ struct vga_device *vgadev, *conflict;
+ unsigned long flags;
+ wait_queue_t wait;
+ int rc = 0;
+
+ vga_check_first_use();
+ /* The one who calls us should check for this, but let's be sure... */
+ if (pdev == NULL)
+ pdev = vga_default_device();
+ if (pdev == NULL)
+ return 0;
+
+ for (;;) {
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ spin_unlock_irqrestore(&vga_lock, flags);
+ rc = -ENODEV;
+ break;
+ }
+ conflict = __vga_tryget(vgadev, rsrc);
+ spin_unlock_irqrestore(&vga_lock, flags);
+ if (conflict == NULL)
+ break;
+
+
+ /* We have a conflict, we wait until somebody kicks the
+ * work queue. Currently we have one work queue that we
+ * kick each time some resources are released, but it would
+ * be fairly easy to have a per device one so that we only
+ * need to attach to the conflicting device
+ */
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&vga_wait_queue, &wait);
+ set_current_state(interruptible ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ schedule();
+ remove_wait_queue(&vga_wait_queue, &wait);
+ set_current_state(TASK_RUNNING);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(vga_get);
+
+int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ int rc = 0;
+
+ vga_check_first_use();
+
+ /* The one who calls us should check for this, but let's be sure... */
+ if (pdev == NULL)
+ pdev = vga_default_device();
+ if (pdev == NULL)
+ return 0;
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ rc = -ENODEV;
+ goto bail;
+ }
+ if (__vga_tryget(vgadev, rsrc))
+ rc = -EBUSY;
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(vga_tryget);
+
+void vga_put(struct pci_dev *pdev, unsigned int rsrc)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+
+ /* The one who calls us should check for this, but let's be sure... */
+ if (pdev == NULL)
+ pdev = vga_default_device();
+ if (pdev == NULL)
+ return;
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL)
+ goto bail;
+ __vga_put(vgadev, rsrc);
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+}
+EXPORT_SYMBOL(vga_put);
+
+/*
+ * Currently, we assume that the "initial" setup of the system is
+ * not sane, that is, we come up with conflicting devices and let
+ * the arbiter's clients decide whether each device decodes the
+ * legacy resources or not.
+ */
+static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
+
+ /* Only deal with VGA class devices */
+ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return false;
+
+ /* Allocate structure */
+ vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
+ if (vgadev == NULL) {
+ pr_err("vgaarb: failed to allocate pci device\n");
+ /* What to do on allocation failure ? For now, let's
+ * just do nothing, I'm not sure there is anything saner
+ * to be done
+ */
+ return false;
+ }
+
+ memset(vgadev, 0, sizeof(*vgadev));
+
+ /* Take lock & check for duplicates */
+ spin_lock_irqsave(&vga_lock, flags);
+ if (vgadev_find(pdev) != NULL) {
+ BUG_ON(1);
+ goto fail;
+ }
+ vgadev->pdev = pdev;
+
+ /* By default, assume we decode everything */
+ vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+
+ /* by default mark it as decoding */
+ vga_decode_count++;
+ /* Mark that we "own" resources based on our enables, we will
+ * clear that below if the bridge isn't forwarding
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_IO)
+ vgadev->owns |= VGA_RSRC_LEGACY_IO;
+ if (cmd & PCI_COMMAND_MEMORY)
+ vgadev->owns |= VGA_RSRC_LEGACY_MEM;
+
+ /* Check if VGA cycles can get down to us */
+ bus = pdev->bus;
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ u16 l;
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &l);
+ if (!(l & PCI_BRIDGE_CTL_VGA)) {
+ vgadev->owns = 0;
+ break;
+ }
+ }
+ bus = bus->parent;
+ }
+
+ /* Deal with VGA default device. Use first enabled one
+ * by default if arch doesn't have its own hook
+ */
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+ if (vga_default == NULL &&
+ ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+ vga_default = pci_dev_get(pdev);
+#endif
+
+ /* Add to the list */
+ list_add(&vgadev->list, &vga_list);
+ vga_count++;
+ pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+ pci_name(pdev),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns),
+ vga_iostate_to_str(vgadev->locks));
+
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return true;
+fail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ kfree(vgadev);
+ return false;
+}
+
+static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ ret = false;
+ goto bail;
+ }
+
+ if (vga_default == pdev) {
+ pci_dev_put(vga_default);
+ vga_default = NULL;
+ }
+
+ if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
+ vga_decode_count--;
+
+ /* Remove entry from list */
+ list_del(&vgadev->list);
+ vga_count--;
+ /* Notify userland driver that the device is gone so it discards
+ * its copies of the pci_dev pointer
+ */
+ vga_arb_device_card_gone(pdev);
+
+ /* Wake up all possible waiters */
+ wake_up_all(&vga_wait_queue);
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ kfree(vgadev);
+ return ret;
+}
+
+/* this is called with the lock */
+static inline void vga_update_device_decodes(struct vga_device *vgadev,
+ int new_decodes)
+{
+ int old_decodes;
+ struct vga_device *new_vgadev, *conflict;
+
+ old_decodes = vgadev->decodes;
+ vgadev->decodes = new_decodes;
+
+ pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+ pci_name(vgadev->pdev),
+ vga_iostate_to_str(old_decodes),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns));
+
+
+ /* if we own the decodes we should move them along to
+ another card */
+ if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
+ /* set us to own nothing */
+ vgadev->owns &= ~old_decodes;
+ list_for_each_entry(new_vgadev, &vga_list, list) {
+ if ((new_vgadev != vgadev) &&
+ (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
+ pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
+ conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
+ if (!conflict)
+ __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
+ break;
+ }
+ }
+ }
+
+ /* change decodes counter */
+ if (old_decodes != new_decodes) {
+ if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
+ vga_decode_count++;
+ else
+ vga_decode_count--;
+ }
+}
+
+void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+
+ decodes &= VGA_RSRC_LEGACY_MASK;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL)
+ goto bail;
+
+ /* don't let userspace futz with kernel driver decodes */
+ if (userspace && vgadev->set_vga_decode)
+ goto bail;
+
+ /* update the device decodes + counter */
+ vga_update_device_decodes(vgadev, decodes);
+
+ /* XXX if somebody is going from "doesn't decode" to "decodes" state
+ * here, additional care must be taken as we may have pending
+ * ownership of the non-legacy region ...
+ */
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+}
+
+void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
+{
+ __vga_set_legacy_decoding(pdev, decodes, false);
+}
+EXPORT_SYMBOL(vga_set_legacy_decoding);
+
+/* call with NULL to unregister */
+int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool decode))
+{
+ int ret = -1;
+ struct vga_device *vgadev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (!vgadev)
+ goto bail;
+
+ vgadev->irq_set_state = irq_set_state;
+ vgadev->set_vga_decode = set_vga_decode;
+ vgadev->cookie = cookie;
+ ret = 0;
+
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return ret;
+
+}
+EXPORT_SYMBOL(vga_client_register);
+
+/*
+ * Char driver implementation
+ *
+ * Semantics is:
+ *
+ * open : open user instance of the arbiter. By default, it is
+ * attached to the default VGA device of the system.
+ *
+ * close : close user instance, release locks
+ *
+ * read : return a string indicating the status of the target.
+ * an IO state string is of the form {io,mem,io+mem,none},
+ * mc and ic are respectively mem and io lock counts (for
+ * debugging/diagnostic only). "decodes" indicate what the
+ * card currently decodes, "owns" indicates what is currently
+ * enabled on it, and "locks" indicates what is locked by this
+ * card. If the card is unplugged, we get "invalid" then for
+ * card_ID and an -ENODEV error is returned for any command
+ * until a new card is targeted
+ *
+ * "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
+ *
+ * write : write a command to the arbiter. List of commands is:
+ *
+ * target <card_ID> : switch target to card <card_ID> (see below)
+ * lock <io_state> : acquires locks on target ("none" is invalid io_state)
+ * trylock <io_state> : non-blocking acquire locks on target
+ * unlock <io_state> : release locks on target
+ * unlock all : release all locks on target held by this user
+ * decodes <io_state> : set the legacy decoding attributes for the card
+ *
+ * poll : event if something changes on any card (not just the target)
+ *
+ * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
+ * to go back to the system default card (TODO: not implemented yet).
+ * Currently, only PCI is supported as a prefix, but the userland API may
+ * support other bus types in the future, even if the current kernel
+ * implementation doesn't.
+ *
+ * Note about locks:
+ *
+ * The driver keeps track of which user has what locks on which card. It
+ * supports stacking, like the kernel one. This complexifies the implementation
+ * a bit, but makes the arbiter more tolerant to userspace problems and able
+ * to properly cleanup in all cases when a process dies.
+ * Currently, a max of 16 cards simultaneously can have locks issued from
+ * userspace for a given user (file descriptor instance) of the arbiter.
+ *
+ * If a device is hot-plugged or hot-unplugged, a hook inside the module is
+ * notified of it being added to or removed from the system, and the device
+ * is automatically added to or removed from the arbiter.
+ */
+
+#define MAX_USER_CARDS 16
+#define PCI_INVALID_CARD ((struct pci_dev *)-1UL)
+
+/*
+ * Each user has an array of these, tracking which cards have locks
+ */
+struct vga_arb_user_card {
+ struct pci_dev *pdev;
+ unsigned int mem_cnt;
+ unsigned int io_cnt;
+};
+
+struct vga_arb_private {
+ struct list_head list;
+ struct pci_dev *target;
+ struct vga_arb_user_card cards[MAX_USER_CARDS];
+ spinlock_t lock;
+};
+
+static LIST_HEAD(vga_user_list);
+static DEFINE_SPINLOCK(vga_user_lock);
+
+
+/*
+ * This function gets a string in the format: "PCI:domain:bus:dev.fn" and
+ * returns the respective values. If the string is not in this format,
+ * it returns 0.
+ */
+static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
+ unsigned int *bus, unsigned int *devfn)
+{
+ int n;
+ unsigned int slot, func;
+
+
+ n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
+ if (n != 4)
+ return 0;
+
+ *devfn = PCI_DEVFN(slot, func);
+
+ return 1;
+}
+
+static ssize_t vga_arb_read(struct file *file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ struct vga_arb_private *priv = file->private_data;
+ struct vga_device *vgadev;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ size_t len;
+ int rc;
+ char *lbuf;
+
+ lbuf = kmalloc(1024, GFP_KERNEL);
+ if (lbuf == NULL)
+ return -ENOMEM;
+
+ /* Shields against vga_arb_device_card_gone (pci_dev going
+ * away), and allows access to vga list
+ */
+ spin_lock_irqsave(&vga_lock, flags);
+
+ /* If we are targeting the default, use it */
+ pdev = priv->target;
+ if (pdev == NULL || pdev == PCI_INVALID_CARD) {
+ spin_unlock_irqrestore(&vga_lock, flags);
+ len = sprintf(lbuf, "invalid");
+ goto done;
+ }
+
+ /* Find card vgadev structure */
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ /* Wow, it's not in the list, that shouldn't happen,
+ * let's fix us up and return invalid card
+ */
+ if (pdev == priv->target)
+ vga_arb_device_card_gone(pdev);
+ spin_unlock_irqrestore(&vga_lock, flags);
+ len = sprintf(lbuf, "invalid");
+ goto done;
+ }
+
+ /* Fill the buffer with info */
+ len = snprintf(lbuf, 1024,
+ "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
+ vga_decode_count, pci_name(pdev),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns),
+ vga_iostate_to_str(vgadev->locks),
+ vgadev->io_lock_cnt, vgadev->mem_lock_cnt);
+
+ spin_unlock_irqrestore(&vga_lock, flags);
+done:
+
+ /* Copy that to user */
+ if (len > count)
+ len = count;
+ rc = copy_to_user(buf, lbuf, len);
+ kfree(lbuf);
+ if (rc)
+ return -EFAULT;
+ return len;
+}
+
+/*
+ * TODO: To avoid parsing inside the kernel and to improve speed we may
+ * consider using an ioctl here
+ */
+static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ struct vga_arb_private *priv = file->private_data;
+ struct vga_arb_user_card *uc = NULL;
+ struct pci_dev *pdev;
+
+ unsigned int io_state;
+
+ char *kbuf, *curr_pos;
+ size_t remaining = count;
+
+ int ret_val;
+ int i;
+
+
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(kbuf, buf, count)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+ curr_pos = kbuf;
+ kbuf[count] = '\0'; /* Just to make sure... */
+
+ if (strncmp(curr_pos, "lock ", 5) == 0) {
+ curr_pos += 5;
+ remaining -= 5;
+
+ pr_devel("client 0x%p called 'lock'\n", priv);
+
+ if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ if (io_state == VGA_RSRC_NONE) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ vga_get_uninterruptible(pdev, io_state);
+
+ /* Update the client's locks lists... */
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev) {
+ if (io_state & VGA_RSRC_LEGACY_IO)
+ priv->cards[i].io_cnt++;
+ if (io_state & VGA_RSRC_LEGACY_MEM)
+ priv->cards[i].mem_cnt++;
+ break;
+ }
+ }
+
+ ret_val = count;
+ goto done;
+ } else if (strncmp(curr_pos, "unlock ", 7) == 0) {
+ curr_pos += 7;
+ remaining -= 7;
+
+ pr_devel("client 0x%p called 'unlock'\n", priv);
+
+ if (strncmp(curr_pos, "all", 3) == 0)
+ io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ else {
+ if (!vga_str_to_iostate
+ (curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ /* TODO: Add this?
+ if (io_state == VGA_RSRC_NONE) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ */
+ }
+
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev)
+ uc = &priv->cards[i];
+ }
+
+ if (!uc)
+ return -EINVAL;
+
+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
+ return -EINVAL;
+
+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
+ return -EINVAL;
+
+ vga_put(pdev, io_state);
+
+ if (io_state & VGA_RSRC_LEGACY_IO)
+ uc->io_cnt--;
+ if (io_state & VGA_RSRC_LEGACY_MEM)
+ uc->mem_cnt--;
+
+ ret_val = count;
+ goto done;
+ } else if (strncmp(curr_pos, "trylock ", 8) == 0) {
+ curr_pos += 8;
+ remaining -= 8;
+
+ pr_devel("client 0x%p called 'trylock'\n", priv);
+
+ if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ /* TODO: Add this?
+ if (io_state == VGA_RSRC_NONE) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ */
+
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ if (vga_tryget(pdev, io_state)) {
+ /* Update the client's locks lists... */
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev) {
+ if (io_state & VGA_RSRC_LEGACY_IO)
+ priv->cards[i].io_cnt++;
+ if (io_state & VGA_RSRC_LEGACY_MEM)
+ priv->cards[i].mem_cnt++;
+ break;
+ }
+ }
+ ret_val = count;
+ goto done;
+ } else {
+ ret_val = -EBUSY;
+ goto done;
+ }
+
+ } else if (strncmp(curr_pos, "target ", 7) == 0) {
+ unsigned int domain, bus, devfn;
+ struct vga_device *vgadev;
+
+ curr_pos += 7;
+ remaining -= 7;
+ pr_devel("client 0x%p called 'target'\n", priv);
+ /* if target is default */
+ if (!strncmp(buf, "default", 7))
+ pdev = pci_dev_get(vga_default_device());
+ else {
+ if (!vga_pci_str_to_vars(curr_pos, remaining,
+ &domain, &bus, &devfn)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+
+ pdev = pci_get_bus_and_slot(bus, devfn);
+ if (!pdev) {
+ pr_info("vgaarb: invalid PCI address!\n");
+ ret_val = -ENODEV;
+ goto done;
+ }
+ }
+
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ pr_info("vgaarb: this pci device is not a vga device\n");
+ pci_dev_put(pdev);
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ priv->target = pdev;
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev)
+ break;
+ if (priv->cards[i].pdev == NULL) {
+ priv->cards[i].pdev = pdev;
+ priv->cards[i].io_cnt = 0;
+ priv->cards[i].mem_cnt = 0;
+ break;
+ }
+ }
+ if (i == MAX_USER_CARDS) {
+ pr_err("vgaarb: maximum user cards number reached!\n");
+ pci_dev_put(pdev);
+ /* XXX: which value to return? */
+ ret_val = -ENOMEM;
+ goto done;
+ }
+
+ ret_val = count;
+ pci_dev_put(pdev);
+ goto done;
+
+
+ } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
+ curr_pos += 8;
+ remaining -= 8;
+ pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv);
+
+ if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ __vga_set_legacy_decoding(pdev, io_state, true);
+ ret_val = count;
+ goto done;
+ }
+ /* If we got here, the message written is not part of the protocol! */
+ kfree(kbuf);
+ return -EPROTO;
+
+done:
+ kfree(kbuf);
+ return ret_val;
+}
+
+static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+{
+ struct vga_arb_private *priv = file->private_data;
+
+ pr_devel("%s\n", __func__);
+
+ if (priv == NULL)
+ return -ENODEV;
+ poll_wait(file, &vga_wait_queue, wait);
+ return POLLIN;
+}
+
+static int vga_arb_open(struct inode *inode, struct file *file)
+{
+ struct vga_arb_private *priv;
+ unsigned long flags;
+
+ pr_devel("%s\n", __func__);
+
+ priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ memset(priv, 0, sizeof(*priv));
+ spin_lock_init(&priv->lock);
+ file->private_data = priv;
+
+ spin_lock_irqsave(&vga_user_lock, flags);
+ list_add(&priv->list, &vga_user_list);
+ spin_unlock_irqrestore(&vga_user_lock, flags);
+
+ /* Set the client' lists of locks */
+ priv->target = vga_default_device(); /* Maybe this is still null! */
+ priv->cards[0].pdev = priv->target;
+ priv->cards[0].io_cnt = 0;
+ priv->cards[0].mem_cnt = 0;
+
+
+ return 0;
+}
+
+static int vga_arb_release(struct inode *inode, struct file *file)
+{
+ struct vga_arb_private *priv = file->private_data;
+ struct vga_arb_user_card *uc;
+ unsigned long flags;
+ int i;
+
+ pr_devel("%s\n", __func__);
+
+ if (priv == NULL)
+ return -ENODEV;
+
+ spin_lock_irqsave(&vga_user_lock, flags);
+ list_del(&priv->list);
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ uc = &priv->cards[i];
+ if (uc->pdev == NULL)
+ continue;
+ pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ uc->io_cnt, uc->mem_cnt);
+ while (uc->io_cnt--)
+ vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
+ while (uc->mem_cnt--)
+ vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
+ }
+ spin_unlock_irqrestore(&vga_user_lock, flags);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static void vga_arb_device_card_gone(struct pci_dev *pdev)
+{
+}
+
+/*
+ * callback any registered clients to let them know we have a
+ * change in VGA cards
+ */
+static void vga_arbiter_notify_clients(void)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ uint32_t new_decodes;
+ bool new_state;
+
+ if (!vga_arbiter_used)
+ return;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ list_for_each_entry(vgadev, &vga_list, list) {
+ if (vga_count > 1)
+ new_state = false;
+ else
+ new_state = true;
+ if (vgadev->set_vga_decode) {
+ new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+ vga_update_device_decodes(vgadev, new_decodes);
+ }
+ }
+ spin_unlock_irqrestore(&vga_lock, flags);
+}
+
+static int pci_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool notify = false;
+
+ pr_devel("%s\n", __func__);
+
+ /* For now we're only interested in devices added and removed. I didn't
+ * test this thing here, so someone needs to double check for the
+ * cases of hot-pluggable vga cards. */
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ notify = vga_arbiter_add_pci_device(pdev);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ notify = vga_arbiter_del_pci_device(pdev);
+
+ if (notify)
+ vga_arbiter_notify_clients();
+ return 0;
+}
+
+static struct notifier_block pci_notifier = {
+ .notifier_call = pci_notify,
+};
+
+static const struct file_operations vga_arb_device_fops = {
+ .read = vga_arb_read,
+ .write = vga_arb_write,
+ .poll = vga_arb_fpoll,
+ .open = vga_arb_open,
+ .release = vga_arb_release,
+};
+
+static struct miscdevice vga_arb_device = {
+ MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
+};
+
+static int __init vga_arb_device_init(void)
+{
+ int rc;
+ struct pci_dev *pdev;
+
+ rc = misc_register(&vga_arb_device);
+ if (rc < 0)
+ pr_err("vgaarb: error %d registering device\n", rc);
+
+ bus_register_notifier(&pci_bus_type, &pci_notifier);
+
+ /* We add all pci devices satisfying vga class in the arbiter by
+ * default */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, pdev)) != NULL)
+ vga_arbiter_add_pci_device(pdev);
+
+ pr_info("vgaarb: loaded\n");
+ return rc;
+}
+subsys_initcall(vga_arb_device_init);
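A hedged kernel-side sketch of how a driver brackets direct accesses to the legacy VGA ranges with the lock API exported above (assuming the declarations are made available through <linux/vgaarb.h>, which this file already includes; mydrv_touch_legacy_vga() is a made-up example):

#include <linux/pci.h>
#include <linux/vgaarb.h>

static int mydrv_touch_legacy_vga(struct pci_dev *pdev)
{
        int err;

        /* Block (uninterruptibly) until this card owns the legacy I/O and
         * memory ranges, disabling conflicting cards along the way. */
        err = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 0);
        if (err)
                return err;

        /* ... it is now safe to touch the legacy VGA registers and the
         * 0xA0000-0xBFFFF window belonging to this card ... */

        vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
        return 0;
}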
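A hedged userspace sketch of the /dev/vga_arbiter protocol documented in the comment block inside vgaarb.c above (the device node name assumes the usual udev handling of the misc device, and the PCI address is an arbitrary example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char status[256];
        ssize_t n;
        int fd = open("/dev/vga_arbiter", O_RDWR);

        if (fd < 0)
                return 1;

        /* Point this file descriptor at a specific card, then take the
         * legacy locks while the VGA registers/framebuffer are touched. */
        write(fd, "target PCI:0000:01:00.0", 23);
        write(fd, "lock io+mem", 11);

        /* Read back the status line for the current target card. */
        n = read(fd, status, sizeof(status) - 1);
        if (n > 0) {
                status[n] = '\0';
                printf("%s", status);
        }

        write(fd, "unlock io+mem", 13);
        close(fd);
        return 0;
}

Closing the file descriptor releases any locks the process still holds, which is how the arbiter cleans up after a client that crashes.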
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1ebd6b4c743..4a7f11d8f43 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -8,6 +8,9 @@ obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_PCI_LEGACY) += legacy.o
+CFLAGS_legacy.o += -Wno-deprecated-declarations
+
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 2aa117c8cd8..3625b094bf7 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
# Link this last so it doesn't claim devices that have a real hotplug driver
obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o
-pci_hotplug-objs := pci_hotplug_core.o
+pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o
ifdef CONFIG_HOTPLUG_PCI_CPCI
pci_hotplug-objs += cpci_hotplug_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index eb159587d0b..a73028ec52e 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -41,7 +41,6 @@
#define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
#define METHOD_NAME__SUN "_SUN"
-#define METHOD_NAME__HPP "_HPP"
#define METHOD_NAME_OSHP "OSHP"
static int debug_acpi;
@@ -215,80 +214,41 @@ acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
static acpi_status
acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
{
- acpi_status status;
- u8 nui[4];
- struct acpi_buffer ret_buf = { 0, NULL};
- struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *ext_obj, *package;
- int i, len = 0;
-
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *package, *fields;
+ int i;
- /* Clear the return buffer with zeros */
memset(hpp, 0, sizeof(struct hotplug_params));
- /* get _hpp */
- status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf);
- switch (status) {
- case AE_BUFFER_OVERFLOW:
- ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL);
- if (!ret_buf.pointer) {
- printk(KERN_ERR "%s:%s alloc for _HPP fail\n",
- __func__, (char *)string.pointer);
- kfree(string.pointer);
- return AE_NO_MEMORY;
- }
- status = acpi_evaluate_object(handle, METHOD_NAME__HPP,
- NULL, &ret_buf);
- if (ACPI_SUCCESS(status))
- break;
- default:
- if (ACPI_FAILURE(status)) {
- pr_debug("%s:%s _HPP fail=0x%x\n", __func__,
- (char *)string.pointer, status);
- kfree(string.pointer);
- return status;
- }
- }
+ status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
- ext_obj = (union acpi_object *) ret_buf.pointer;
- if (ext_obj->type != ACPI_TYPE_PACKAGE) {
- printk(KERN_ERR "%s:%s _HPP obj not a package\n", __func__,
- (char *)string.pointer);
+ package = (union acpi_object *) buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE ||
+ package->package.count != 4) {
status = AE_ERROR;
- goto free_and_return;
+ goto exit;
}
- len = ext_obj->package.count;
- package = (union acpi_object *) ret_buf.pointer;
- for ( i = 0; (i < len) || (i < 4); i++) {
- ext_obj = (union acpi_object *) &package->package.elements[i];
- switch (ext_obj->type) {
- case ACPI_TYPE_INTEGER:
- nui[i] = (u8)ext_obj->integer.value;
- break;
- default:
- printk(KERN_ERR "%s:%s _HPP obj type incorrect\n",
- __func__, (char *)string.pointer);
+ fields = package->package.elements;
+ for (i = 0; i < 4; i++) {
+ if (fields[i].type != ACPI_TYPE_INTEGER) {
status = AE_ERROR;
- goto free_and_return;
+ goto exit;
}
}
hpp->t0 = &hpp->type0_data;
- hpp->t0->cache_line_size = nui[0];
- hpp->t0->latency_timer = nui[1];
- hpp->t0->enable_serr = nui[2];
- hpp->t0->enable_perr = nui[3];
-
- pr_debug(" _HPP: cache_line_size=0x%x\n", hpp->t0->cache_line_size);
- pr_debug(" _HPP: latency timer =0x%x\n", hpp->t0->latency_timer);
- pr_debug(" _HPP: enable SERR =0x%x\n", hpp->t0->enable_serr);
- pr_debug(" _HPP: enable PERR =0x%x\n", hpp->t0->enable_perr);
+ hpp->t0->revision = 1;
+ hpp->t0->cache_line_size = fields[0].integer.value;
+ hpp->t0->latency_timer = fields[1].integer.value;
+ hpp->t0->enable_serr = fields[2].integer.value;
+ hpp->t0->enable_perr = fields[3].integer.value;
-free_and_return:
- kfree(string.pointer);
- kfree(ret_buf.pointer);
+exit:
+ kfree(buffer.pointer);
return status;
}
@@ -322,20 +282,19 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
return status;
}
-/* acpi_get_hp_params_from_firmware
+/* pci_get_hp_params
*
- * @bus - the pci_bus of the bus on which the device is newly added
+ * @dev - the pci_dev for which we want parameters
* @hpp - allocated by the caller
*/
-acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
- struct hotplug_params *hpp)
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
{
- acpi_status status = AE_NOT_FOUND;
+ acpi_status status;
acpi_handle handle, phandle;
struct pci_bus *pbus;
handle = NULL;
- for (pbus = bus; pbus; pbus = pbus->parent) {
+ for (pbus = dev->bus; pbus; pbus = pbus->parent) {
handle = acpi_pci_get_bridge_handle(pbus);
if (handle)
break;
@@ -345,15 +304,15 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
* _HPP settings apply to all child buses, until another _HPP is
* encountered. If we don't find an _HPP for the input pci dev,
* look for it in the parent device scope since that would apply to
- * this pci dev. If we don't find any _HPP, use hardcoded defaults
+ * this pci dev.
*/
while (handle) {
status = acpi_run_hpx(handle, hpp);
if (ACPI_SUCCESS(status))
- break;
+ return 0;
status = acpi_run_hpp(handle, hpp);
if (ACPI_SUCCESS(status))
- break;
+ return 0;
if (acpi_is_root_bridge(handle))
break;
status = acpi_get_parent(handle, &phandle);
@@ -361,9 +320,9 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
break;
handle = phandle;
}
- return status;
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
+EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* acpi_get_hp_hw_control_from_firmware
@@ -500,18 +459,18 @@ check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
/**
* acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
- * @pbus - PCI bus to scan
+ * @handle - handle of the PCI bus to scan
*
* Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
*/
-int acpi_pci_detect_ejectable(struct pci_bus *pbus)
+int acpi_pci_detect_ejectable(acpi_handle handle)
{
- acpi_handle handle;
int found = 0;
- if (!(handle = acpi_pci_get_bridge_handle(pbus)))
- return 0;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+ if (!handle)
+ return found;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
check_hotplug, (void *)&found, NULL);
return found;
}
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e68d5f20ffb..7d938df7920 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -91,9 +91,6 @@ struct acpiphp_bridge {
/* PCI-to-PCI bridge device */
struct pci_dev *pci_dev;
- /* ACPI 2.0 _HPP parameters */
- struct hotplug_params hpp;
-
spinlock_t res_lock;
};
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0cb0f830a99..58d25a163a8 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(ioapic_list_lock);
static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
-static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
+static void acpiphp_set_hpp_values(struct pci_bus *bus);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
/* callback routine to check for the existence of a pci dock device */
@@ -261,51 +261,21 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
/* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(struct pci_bus *pbus)
+static int detect_ejectable_slots(acpi_handle handle)
{
- int found = acpi_pci_detect_ejectable(pbus);
+ int found = acpi_pci_detect_ejectable(handle);
if (!found) {
- acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
- if (!bridge_handle)
- return 0;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
is_pci_dock_device, (void *)&found, NULL);
}
return found;
}
-
-/* decode ACPI 2.0 _HPP hot plug parameters */
-static void decode_hpp(struct acpiphp_bridge *bridge)
-{
- acpi_status status;
-
- status = acpi_get_hp_params_from_firmware(bridge->pci_bus, &bridge->hpp);
- if (ACPI_FAILURE(status) ||
- !bridge->hpp.t0 || (bridge->hpp.t0->revision > 1)) {
- /* use default numbers */
- printk(KERN_WARNING
- "%s: Could not get hotplug parameters. Use defaults\n",
- __func__);
- bridge->hpp.t0 = &bridge->hpp.type0_data;
- bridge->hpp.t0->revision = 0;
- bridge->hpp.t0->cache_line_size = 0x10;
- bridge->hpp.t0->latency_timer = 0x40;
- bridge->hpp.t0->enable_serr = 0;
- bridge->hpp.t0->enable_perr = 0;
- }
-}
-
-
-
/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
static void init_bridge_misc(struct acpiphp_bridge *bridge)
{
acpi_status status;
- /* decode ACPI 2.0 _HPP (hot plug parameters) */
- decode_hpp(bridge);
-
/* must be added to the list prior to calling register_slot */
list_add(&bridge->list, &bridge_list);
@@ -399,9 +369,10 @@ static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
/* allocate and initialize host bridge data structure */
-static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
+static void add_host_bridge(acpi_handle *handle)
{
struct acpiphp_bridge *bridge;
+ struct acpi_pci_root *root = acpi_pci_find_root(handle);
bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
if (bridge == NULL)
@@ -410,7 +381,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
bridge->type = BRIDGE_TYPE_HOST;
bridge->handle = handle;
- bridge->pci_bus = pci_bus;
+ bridge->pci_bus = root->bus;
spin_lock_init(&bridge->res_lock);
@@ -419,7 +390,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
/* allocate and initialize PCI-to-PCI bridge data structure */
-static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
+static void add_p2p_bridge(acpi_handle *handle)
{
struct acpiphp_bridge *bridge;
@@ -433,8 +404,8 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
bridge->handle = handle;
config_p2p_bridge_flags(bridge);
- bridge->pci_dev = pci_dev_get(pci_dev);
- bridge->pci_bus = pci_dev->subordinate;
+ bridge->pci_dev = acpi_get_pci_dev(handle);
+ bridge->pci_bus = bridge->pci_dev->subordinate;
if (!bridge->pci_bus) {
err("This is not a PCI-to-PCI bridge!\n");
goto err;
@@ -451,7 +422,7 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
init_bridge_misc(bridge);
return;
err:
- pci_dev_put(pci_dev);
+ pci_dev_put(bridge->pci_dev);
kfree(bridge);
return;
}
@@ -462,39 +433,21 @@ static acpi_status
find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
- acpi_handle dummy_handle;
- unsigned long long tmp;
- int device, function;
struct pci_dev *dev;
- struct pci_bus *pci_bus = context;
-
- status = acpi_get_handle(handle, "_ADR", &dummy_handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_evaluate_integer(handle, "_ADR", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dbg("%s: _ADR evaluation failure\n", __func__);
- return AE_OK;
- }
-
- device = (tmp >> 16) & 0xffff;
- function = tmp & 0xffff;
-
- dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
+ dev = acpi_get_pci_dev(handle);
if (!dev || !dev->subordinate)
goto out;
/* check if this bridge has ejectable slots */
- if ((detect_ejectable_slots(dev->subordinate) > 0)) {
+ if ((detect_ejectable_slots(handle) > 0)) {
dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
- add_p2p_bridge(handle, dev);
+ add_p2p_bridge(handle);
}
/* search P2P bridges under this p2p bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, dev->subordinate, NULL);
+ find_p2p_bridge, NULL, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
@@ -509,9 +462,7 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
- int seg, bus;
acpi_handle dummy_handle;
- struct pci_bus *pci_bus;
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
@@ -526,36 +477,15 @@ static int add_bridge(acpi_handle handle)
return 0;
}
- /* get PCI segment number */
- status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
-
- seg = ACPI_SUCCESS(status) ? tmp : 0;
-
- /* get PCI bus number */
- status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
-
- if (ACPI_SUCCESS(status)) {
- bus = tmp;
- } else {
- warn("can't get bus number, assuming 0\n");
- bus = 0;
- }
-
- pci_bus = pci_find_bus(seg, bus);
- if (!pci_bus) {
- err("Can't find bus %04x:%02x\n", seg, bus);
- return 0;
- }
-
/* check if this bridge has ejectable slots */
- if (detect_ejectable_slots(pci_bus) > 0) {
+ if (detect_ejectable_slots(handle) > 0) {
dbg("found PCI host-bus bridge with hot-pluggable slots\n");
- add_host_bridge(handle, pci_bus);
+ add_host_bridge(handle);
}
/* search P2P bridges under this host bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, pci_bus, NULL);
+ find_p2p_bridge, NULL, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
@@ -1083,7 +1013,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(slot->bridge->handle, bus);
+ acpiphp_set_hpp_values(bus);
list_for_each_entry(func, &slot->funcs, sibling)
acpiphp_configure_ioapics(func->handle);
pci_enable_bridges(bus);
@@ -1294,70 +1224,12 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
return retval;
}
-static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge)
+static void acpiphp_set_hpp_values(struct pci_bus *bus)
{
- u16 pci_cmd, pci_bctl;
- struct pci_dev *cdev;
-
- /* Program hpp values for this device */
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- return;
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
- bridge->hpp.t0->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER,
- bridge->hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (bridge->hpp.t0->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (bridge->hpp.t0->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value and child devices */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- bridge->hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (bridge->hpp.t0->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (bridge->hpp.t0->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- program_hpp(cdev, bridge);
- }
- }
-}
-
-static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus)
-{
- struct acpiphp_bridge bridge;
struct pci_dev *dev;
- memset(&bridge, 0, sizeof(bridge));
- bridge.handle = handle;
- bridge.pci_bus = bus;
- bridge.pci_dev = bus->self;
- decode_hpp(&bridge);
list_for_each_entry(dev, &bus->devices, bus_list)
- program_hpp(dev, &bridge);
-
+ pci_configure_slot(dev);
}
/*
@@ -1387,24 +1259,23 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
/* Program resources in newly inserted bridge */
static int acpiphp_configure_bridge (acpi_handle handle)
{
- struct pci_dev *dev;
struct pci_bus *bus;
- dev = acpi_get_pci_dev(handle);
- if (!dev) {
- err("cannot get PCI domain and bus number for bridge\n");
- return -EINVAL;
+ if (acpi_is_root_bridge(handle)) {
+ struct acpi_pci_root *root = acpi_pci_find_root(handle);
+ bus = root->bus;
+ } else {
+ struct pci_dev *pdev = acpi_get_pci_dev(handle);
+ bus = pdev->subordinate;
+ pci_dev_put(pdev);
}
- bus = dev->bus;
-
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(handle, bus);
+ acpiphp_set_hpp_values(bus);
pci_enable_bridges(bus);
acpiphp_configure_ioapics(handle);
- pci_dev_put(dev);
return 0;
}
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5c5043f239c..0325d989bb4 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -86,7 +86,8 @@ static char *pci_bus_speed_strings[] = {
"66 MHz PCIX 533", /* 0x11 */
"100 MHz PCIX 533", /* 0x12 */
"133 MHz PCIX 533", /* 0x13 */
- "25 GBps PCI-E", /* 0x14 */
+ "2.5 GT/s PCI-E", /* 0x14 */
+ "5.0 GT/s PCI-E", /* 0x15 */
};
#ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e6cf096498b..36faa9a8e18 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -237,17 +237,8 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
return retval;
return pciehp_acpi_slot_detection_check(dev);
}
-
-static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
- struct hotplug_params *hpp)
-{
- if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp)))
- return -ENODEV;
- return 0;
-}
#else
#define pciehp_firmware_init() do {} while (0)
#define pciehp_get_hp_hw_control_from_firmware(dev) 0
-#define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV)
#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 96048010e7d..7163e6a6cfa 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -47,7 +47,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
if (slot_detection_mode != PCIEHP_DETECT_ACPI)
return 0;
- if (acpi_pci_detect_ejectable(dev->subordinate))
+ if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
return 0;
return -ENODEV;
}
@@ -76,9 +76,9 @@ static int __init dummy_probe(struct pcie_device *dev)
{
int pos;
u32 slot_cap;
+ acpi_handle handle;
struct slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
- struct pci_bus *pbus = pdev->subordinate;
/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
if (pciehp_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
@@ -94,7 +94,8 @@ static int __init dummy_probe(struct pcie_device *dev)
dup_slot_id++;
}
list_add_tail(&slot->slot_list, &dummy_slots);
- if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus))
+ handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
acpi_slot_detected = 1;
return -ENODEV; /* dummy driver always returns error */
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 8aab8edf123..b97cb4c3e0f 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -246,11 +246,6 @@ static int board_added(struct slot *p_slot)
goto err_exit;
}
- /*
- * Some PCI Express root ports require fixup after hot-plug operation.
- */
- if (pcie_mch_quirk)
- pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
if (PWR_LED(ctrl))
p_slot->hpc_ops->green_led_on(p_slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 52813257e5b..271f917b6f2 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -693,7 +693,10 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
switch (lnk_cap & 0x000F) {
case 1:
- lnk_speed = PCIE_2PT5GB;
+ lnk_speed = PCIE_2_5GB;
+ break;
+ case 2:
+ lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
@@ -772,7 +775,10 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
case 1:
- lnk_speed = PCIE_2PT5GB;
+ lnk_speed = PCIE_2_5GB;
+ break;
+ case 2:
+ lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 10f9566ccee..02e24d63b3e 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,136 +34,6 @@
#include "../pci.h"
#include "pciehp.h"
-static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
-{
- u16 pci_cmd, pci_bctl;
-
- if (hpp->revision > 1) {
- warn("Rev.%d type0 record not supported\n", hpp->revision);
- return;
- }
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (hpp->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (hpp->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- hpp->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (hpp->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (hpp->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- }
-}
-
-static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
-{
- int pos;
- u16 reg16;
- u32 reg32;
-
- if (hpp->revision > 1) {
- warn("Rev.%d type2 record not supported\n", hpp->revision);
- return;
- }
-
- /* Find PCI Express capability */
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
-
- /* Initialize Device Control Register */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
- reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
-
- /* Initialize Link Control Register */
- if (dev->subordinate) {
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
- | hpp->pci_exp_lnkctl_or;
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
- }
-
- /* Find Advanced Error Reporting Enhanced Capability */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return;
-
- /* Initialize Uncorrectable Error Mask Register */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
- reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
-
- /* Initialize Uncorrectable Error Severity Register */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
- reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
-
- /* Initialize Correctable Error Mask Register */
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
- reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
-
- /* Initialize Advanced Error Capabilities and Control Register */
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
- reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
-
- /*
- * FIXME: The following two registers are not supported yet.
- *
- * o Secondary Uncorrectable Error Severity Register
- * o Secondary Uncorrectable Error Mask Register
- */
-}
-
-static void program_fw_provided_values(struct pci_dev *dev)
-{
- struct pci_dev *cdev;
- struct hotplug_params hpp;
-
- /* Program hpp values for this device */
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- if (pciehp_get_hp_params_from_firmware(dev, &hpp)) {
- warn("Could not get hotplug parameters\n");
- return;
- }
-
- if (hpp.t2)
- program_hpp_type2(dev, hpp.t2);
- if (hpp.t0)
- program_hpp_type0(dev, hpp.t0);
-
- /* Program child devices */
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- program_fw_provided_values(cdev);
- }
-}
-
static int __ref pciehp_add_bridge(struct pci_dev *dev)
{
struct pci_bus *parent = dev->bus;
@@ -226,7 +96,7 @@ int pciehp_configure_device(struct slot *p_slot)
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
pciehp_add_bridge(dev);
}
- program_fw_provided_values(dev);
+ pci_configure_slot(dev);
pci_dev_put(dev);
}
@@ -285,11 +155,6 @@ int pciehp_unconfigure_device(struct slot *p_slot)
}
pci_dev_put(temp);
}
- /*
- * Some PCI Express root ports require fixup after hot-plug operation.
- */
- if (pcie_mch_quirk)
- pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev);
return rc;
}
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
new file mode 100644
index 00000000000..cc8ec3aa41a
--- /dev/null
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+
+static struct hpp_type0 pci_default_type0 = {
+ .revision = 1,
+ .cache_line_size = 8,
+ .latency_timer = 0x40,
+ .enable_serr = 0,
+ .enable_perr = 0,
+};
+
+static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
+{
+ u16 pci_cmd, pci_bctl;
+
+ if (!hpp) {
+ /*
+ * Perhaps we *should* use default settings for PCIe, but
+ * pciehp didn't, so we won't either.
+ */
+ if (dev->is_pcie)
+ return;
+ dev_info(&dev->dev, "using default PCI settings\n");
+ hpp = &pci_default_type0;
+ }
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev,
+ "PCI settings rev %d not supported; using defaults\n",
+ hpp->revision);
+ hpp = &pci_default_type0;
+ }
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
+ pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
+ if (hpp->enable_serr)
+ pci_cmd |= PCI_COMMAND_SERR;
+ else
+ pci_cmd &= ~PCI_COMMAND_SERR;
+ if (hpp->enable_perr)
+ pci_cmd |= PCI_COMMAND_PARITY;
+ else
+ pci_cmd &= ~PCI_COMMAND_PARITY;
+ pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+
+ /* Program bridge control value */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
+ hpp->latency_timer);
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
+ if (hpp->enable_serr)
+ pci_bctl |= PCI_BRIDGE_CTL_SERR;
+ else
+ pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
+ if (hpp->enable_perr)
+ pci_bctl |= PCI_BRIDGE_CTL_PARITY;
+ else
+ pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
+ }
+}
+
+static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+{
+ if (hpp)
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
+}
+
+static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+{
+ int pos;
+ u16 reg16;
+ u32 reg32;
+
+ if (!hpp)
+ return;
+
+ /* Find PCI Express capability */
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
+ hpp->revision);
+ return;
+ }
+
+ /* Initialize Device Control Register */
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
+
+ /* Initialize Link Control Register */
+ if (dev->subordinate) {
+ pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
+ | hpp->pci_exp_lnkctl_or;
+ pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Find Advanced Error Reporting Enhanced Capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return;
+
+ /* Initialize Uncorrectable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
+ reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
+
+ /* Initialize Uncorrectable Error Severity Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
+ reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
+
+ /* Initialize Correctable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
+ reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
+
+ /* Initialize Advanced Error Capabilities and Control Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ /*
+ * FIXME: The following two registers are not supported yet.
+ *
+ * o Secondary Uncorrectable Error Severity Register
+ * o Secondary Uncorrectable Error Mask Register
+ */
+}
+
+void pci_configure_slot(struct pci_dev *dev)
+{
+ struct pci_dev *cdev;
+ struct hotplug_params hpp;
+ int ret;
+
+ if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
+ (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
+ return;
+
+ memset(&hpp, 0, sizeof(hpp));
+ ret = pci_get_hp_params(dev, &hpp);
+ if (ret)
+ dev_warn(&dev->dev, "no hotplug settings from platform\n");
+
+ program_hpp_type2(dev, hpp.t2);
+ program_hpp_type1(dev, hpp.t1);
+ program_hpp_type0(dev, hpp.t0);
+
+ if (dev->subordinate) {
+ list_for_each_entry(cdev, &dev->subordinate->devices,
+ bus_list)
+ pci_configure_slot(cdev);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_configure_slot);
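Not part of the patch itself: a minimal sketch of how a hotplug driver is expected to use the new helper once a freshly inserted device has been scanned, mirroring the pciehp and shpchp call sites later in this series; configure_new_function() and its arguments are hypothetical.

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

/* Hypothetical hotplug-driver helper: assumes the slot's bus has already
 * been rescanned; pci_configure_slot() walks subordinate buses itself. */
static void configure_new_function(struct pci_bus *bus, unsigned int devfn)
{
        struct pci_dev *dev = pci_get_slot(bus, devfn);

        if (!dev)
                return;
        /* Apply firmware (_HPP/_HPX) settings, or the built-in defaults */
        pci_configure_slot(dev);
        pci_dev_put(dev);
}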
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 974e924ca96..bd588eb8e92 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -188,21 +188,12 @@ static inline const char *slot_name(struct slot *slot)
#ifdef CONFIG_ACPI
#include <linux/pci-acpi.h>
-static inline int get_hp_params_from_firmware(struct pci_dev *dev,
- struct hotplug_params *hpp)
-{
- if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp)))
- return -ENODEV;
- return 0;
-}
-
static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
#else
-#define get_hp_params_from_firmware(dev, hpp) (-ENODEV)
#define get_hp_hw_control_from_firmware(dev) (0)
#endif
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index aa315e52529..8c3d3219f22 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,66 +34,6 @@
#include "../pci.h"
#include "shpchp.h"
-static void program_fw_provided_values(struct pci_dev *dev)
-{
- u16 pci_cmd, pci_bctl;
- struct pci_dev *cdev;
- struct hotplug_params hpp;
-
- /* Program hpp values for this device */
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- /* use default values if we can't get them from firmware */
- if (get_hp_params_from_firmware(dev, &hpp) ||
- !hpp.t0 || (hpp.t0->revision > 1)) {
- warn("Could not get hotplug parameters. Use defaults\n");
- hpp.t0 = &hpp.type0_data;
- hpp.t0->revision = 0;
- hpp.t0->cache_line_size = 8;
- hpp.t0->latency_timer = 0x40;
- hpp.t0->enable_serr = 0;
- hpp.t0->enable_perr = 0;
- }
-
- pci_write_config_byte(dev,
- PCI_CACHE_LINE_SIZE, hpp.t0->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (hpp.t0->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (hpp.t0->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value and child devices */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (hpp.t0->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (hpp.t0->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- program_fw_provided_values(cdev);
- }
- }
-}
-
int __ref shpchp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
@@ -153,7 +93,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
child->subordinate = pci_do_scan_bus(child);
pci_bus_size_bridges(child);
}
- program_fw_provided_values(dev);
+ pci_configure_slot(dev);
pci_dev_put(dev);
}
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c
new file mode 100644
index 00000000000..871f65c1593
--- /dev/null
+++ b/drivers/pci/legacy.c
@@ -0,0 +1,34 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include "pci.h"
+
+/**
+ * pci_find_device - begin or continue searching for a PCI device by vendor/device id
+ * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
+ * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is found
+ * with a matching @vendor and @device, a pointer to its device structure is
+ * returned. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * Otherwise, if @from is not %NULL, the search continues from the next
+ * device on the global list.
+ *
+ * NOTE: Do not use this function any more; use pci_get_device() instead, as
+ * the PCI device returned by this function can disappear at any moment in
+ * time.
+ */
+struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from)
+{
+ struct pci_dev *pdev;
+
+ pci_dev_get(from);
+ pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
+ pci_dev_put(pdev);
+ return pdev;
+}
+EXPORT_SYMBOL(pci_find_device);
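Not part of the patch: as the kernel-doc above says, pci_find_device() survives only for legacy callers. A minimal sketch of the preferred pci_get_device() loop, which keeps a reference held across each iteration; the vendor and device IDs are placeholders.

#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID 0x8086        /* placeholder */
#define EXAMPLE_DEVICE_ID 0x1234        /* placeholder */

static void walk_example_devices(void)
{
        struct pci_dev *pdev = NULL;

        /* pci_get_device() drops the previous reference and takes a new
         * one, so the device cannot vanish while it is being inspected. */
        while ((pdev = pci_get_device(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID,
                                      pdev)))
                dev_info(&pdev->dev, "found example device\n");
}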
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d986afb7032..f9cf3173b23 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -16,9 +16,8 @@
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
-
-#include <asm/errno.h>
-#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/io.h>
#include "pci.h"
#include "msi.h"
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
write_msi_msg_desc(desc, msg);
}
-static int msi_free_irqs(struct pci_dev* dev);
+static void free_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry, *tmp;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int i, nvec;
+ if (!entry->irq)
+ continue;
+ nvec = 1 << entry->msi_attrib.multiple;
+ for (i = 0; i < nvec; i++)
+ BUG_ON(irq_has_action(entry->irq + i));
+ }
+
+ arch_teardown_msi_irqs(dev);
+
+ list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ if (entry->msi_attrib.is_msix) {
+ if (list_is_last(&entry->list, &dev->msi_list))
+ iounmap(entry->mask_base);
+ }
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
pos = entry->msi_attrib.pos;
pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
u16 control;
unsigned mask;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
if (!entry)
return -ENOMEM;
- entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = is_64bit_address(control);
- entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.is_msix = 0;
+ entry->msi_attrib.is_64 = is_64bit_address(control);
+ entry->msi_attrib.entry_nr = 0;
+ entry->msi_attrib.maskbit = is_mask_bit_support(control);
+ entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
+ entry->msi_attrib.pos = pos;
entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
return ret;
}
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return 0;
}
-/**
- * msix_capability_init - configure device's MSI-X capability
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of struct msix_entry entries
- * @nvec: number of @entries
- *
- * Setup the MSI-X capability structure of device function with a
- * single MSI-X irq. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated irqs or non-zero for otherwise.
- **/
-static int msix_capability_init(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
+ unsigned nr_entries)
{
- struct msi_desc *entry;
- int pos, i, j, nr_entries, ret;
unsigned long phys_addr;
u32 table_offset;
- u16 control;
u8 bir;
- void __iomem *base;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
-
- /* Ensure MSI-X is disabled while it is set up */
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
-
- /* Request & Map MSI-X table region */
- nr_entries = multi_msix_capable(control);
-
- pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
+ pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
- phys_addr = pci_resource_start (dev, bir) + table_offset;
- base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- if (base == NULL)
- return -ENOMEM;
+ phys_addr = pci_resource_start(dev, bir) + table_offset;
+
+ return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+}
+
+static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
+ void __iomem *base, struct msix_entry *entries,
+ int nvec)
+{
+ struct msi_desc *entry;
+ int i;
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev,
if (!i)
iounmap(base);
else
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
/* Not enough memory. Don't try again */
return -ENOMEM;
}
- j = entries[i].entry;
- entry->msi_attrib.is_msix = 1;
- entry->msi_attrib.is_64 = 1;
- entry->msi_attrib.entry_nr = j;
- entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = pos;
- entry->mask_base = base;
+ entry->msi_attrib.is_msix = 1;
+ entry->msi_attrib.is_64 = 1;
+ entry->msi_attrib.entry_nr = entries[i].entry;
+ entry->msi_attrib.default_irq = dev->irq;
+ entry->msi_attrib.pos = pos;
+ entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
}
- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
- if (ret < 0) {
- /* If we had some success report the number of irqs
- * we succeeded in setting up. */
- int avail = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
- if (entry->irq != 0) {
- avail++;
- }
- }
+ return 0;
+}
- if (avail != 0)
- ret = avail;
+static void msix_program_entries(struct pci_dev *dev,
+ struct msix_entry *entries)
+{
+ struct msi_desc *entry;
+ int i = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ entries[i].vector = entry->irq;
+ set_irq_msi(entry->irq, entry);
+ entry->masked = readl(entry->mask_base + offset);
+ msix_mask_irq(entry, 1);
+ i++;
}
+}
- if (ret) {
- msi_free_irqs(dev);
+/**
+ * msix_capability_init - configure device's MSI-X capability
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of struct msix_entry entries
+ * @nvec: number of @entries
+ *
+ * Set up the MSI-X capability structure of the device function with a
+ * single MSI-X irq. A return of zero indicates successful setup of the
+ * requested MSI-X entries with allocated irqs; non-zero indicates an error.
+ **/
+static int msix_capability_init(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int pos, ret;
+ u16 control;
+ void __iomem *base;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
+ /* Request & Map MSI-X table region */
+ base = msix_map_region(dev, pos, multi_msix_capable(control));
+ if (!base)
+ return -ENOMEM;
+
+ ret = msix_setup_entries(dev, pos, base, entries, nvec);
+ if (ret)
return ret;
- }
+
+ ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+ goto error;
/*
* Some devices require MSI-X to be enabled before we can touch the
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev,
control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
- entries[i].vector = entry->irq;
- set_irq_msi(entry->irq, entry);
- j = entries[i].entry;
- entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- msix_mask_irq(entry, 1);
- i++;
- }
+ msix_program_entries(dev, entries);
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev,
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
+
+error:
+ if (ret < 0) {
+ /*
+ * If we had some success, report the number of irqs
+ * we succeeded in setting up.
+ */
+ struct msi_desc *entry;
+ int avail = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (entry->irq != 0)
+ avail++;
+ }
+ if (avail != 0)
+ ret = avail;
+ }
+
+ free_msi_irqs(dev);
+
+ return ret;
}
/**
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev,
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 0, else return an error code.
**/
-static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
+static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
struct pci_bus *bus;
int ret;
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
if (nvec < 1)
return -ERANGE;
- /* Any bridge which does NOT route MSI transactions from it's
- * secondary bus to it's primary bus must set NO_MSI flag on
+ /*
+ * Any bridge which does NOT route MSI transactions from its
+ * secondary bus to its primary bus must set NO_MSI flag on
* the secondary pci_bus.
* We expect only arch-specific PCI host bus controller driver
* or quirks for specific PCI bridges to be setting NO_MSI.
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev)
dev->irq = desc->msi_attrib.default_irq;
}
-void pci_disable_msi(struct pci_dev* dev)
+void pci_disable_msi(struct pci_dev *dev)
{
- struct msi_desc *entry;
-
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
pci_msi_shutdown(dev);
-
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- if (entry->msi_attrib.is_msix)
- return;
-
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);
-static int msi_free_irqs(struct pci_dev* dev)
-{
- struct msi_desc *entry, *tmp;
-
- list_for_each_entry(entry, &dev->msi_list, list) {
- int i, nvec;
- if (!entry->irq)
- continue;
- nvec = 1 << entry->msi_attrib.multiple;
- for (i = 0; i < nvec; i++)
- BUG_ON(irq_has_action(entry->irq + i));
- }
-
- arch_teardown_msi_irqs(dev);
-
- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
- if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, &dev->msi_list))
- iounmap(entry->mask_base);
- }
- list_del(&entry->list);
- kfree(entry);
- }
-
- return 0;
-}
-
/**
* pci_msix_table_size - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev)
* of irqs or MSI-X vectors available. Driver should use the returned value to
* re-send its request.
**/
-int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
int status, nr_entries;
int i, j;
if (!entries)
- return -EINVAL;
+ return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
if (status)
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
- if (dev->msi_enabled) {
+ if (dev->msi_enabled) {
dev_info(&dev->dev, "can't enable MSI-X "
"(MSI IRQ already assigned)\n");
return -EINVAL;
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
}
EXPORT_SYMBOL(pci_enable_msix);
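Not part of the patch: the kernel-doc above states that a positive return from pci_enable_msix() is the number of vectors that could still be allocated and that the driver should retry with that count. A minimal driver-side sketch of the documented retry pattern; EXAMPLE_NVEC and the caller-supplied entries array are assumptions.

#include <linux/pci.h>

#define EXAMPLE_NVEC 8  /* hypothetical vector count */

/* Assumes @entries points to at least EXAMPLE_NVEC struct msix_entry. */
static int example_setup_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
        int i, rc, nvec = EXAMPLE_NVEC;

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;

        for (;;) {
                rc = pci_enable_msix(pdev, entries, nvec);
                if (rc == 0)
                        return nvec;    /* all requested vectors granted */
                if (rc < 0)
                        return rc;      /* hard failure */
                nvec = rc;              /* retry with what is available */
        }
}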
-static void msix_free_all_irqs(struct pci_dev *dev)
-{
- msi_free_irqs(dev);
-}
-
-void pci_msix_shutdown(struct pci_dev* dev)
+void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev)
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
}
-void pci_disable_msix(struct pci_dev* dev)
+
+void pci_disable_msix(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
pci_msix_shutdown(dev);
-
- msix_free_all_irqs(dev);
+ free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix);
* allocated for this device function, are reclaimed to unused state,
* which may be used later on.
**/
-void msi_remove_pci_irq_vectors(struct pci_dev* dev)
+void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev)
- return;
-
- if (dev->msi_enabled)
- msi_free_irqs(dev);
+ return;
- if (dev->msix_enabled)
- msix_free_all_irqs(dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ free_msi_irqs(dev);
}
void pci_no_msi(void)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ea15b053745..33317df4769 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -109,15 +109,32 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
return handle ? acpi_bus_can_wakeup(handle) : false;
}
+static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
+{
+ while (bus->parent) {
+ struct pci_dev *bridge = bus->self;
+ int ret;
+
+ ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
+ if (!ret || bridge->is_pcie)
+ return;
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ acpi_pm_device_sleep_wake(bus->bridge, enable);
+}
+
static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
- int error = acpi_pm_device_sleep_wake(&dev->dev, enable);
+ if (acpi_pci_can_wakeup(dev))
+ return acpi_pm_device_sleep_wake(&dev->dev, enable);
- if (!error)
- dev_printk(KERN_INFO, &dev->dev,
- "wake-up capability %s by ACPI\n",
- enable ? "enabled" : "disabled");
- return error;
+ if (!dev->is_pcie)
+ acpi_pci_propagate_wakeup_enable(dev->bus, enable);
+
+ return 0;
}
static struct pci_platform_pm_ops acpi_pci_platform_pm = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a7eb7277b10..e5d47be3c6d 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,37 +19,98 @@
#include <linux/cpu.h>
#include "pci.h"
-/*
- * Dynamic device IDs are disabled for !CONFIG_HOTPLUG
- */
-
struct pci_dynid {
struct list_head node;
struct pci_device_id id;
};
-#ifdef CONFIG_HOTPLUG
+/**
+ * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
+ * @drv: target pci driver
+ * @vendor: PCI vendor ID
+ * @device: PCI device ID
+ * @subvendor: PCI subvendor ID
+ * @subdevice: PCI subdevice ID
+ * @class: PCI class
+ * @class_mask: PCI class mask
+ * @driver_data: private driver data
+ *
+ * Adds a new dynamic pci device ID to this driver and causes the
+ * driver to probe for all devices again. @drv must have been
+ * registered prior to calling this function.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int pci_add_dynid(struct pci_driver *drv,
+ unsigned int vendor, unsigned int device,
+ unsigned int subvendor, unsigned int subdevice,
+ unsigned int class, unsigned int class_mask,
+ unsigned long driver_data)
+{
+ struct pci_dynid *dynid;
+ int retval;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.vendor = vendor;
+ dynid->id.device = device;
+ dynid->id.subvendor = subvendor;
+ dynid->id.subdevice = subdevice;
+ dynid->id.class = class;
+ dynid->id.class_mask = class_mask;
+ dynid->id.driver_data = driver_data;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ get_driver(&drv->driver);
+ retval = driver_attach(&drv->driver);
+ put_driver(&drv->driver);
+
+ return retval;
+}
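Not part of the patch: a minimal sketch of a registered driver using the new pci_add_dynid() export to pick up one extra ID at runtime; the IDs are placeholders. The pci-stub ids= module parameter added later in this series is the first in-tree caller and does the same thing at load time.

#include <linux/pci.h>

/* Hypothetical: teach an already-registered driver about one more
 * vendor:device pair and re-run probing for it. */
static int example_add_extra_id(struct pci_driver *drv)
{
        return pci_add_dynid(drv, 0x8086, 0xbeef,       /* placeholder IDs */
                             PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
}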
+
+static void pci_free_dynids(struct pci_driver *drv)
+{
+ struct pci_dynid *dynid, *n;
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/*
+ * Dynamic device ID manipulation via sysfs is disabled for !CONFIG_HOTPLUG
+ */
+#ifdef CONFIG_HOTPLUG
/**
- * store_new_id - add a new PCI device ID to this driver and re-probe devices
+ * store_new_id - sysfs frontend to pci_add_dynid()
* @driver: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
*
- * Adds a new dynamic pci device ID to this driver,
- * and causes the driver to probe for all devices again.
+ * Allow PCI IDs to be added to an existing driver via sysfs.
*/
static ssize_t
store_new_id(struct device_driver *driver, const char *buf, size_t count)
{
- struct pci_dynid *dynid;
struct pci_driver *pdrv = to_pci_driver(driver);
const struct pci_device_id *ids = pdrv->id_table;
__u32 vendor, device, subvendor=PCI_ANY_ID,
subdevice=PCI_ANY_ID, class=0, class_mask=0;
unsigned long driver_data=0;
int fields=0;
- int retval=0;
+ int retval;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
&vendor, &device, &subvendor, &subdevice,
@@ -72,27 +133,8 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
return retval;
}
- dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
- if (!dynid)
- return -ENOMEM;
-
- dynid->id.vendor = vendor;
- dynid->id.device = device;
- dynid->id.subvendor = subvendor;
- dynid->id.subdevice = subdevice;
- dynid->id.class = class;
- dynid->id.class_mask = class_mask;
- dynid->id.driver_data = driver_data;
-
- spin_lock(&pdrv->dynids.lock);
- list_add_tail(&dynid->node, &pdrv->dynids.list);
- spin_unlock(&pdrv->dynids.lock);
-
- if (get_driver(&pdrv->driver)) {
- retval = driver_attach(&pdrv->driver);
- put_driver(&pdrv->driver);
- }
-
+ retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
+ class, class_mask, driver_data);
if (retval)
return retval;
return count;
@@ -145,19 +187,6 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
-static void
-pci_free_dynids(struct pci_driver *drv)
-{
- struct pci_dynid *dynid, *n;
-
- spin_lock(&drv->dynids.lock);
- list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
- list_del(&dynid->node);
- kfree(dynid);
- }
- spin_unlock(&drv->dynids.lock);
-}
-
static int
pci_create_newid_file(struct pci_driver *drv)
{
@@ -186,7 +215,6 @@ static void pci_remove_removeid_file(struct pci_driver *drv)
driver_remove_file(&drv->driver, &driver_attr_remove_id);
}
#else /* !CONFIG_HOTPLUG */
-static inline void pci_free_dynids(struct pci_driver *drv) {}
static inline int pci_create_newid_file(struct pci_driver *drv)
{
return 0;
@@ -417,8 +445,6 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
- pci_dev->state_saved = false;
-
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -514,7 +540,6 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
{
pci_restore_standard_config(pci_dev);
- pci_dev->state_saved = false;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
@@ -580,8 +605,6 @@ static int pci_pm_suspend(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
- pci_dev->state_saved = false;
-
if (!pm) {
pci_pm_default_suspend(pci_dev);
goto Fixup;
@@ -694,7 +717,7 @@ static int pci_pm_resume(struct device *dev)
pci_pm_reenable_device(pci_dev);
}
- return 0;
+ return error;
}
#else /* !CONFIG_SUSPEND */
@@ -716,8 +739,6 @@ static int pci_pm_freeze(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_FREEZE);
- pci_dev->state_saved = false;
-
if (!pm) {
pci_pm_default_suspend(pci_dev);
return 0;
@@ -793,6 +814,8 @@ static int pci_pm_thaw(struct device *dev)
pci_pm_reenable_device(pci_dev);
}
+ pci_dev->state_saved = false;
+
return error;
}
@@ -804,8 +827,6 @@ static int pci_pm_poweroff(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_HIBERNATE);
- pci_dev->state_saved = false;
-
if (!pm) {
pci_pm_default_suspend(pci_dev);
goto Fixup;
@@ -1106,6 +1127,7 @@ static int __init pci_driver_init(void)
postcore_initcall(pci_driver_init);
+EXPORT_SYMBOL_GPL(pci_add_dynid);
EXPORT_SYMBOL(pci_match_id);
EXPORT_SYMBOL(__pci_register_driver);
EXPORT_SYMBOL(pci_unregister_driver);
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 74fbec0bf6c..f7b68ca6cc9 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -19,8 +19,16 @@
#include <linux/module.h>
#include <linux/pci.h>
+static char ids[1024] __initdata;
+
+module_param_string(ids, ids, sizeof(ids), 0);
+MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
+ "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\""
+ " and multiple comma separated entries can be specified");
+
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n");
return 0;
}
@@ -32,7 +40,42 @@ static struct pci_driver stub_driver = {
static int __init pci_stub_init(void)
{
- return pci_register_driver(&stub_driver);
+ char *p, *id;
+ int rc;
+
+ rc = pci_register_driver(&stub_driver);
+ if (rc)
+ return rc;
+
+ /* add ids specified in the module parameter */
+ p = ids;
+ while ((id = strsep(&p, ","))) {
+ unsigned int vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ int fields;
+
+ fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+
+ if (fields < 2) {
+ printk(KERN_WARNING
+ "pci-stub: invalid id string \"%s\"\n", id);
+ continue;
+ }
+
+ printk(KERN_INFO
+ "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
+ vendor, device, subvendor, subdevice, class, class_mask);
+
+ rc = pci_add_dynid(&stub_driver, vendor, device,
+ subvendor, subdevice, class, class_mask, 0);
+ if (rc)
+ printk(KERN_WARNING
+ "pci-stub: failed to add dynamic id (%d)\n", rc);
+ }
+
+ return 0;
}
static void __exit pci_stub_exit(void)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 85ebd02a64a..0f6382f090e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -916,6 +916,24 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
return 0;
}
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+ ssize_t result = strict_strtoul(buf, 0, &val);
+
+ if (result < 0)
+ return result;
+
+ if (val != 1)
+ return -EINVAL;
+ return pci_reset_function(pdev);
+}
+
+static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
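Not part of the patch: with the reset attribute in place, a function reset can be requested from user space by writing "1" to the device's reset file. A minimal user-space sketch; the device address in the path is only an example, and the file exists only when dev->reset_fn is set.

#include <stdio.h>

int main(void)
{
        /* Example address only; substitute the real domain:bus:dev.fn */
        const char *path = "/sys/bus/pci/devices/0000:00:1c.0/reset";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* reset_store() accepts only "1"; anything else is -EINVAL */
        fputs("1", f);
        return fclose(f) ? 1 : 0;
}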
+
static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
int retval;
@@ -943,7 +961,22 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
/* Active State Power Management */
pcie_aspm_create_sysfs_dev_files(dev);
+ if (!pci_probe_reset_function(dev)) {
+ retval = device_create_file(&dev->dev, &reset_attr);
+ if (retval)
+ goto error;
+ dev->reset_fn = 1;
+ }
return 0;
+
+error:
+ pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->vpd && dev->vpd->attr) {
+ sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+ kfree(dev->vpd->attr);
+ }
+
+ return retval;
}
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
@@ -1037,6 +1070,10 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
}
pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->reset_fn) {
+ device_remove_file(&dev->dev, &reset_attr);
+ dev->reset_fn = 0;
+ }
}
/**
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7b70312181d..6edecff0b41 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -41,6 +41,12 @@ int pci_domains_supported = 1;
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
+#define DEFAULT_HOTPLUG_IO_SIZE (256)
+#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
+/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
+unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -848,6 +854,7 @@ pci_restore_state(struct pci_dev *dev)
if (!dev->state_saved)
return 0;
+
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
@@ -869,6 +876,8 @@ pci_restore_state(struct pci_dev *dev)
pci_restore_msi_state(dev);
pci_restore_iov_state(dev);
+ dev->state_saved = false;
+
return 0;
}
@@ -1214,30 +1223,40 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
*/
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
- int error = 0;
- bool pme_done = false;
+ int ret = 0;
if (enable && !device_may_wakeup(&dev->dev))
return -EINVAL;
+ /* Don't do the same thing twice in a row for one device. */
+ if (!!enable == !!dev->wakeup_prepared)
+ return 0;
+
/*
* According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
* Anderson we should be doing PME# wake enable followed by ACPI wake
* enable. To disable wake-up we call the platform first, for symmetry.
*/
- if (!enable && platform_pci_can_wakeup(dev))
- error = platform_pci_sleep_wake(dev, false);
-
- if (!enable || pci_pme_capable(dev, state)) {
- pci_pme_active(dev, enable);
- pme_done = true;
- }
+ if (enable) {
+ int error;
- if (enable && platform_pci_can_wakeup(dev))
+ if (pci_pme_capable(dev, state))
+ pci_pme_active(dev, true);
+ else
+ ret = 1;
error = platform_pci_sleep_wake(dev, true);
+ if (ret)
+ ret = error;
+ if (!ret)
+ dev->wakeup_prepared = true;
+ } else {
+ platform_pci_sleep_wake(dev, false);
+ pci_pme_active(dev, false);
+ dev->wakeup_prepared = false;
+ }
- return pme_done ? 0 : error;
+ return ret;
}
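Not part of the patch: with the new wakeup_prepared flag, calling pci_enable_wake() twice with the same setting is now a cheap no-op. A minimal sketch of the usual legacy .suspend sequence a driver would use; the example_ prefix marks hypothetical names.

#include <linux/pci.h>

/* Hypothetical legacy .suspend hook for a PCI driver */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        /* Arm PME#/platform wake-up only if user space allowed it */
        pci_enable_wake(pdev, pci_choose_state(pdev, state),
                        device_may_wakeup(&pdev->dev));
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}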
/**
@@ -1356,6 +1375,7 @@ void pci_pm_init(struct pci_dev *dev)
int pm;
u16 pmc;
+ dev->wakeup_prepared = false;
dev->pm_cap = 0;
/* find PCI PM capability in list */
@@ -2262,6 +2282,22 @@ int __pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
+ * pci_probe_reset_function - check whether the device can be safely reset
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * Returns 0 if the device function can be reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_probe_reset_function(struct pci_dev *dev)
+{
+ return pci_dev_reset(dev, 1);
+}
+
+/**
* pci_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
@@ -2504,6 +2540,50 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return 0;
}
+/**
+ * pci_set_vga_state - set VGA decode state on device and parents if requested
+ * @dev: the PCI device
+ * @decode: true = enable decoding, false = disable decoding
+ * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
+ * @change_bridge: traverse ancestors and change bridges
+ */
+int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
+
+ WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (decode == true)
+ cmd |= command_bits;
+ else
+ cmd &= ~command_bits;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ if (change_bridge == false)
+ return 0;
+
+ bus = dev->bus;
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &cmd);
+ if (decode == true)
+ cmd |= PCI_BRIDGE_CTL_VGA;
+ else
+ cmd &= ~PCI_BRIDGE_CTL_VGA;
+ pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
+ cmd);
+ }
+ bus = bus->parent;
+ }
+ return 0;
+}
+
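Not part of the patch: pci_set_vga_state() is exported for the new VGA arbiter (drivers/gpu/vga/vgaarb.c in this merge). A minimal sketch of a GPU driver releasing legacy VGA decoding on its own device and on the bridges above it; example_release_vga() is a hypothetical wrapper.

#include <linux/pci.h>

/* Hypothetical helper: stop decoding legacy VGA I/O and memory on this
 * device and update the VGA enable bit on the bridges above it. */
static int example_release_vga(struct pci_dev *pdev)
{
        return pci_set_vga_state(pdev, false,
                                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
                                 true /* change_bridge */);
}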
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
@@ -2672,6 +2752,10 @@ static int __init pci_setup(char *str)
strlen(str + 19));
} else if (!strncmp(str, "ecrc=", 5)) {
pcie_ecrc_get_policy(str + 5);
+ } else if (!strncmp(str, "hpiosize=", 9)) {
+ pci_hotplug_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "hpmemsize=", 10)) {
+ pci_hotplug_mem_size = memparse(str + 10, &str);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5ff4d25bf0e..d92d1954a2f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -16,6 +16,7 @@ extern void pci_cleanup_rom(struct pci_dev *dev);
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
struct vm_area_struct *vma);
#endif
+int pci_probe_reset_function(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@@ -133,7 +134,6 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
-extern int pcie_mch_quirk;
extern struct device_attribute pci_dev_attrs[];
extern struct device_attribute dev_attr_cpuaffinity;
extern struct device_attribute dev_attr_cpulistaffinity;
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index d92ae21a59d..62d15f652bb 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -22,11 +22,10 @@
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "aerdrv.h"
-struct aer_error_inj
-{
+struct aer_error_inj {
u8 bus;
u8 dev;
u8 fn;
@@ -38,8 +37,7 @@ struct aer_error_inj
u32 header_log3;
};
-struct aer_error
-{
+struct aer_error {
struct list_head list;
unsigned int bus;
unsigned int devfn;
@@ -55,8 +53,7 @@ struct aer_error
u32 source_id;
};
-struct pci_bus_ops
-{
+struct pci_bus_ops {
struct list_head list;
struct pci_bus *bus;
struct pci_ops *ops;
@@ -150,7 +147,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
target = &err->header_log1;
break;
case PCI_ERR_HEADER_LOG+8:
- target = &err->header_log2;
+ target = &err->header_log2;
break;
case PCI_ERR_HEADER_LOG+12:
target = &err->header_log3;
@@ -258,8 +255,7 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
bus_ops = NULL;
out:
spin_unlock_irqrestore(&inject_lock, flags);
- if (bus_ops)
- kfree(bus_ops);
+ kfree(bus_ops);
return 0;
}
@@ -401,10 +397,8 @@ static int aer_inject(struct aer_error_inj *einj)
else
ret = -EINVAL;
out_put:
- if (err_alloc)
- kfree(err_alloc);
- if (rperr_alloc)
- kfree(rperr_alloc);
+ kfree(err_alloc);
+ kfree(rperr_alloc);
pci_dev_put(dev);
return ret;
}
@@ -458,8 +452,7 @@ static void __exit aer_inject_exit(void)
}
spin_lock_irqsave(&inject_lock, flags);
- list_for_each_entry_safe(err, err_next,
- &pci_bus_ops_list, list) {
+ list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) {
list_del(&err->list);
kfree(err);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 4770f13b3ca..10c0e62bd5a 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static int __devinit aer_probe (struct pcie_device *dev);
+static int __devinit aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error);
@@ -47,7 +47,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
static struct pci_error_handlers aer_error_handlers = {
.error_detected = aer_error_detected,
- .resume = aer_error_resume,
+ .resume = aer_error_resume,
};
static struct pcie_port_service_driver aerdriver = {
@@ -134,12 +134,12 @@ EXPORT_SYMBOL_GPL(aer_irq);
*
* Invoked when Root Port's AER service is loaded.
**/
-static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
+static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
{
struct aer_rpc *rpc;
- if (!(rpc = kzalloc(sizeof(struct aer_rpc),
- GFP_KERNEL)))
+ rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
+ if (!rpc)
return NULL;
/*
@@ -189,26 +189,28 @@ static void aer_remove(struct pcie_device *dev)
*
* Invoked when PCI Express bus loads AER service driver.
**/
-static int __devinit aer_probe (struct pcie_device *dev)
+static int __devinit aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
struct device *device = &dev->device;
/* Init */
- if ((status = aer_init(dev)))
+ status = aer_init(dev);
+ if (status)
return status;
/* Alloc rpc data structure */
- if (!(rpc = aer_alloc_rpc(dev))) {
+ rpc = aer_alloc_rpc(dev);
+ if (!rpc) {
dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
aer_remove(dev);
return -ENOMEM;
}
/* Request IRQ ISR */
- if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv",
- dev))) {
+ status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
+ if (status) {
dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
aer_remove(dev);
return status;
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index bbd7428ca2d..bd833ea3ba4 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -16,12 +16,9 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
-#define AER_UNCORRECTABLE 4
-#define AER_ERROR_MASK 0x001fffff
-#define AER_ERROR(d) (d & AER_ERROR_MASK)
/* Root Error Status Register Bits */
-#define ROOT_ERR_STATUS_MASKS 0x0f
+#define ROOT_ERR_STATUS_MASKS 0x0f
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
@@ -32,8 +29,6 @@
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
-#define AER_SUCCESS 0
-#define AER_UNSUCCESS 1
#define AER_ERROR_SOURCES_MAX 100
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
@@ -43,13 +38,6 @@
PCI_ERR_UNC_UNX_COMP| \
PCI_ERR_UNC_MALF_TLP)
-/* AER Error Info Flags */
-#define AER_TLP_HEADER_VALID_FLAG 0x00000001
-#define AER_MULTI_ERROR_VALID_FLAG 0x00000002
-
-#define ERR_CORRECTABLE_ERROR_MASK 0x000031c1
-#define ERR_UNCORRECTABLE_ERROR_MASK 0x001ff010
-
struct header_log_regs {
unsigned int dw0;
unsigned int dw1;
@@ -61,11 +49,20 @@ struct header_log_regs {
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
- u16 id;
- int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
- int flags;
+
+ unsigned int id:16;
+
+ unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
+ unsigned int __pad1:5;
+ unsigned int multi_error_valid:1;
+
+ unsigned int first_error:5;
+ unsigned int __pad2:2;
+ unsigned int tlp_header_valid:1;
+
unsigned int status; /* COR/UNCOR Error Status */
- struct header_log_regs tlp; /* TLP Header */
+ unsigned int mask; /* COR/UNCOR Error Mask */
+ struct header_log_regs tlp; /* TLP Header */
};
struct aer_err_source {
@@ -125,6 +122,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
@@ -136,4 +134,4 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
}
#endif
-#endif //_AERDRV_H_
+#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 3d8872704a5..9f5ccbeb4fa 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -49,10 +49,11 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE;
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL,
- reg16);
+ pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -68,10 +69,11 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL,
- reg16);
+ pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+
return 0;
}
+EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
@@ -92,6 +94,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
#if 0
int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
@@ -110,7 +113,6 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
}
#endif /* 0 */
-
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
@@ -164,8 +166,9 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
e_info->dev[e_info->error_dev_num] = dev;
e_info->error_dev_num++;
return 1;
- } else
- return 0;
+ }
+
+ return 0;
}
@@ -193,7 +196,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* If there is no multiple error, we stop
* or continue based on the id comparing.
*/
- if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ if (!e_info->multi_error_valid)
return result;
/*
@@ -233,24 +236,16 @@ static int find_device_iter(struct pci_dev *dev, void *data)
status = 0;
mask = 0;
if (e_info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev,
- pos + PCI_ERR_COR_STATUS,
- &status);
- pci_read_config_dword(dev,
- pos + PCI_ERR_COR_MASK,
- &mask);
- if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
+ if (status & ~mask) {
add_error_device(e_info, dev);
goto added;
}
} else {
- pci_read_config_dword(dev,
- pos + PCI_ERR_UNCOR_STATUS,
- &status);
- pci_read_config_dword(dev,
- pos + PCI_ERR_UNCOR_MASK,
- &mask);
- if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
+ if (status & ~mask) {
add_error_device(e_info, dev);
goto added;
}
@@ -259,7 +254,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
return 0;
added:
- if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ if (e_info->multi_error_valid)
return 0;
else
return 1;
@@ -411,8 +406,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
pci_cleanup_aer_uncorrect_error_status(dev);
dev->error_state = pci_channel_io_normal;
}
- }
- else {
+ } else {
/*
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
@@ -473,7 +467,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
udev = dev;
else
- udev= dev->bus->self;
+ udev = dev->bus->self;
data.is_downstream = 0;
data.aer_driver = NULL;
@@ -576,7 +570,7 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pcie_device * aerdev,
+static void handle_error_source(struct pcie_device *aerdev,
struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -682,7 +676,7 @@ static void disable_root_aer(struct aer_rpc *rpc)
*
* Invoked by DPC handler to consume an error.
*/
-static struct aer_err_source* get_e_source(struct aer_rpc *rpc)
+static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
{
struct aer_err_source *e_source;
unsigned long flags;
@@ -702,32 +696,50 @@ static struct aer_err_source* get_e_source(struct aer_rpc *rpc)
return e_source;
}
+/**
+ * get_device_error_info - read error status from dev and store it to info
+ * @dev: pointer to the device expected to have an error record
+ * @info: pointer to structure to store the error record
+ *
+ * Return 1 on success, 0 on error.
+ */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
- int pos;
+ int pos, temp;
+
+ info->status = 0;
+ info->tlp_header_valid = 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
/* The device might not support AER */
if (!pos)
- return AER_SUCCESS;
+ return 1;
if (info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
&info->status);
- if (!(info->status & ERR_CORRECTABLE_ERROR_MASK))
- return AER_UNSUCCESS;
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
+ &info->mask);
+ if (!(info->status & ~info->mask))
+ return 0;
} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
&info->status);
- if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK))
- return AER_UNSUCCESS;
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
+ &info->mask);
+ if (!(info->status & ~info->mask))
+ return 0;
+
+ /* Get First Error Pointer */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
+ info->first_error = PCI_ERR_CAP_FEP(temp);
if (info->status & AER_LOG_TLP_MASKS) {
- info->flags |= AER_TLP_HEADER_VALID_FLAG;
+ info->tlp_header_valid = 1;
pci_read_config_dword(dev,
pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
pci_read_config_dword(dev,
@@ -739,7 +751,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
}
}
- return AER_SUCCESS;
+ return 1;
}
static inline void aer_process_err_devices(struct pcie_device *p_device,
@@ -753,14 +765,14 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
e_info->id);
}
+	/* Report all before handling them, so records are not lost by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (get_device_error_info(e_info->dev[i], e_info) ==
- AER_SUCCESS) {
+ if (get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
- handle_error_source(p_device,
- e_info->dev[i],
- e_info);
- }
+ }
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info))
+ handle_error_source(p_device, e_info->dev[i], e_info);
}
}
@@ -806,7 +818,9 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+ e_info->multi_error_valid = 1;
+
+ aer_print_port_info(p_device->port, e_info);
find_source_device(p_device->port, e_info);
aer_process_err_devices(p_device, e_info);
@@ -863,10 +877,5 @@ int aer_init(struct pcie_device *dev)
if (aer_osc_setup(dev) && !forceload)
return -ENXIO;
- return AER_SUCCESS;
+ return 0;
}
-
-EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
-EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
-EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 0fc29ae80df..44acde72294 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -27,69 +27,70 @@
#define AER_AGENT_COMPLETER 2
#define AER_AGENT_TRANSMITTER 3
-#define AER_AGENT_REQUESTER_MASK (PCI_ERR_UNC_COMP_TIME| \
- PCI_ERR_UNC_UNSUP)
-
-#define AER_AGENT_COMPLETER_MASK PCI_ERR_UNC_COMP_ABORT
-
-#define AER_AGENT_TRANSMITTER_MASK(t, e) (e & (PCI_ERR_COR_REP_ROLL| \
- ((t == AER_CORRECTABLE) ? PCI_ERR_COR_REP_TIMER: 0)))
+#define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
+#define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ 0 : PCI_ERR_UNC_COMP_ABORT)
+#define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
#define AER_GET_AGENT(t, e) \
- ((e & AER_AGENT_COMPLETER_MASK) ? AER_AGENT_COMPLETER : \
- (e & AER_AGENT_REQUESTER_MASK) ? AER_AGENT_REQUESTER : \
- (AER_AGENT_TRANSMITTER_MASK(t, e)) ? AER_AGENT_TRANSMITTER : \
+ ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
+ (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
+ (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
AER_AGENT_RECEIVER)
-#define AER_PHYSICAL_LAYER_ERROR_MASK PCI_ERR_COR_RCVR
-#define AER_DATA_LINK_LAYER_ERROR_MASK(t, e) \
- (PCI_ERR_UNC_DLP| \
- PCI_ERR_COR_BAD_TLP| \
- PCI_ERR_COR_BAD_DLLP| \
- PCI_ERR_COR_REP_ROLL| \
- ((t == AER_CORRECTABLE) ? \
- PCI_ERR_COR_REP_TIMER: 0))
-
#define AER_PHYSICAL_LAYER_ERROR 0
#define AER_DATA_LINK_LAYER_ERROR 1
#define AER_TRANSACTION_LAYER_ERROR 2
-#define AER_GET_LAYER_ERROR(t, e) \
- ((e & AER_PHYSICAL_LAYER_ERROR_MASK) ? \
- AER_PHYSICAL_LAYER_ERROR : \
- (e & AER_DATA_LINK_LAYER_ERROR_MASK(t, e)) ? \
- AER_DATA_LINK_LAYER_ERROR : \
- AER_TRANSACTION_LAYER_ERROR)
+#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
+ PCI_ERR_COR_RCVR : 0)
+#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
+ (PCI_ERR_COR_BAD_TLP| \
+ PCI_ERR_COR_BAD_DLLP| \
+ PCI_ERR_COR_REP_ROLL| \
+ PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
+
+#define AER_GET_LAYER_ERROR(t, e) \
+ ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
+ (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
+ AER_TRANSACTION_LAYER_ERROR)
+
+#define AER_PR(info, pdev, fmt, args...) \
+ printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \
+ KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \
+ dev_name(&pdev->dev), ## args)
/*
* AER error strings
*/
-static char* aer_error_severity_string[] = {
+static char *aer_error_severity_string[] = {
"Uncorrected (Non-Fatal)",
"Uncorrected (Fatal)",
"Corrected"
};
-static char* aer_error_layer[] = {
+static char *aer_error_layer[] = {
"Physical Layer",
"Data Link Layer",
"Transaction Layer"
};
-static char* aer_correctable_error_string[] = {
- "Receiver Error ", /* Bit Position 0 */
+static char *aer_correctable_error_string[] = {
+ "Receiver Error ", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
- "Bad TLP ", /* Bit Position 6 */
- "Bad DLLP ", /* Bit Position 7 */
- "RELAY_NUM Rollover ", /* Bit Position 8 */
+ "Bad TLP ", /* Bit Position 6 */
+ "Bad DLLP ", /* Bit Position 7 */
+ "RELAY_NUM Rollover ", /* Bit Position 8 */
NULL,
NULL,
NULL,
- "Replay Timer Timeout ", /* Bit Position 12 */
- "Advisory Non-Fatal ", /* Bit Position 13 */
+ "Replay Timer Timeout ", /* Bit Position 12 */
+ "Advisory Non-Fatal ", /* Bit Position 13 */
NULL,
NULL,
NULL,
@@ -110,7 +111,7 @@ static char* aer_correctable_error_string[] = {
NULL,
};
-static char* aer_uncorrectable_error_string[] = {
+static char *aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
@@ -123,10 +124,10 @@ static char* aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
- "Poisoned TLP ", /* Bit Position 12 */
+ "Poisoned TLP ", /* Bit Position 12 */
"Flow Control Protocol ", /* Bit Position 13 */
- "Completion Timeout ", /* Bit Position 14 */
- "Completer Abort ", /* Bit Position 15 */
+ "Completion Timeout ", /* Bit Position 14 */
+ "Completer Abort ", /* Bit Position 15 */
"Unexpected Completion ", /* Bit Position 16 */
"Receiver Overflow ", /* Bit Position 17 */
"Malformed TLP ", /* Bit Position 18 */
@@ -145,98 +146,69 @@ static char* aer_uncorrectable_error_string[] = {
NULL,
};
-static char* aer_agent_string[] = {
+static char *aer_agent_string[] = {
"Receiver ID",
"Requester ID",
"Completer ID",
"Transmitter ID"
};
-static char * aer_get_error_source_name(int severity,
- unsigned int status,
- char errmsg_buff[])
+static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
{
- int i;
- char * errmsg = NULL;
+ int i, status;
+ char *errmsg = NULL;
+
+ status = (info->status & ~info->mask);
for (i = 0; i < 32; i++) {
if (!(status & (1 << i)))
continue;
- if (severity == AER_CORRECTABLE)
+ if (info->severity == AER_CORRECTABLE)
errmsg = aer_correctable_error_string[i];
else
errmsg = aer_uncorrectable_error_string[i];
- if (!errmsg) {
- sprintf(errmsg_buff, "Unknown Error Bit %2d ", i);
- errmsg = errmsg_buff;
- }
-
- break;
+ if (errmsg)
+ AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg,
+ info->first_error == i ? " (First)" : "");
+ else
+ AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i,
+ info->first_error == i ? " (First)" : "");
}
-
- return errmsg;
}
-static DEFINE_SPINLOCK(logbuf_lock);
-static char errmsg_buff[100];
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
- char * errmsg;
- int err_layer, agent;
- char * loglevel;
-
- if (info->severity == AER_CORRECTABLE)
- loglevel = KERN_WARNING;
- else
- loglevel = KERN_ERR;
-
- printk("%s+------ PCI-Express Device Error ------+\n", loglevel);
- printk("%sError Severity\t\t: %s\n", loglevel,
- aer_error_severity_string[info->severity]);
-
- if ( info->status == 0) {
- printk("%sPCIE Bus Error type\t: (Unaccessible)\n", loglevel);
- printk("%sUnaccessible Received\t: %s\n", loglevel,
- info->flags & AER_MULTI_ERROR_VALID_FLAG ?
- "Multiple" : "First");
- printk("%sUnregistered Agent ID\t: %04x\n", loglevel,
- (dev->bus->number << 8) | dev->devfn);
+ int id = ((dev->bus->number << 8) | dev->devfn);
+
+ if (info->status == 0) {
+ AER_PR(info, dev,
+ "PCIE Bus Error: severity=%s, type=Unaccessible, "
+ "id=%04x(Unregistered Agent ID)\n",
+ aer_error_severity_string[info->severity], id);
} else {
- err_layer = AER_GET_LAYER_ERROR(info->severity, info->status);
- printk("%sPCIE Bus Error type\t: %s\n", loglevel,
- aer_error_layer[err_layer]);
-
- spin_lock(&logbuf_lock);
- errmsg = aer_get_error_source_name(info->severity,
- info->status,
- errmsg_buff);
- printk("%s%s\t: %s\n", loglevel, errmsg,
- info->flags & AER_MULTI_ERROR_VALID_FLAG ?
- "Multiple" : "First");
- spin_unlock(&logbuf_lock);
+ int layer, agent;
+ layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- printk("%s%s\t\t: %04x\n", loglevel,
- aer_agent_string[agent],
- (dev->bus->number << 8) | dev->devfn);
-
- printk("%sVendorID=%04xh, DeviceID=%04xh,"
- " Bus=%02xh, Device=%02xh, Function=%02xh\n",
- loglevel,
- dev->vendor,
- dev->device,
- dev->bus->number,
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
- if (info->flags & AER_TLP_HEADER_VALID_FLAG) {
+
+ AER_PR(info, dev,
+ "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ aer_error_severity_string[info->severity],
+ aer_error_layer[layer], id, aer_agent_string[agent]);
+
+ AER_PR(info, dev,
+ " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device, info->status, info->mask);
+
+ __aer_print_error(info, dev);
+
+ if (info->tlp_header_valid) {
unsigned char *tlp = (unsigned char *) &info->tlp;
- printk("%sTLP Header:\n", loglevel);
- printk("%s%02x%02x%02x%02x %02x%02x%02x%02x"
+ AER_PR(info, dev, " TLP Header:"
+ " %02x%02x%02x%02x %02x%02x%02x%02x"
" %02x%02x%02x%02x %02x%02x%02x%02x\n",
- loglevel,
*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
*(tlp + 11), *(tlp + 10), *(tlp + 9),
@@ -244,5 +216,15 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
*(tlp + 13), *(tlp + 12));
}
}
+
+ if (info->id && info->error_dev_num > 1 && info->id == id)
+ AER_PR(info, dev,
+ " Error of this Agent(%04x) is reported first\n", id);
}
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
+{
+ dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity], info->id);
+}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 3d27c97e048..f289ca9bf18 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,6 +26,13 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
+/* Note: those are not register definitions */
+#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+#define ASPM_STATE_L1 (4) /* L1 state */
+#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
struct aspm_latency {
u32 l0s; /* L0s latency (nsec) */
u32 l1; /* L1 latency (nsec) */
@@ -40,17 +47,20 @@ struct pcie_link_state {
struct list_head link; /* node in parent's children list */
/* ASPM state */
- u32 aspm_support:2; /* Supported ASPM state */
- u32 aspm_enabled:2; /* Enabled ASPM state */
- u32 aspm_default:2; /* Default ASPM state by BIOS */
+ u32 aspm_support:3; /* Supported ASPM state */
+ u32 aspm_enabled:3; /* Enabled ASPM state */
+ u32 aspm_capable:3; /* Capable ASPM state with latency */
+ u32 aspm_default:3; /* Default ASPM state by BIOS */
+ u32 aspm_disable:3; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
- /* Latencies */
- struct aspm_latency latency; /* Exit latency */
+ /* Exit latencies */
+ struct aspm_latency latency_up; /* Upstream direction exit latency */
+ struct aspm_latency latency_dw; /* Downstream direction exit latency */
/*
* Endpoint acceptable latencies. A pcie downstream port only
* has one slot under it, so at most there are 8 functions.
@@ -82,7 +92,7 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return ASPM_STATE_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -164,18 +174,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
-{
- struct pci_dev *child;
- struct pci_bus *linkbus = link->pdev->subordinate;
-
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
- return true;
- }
- return false;
-}
-
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -288,71 +286,133 @@ static u32 calc_l1_acceptable(u32 encoding)
return (1000 << encoding);
}
-static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- u32 *l0s, u32 *l1, u32 *enabled)
+struct aspm_register_info {
+ u32 support:2;
+ u32 enabled:2;
+ u32 latency_encoding_l0s;
+ u32 latency_encoding_l1;
+};
+
+static void pcie_get_aspm_reg(struct pci_dev *pdev,
+ struct aspm_register_info *info)
{
int pos;
u16 reg16;
- u32 reg32, encoding;
+ u32 reg32;
- *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
- *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
- *state = 0;
- if (*state == 0)
+ info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
+ /* 00b and 10b are defined as "Reserved". */
+ if (info->support == PCIE_LINK_STATE_L1)
+ info->support = 0;
+ info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
+}
+
+static void pcie_aspm_check_latency(struct pci_dev *endpoint)
+{
+ u32 latency, l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+ /* Device not in D0 doesn't need latency check */
+ if ((endpoint->current_state != PCI_D0) &&
+ (endpoint->current_state != PCI_UNKNOWN))
return;
- encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_l0s_latency(encoding);
- if (*state & PCIE_LINK_STATE_L1) {
- encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_l1_latency(encoding);
+ link = endpoint->bus->self->link_state;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link) {
+ /* Check upstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ (link->latency_up.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+
+ /* Check downstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ (link->latency_dw.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ /*
+ * Check L1 latency.
+	 * Every switch on the path to the root complex needs 1
+	 * more microsecond for L1. The spec doesn't mention L0s.
+ */
+ latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
+ if ((link->aspm_capable & ASPM_STATE_L1) &&
+ (latency + l1_switch_latency > acceptable->l1))
+ link->aspm_capable &= ~ASPM_STATE_L1;
+ l1_switch_latency += 1000;
+
+ link = link->parent;
}
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- u32 support, l0s, l1, enabled;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
+ struct aspm_register_info upreg, dwreg;
if (blacklist) {
- /* Set support state to 0, so we will disable ASPM later */
- link->aspm_support = 0;
- link->aspm_default = 0;
- link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ /* Set enabled/disable so that we will disable ASPM later */
+ link->aspm_enabled = ASPM_STATE_ALL;
+ link->aspm_disable = ASPM_STATE_ALL;
return;
}
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
- /* upstream component states */
- pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
- link->aspm_support = support;
- link->latency.l0s = l0s;
- link->latency.l1 = l1;
- link->aspm_enabled = enabled;
-
- /* downstream component states, all functions have the same setting */
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
- pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
- link->aspm_support &= support;
- link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
- link->latency.l1 = max_t(u32, link->latency.l1, l1);
+ pcie_get_aspm_reg(child, &dwreg);
- if (!link->aspm_support)
- return;
-
- link->aspm_enabled &= link->aspm_support;
+ /*
+ * Setup L0s state
+ *
+ * Note that we must not enable L0s in either direction on a
+ * given link unless components on both sides of the link each
+ * support L0s.
+ */
+ if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
+ link->aspm_support |= ASPM_STATE_L0S;
+ if (dwreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ if (upreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
+ link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
+
+ /* Setup L1 state */
+ if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
+ link->aspm_support |= ASPM_STATE_L1;
+ if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
+ link->aspm_enabled |= ASPM_STATE_L1;
+ link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
+ link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+
+ /* Save default state */
link->aspm_default = link->aspm_enabled;
- /* ENDPOINT states*/
+ /* Setup initial capable state. Will be updated later */
+ link->aspm_capable = link->aspm_support;
+ /*
+	 * If the downstream component has a PCI bridge function, don't
+	 * do ASPM for now.
+ */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+ link->aspm_disable = ASPM_STATE_ALL;
+ break;
+ }
+ }
+
+ /* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
u32 reg32, encoding;
@@ -365,109 +425,46 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
pos = pci_find_capability(child, PCI_CAP_ID_EXP);
pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ /* Calculate endpoint L0s acceptable latency */
encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
acceptable->l0s = calc_l0s_acceptable(encoding);
- if (link->aspm_support & PCIE_LINK_STATE_L1) {
- encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- acceptable->l1 = calc_l1_acceptable(encoding);
- }
- }
-}
-
-/**
- * __pcie_aspm_check_state_one - check latency for endpoint device.
- * @endpoint: pointer to the struct pci_dev of endpoint device
- *
- * TBD: The latency from the endpoint to root complex vary per switch's
- * upstream link state above the device. Here we just do a simple check
- * which assumes all links above the device can be in L1 state, that
- * is we just consider the worst case. If switch's upstream link can't
- * be put into L0S/L1, then our check is too strictly.
- */
-static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
-{
- u32 l1_switch_latency = 0;
- struct aspm_latency *acceptable;
- struct pcie_link_state *link;
-
- link = endpoint->bus->self->link_state;
- state &= link->aspm_support;
- acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+ /* Calculate endpoint L1 acceptable latency */
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
- while (link && state) {
- if ((state & PCIE_LINK_STATE_L0S) &&
- (link->latency.l0s > acceptable->l0s))
- state &= ~PCIE_LINK_STATE_L0S;
- if ((state & PCIE_LINK_STATE_L1) &&
- (link->latency.l1 + l1_switch_latency > acceptable->l1))
- state &= ~PCIE_LINK_STATE_L1;
- link = link->parent;
- /*
- * Every switch on the path to root complex need 1
- * more microsecond for L1. Spec doesn't mention L0s.
- */
- l1_switch_latency += 1000;
- }
- return state;
-}
-
-static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
-{
- pci_power_t power_state;
- struct pci_dev *child;
- struct pci_bus *linkbus = link->pdev->subordinate;
-
- /* If no child, ignore the link */
- if (list_empty(&linkbus->devices))
- return state;
-
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- */
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
-
- if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child->pcie_type != PCI_EXP_TYPE_LEG_END))
- continue;
- /* Device not in D0 doesn't need check latency */
- power_state = child->current_state;
- if (power_state == PCI_D1 || power_state == PCI_D2 ||
- power_state == PCI_D3hot || power_state == PCI_D3cold)
- continue;
- state = __pcie_aspm_check_state_one(child, state);
+ pcie_aspm_check_latency(child);
}
- return state;
}
-static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
+static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
u16 reg16;
int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~0x3;
- reg16 |= state;
+ reg16 |= val;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
+static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
+ u32 upstream = 0, dwstream = 0;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
- /* If no child, disable the link */
- if (list_empty(&linkbus->devices))
- state = 0;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return;
+ /* Nothing to do if the link is already in the requested state */
+ state &= (link->aspm_capable & ~link->aspm_disable);
+ if (link->aspm_enabled == state)
+ return;
+ /* Convert ASPM state to upstream/downstream ASPM register state */
+ if (state & ASPM_STATE_L0S_UP)
+ dwstream |= PCIE_LINK_STATE_L0S;
+ if (state & ASPM_STATE_L0S_DW)
+ upstream |= PCIE_LINK_STATE_L0S;
+ if (state & ASPM_STATE_L1) {
+ upstream |= PCIE_LINK_STATE_L1;
+ dwstream |= PCIE_LINK_STATE_L1;
}
/*
* Spec 2.0 suggests all functions should be configured the
@@ -475,67 +472,24 @@ static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
* upstream component first and then downstream, and vice
* versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
- if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(parent, state);
-
+ if (state & ASPM_STATE_L1)
+ pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
- __pcie_aspm_config_one_dev(child, state);
-
- if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(parent, state);
+ pcie_config_aspm_dev(child, dwstream);
+ if (!(state & ASPM_STATE_L1))
+ pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
}
-/* Check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
- u32 state)
+static void pcie_config_aspm_path(struct pcie_link_state *link)
{
- struct pcie_link_state *leaf, *root = link->root;
-
- state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
-
- /* Check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibling) {
- if (!list_empty(&leaf->children) || (leaf->root != root))
- continue;
- state = pcie_aspm_check_state(leaf, state);
- }
- /* Check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root, state);
- if (link->aspm_enabled == state)
- return;
- /*
- * We must change the hierarchy. See comments in
- * __pcie_aspm_config_link for the order
- **/
- if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibling) {
- if (leaf->root == root)
- __pcie_aspm_config_link(leaf, state);
- }
- } else {
- list_for_each_entry_reverse(leaf, &link_list, sibling) {
- if (leaf->root == root)
- __pcie_aspm_config_link(leaf, state);
- }
+ while (link) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ link = link->parent;
}
}
-/*
- * pcie_aspm_configure_link_state: enable/disable PCI express link state
- * @pdev: the root port or switch downstream port
- */
-static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
- u32 state)
-{
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(link, state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
static void free_link_state(struct pcie_link_state *link)
{
link->pdev->link_state = NULL;
@@ -570,10 +524,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
return 0;
}
-static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
struct pcie_link_state *link;
- int blacklist = !!pcie_aspm_sanity_check(pdev);
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
@@ -599,15 +552,7 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
link->root = link->parent->root;
list_add(&link->sibling, &link_list);
-
pdev->link_state = link;
-
- /* Check ASPM capability */
- pcie_aspm_cap_init(link, blacklist);
-
- /* Check Clock PM capability */
- pcie_clkpm_cap_init(link, blacklist);
-
return link;
}
@@ -618,8 +563,8 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- u32 state;
struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
@@ -637,47 +582,64 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
goto out;
mutex_lock(&aspm_lock);
- link = pcie_aspm_setup_link_state(pdev);
+ link = alloc_pcie_link_state(pdev);
if (!link)
goto unlock;
/*
- * Setup initial ASPM state
- *
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. But we must
- * make sure BIOS doesn't set unsupported link state.
+ * Setup initial ASPM state. Note that we need to configure
+	 * upstream links also because their capable state can be
+	 * updated through pcie_aspm_cap_init().
*/
- if (pcie_aspm_downstream_has_switch(link)) {
- state = pcie_aspm_check_state(link, link->aspm_default);
- __pcie_aspm_config_link(link, state);
- } else {
- state = policy_to_aspm_state(link);
- __pcie_aspm_configure_link_state(link, state);
- }
+ pcie_aspm_cap_init(link, blacklist);
+ pcie_config_aspm_path(link);
/* Setup initial Clock PM state */
- state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
- pcie_set_clkpm(link, state);
+ pcie_clkpm_cap_init(link, blacklist);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
}
+/* Recheck latencies and update aspm_capable for links under the root */
+static void pcie_update_aspm_capable(struct pcie_link_state *root)
+{
+ struct pcie_link_state *link;
+ BUG_ON(root->parent);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ link->aspm_capable = link->aspm_support;
+ }
+ list_for_each_entry(link, &link_list, sibling) {
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
+ if (link->root != root)
+ continue;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) &&
+ (child->pcie_type != PCI_EXP_TYPE_LEG_END))
+ continue;
+ pcie_aspm_check_latency(child);
+ }
+ }
+}
+
/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state = parent->link_state;
+ struct pcie_link_state *link, *root, *parent_link;
- if (aspm_disabled || !pdev->is_pcie || !parent || !link_state)
+ if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state)
return;
- if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
return;
+
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
-
/*
* All PCIe functions are in one slot, remove one function will remove
* the whole slot, so just wait until we are the last function left.
@@ -685,13 +647,20 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
goto out;
+ link = parent->link_state;
+ root = link->root;
+ parent_link = link->parent;
+
/* All functions are removed, so just disable ASPM for the link */
- __pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibling);
- list_del(&link_state->link);
+ pcie_config_aspm_link(link, 0);
+ list_del(&link->sibling);
+ list_del(&link->link);
/* Clock PM is for endpoint device */
+ free_link_state(link);
- free_link_state(link_state);
+ /* Recheck latencies and configure upstream links */
+ pcie_update_aspm_capable(root);
+ pcie_config_aspm_path(parent_link);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -700,18 +669,23 @@ out:
/* @pdev: the root port or switch downstream port */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pcie_link_state *link = pdev->link_state;
- if (aspm_disabled || !pdev->is_pcie || !pdev->link_state)
+ if (aspm_disabled || !pdev->is_pcie || !link)
return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
return;
/*
- * devices changed PM state, we should recheck if latency meets all
- * functions' requirement
+	 * Devices changed PM state; we should recheck whether latency
+	 * meets all functions' requirements
*/
- pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_update_aspm_capable(link->root);
+ pcie_config_aspm_path(link);
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
}
/*
@@ -721,7 +695,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie)
return;
@@ -733,12 +707,16 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link_state = parent->link_state;
- link_state->aspm_support &= ~state;
- __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ link = parent->link_state;
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_disable |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= ASPM_STATE_L1;
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
if (state & PCIE_LINK_STATE_CLKPM) {
- link_state->clkpm_capable = 0;
- pcie_set_clkpm(link_state, 0);
+ link->clkpm_capable = 0;
+ pcie_set_clkpm(link, 0);
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -748,7 +726,7 @@ EXPORT_SYMBOL(pci_disable_link_state);
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pcie_link_state *link_state;
+ struct pcie_link_state *link;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
@@ -761,10 +739,9 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibling) {
- __pcie_aspm_configure_link_state(link_state,
- policy_to_aspm_state(link_state));
- pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
+ list_for_each_entry(link, &link_list, sibling) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -802,18 +779,28 @@ static ssize_t link_state_store(struct device *dev,
size_t n)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int state;
+ struct pcie_link_state *link, *root = pdev->link_state->root;
+ u32 val = buf[0] - '0', state = 0;
- if (n < 1)
+ if (n < 1 || val > 3)
return -EINVAL;
- state = buf[0]-'0';
- if (state >= 0 && state <= 3) {
- /* setup link aspm state */
- pcie_aspm_configure_link_state(pdev->link_state, state);
- return n;
- }
- return -EINVAL;
+ /* Convert requested state to ASPM state */
+ if (val & PCIE_LINK_STATE_L0S)
+ state |= ASPM_STATE_L0S;
+ if (val & PCIE_LINK_STATE_L1)
+ state |= ASPM_STATE_L1;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ pcie_config_aspm_link(link, state);
+ }
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+ return n;
}
static ssize_t clk_ctl_show(struct device *dev,
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 13ffdc35ea0..52f84fca9f7 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -187,14 +187,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
*/
static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
{
- struct pcie_port_data *port_data = pci_get_drvdata(dev);
int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
int i;
- /* Check MSI quirk */
- if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
- goto Fallback;
-
/* Try to use MSI-X if supported */
if (!pcie_port_enable_msix(dev, vectors, mask))
return PCIE_PORT_MSIX_MODE;
@@ -203,7 +198,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
if (!pci_enable_msi(dev))
interrupt_mode = PCIE_PORT_MSI_MODE;
- Fallback:
if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
interrupt_mode = PCIE_PORT_INTx_MODE;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 091ce70051e..6df5c984a79 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -205,6 +205,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
/* If fatal, restore cfg space for possible link reset at upstream */
if (dev->error_state == pci_channel_io_frozen) {
+ dev->state_saved = true;
pci_restore_state(dev);
pcie_portdrv_restore_config(dev);
pci_enable_pcie_error_reporting(dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 40e75f6a505..8105e32117f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -235,7 +235,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->start = l64;
res->end = l64 + sz64;
dev_printk(KERN_DEBUG, &dev->dev,
- "reg %x 64bit mmio: %pR\n", pos, res);
+ "reg %x %s: %pR\n", pos,
+ (res->flags & IORESOURCE_PREFETCH) ?
+ "64bit mmio pref" : "64bit mmio",
+ res);
}
res->flags |= IORESOURCE_MEM_64;
@@ -249,7 +252,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->end = l + sz;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
- (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
+ (res->flags & IORESOURCE_IO) ? "io port" :
+ ((res->flags & IORESOURCE_PREFETCH) ?
+ "32bit mmio pref" : "32bit mmio"),
res);
}
@@ -692,6 +697,23 @@ static void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}
+static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+ u32 reg32;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ if (!(reg16 & PCI_EXP_FLAGS_SLOT))
+ return;
+ pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
+ if (reg32 & PCI_EXP_SLTCAP_HPC)
+ pdev->is_hotplug_bridge = 1;
+}
+
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -799,6 +821,7 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
+ set_pcie_hotplug_bridge(dev);
break;
case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
@@ -1009,6 +1032,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
+ /* Clear the state_saved flag. */
+ dev->state_saved = false;
+
/* Initialize various capabilities */
pci_init_capabilities(dev);
@@ -1061,8 +1087,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (dev && !dev->is_added) /* new device? */
nr++;
- if ((dev && dev->multifunction) ||
- (!dev && pcibios_scan_all_fns(bus, devfn))) {
+ if (dev && dev->multifunction) {
for (fn = 1; fn < 8; fn++) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 85ce23997be..6099facecd7 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -31,8 +31,6 @@ int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
-int pcie_mch_quirk;
-EXPORT_SYMBOL(pcie_mch_quirk);
#ifdef CONFIG_PCI_QUIRKS
/*
@@ -1203,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
+ case 0x00ba: /* Compaq Evo D510 USDT */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs and on-board VGA
* controller is disabled if an AGP card is
@@ -1501,7 +1500,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a
static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
{
- pcie_mch_quirk = 1;
+ pci_msi_off(pdev);
+ pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
@@ -1569,10 +1569,8 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
return;
dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
-
- printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n",
- dev->vendor, dev->device);
- return;
+ dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
@@ -1614,8 +1612,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
- printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n",
- dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
@@ -1647,8 +1645,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
- printk(KERN_INFO "disabled boot interrupts on PCI device"
- "0x%04x:0x%04x\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
@@ -1678,8 +1676,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
pci_config_dword &= ~AMD_813X_NOIOAMODE;
pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
- printk(KERN_INFO "disabled boot interrupts on PCI device "
- "0x%04x:0x%04x\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
@@ -1695,14 +1693,13 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
if (!pci_config_word) {
- printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x "
- "already disabled\n",
- dev->vendor, dev->device);
+ dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] "
+ "already disabled\n", dev->vendor, dev->device);
return;
}
pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
- printk(KERN_INFO "disabled boot interrupts on PCI device "
- "0x%04x:0x%04x\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
@@ -2384,8 +2381,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
@@ -2494,6 +2493,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index e8cb5051c31..ec415352d9b 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -113,37 +113,6 @@ pci_find_next_bus(const struct pci_bus *from)
return b;
}
-#ifdef CONFIG_PCI_LEGACY
-/**
- * pci_find_device - begin or continue searching for a PCI device by vendor/device id
- * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
- * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
- * @from: Previous PCI device found in search, or %NULL for new search.
- *
- * Iterates through the list of known PCI devices. If a PCI device is found
- * with a matching @vendor and @device, a pointer to its device structure is
- * returned. Otherwise, %NULL is returned.
- * A new search is initiated by passing %NULL as the @from argument.
- * Otherwise if @from is not %NULL, searches continue from next device
- * on the global list.
- *
- * NOTE: Do not use this function any more; use pci_get_device() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
- struct pci_dev *from)
-{
- struct pci_dev *pdev;
-
- pci_dev_get(from);
- pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
- pci_dev_put(pdev);
- return pdev;
-}
-EXPORT_SYMBOL(pci_find_device);
-#endif /* CONFIG_PCI_LEGACY */
-
/**
* pci_get_slot - locate PCI device for a given PCI slot
* @bus: PCI bus on which desired PCI device resides
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7c443b4583a..cb1a027eb55 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -309,7 +309,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
since these windows have 4K granularity and the IO ranges
of non-bridge PCI devices are limited to 256 bytes.
We must be careful with the ISA aliasing though. */
-static void pbus_size_io(struct pci_bus *bus)
+static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
@@ -336,6 +336,8 @@ static void pbus_size_io(struct pci_bus *bus)
size1 += r_size;
}
}
+ if (size < min_size)
+ size = min_size;
/* To be fixed in 2.5: we should have sort of HAVE_ISA
flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
@@ -354,7 +356,8 @@ static void pbus_size_io(struct pci_bus *bus)
/* Calculate the size of the bus and minimal alignment which
guarantees that all child resources fit in this size. */
-static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
+static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ unsigned long type, resource_size_t min_size)
{
struct pci_dev *dev;
resource_size_t min_align, align, size;
@@ -404,6 +407,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
+ if (size < min_size)
+ size = min_size;
align = 0;
min_align = 0;
@@ -483,6 +488,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
+ resource_size_t min_mem_size = 0, min_io_size = 0;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -512,8 +518,12 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
case PCI_CLASS_BRIDGE_PCI:
pci_bridge_check_ranges(bus);
+ if (bus->self->is_hotplug_bridge) {
+ min_io_size = pci_hotplug_io_size;
+ min_mem_size = pci_hotplug_mem_size;
+ }
default:
- pbus_size_io(bus);
+ pbus_size_io(bus, min_io_size);
/* If the bridge supports prefetchable range, size it
separately. If it doesn't, or its prefetchable window
has already been allocated by arch code, try
@@ -521,9 +531,11 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
resources. */
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask))
+ if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
mask = prefmask; /* Success, size non-prefetch only. */
- pbus_size_mem(bus, mask, IORESOURCE_MEM);
+ else
+ min_mem_size += min_mem_size;
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
break;
}
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 88cdd1a937d..706f82d8111 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return err;
}
+EXPORT_SYMBOL(pci_claim_resource);
#ifdef CONFIG_PCI_QUIRKS
void pci_disable_bridge_window(struct pci_dev *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 3ecd7c99d8e..737fe5d87c4 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -622,11 +622,12 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end)
{
- struct resource *root, *res;
+ struct pci_dev *dev = socket->dev;
+ struct resource *res;
struct pci_bus_region region;
unsigned mask;
- res = socket->dev->resource + PCI_BRIDGE_RESOURCES + nr;
+ res = dev->resource + PCI_BRIDGE_RESOURCES + nr;
/* Already allocated? */
if (res->parent)
return 0;
@@ -636,17 +637,16 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
if (type & IORESOURCE_IO)
mask = ~3;
- res->name = socket->dev->subordinate->name;
+ res->name = dev->subordinate->name;
res->flags = type;
region.start = config_readl(socket, addr_start) & mask;
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
- pcibios_bus_to_resource(socket->dev, res, &region);
- root = pci_find_parent_resource(socket->dev, res);
- if (root && (request_resource(root, res) == 0))
+ pcibios_bus_to_resource(dev, res, &region);
+ if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
return 0;
- dev_printk(KERN_INFO, &socket->dev->dev,
+ dev_printk(KERN_INFO, &dev->dev,
"Preassigned resource %d busy or not available, "
"reconfiguring...\n",
nr);
@@ -672,7 +672,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
return 1;
}
- dev_printk(KERN_INFO, &socket->dev->dev,
+ dev_printk(KERN_INFO, &dev->dev,
"no resource of type %x available, trying to continue...\n",
type);
res->start = res->end = res->flags = 0;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1999b183481..cef3e1d9b92 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -7,6 +7,8 @@ menu "Graphics support"
source "drivers/char/agp/Kconfig"
+source "drivers/gpu/vga/Kconfig"
+
source "drivers/gpu/drm/Kconfig"
config VGASTATE