author     Jan Kiszka <jan.kiszka@siemens.com>      2011-11-04 09:45:59 +0100
committer  Jesse Barnes <jbarnes@virtuousgeek.org>  2012-01-06 12:10:33 -0800
commit     fb51ccbf217c1c994607b6519c7d85250928553d (patch)
tree       d08ba9a0278da0e75b6c6714e9453e46068e27b4 /drivers/pci/access.c
parent     ae5cd86455381282ece162966183d3f208c6fad7 (diff)
PCI: Rework config space blocking services
pci_block_user_cfg_access was designed for the use case that a single
context, the IPR driver, temporarily delays user space accesses to the
config space via sysfs. This assumption became invalid by the time
pci_dev_reset was added as a locking instance. Today, if you run two loops
in parallel that reset the same device via sysfs, you end up with a kernel
BUG when pci_block_user_cfg_access detects the broken assumption.

This reworks pci_block_user_cfg_access into a sleeping service,
pci_cfg_access_lock, and an atomic-compatible variant called
pci_cfg_access_trylock. The former not only blocks user space access as
before but also waits if access was already locked. The latter simply
returns false in this case, allowing the caller to resolve the conflict
instead of raising a BUG.

Adaptations of the ipr driver were originally written by Brian King.

Acked-by: Brian King <brking@linux.vnet.ibm.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
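As a rough illustration of the reworked interface (not part of this patch:
my_reset_device() and its retry policy are hypothetical, and the actual
pci_dev_reset() conversion lives outside drivers/pci/access.c), a
reset-style caller could look like this:

/*
 * Illustrative sketch only: shows how a reset path might use the
 * reworked services.  my_reset_device() is a made-up helper.
 */
#include <linux/errno.h>
#include <linux/pci.h>

static int my_reset_device(struct pci_dev *dev, bool can_sleep)
{
	if (can_sleep) {
		/* Sleeping variant: waits until any current holder unlocks. */
		pci_cfg_access_lock(dev);
	} else if (!pci_cfg_access_trylock(dev)) {
		/*
		 * Atomic-compatible variant: instead of hitting the BUG that
		 * the old pci_block_user_cfg_access() raised on nesting, the
		 * caller can back off and retry or report the conflict.
		 */
		return -EAGAIN;
	}

	/* ... perform the device-specific reset here ... */

	pci_cfg_access_unlock(dev);
	return 0;
}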
Diffstat (limited to 'drivers/pci/access.c')
-rw-r--r--  drivers/pci/access.c  74
1 file changed, 48 insertions(+), 26 deletions(-)
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index fdaa42aac7c..0c4c71712df 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -127,20 +127,20 @@ EXPORT_SYMBOL(pci_write_vpd);
* We have a bit per device to indicate it's blocked and a global wait queue
* for callers to sleep on until devices are unblocked.
*/
-static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
+static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
-static noinline void pci_wait_ucfg(struct pci_dev *dev)
+static noinline void pci_wait_cfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
- __add_wait_queue(&pci_ucfg_wait, &wait);
+ __add_wait_queue(&pci_cfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
schedule();
raw_spin_lock_irq(&pci_lock);
- } while (dev->block_ucfg_access);
- __remove_wait_queue(&pci_ucfg_wait, &wait);
+ } while (dev->block_cfg_access);
+ __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
@@ -153,7 +153,8 @@ int pci_user_read_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
@@ -172,7 +173,8 @@ int pci_user_write_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
@@ -401,36 +403,56 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
EXPORT_SYMBOL(pci_vpd_truncate);
/**
- * pci_block_user_cfg_access - Block userspace PCI config reads/writes
+ * pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
*
- * When user access is blocked, any reads or writes to config space will
- * sleep until access is unblocked again. We don't allow nesting of
- * block/unblock calls.
+ * When access is locked, any userspace reads or writes to config
+ * space and concurrent lock requests will sleep until access is
+ * allowed via pci_cfg_access_unlock() again.
*/
-void pci_block_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_lock(struct pci_dev *dev)
+{
+ might_sleep();
+
+ raw_spin_lock_irq(&pci_lock);
+ if (dev->block_cfg_access)
+ pci_wait_cfg(dev);
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irq(&pci_lock);
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
+
+/**
+ * pci_cfg_access_trylock - try to lock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * Same as pci_cfg_access_lock, but will return 0 if access is
+ * already locked, 1 otherwise. This function can be used from
+ * atomic contexts.
+ */
+bool pci_cfg_access_trylock(struct pci_dev *dev)
{
unsigned long flags;
- int was_blocked;
+ bool locked = true;
raw_spin_lock_irqsave(&pci_lock, flags);
- was_blocked = dev->block_ucfg_access;
- dev->block_ucfg_access = 1;
+ if (dev->block_cfg_access)
+ locked = false;
+ else
+ dev->block_cfg_access = 1;
raw_spin_unlock_irqrestore(&pci_lock, flags);
- /* If we BUG() inside the pci_lock, we're guaranteed to hose
- * the machine */
- BUG_ON(was_blocked);
+ return locked;
}
-EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
- * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
+ * pci_cfg_access_unlock - Unlock PCI config reads/writes
* @dev: pci device struct
*
- * This function allows userspace PCI config accesses to resume.
+ * This function allows PCI config accesses to resume.
*/
-void pci_unblock_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_unlock(struct pci_dev *dev)
{
unsigned long flags;
@@ -438,10 +460,10 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
- WARN_ON(!dev->block_ucfg_access);
+ WARN_ON(!dev->block_cfg_access);
- dev->block_ucfg_access = 0;
- wake_up_all(&pci_ucfg_wait);
+ dev->block_cfg_access = 0;
+ wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
-EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
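
For existing callers of the old interface, the conversion is mechanical. A
sketch assuming an ipr-like driver (the my_driver_*() hooks below are
illustrative; Brian King's actual ipr changes are in drivers/scsi/ipr.c,
not in this file):

/*
 * Hypothetical driver hooks showing the rename; only the PCI calls are
 * real, the surrounding functions are examples.
 */
#include <linux/pci.h>

static void my_driver_start_bist(struct pci_dev *pdev)
{
	/* Was: pci_block_user_cfg_access(pdev); */
	pci_cfg_access_lock(pdev);

	/*
	 * Userspace config reads/writes via sysfs now sleep until
	 * my_driver_finish_bist() releases the lock.
	 */
}

static void my_driver_finish_bist(struct pci_dev *pdev)
{
	/* Was: pci_unblock_user_cfg_access(pdev); */
	pci_cfg_access_unlock(pdev);
}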