path: root/drivers/char
author	Paul Fulghum <paulkf@microgate.com>	2010-10-27 15:34:20 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-27 18:03:14 -0700
commit	ffd7d6baa65e6161cfd996a59d55c48571c2a5f3 (patch)
tree	d54651c55afcdc57a23b6aae48782b616219f2e3 /drivers/char
parent	dcade5ed16cce572e375bf4e63dd2150c351bf49 (diff)
synclink_gt: fix per device locking
Fix a long-standing bug with per-device locking. Each device has an associated spinlock for synchronizing access to hardware and state information with the ISR. A single hardware card has one or more devices.

Bug: non-ISR code correctly acquires and releases the per-device lock, but the ISR incorrectly always acquires and releases the lock of the first device on the card.

The decoupled, list-based interaction between the ISR and deferred processing allowed this to work in normal operation. Exceptional events, such as an application forcing hardware shutdown, reset, or reconfiguration while active, can trigger the bug.

Fix the ISR to acquire and release the per-device lock. The one exception is manipulation of the GPIO card resource, which is global and effectively owned by the first device of the card. Non-ISR access to the GPIO resource is changed to use the lock of the first device on the card.

Signed-off-by: Paul Fulghum <paulkf@microgate.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
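The locking rule the patch establishes is easier to see in isolation. Below is a minimal kernel-style C sketch, not taken from the driver: struct slgt_info is reduced to the handful of fields the example needs, MAX_PORTS is an assumed constant, and sketch_interrupt() stands in for slgt_interrupt(). It illustrates the rule described above: per-port work runs under that port's own lock, while the card-global GPIO resource stays under the lock of the first device on the card.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

#define MAX_PORTS 4	/* assumed value for this sketch */

struct slgt_info {
	spinlock_t lock;			 /* per device lock */
	struct slgt_info *port_array[MAX_PORTS]; /* all devices on the card */
	unsigned int port_count;
	bool gpio_present;
	bool pending_bh, bh_running, bh_requested;
	struct work_struct task;
};

/* dev_id is the first device on the card, so info->lock is that device's
 * lock and also serializes the card-global GPIO registers. */
static irqreturn_t sketch_interrupt(int irq, void *dev_id)
{
	struct slgt_info *info = dev_id;
	unsigned int i;

	/* per-port work: take that port's own lock, not the first port's */
	for (i = 0; i < info->port_count; i++) {
		struct slgt_info *port = info->port_array[i];

		if (port == NULL)
			continue;
		spin_lock(&port->lock);
		if (port->pending_bh && !port->bh_running && !port->bh_requested) {
			schedule_work(&port->task);
			port->bh_requested = true;
		}
		spin_unlock(&port->lock);
	}

	/* card-global GPIO state stays under the first device's lock */
	if (info->gpio_present) {
		spin_lock(&info->lock);
		/* ...read and clear latched GPIO interrupt status here... */
		spin_unlock(&info->lock);
	}

	return IRQ_HANDLED;
}

Non-ISR paths that touch the GPIO registers (set_gpio() and wait_gpio() in the diff below) follow the same convention by taking info->port_array[0]->lock with spin_lock_irqsave().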
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/synclink_gt.c | 28
1 files changed, 15 insertions, 13 deletions
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 1746d91205f..11999784383 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -2357,26 +2357,27 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
- spin_lock(&info->lock);
-
while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
info->irq_occurred = true;
for(i=0; i < info->port_count ; i++) {
if (info->port_array[i] == NULL)
continue;
+ spin_lock(&info->port_array[i]->lock);
if (gsr & (BIT8 << i))
isr_serial(info->port_array[i]);
if (gsr & (BIT16 << (i*2)))
isr_rdma(info->port_array[i]);
if (gsr & (BIT17 << (i*2)))
isr_tdma(info->port_array[i]);
+ spin_unlock(&info->port_array[i]->lock);
}
}
if (info->gpio_present) {
unsigned int state;
unsigned int changed;
+ spin_lock(&info->lock);
while ((changed = rd_reg32(info, IOSR)) != 0) {
DBGISR(("%s iosr=%08x\n", info->device_name, changed));
/* read latched state of GPIO signals */
@@ -2388,22 +2389,24 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
 					isr_gpio(info->port_array[i], changed, state);
 			}
 		}
+		spin_unlock(&info->lock);
 	}
 	for(i=0; i < info->port_count ; i++) {
 		struct slgt_info *port = info->port_array[i];
-
-		if (port && (port->port.count || port->netcount) &&
+		if (port == NULL)
+			continue;
+		spin_lock(&port->lock);
+		if ((port->port.count || port->netcount) &&
 		    port->pending_bh && !port->bh_running &&
 		    !port->bh_requested) {
 			DBGISR(("%s bh queued\n", port->device_name));
 			schedule_work(&port->task);
 			port->bh_requested = true;
 		}
+		spin_unlock(&port->lock);
 	}
-	spin_unlock(&info->lock);
-
 	DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
 	return IRQ_HANDLED;
 }
@@ -2906,7 +2909,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
 		 info->device_name, gpio.state, gpio.smask,
 		 gpio.dir, gpio.dmask));
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->port_array[0]->lock, flags);
 	if (gpio.dmask) {
 		data = rd_reg32(info, IODR);
 		data |= gpio.dmask & gpio.dir;
@@ -2919,7 +2922,7 @@ static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
 		data &= ~(gpio.smask & ~gpio.state);
 		wr_reg32(info, IOVR, data);
 	}
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 	return 0;
 }
@@ -3020,7 +3023,7 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
 		return -EINVAL;
 	init_cond_wait(&wait, gpio.smask);
-	spin_lock_irqsave(&info->lock, flags);
+	spin_lock_irqsave(&info->port_array[0]->lock, flags);
 	/* enable interrupts for watched pins */
 	wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
 	/* get current pin states */
@@ -3032,20 +3035,20 @@ static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
 	} else {
 		/* wait for target state */
 		add_cond_wait(&info->gpio_wait_q, &wait);
-		spin_unlock_irqrestore(&info->lock, flags);
+		spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 		schedule();
 		if (signal_pending(current))
 			rc = -ERESTARTSYS;
 		else
 			gpio.state = wait.data;
-		spin_lock_irqsave(&info->lock, flags);
+		spin_lock_irqsave(&info->port_array[0]->lock, flags);
 		remove_cond_wait(&info->gpio_wait_q, &wait);
 	}
 	/* disable all GPIO interrupts if no waiting processes */
 	if (info->gpio_wait_q == NULL)
 		wr_reg32(info, IOER, 0);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->port_array[0]->lock, flags);
 	if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
 		rc = -EFAULT;
@@ -3578,7 +3581,6 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
 	/* copy resource information from first port to others */
 	for (i = 1; i < port_count; ++i) {
-		port_array[i]->lock = port_array[0]->lock;
 		port_array[i]->irq_level = port_array[0]->irq_level;
 		port_array[i]->reg_addr = port_array[0]->reg_addr;
 		alloc_dma_bufs(port_array[i]);