author	Neil Horman <nhorman@tuxdriver.com>	2010-10-13 16:01:50 +0000
committer	David S. Miller <davem@davemloft.net>	2010-10-18 08:32:07 -0700
commit	e843fa50887582c867d8b7995f81fe9c1a076806 (patch)
tree	5a8e34f5afaeeed539d519b0e91b7a4c92268b11 /drivers/net/bonding/bond_sysfs.c
parent	c2355e1ab910278a94d487b78590ee3c8eecd08a (diff)
bonding: Fix deadlock in bonding driver resulting from internal locking when using netpoll
The monitoring paths in the bonding driver take write locks that are shared with the tx path. If netconsole is in use, these paths can call printk, which puts us in the netpoll tx path; if netconsole is attached to the bonding driver, this results in a deadlock (the xmit_lock guards in netpoll_send_skb are useless here, as the monitoring paths in the bonding driver don't claim the xmit_lock, nor should they).

The solution is to use a per-cpu flag internal to the driver to indicate when a cpu is holding the lock in a path that might recurse into the driver's tx path via netconsole. By checking this flag on transmit, we can defer the sending of the netconsole frames until a later time, using the retransmit feature of netpoll_send_skb that is triggered by the return code NETDEV_TX_BUSY.

I've tested this and am able to transmit via netconsole while causing failover conditions on the bond slave links.

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
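The block_netpoll_tx()/unblock_netpoll_tx() helpers called in the hunks below are introduced elsewhere in this patch; the bond_sysfs.c diff only shows their call sites. As an illustration of the per-cpu flag scheme the commit message describes, here is a minimal sketch. The per-cpu variable name and the is_netpoll_tx_blocked() helper are assumptions for illustration, not a verbatim copy of the patch:

/* Sketch only: a per-cpu flag marks a CPU that holds bonding locks in a
 * path which may recurse into the tx path via printk/netconsole.
 * Variable and helper names below are illustrative. */
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, netpoll_tx_blocked);	/* assumed name */

static inline void block_netpoll_tx(void)
{
	/* Stay on this CPU so a recursive netpoll transmit sees the flag. */
	preempt_disable();
	this_cpu_inc(netpoll_tx_blocked);
}

static inline void unblock_netpoll_tx(void)
{
	this_cpu_dec(netpoll_tx_blocked);
	preempt_enable();
}

/* Assumed helper for the transmit path: nonzero means "defer this frame". */
static inline int is_netpoll_tx_blocked(void)
{
	return this_cpu_read(netpoll_tx_blocked);
}

The calls are paired around the sections that take bond->lock and bond->curr_slave_lock, so a netconsole frame generated by a printk issued while those locks are held is deferred and retried by netpoll later, instead of re-entering the bonding tx path on the same CPU.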
Diffstat (limited to 'drivers/net/bonding/bond_sysfs.c')
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 8
1 file changed, 8 insertions, 0 deletions
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 01b4c3f5d9e..8fd0174c538 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1066,6 +1066,7 @@ static ssize_t bonding_store_primary(struct device *d,
if (!rtnl_trylock())
return restart_syscall();
+ block_netpoll_tx();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
@@ -1101,6 +1102,7 @@ static ssize_t bonding_store_primary(struct device *d,
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
+ unblock_netpoll_tx();
rtnl_unlock();
return count;
@@ -1146,11 +1148,13 @@ static ssize_t bonding_store_primary_reselect(struct device *d,
bond->dev->name, pri_reselect_tbl[new_value].modename,
new_value);
+ block_netpoll_tx();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
+ unblock_netpoll_tx();
out:
rtnl_unlock();
return ret;
@@ -1232,6 +1236,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
if (!rtnl_trylock())
return restart_syscall();
+
+ block_netpoll_tx();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
@@ -1288,6 +1294,8 @@ static ssize_t bonding_store_active_slave(struct device *d,
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
+ unblock_netpoll_tx();
+
rtnl_unlock();
return count;
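The transmit-side half of the scheme is not part of this file's diff. As a hedged sketch based only on the commit message (the function name bond_start_xmit and the early-return placement are assumptions), the check that triggers netpoll's retransmit handling might look like:

/* Sketch: if this CPU set the block flag, tell netpoll to retry later.
 * A fuller implementation would restrict the check to netpoll-originated
 * transmits so ordinary traffic is not deferred. */
static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(is_netpoll_tx_blocked()))
		return NETDEV_TX_BUSY;	/* netpoll_send_skb will retransmit */

	/* ... normal bonding transmit logic ... */
	return NETDEV_TX_OK;
}

Returning NETDEV_TX_BUSY here is what engages netpoll_send_skb's retransmit feature mentioned in the commit message, so the console frame goes out once the monitoring path has dropped its locks and called unblock_netpoll_tx().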