author     Francois Romieu <romieu@fr.zoreil.com>       2005-11-07 01:50:03 +0100
committer  Jeff Garzik <jgarzik@pobox.com>              2005-11-07 03:37:05 -0500
commit     c719369350bc566d2643067421fbf05f4b90e70b (patch)
tree       3526675bd6fc522268915ea6536e10a62c74367b
parent     b78612b796b0d6cdfba553d456eff008278830e3 (diff)
[PATCH] b44: b44_start_xmit returns with a lock held when it fails allocating a bounce buffer
The patch simply factors out the release of the lock.
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
-rw-r--r--   drivers/net/b44.c   17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 0ee3e27969c..b334cc310bc 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -948,6 +948,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct b44 *bp = netdev_priv(dev);
 	struct sk_buff *bounce_skb;
+	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
 
@@ -957,10 +958,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
-		return 1;
+		goto err_out;
 	}
 
 	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -971,7 +971,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
 					     GFP_ATOMIC|GFP_DMA);
 		if (!bounce_skb)
-			return NETDEV_TX_BUSY;
+			goto err_out;
 
 		mapping = pci_map_single(bp->pdev, bounce_skb->data,
 					 len, PCI_DMA_TODEVICE);
@@ -979,7 +979,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			pci_unmap_single(bp->pdev, mapping, len,
 					 PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(bounce_skb);
-			return NETDEV_TX_BUSY;
+			goto err_out;
 		}
 
 		memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
@@ -1019,11 +1019,16 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
+	dev->trans_start = jiffies;
+
+out_unlock:
 	spin_unlock_irq(&bp->lock);
 
-	dev->trans_start = jiffies;
+	return rc;
 
-	return 0;
+err_out:
+	rc = NETDEV_TX_BUSY;
+	goto out_unlock;
 }
 
 static int b44_change_mtu(struct net_device *dev, int new_mtu)
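For readers who want to see the pattern outside the kernel sources, the sketch below reproduces the same error-path structure in plain userspace C: every exit from the transmit routine funnels through a single label that drops the lock, so no failure path can return with it still held. This is only an illustration; the pthread mutex, the fake_ring structure, fake_start_xmit and the bounce-buffer handling are invented stand-ins, not the b44 driver's code.

/*
 * Userspace sketch of the factored-out unlock (illustration only, not
 * the b44 driver): all exits go through out_unlock, so no error path
 * can return while the lock is still held.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TX_OK   0
#define TX_BUSY 1

struct fake_ring {
	pthread_mutex_t lock;
	int slots_free;
};

static int fake_start_xmit(struct fake_ring *ring, const char *pkt)
{
	int rc = TX_OK;
	char *bounce = NULL;

	pthread_mutex_lock(&ring->lock);

	if (ring->slots_free < 1) {
		/* ring full: report and fall into the common error path */
		fprintf(stderr, "ring full\n");
		goto err_out;
	}

	bounce = malloc(64);		/* stands in for __dev_alloc_skb() */
	if (!bounce)
		goto err_out;		/* was "return NETDEV_TX_BUSY" with the lock held */

	snprintf(bounce, 64, "%s", pkt);
	ring->slots_free--;
	printf("queued: %s\n", bounce);

out_unlock:
	pthread_mutex_unlock(&ring->lock);	/* the single place the lock is released */
	free(bounce);				/* scratch copy not kept; free(NULL) is a no-op */
	return rc;

err_out:
	rc = TX_BUSY;
	goto out_unlock;
}

int main(void)
{
	struct fake_ring ring = { PTHREAD_MUTEX_INITIALIZER, 1 };

	fake_start_xmit(&ring, "first packet");		/* succeeds */
	fake_start_xmit(&ring, "second packet");	/* ring full, still unlocks */
	return 0;
}

The success path falls through to out_unlock and returns rc as initialised (TX_OK here, NETDEV_TX_OK in the driver), while each failure path sets the busy code once and jumps to the same label: that single exit point is the factoring the patch performs.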