author     Reinette Chatre <reinette.chatre@intel.com>    2009-09-11 10:38:12 -0700
committer  John W. Linville <linville@tuxdriver.com>      2009-09-23 11:35:40 -0400
commit     de0bd50845eb5935ce3d503c5d2f565d6cb9ece1
tree       11403d40a5985886bf5fa04f6006654e11130c83    /drivers/net/wireless/iwlwifi/iwl-rx.c
parent     c929c5a1281b666fabc9aa94c607932ec6d28c6d
iwlwifi: fix potential rx buffer loss
RX handling maintains a few lists that keep track of the RX buffers. Buffers move from one list to the other as they are used, replenished, and again made available for usage. In one such instance, when a buffer is used it enters the "rx_used" list. When buffers are replenished, an skb is attached to the buffer and it is moved to the "rx_free" list. The problem is that the buffer is removed from the "rx_used" list _before_ the skb is allocated. Thus, if the skb allocation fails, the buffer stays off the "rx_used" list and is lost for future use.

Fix this by allocating the skb first and only then removing the buffer from the list and attaching the skb to it. An additional check of "rx_used" after the allocation avoids doing this unnecessarily if the list has meanwhile become empty.

Reported-by: Rick Farrington <rickdic@hotmail.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
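For illustration only, the following userspace sketch models the ordering the fix establishes. The names and types are hypothetical simplifications (singly linked lists in place of list_head, malloc() in place of alloc_skb(), a pthread mutex in place of the rx queue spinlock); the real change is in the diff further below.

/* Sketch: allocate first, and only take a buffer off rx_used once the
 * allocation has succeeded, so a failed allocation never strands a
 * buffer off both lists.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rx_mem_buffer {
        struct rx_mem_buffer *next;
        void *skb;                      /* stands in for struct sk_buff */
};

static struct rx_mem_buffer *rx_used;   /* used buffers awaiting an skb  */
static struct rx_mem_buffer *rx_free;   /* replenished, ready for the HW */
static pthread_mutex_t rxq_lock = PTHREAD_MUTEX_INITIALIZER;

static void rx_allocate(size_t buf_size)
{
        while (1) {
                pthread_mutex_lock(&rxq_lock);
                if (!rx_used) {                 /* nothing to replenish */
                        pthread_mutex_unlock(&rxq_lock);
                        return;
                }
                pthread_mutex_unlock(&rxq_lock);

                /* Allocate outside the lock; on failure rx_used is
                 * untouched, so no buffer is lost -- the bug being fixed. */
                void *skb = malloc(buf_size);
                if (!skb) {
                        fprintf(stderr, "cannot allocate rx buffer\n");
                        break;
                }

                /* Re-check under the lock: the list may have drained while
                 * we were allocating.  If so, drop the spare allocation.   */
                pthread_mutex_lock(&rxq_lock);
                struct rx_mem_buffer *rxb = rx_used;
                if (!rxb) {
                        pthread_mutex_unlock(&rxq_lock);
                        free(skb);
                        return;
                }
                rx_used = rxb->next;

                /* Attach the skb and move the buffer to rx_free. */
                rxb->skb = skb;
                rxb->next = rx_free;
                rx_free = rxb;
                pthread_mutex_unlock(&rxq_lock);
        }
}

int main(void)
{
        static struct rx_mem_buffer bufs[4];
        int i, n = 0;

        /* Seed rx_used with a few empty buffers, then replenish them. */
        for (i = 0; i < 4; i++) {
                bufs[i].next = rx_used;
                rx_used = &bufs[i];
        }
        rx_allocate(2048);

        for (struct rx_mem_buffer *b = rx_free; b; b = b->next)
                n++;
        printf("%d buffers replenished\n", n);
        return 0;
}

Built with cc -pthread, the sketch prints "4 buffers replenished"; the property of interest is that a failed allocation leaves every remaining buffer on rx_used instead of stranding one off both lists.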
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
 drivers/net/wireless/iwlwifi/iwl-rx.c | 24 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 8150c5c3a16..b90adcb73b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -239,26 +239,22 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
         struct iwl_rx_queue *rxq = &priv->rxq;
         struct list_head *element;
         struct iwl_rx_mem_buffer *rxb;
+        struct sk_buff *skb;
         unsigned long flags;
 
         while (1) {
                 spin_lock_irqsave(&rxq->lock, flags);
-
                 if (list_empty(&rxq->rx_used)) {
                         spin_unlock_irqrestore(&rxq->lock, flags);
                         return;
                 }
-                element = rxq->rx_used.next;
-                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
-                list_del(element);
-
                 spin_unlock_irqrestore(&rxq->lock, flags);
 
                 /* Alloc a new receive buffer */
-                rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
+                skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
                                 priority);
 
-                if (!rxb->skb) {
+                if (!skb) {
                         IWL_CRIT(priv, "Can not allocate SKB buffers\n");
                         /* We don't reschedule replenish work here -- we will
                          * call the restock method and if it still needs
@@ -266,6 +262,20 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
                         break;
                 }
+                spin_lock_irqsave(&rxq->lock, flags);
+
+                if (list_empty(&rxq->rx_used)) {
+                        spin_unlock_irqrestore(&rxq->lock, flags);
+                        dev_kfree_skb_any(skb);
+                        return;
+                }
+                element = rxq->rx_used.next;
+                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+                list_del(element);
+
+                spin_unlock_irqrestore(&rxq->lock, flags);
+
+                rxb->skb = skb;
                 /* Get physical address of RB/SKB */
                 rxb->real_dma_addr = pci_map_single(
                                 priv->pci_dev,