| author | Zhu Yi <yi.zhu@intel.com> | 2010-02-25 14:15:28 +0800 |
|---|---|---|
| committer | John W. Linville <linville@tuxdriver.com> | 2010-03-10 17:09:38 -0500 |
| commit | c03c6aefdc2c1f5785a5b0d1a3f7e48eeaae3505 (patch) | |
| tree | 5f00ed890b35708e04da459cfdd37ab9a7f46c23 /drivers/net/wireless/iwmc3200wifi/rx.c | |
| parent | 34dd5feb8b8b15654714731e1dbb34a6d37fb34e (diff) | |
iwmc3200wifi: protect rx_tickets and rx_packets[] lists
Protect the rx_tickets and rx_packets[] lists with spinlocks to fix
races between concurrent list operations. In iwmc3200wifi, both the
sdio_isr_worker and rx_worker workqueues can access the Rx ticket and
packet lists at the same time under high Rx load.
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
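The diffstat below is limited to rx.c, so the declaration and initialization of ticket_lock and packet_lock[] are not visible in this view. The sketch that follows is only an illustration of what the companion change (presumably in iwm.h and the priv setup path) would have to provide for the locking in rx.c to work; the field names are taken from the accesses in the diff, while the init helper name is hypothetical.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch only: lock fields assumed to be added to struct iwm_priv (iwm.h).
 * IWM_RX_ID_HASH is the existing hash-bucket count used by rx_packets[]. */
struct iwm_priv {
	/* ... existing members ... */
	struct list_head rx_tickets;
	spinlock_t ticket_lock;                  /* protects rx_tickets */
	struct list_head rx_packets[IWM_RX_ID_HASH];
	spinlock_t packet_lock[IWM_RX_ID_HASH];  /* protects rx_packets[i] */
	/* ... */
};

/* Hypothetical helper: each spinlock must be initialized once before the
 * SDIO ISR worker or the Rx worker can contend on the lists. */
static void iwm_rx_locks_init(struct iwm_priv *iwm)
{
	int i;

	spin_lock_init(&iwm->ticket_lock);
	for (i = 0; i < IWM_RX_ID_HASH; i++)
		spin_lock_init(&iwm->packet_lock[i]);
}
```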
Diffstat (limited to 'drivers/net/wireless/iwmc3200wifi/rx.c')
-rw-r--r-- | drivers/net/wireless/iwmc3200wifi/rx.c | 24 |
1 file changed, 20 insertions, 4 deletions
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 38d950b0f24..c8a31be1688 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -344,10 +344,15 @@ static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
 	u8 id_hash = IWM_RX_ID_GET_HASH(id);
 	struct iwm_rx_packet *packet;
 
+	spin_lock(&iwm->packet_lock[id_hash]);
 	list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
-		if (packet->id == id)
+		if (packet->id == id) {
+			list_del(&packet->node);
+			spin_unlock(&iwm->packet_lock[id_hash]);
 			return packet;
+		}
 
+	spin_unlock(&iwm->packet_lock[id_hash]);
 	return NULL;
 }
 
@@ -385,18 +390,22 @@ void iwm_rx_free(struct iwm_priv *iwm)
 	struct iwm_rx_packet *packet, *np;
 	int i;
 
+	spin_lock(&iwm->ticket_lock);
 	list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
 		list_del(&ticket->node);
 		iwm_rx_ticket_node_free(ticket);
 	}
+	spin_unlock(&iwm->ticket_lock);
 
 	for (i = 0; i < IWM_RX_ID_HASH; i++) {
+		spin_lock(&iwm->packet_lock[i]);
 		list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
 					 node) {
 			list_del(&packet->node);
 			kfree_skb(packet->skb);
 			kfree(packet);
 		}
+		spin_unlock(&iwm->packet_lock[i]);
 	}
 }
 
@@ -424,7 +433,9 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
 			  ticket->action == IWM_RX_TICKET_RELEASE ?
 			  "RELEASE" : "DROP",
 			  ticket->id);
+		spin_lock(&iwm->ticket_lock);
 		list_add_tail(&ticket_node->node, &iwm->rx_tickets);
+		spin_unlock(&iwm->ticket_lock);
 
 		/*
 		 * We received an Rx ticket, most likely there's
@@ -457,6 +468,7 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
 	struct iwm_rx_packet *packet;
 	u16 id, buf_offset;
 	u32 packet_size;
+	u8 id_hash;
 
 	IWM_DBG_RX(iwm, DBG, "\n");
 
@@ -474,7 +486,10 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
 	if (IS_ERR(packet))
 		return PTR_ERR(packet);
 
-	list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]);
+	id_hash = IWM_RX_ID_GET_HASH(id);
+	spin_lock(&iwm->packet_lock[id_hash]);
+	list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
+	spin_unlock(&iwm->packet_lock[id_hash]);
 
 	/* We might (unlikely) have received the packet _after_ the ticket */
 	queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -1664,6 +1679,7 @@ void iwm_rx_worker(struct work_struct *work)
 	 * We stop whenever a ticket is missing its packet, as we're
 	 * supposed to send the packets in order.
 	 */
+	spin_lock(&iwm->ticket_lock);
 	list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
 		struct iwm_rx_packet *packet =
 			iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1672,12 +1688,12 @@ void iwm_rx_worker(struct work_struct *work)
 		if (!packet) {
 			IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
 				   "to be handled first\n",
 				   le16_to_cpu(ticket->ticket->id));
-			return;
+			break;
 		}
 
 		list_del(&ticket->node);
-		list_del(&packet->node);
 		iwm_rx_process_packet(iwm, packet, ticket);
 	}
+	spin_unlock(&iwm->ticket_lock);
 }
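For readers following the locking, here is a condensed, non-verbatim view of the consumer path after the patch: iwm_rx_worker() now holds ticket_lock across the whole loop, and iwm_rx_packet_get() takes the matching per-bucket packet_lock internally while it unlinks the packet, so the nesting order is always ticket_lock then packet_lock[i]. Plain spin_lock() appears to be enough here since, per the commit message, both contending paths (sdio_isr_worker and rx_worker) run in workqueue context. The return-to-break change falls out of the same structure: the early exit must still reach the unlock at the bottom.

```c
/* Condensed sketch of the patched consumer path, not verbatim driver code. */
void iwm_rx_worker(struct work_struct *work)
{
	struct iwm_priv *iwm = container_of(work, struct iwm_priv, rx_worker);
	struct iwm_rx_ticket_node *ticket, *next;

	spin_lock(&iwm->ticket_lock);           /* outer lock: ticket list */
	list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
		/* Takes packet_lock[id_hash] internally and unlinks the
		 * packet under it, so concurrent iwm_ntf_rx_packet() calls
		 * adding to the same bucket cannot corrupt the list. */
		struct iwm_rx_packet *packet =
			iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));

		if (!packet)
			break;  /* stop to keep in-order delivery; unlike the
				 * old "return", break still reaches the
				 * unlock below */

		list_del(&ticket->node);
		iwm_rx_process_packet(iwm, packet, ticket);
	}
	spin_unlock(&iwm->ticket_lock);
}
```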