author	Johannes Berg <johannes.berg@intel.com>	2010-03-22 13:42:43 -0700
committer	John W. Linville <linville@tuxdriver.com>	2010-03-30 15:37:28 -0400
commit	7236fe29fd72d17074574ba312e7f1bb9d10abaa (patch)
tree	6afd1fed2871daca52f5a8b3070337ae1b6ada0d /net/mac80211/tx.c
parent	05a9a1617026977422c7c5ed3aeac6f46fa2132c (diff)
mac80211: move netdev queue enabling to correct spot
"mac80211: fix skb buffering issue" still left a race between enabling the hardware queues and the virtual interface queues. In hindsight it's totally obvious that enabling the netdev queues for a hardware queue when the hardware queue is enabled is wrong, because it could well possible that we can fill the hw queue with packets we already have pending. Thus, we must only enable the netdev queues once all the pending packets have been processed and sent off to the device. In testing, I haven't been able to trigger this race condition, but it's clearly there, possibly only when aggregation is being enabled. Signed-off-by: Johannes Berg <johannes.berg@intel.com> Cc: stable@kernel.org Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--	net/mac80211/tx.c	6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cbe53ed4fb0..cfc473e1b05 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1991,6 +1991,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
 void ieee80211_tx_pending(unsigned long data)
 {
 	struct ieee80211_local *local = (struct ieee80211_local *)data;
+	struct ieee80211_sub_if_data *sdata;
 	unsigned long flags;
 	int i;
 	bool txok;
@@ -2029,6 +2030,11 @@ void ieee80211_tx_pending(unsigned long data)
 			if (!txok)
 				break;
 		}
+
+		if (skb_queue_empty(&local->pending[i]))
+			list_for_each_entry_rcu(sdata, &local->interfaces, list)
+				netif_tx_wake_queue(
+					netdev_get_tx_queue(sdata->dev, i));
 	}
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);