author     Felix Fietkau <nbd@openwrt.org>            2011-04-17 23:28:09 +0200
committer  John W. Linville <linville@tuxdriver.com>  2011-04-19 15:38:06 -0400
commit     5519541d5a5f19893546883547e2f0f2e5934df7 (patch)
tree       ae581edfed4ce3382b22076cb03174756e004830 /drivers/net/wireless/ath/ath9k/xmit.c
parent     8e22ad323fb5b7cefb572bd8730e3abef95cdf90 (diff)
ath9k: fix powersave frame filtering/buffering in AP mode
This patch fixes a long-standing issue of pending packets in the queue being sent (and retransmitted many times) to sleeping stations. This was made worse by aggregation, through driver-internal retransmission of A-MPDU subframes.

Previously the hardware tx filter was cleared unconditionally for every single packet - with this patch it uses the IEEE80211_TX_CTL_CLEAR_PS_FILT flag for unaggregated frames.

A sta_notify driver op is added to stop aggregation for stations when they enter powersave mode. Subframes stay buffered inside the driver to ensure that the BlockAck window keeps a sane state.

Since the driver uses software aggregation, the clearing of the tx filter needs to be handled by the driver instead of mac80211 for aggregated frames.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
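For context: the sta_notify handling itself lives in main.c and is therefore outside this file's diff. The sketch below is an illustrative reconstruction (the handler name and exact body are assumptions, not verbatim commit content) of how the ath_tx_aggr_sleep()/ath_tx_aggr_wakeup() helpers added to xmit.c are meant to be driven on powersave transitions:

/*
 * Illustrative sketch only - the real handler is registered in main.c,
 * which is not part of this diffstat. It shows how the helpers exported
 * from xmit.c would be called on mac80211 powersave notifications.
 */
static void ath9k_sta_notify(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             enum sta_notify_cmd cmd,
                             struct ieee80211_sta *sta)
{
        struct ath_softc *sc = hw->priv;
        struct ath_node *an = (struct ath_node *) sta->drv_priv;

        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                /* stop scheduling the station's TIDs; if subframes stay
                 * buffered in the driver, make sure the TIM bit is set */
                an->sleeping = true;
                if (ath_tx_aggr_sleep(sc, an))
                        ieee80211_sta_set_tim(sta);
                break;
        case STA_NOTIFY_AWAKE:
                /* re-arm PS filter clearing and reschedule buffered TIDs */
                an->sleeping = false;
                ath_tx_aggr_wakeup(sc, an);
                break;
        }
}

ath_tx_aggr_sleep() returns true when frames remain buffered for the station, so the TIM bit can be set; ath_tx_aggr_wakeup() sets clear_ps_filter and reschedules any buffered, unpaused TIDs, matching the code added below.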
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c  101
1 file changed, 89 insertions, 12 deletions
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5943bdc4c8f..48ff8c22ba1 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -357,6 +357,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_frame_info *fi;
int nframes;
u8 tidno;
+ bool clear_filter;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -441,22 +442,24 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/* transmit completion */
acked_cnt++;
} else {
- if (!(tid->state & AGGR_CLEANUP) && retry) {
- if (fi->retries < ATH_MAX_SW_RETRIES) {
- ath_tx_set_retry(sc, txq, bf->bf_mpdu);
- txpending = 1;
- } else {
- bf->bf_state.bf_type |= BUF_XRETRY;
- txfail = 1;
- sendbar = 1;
- txfail_cnt++;
- }
- } else {
+ if ((tid->state & AGGR_CLEANUP) || !retry) {
/*
* cleanup in progress, just fail
* the un-acked sub-frames
*/
txfail = 1;
+ } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+ if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
+ !an->sleeping)
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu);
+
+ clear_filter = true;
+ txpending = 1;
+ } else {
+ bf->bf_state.bf_type |= BUF_XRETRY;
+ txfail = 1;
+ sendbar = 1;
+ txfail_cnt++;
}
}
@@ -496,6 +499,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
!txfail, sendbar);
} else {
/* retry the un-acked ones */
+ ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
if (bf->bf_next == NULL && bf_last->bf_stale) {
struct ath_buf *tbf;
@@ -546,7 +550,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
/* prepend un-acked frames to the beginning of the pending frame queue */
if (!list_empty(&bf_pending)) {
+ if (an->sleeping)
+ ieee80211_sta_set_tim(sta);
+
spin_lock_bh(&txq->axq_lock);
+ if (clear_filter)
+ tid->ac->clear_ps_filter = true;
list_splice(&bf_pending, &tid->buf_q);
ath_tx_queue_tid(txq, tid);
spin_unlock_bh(&txq->axq_lock);
@@ -816,6 +825,11 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(&bf_q, struct ath_buf, list);
bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
+ if (tid->ac->clear_ps_filter) {
+ tid->ac->clear_ps_filter = false;
+ ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
+ }
+
/* if only one frame, send as non-aggregate */
if (bf == bf->bf_lastbf) {
fi = get_frame_info(bf->bf_mpdu);
@@ -896,6 +910,67 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
ath_tx_flush_tid(sc, txtid);
}
+bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ bool buffered = false;
+ int tidno;
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < WME_NUM_TID; tidno++, tid++) {
+
+ if (!tid->sched)
+ continue;
+
+ ac = tid->ac;
+ txq = ac->txq;
+
+ spin_lock_bh(&txq->axq_lock);
+
+ if (!list_empty(&tid->buf_q))
+ buffered = true;
+
+ tid->sched = false;
+ list_del(&tid->list);
+
+ if (ac->sched) {
+ ac->sched = false;
+ list_del(&ac->list);
+ }
+
+ spin_unlock_bh(&txq->axq_lock);
+ }
+
+ return buffered;
+}
+
+void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
+{
+ struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac;
+ struct ath_txq *txq;
+ int tidno;
+
+ for (tidno = 0, tid = &an->tid[tidno];
+ tidno < WME_NUM_TID; tidno++, tid++) {
+
+ ac = tid->ac;
+ txq = ac->txq;
+
+ spin_lock_bh(&txq->axq_lock);
+ ac->clear_ps_filter = true;
+
+ if (!list_empty(&tid->buf_q) && !tid->paused) {
+ ath_tx_queue_tid(txq, tid);
+ ath_txq_schedule(sc, txq);
+ }
+
+ spin_unlock_bh(&txq->axq_lock);
+ }
+}
+
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
{
struct ath_atx_tid *txtid;
@@ -1491,7 +1566,6 @@ static int setup_tx_flags(struct sk_buff *skb)
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
int flags = 0;
- flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
flags |= ATH9K_TXDESC_INTREQ;
if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
@@ -1754,6 +1828,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
if (txctl->paprd)
bf->bf_state.bfs_paprd_timestamp = jiffies;
+ if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+ ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
+
ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
}