Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r-- | drivers/net/wireless/ath/ath9k/xmit.c | 894
1 file changed, 468 insertions, 426 deletions
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index cc595712f51..03b0a651a59 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -48,19 +48,23 @@ static u16 bits_per_symbol[][2] = { #define IS_HT_RATE(_rate) ((_rate) & 0x80) static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid, - struct list_head *bf_head); + struct ath_atx_tid *tid, struct sk_buff *skb); +static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, + int tx_flags, struct ath_txq *txq); static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, struct ath_tx_status *ts, int txok, int sendbar); static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, struct list_head *head, bool internal); -static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len); static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, - int txok, bool update_rc); + int txok); static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, int seqno); +static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_atx_tid *tid, + struct sk_buff *skb); enum { MCS_HT20, @@ -129,7 +133,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) spin_lock_bh(&txq->axq_lock); tid->paused = false; - if (list_empty(&tid->buf_q)) + if (skb_queue_empty(&tid->buf_q)) goto unlock; ath_tx_queue_tid(txq, tid); @@ -149,6 +153,7 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb) static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { struct ath_txq *txq = tid->ac->txq; + struct sk_buff *skb; struct ath_buf *bf; struct list_head bf_head; struct ath_tx_status ts; @@ -159,17 +164,17 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) memset(&ts, 0, sizeof(ts)); spin_lock_bh(&txq->axq_lock); - while (!list_empty(&tid->buf_q)) { - bf = list_first_entry(&tid->buf_q, struct ath_buf, list); - list_move_tail(&bf->list, &bf_head); + while ((skb = __skb_dequeue(&tid->buf_q))) { + fi = get_frame_info(skb); + bf = fi->bf; spin_unlock_bh(&txq->axq_lock); - fi = get_frame_info(bf->bf_mpdu); - if (fi->retries) { - ath_tx_update_baw(sc, tid, fi->seqno); + if (bf && fi->retries) { + list_add_tail(&bf->list, &bf_head); + ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1); } else { - ath_tx_send_normal(sc, txq, NULL, &bf_head); + ath_tx_send_normal(sc, txq, NULL, skb); } spin_lock_bh(&txq->axq_lock); } @@ -219,6 +224,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid) { + struct sk_buff *skb; struct ath_buf *bf; struct list_head bf_head; struct ath_tx_status ts; @@ -227,16 +233,21 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, memset(&ts, 0, sizeof(ts)); INIT_LIST_HEAD(&bf_head); - for (;;) { - if (list_empty(&tid->buf_q)) - break; + while ((skb = __skb_dequeue(&tid->buf_q))) { + fi = get_frame_info(skb); + bf = fi->bf; - bf = list_first_entry(&tid->buf_q, struct ath_buf, list); - list_move_tail(&bf->list, &bf_head); + if (!bf) { + spin_unlock(&txq->axq_lock); + ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); + spin_lock(&txq->axq_lock); + continue; + } + + list_add_tail(&bf->list, &bf_head); - fi = get_frame_info(bf->bf_mpdu); if 
(fi->retries) - ath_tx_update_baw(sc, tid, fi->seqno); + ath_tx_update_baw(sc, tid, bf->bf_state.seqno); spin_unlock(&txq->axq_lock); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); @@ -251,6 +262,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, struct sk_buff *skb) { struct ath_frame_info *fi = get_frame_info(skb); + struct ath_buf *bf = fi->bf; struct ieee80211_hdr *hdr; TX_STAT_INC(txq->axq_qnum, a_retries); @@ -259,6 +271,8 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, hdr = (struct ieee80211_hdr *)skb->data; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); + dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, + sizeof(*hdr), DMA_TO_DEVICE); } static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) @@ -326,7 +340,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf, while (bf) { fi = get_frame_info(bf->bf_mpdu); - ba_index = ATH_BA_INDEX(seq_st, fi->seqno); + ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno); (*nframes)++; if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) @@ -349,7 +363,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ieee80211_tx_info *tx_info; struct ath_atx_tid *tid = NULL; struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; - struct list_head bf_head, bf_pending; + struct list_head bf_head; + struct sk_buff_head bf_pending; u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; u32 ba[WME_BA_BMP_SIZE >> 5]; int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; @@ -358,7 +373,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_frame_info *fi; int nframes; u8 tidno; - bool clear_filter; + bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH); skb = bf->bf_mpdu; hdr = (struct ieee80211_hdr *)skb->data; @@ -377,11 +392,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, while (bf) { bf_next = bf->bf_next; - bf->bf_state.bf_type |= BUF_XRETRY; if (!bf->bf_stale || bf_next != NULL) list_move_tail(&bf->list, &bf_head); - ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false); ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0); @@ -422,11 +435,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } } - INIT_LIST_HEAD(&bf_pending); - INIT_LIST_HEAD(&bf_head); + __skb_queue_head_init(&bf_pending); ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); while (bf) { + u16 seqno = bf->bf_state.seqno; + txfail = txpending = sendbar = 0; bf_next = bf->bf_next; @@ -434,7 +448,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, tx_info = IEEE80211_SKB_CB(skb); fi = get_frame_info(skb); - if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) { + if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) { /* transmit completion, subframe is * acked by block ack */ acked_cnt++; @@ -448,15 +462,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * the un-acked sub-frames */ txfail = 1; + } else if (flush) { + txpending = 1; } else if (fi->retries < ATH_MAX_SW_RETRIES) { - if (!(ts->ts_status & ATH9K_TXERR_FILT) || - !an->sleeping) + if (txok || !an->sleeping) ath_tx_set_retry(sc, txq, bf->bf_mpdu); - clear_filter = true; txpending = 1; } else { - bf->bf_state.bf_type |= BUF_XRETRY; txfail = 1; sendbar = 1; txfail_cnt++; @@ -467,10 +480,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * Make sure the last desc is reclaimed if it * not a holding desc. 
*/ - if (!bf_last->bf_stale || bf_next != NULL) + INIT_LIST_HEAD(&bf_head); + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || + bf_next != NULL || !bf_last->bf_stale) list_move_tail(&bf->list, &bf_head); - else - INIT_LIST_HEAD(&bf_head); if (!txpending || (tid->state & AGGR_CLEANUP)) { /* @@ -478,22 +491,19 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * block-ack window */ spin_lock_bh(&txq->axq_lock); - ath_tx_update_baw(sc, tid, fi->seqno); + ath_tx_update_baw(sc, tid, seqno); spin_unlock_bh(&txq->axq_lock); if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { memcpy(tx_info->control.rates, rates, sizeof(rates)); - ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true); + ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok); rc_update = false; - } else { - ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false); } ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, !txfail, sendbar); } else { /* retry the un-acked ones */ - ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false); if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { if (bf->bf_next == NULL && bf_last->bf_stale) { struct ath_buf *tbf; @@ -506,29 +516,17 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, */ if (!tbf) { spin_lock_bh(&txq->axq_lock); - ath_tx_update_baw(sc, tid, fi->seqno); + ath_tx_update_baw(sc, tid, seqno); spin_unlock_bh(&txq->axq_lock); - bf->bf_state.bf_type |= - BUF_XRETRY; - ath_tx_rc_status(sc, bf, ts, nframes, - nbad, 0, false); ath_tx_complete_buf(sc, bf, txq, &bf_head, - ts, 0, 0); + ts, 0, + !flush); break; } - ath9k_hw_cleartxdesc(sc->sc_ah, - tbf->bf_desc); - list_add_tail(&tbf->list, &bf_head); - } else { - /* - * Clear descriptor status words for - * software retry - */ - ath9k_hw_cleartxdesc(sc->sc_ah, - bf->bf_desc); + fi->bf = tbf; } } @@ -536,22 +534,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, * Put this buffer to the temporary pending * queue to retain ordering */ - list_splice_tail_init(&bf_head, &bf_pending); + __skb_queue_tail(&bf_pending, skb); } bf = bf_next; } /* prepend un-acked frames to the beginning of the pending frame queue */ - if (!list_empty(&bf_pending)) { + if (!skb_queue_empty(&bf_pending)) { if (an->sleeping) - ieee80211_sta_set_tim(sta); + ieee80211_sta_set_buffered(sta, tid->tidno, true); spin_lock_bh(&txq->axq_lock); - if (clear_filter) - tid->ac->clear_ps_filter = true; - list_splice(&bf_pending, &tid->buf_q); - ath_tx_queue_tid(txq, tid); + skb_queue_splice(&bf_pending, &tid->buf_q); + if (!an->sleeping) { + ath_tx_queue_tid(txq, tid); + + if (ts->ts_status & ATH9K_TXERR_FILT) + tid->ac->clear_ps_filter = true; + } spin_unlock_bh(&txq->axq_lock); } @@ -566,8 +567,32 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, rcu_read_unlock(); - if (needreset) - ath_reset(sc, false); + if (needreset) { + RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR); + ieee80211_queue_work(sc->hw, &sc->hw_reset_work); + } +} + +static bool ath_lookup_legacy(struct ath_buf *bf) +{ + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ieee80211_tx_rate *rates; + int i; + + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + rates = tx_info->control.rates; + + for (i = 0; i < 4; i++) { + if (!rates[i].count || rates[i].idx < 0) + break; + + if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) + return true; + } + + return false; } static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, @@ -643,8 +668,10 @@ static u32 ath_lookup_rate(struct 
ath_softc *sc, struct ath_buf *bf, * meet the minimum required mpdudensity. */ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, - struct ath_buf *bf, u16 frmlen) + struct ath_buf *bf, u16 frmlen, + bool first_subfrm) { +#define FIRST_DESC_NDELIMS 60 struct sk_buff *skb = bf->bf_mpdu; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); u32 nsymbits, nsymbols; @@ -667,6 +694,14 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, ndelim += ATH_AGGR_ENCRYPTDELIM; /* + * Add delimiter when using RTS/CTS with aggregation + * and non enterprise AR9003 card + */ + if (first_subfrm && !AR_SREV_9580_10_OR_LATER(sc->sc_ah) && + (sc->sc_ah->ent_mode & AR_ENT_OTP_MIN_PKT_SIZE_DISABLE)) + ndelim = max(ndelim, FIRST_DESC_NDELIMS); + + /* * Convert desired mpdu density from microeconds to bytes based * on highest rate in rate series (i.e. first rate) to determine * required minimum length for subframe. Take into account @@ -711,22 +746,33 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, int *aggr_len) { #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) - struct ath_buf *bf, *bf_first, *bf_prev = NULL; + struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL; int rl = 0, nframes = 0, ndelim, prev_al = 0; u16 aggr_limit = 0, al = 0, bpad = 0, al_delta, h_baw = tid->baw_size / 2; enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; - - bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); + struct sk_buff *skb; + u16 seqno; do { - bf = list_first_entry(&tid->buf_q, struct ath_buf, list); - fi = get_frame_info(bf->bf_mpdu); + skb = skb_peek(&tid->buf_q); + fi = get_frame_info(skb); + bf = fi->bf; + if (!fi->bf) + bf = ath_tx_setup_buffer(sc, txq, tid, skb); + + if (!bf) + continue; + + bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; + seqno = bf->bf_state.seqno; + if (!bf_first) + bf_first = bf; /* do not step over block-ack window */ - if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) { + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) { status = ATH_AGGR_BAW_CLOSED; break; } @@ -740,14 +786,14 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, al_delta = ATH_AGGR_DELIM_SZ + fi->framelen; if (nframes && - (aggr_limit < (al + bpad + al_delta + prev_al))) { + ((aggr_limit < (al + bpad + al_delta + prev_al)) || + ath_lookup_legacy(bf))) { status = ATH_AGGR_LIMITED; break; } tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); - if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || - !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))) + if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) break; /* do not exceed subframe limit */ @@ -755,7 +801,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, status = ATH_AGGR_LIMITED; break; } - nframes++; /* add padding for previous frame to aggregation length */ al += bpad + al_delta; @@ -764,25 +809,26 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, * Get the delimiters needed to meet the MPDU * density for this node. 
*/ - ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen); + ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen, + !nframes); bpad = PADBYTES(al_delta) + (ndelim << 2); + nframes++; bf->bf_next = NULL; - ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0); /* link buffers of this frame to the aggregate */ if (!fi->retries) - ath_tx_addto_baw(sc, tid, fi->seqno); - ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); - list_move_tail(&bf->list, bf_q); - if (bf_prev) { + ath_tx_addto_baw(sc, tid, seqno); + bf->bf_state.ndelim = ndelim; + + __skb_unlink(skb, &tid->buf_q); + list_add_tail(&bf->list, bf_q); + if (bf_prev) bf_prev->bf_next = bf; - ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc, - bf->bf_daddr); - } + bf_prev = bf; - } while (!list_empty(&tid->buf_q)); + } while (!skb_queue_empty(&tid->buf_q)); *aggr_len = al; @@ -790,17 +836,236 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, #undef PADBYTES } +/* + * rix - rate index + * pktlen - total bytes (delims + data + fcs + pads + pad delims) + * width - 0 for 20 MHz, 1 for 40 MHz + * half_gi - to use 4us v/s 3.6 us for symbol time + */ +static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, + int width, int half_gi, bool shortPreamble) +{ + u32 nbits, nsymbits, duration, nsymbols; + int streams; + + /* find number of symbols: PLCP + data */ + streams = HT_RC_2_STREAMS(rix); + nbits = (pktlen << 3) + OFDM_PLCP_BITS; + nsymbits = bits_per_symbol[rix % 8][width] * streams; + nsymbols = (nbits + nsymbits - 1) / nsymbits; + + if (!half_gi) + duration = SYMBOL_TIME(nsymbols); + else + duration = SYMBOL_TIME_HALFGI(nsymbols); + + /* addup duration for legacy/ht training and signal fields */ + duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); + + return duration; +} + +static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, + struct ath_tx_info *info, int len) +{ + struct ath_hw *ah = sc->sc_ah; + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ieee80211_tx_rate *rates; + const struct ieee80211_rate *rate; + struct ieee80211_hdr *hdr; + int i; + u8 rix = 0; + + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + rates = tx_info->control.rates; + hdr = (struct ieee80211_hdr *)skb->data; + + /* set dur_update_en for l-sig computation except for PS-Poll frames */ + info->dur_update = !ieee80211_is_pspoll(hdr->frame_control); + + /* + * We check if Short Preamble is needed for the CTS rate by + * checking the BSS's global flag. + * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. 
+ */ + rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); + info->rtscts_rate = rate->hw_value; + if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) + info->rtscts_rate |= rate->hw_value_short; + + for (i = 0; i < 4; i++) { + bool is_40, is_sgi, is_sp; + int phy; + + if (!rates[i].count || (rates[i].idx < 0)) + continue; + + rix = rates[i].idx; + info->rates[i].Tries = rates[i].count; + + if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; + info->flags |= ATH9K_TXDESC_RTSENA; + } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; + info->flags |= ATH9K_TXDESC_CTSENA; + } + + if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + info->rates[i].RateFlags |= ATH9K_RATESERIES_2040; + if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) + info->rates[i].RateFlags |= ATH9K_RATESERIES_HALFGI; + + is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); + is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); + is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); + + if (rates[i].flags & IEEE80211_TX_RC_MCS) { + /* MCS rates */ + info->rates[i].Rate = rix | 0x80; + info->rates[i].ChSel = ath_txchainmask_reduction(sc, + ah->txchainmask, info->rates[i].Rate); + info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len, + is_40, is_sgi, is_sp); + if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) + info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC; + continue; + } + + /* legacy rates */ + if ((tx_info->band == IEEE80211_BAND_2GHZ) && + !(rate->flags & IEEE80211_RATE_ERP_G)) + phy = WLAN_RC_PHY_CCK; + else + phy = WLAN_RC_PHY_OFDM; + + rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; + info->rates[i].Rate = rate->hw_value; + if (rate->hw_value_short) { + if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + info->rates[i].Rate |= rate->hw_value_short; + } else { + is_sp = false; + } + + if (bf->bf_state.bfs_paprd) + info->rates[i].ChSel = ah->txchainmask; + else + info->rates[i].ChSel = ath_txchainmask_reduction(sc, + ah->txchainmask, info->rates[i].Rate); + + info->rates[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, + phy, rate->bitrate * 100, len, rix, is_sp); + } + + /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ + if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) + info->flags &= ~ATH9K_TXDESC_RTSENA; + + /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. 
*/ + if (info->flags & ATH9K_TXDESC_RTSENA) + info->flags &= ~ATH9K_TXDESC_CTSENA; +} + +static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + enum ath9k_pkt_type htype; + __le16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + if (ieee80211_is_beacon(fc)) + htype = ATH9K_PKT_TYPE_BEACON; + else if (ieee80211_is_probe_resp(fc)) + htype = ATH9K_PKT_TYPE_PROBE_RESP; + else if (ieee80211_is_atim(fc)) + htype = ATH9K_PKT_TYPE_ATIM; + else if (ieee80211_is_pspoll(fc)) + htype = ATH9K_PKT_TYPE_PSPOLL; + else + htype = ATH9K_PKT_TYPE_NORMAL; + + return htype; +} + +static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf, + struct ath_txq *txq, int len) +{ + struct ath_hw *ah = sc->sc_ah; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); + struct ath_buf *bf_first = bf; + struct ath_tx_info info; + bool aggr = !!(bf->bf_state.bf_type & BUF_AGGR); + + memset(&info, 0, sizeof(info)); + info.is_first = true; + info.is_last = true; + info.txpower = MAX_RATE_POWER; + info.qcu = txq->axq_qnum; + + info.flags = ATH9K_TXDESC_INTREQ; + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) + info.flags |= ATH9K_TXDESC_NOACK; + if (tx_info->flags & IEEE80211_TX_CTL_LDPC) + info.flags |= ATH9K_TXDESC_LDPC; + + ath_buf_set_rate(sc, bf, &info, len); + + if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) + info.flags |= ATH9K_TXDESC_CLRDMASK; + + if (bf->bf_state.bfs_paprd) + info.flags |= (u32) bf->bf_state.bfs_paprd << ATH9K_TXDESC_PAPRD_S; + + + while (bf) { + struct sk_buff *skb = bf->bf_mpdu; + struct ath_frame_info *fi = get_frame_info(skb); + + info.type = get_hw_packet_type(skb); + if (bf->bf_next) + info.link = bf->bf_next->bf_daddr; + else + info.link = 0; + + info.buf_addr[0] = bf->bf_buf_addr; + info.buf_len[0] = skb->len; + info.pkt_len = fi->framelen; + info.keyix = fi->keyix; + info.keytype = fi->keytype; + + if (aggr) { + if (bf == bf_first) + info.aggr = AGGR_BUF_FIRST; + else if (!bf->bf_next) + info.aggr = AGGR_BUF_LAST; + else + info.aggr = AGGR_BUF_MIDDLE; + + info.ndelim = bf->bf_state.ndelim; + info.aggr_len = len; + } + + ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); + bf = bf->bf_next; + } +} + static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid) { struct ath_buf *bf; enum ATH_AGGR_STATUS status; - struct ath_frame_info *fi; + struct ieee80211_tx_info *tx_info; struct list_head bf_q; int aggr_len; do { - if (list_empty(&tid->buf_q)) + if (skb_queue_empty(&tid->buf_q)) return; INIT_LIST_HEAD(&bf_q); @@ -816,34 +1081,25 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, bf = list_first_entry(&bf_q, struct ath_buf, list); bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); + tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); if (tid->ac->clear_ps_filter) { tid->ac->clear_ps_filter = false; - ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true); + tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + } else { + tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT; } /* if only one frame, send as non-aggregate */ if (bf == bf->bf_lastbf) { - fi = get_frame_info(bf->bf_mpdu); - - bf->bf_state.bf_type &= ~BUF_AGGR; - ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc); - ath_buf_set_rate(sc, bf, fi->framelen); - ath_tx_txqaddbuf(sc, txq, &bf_q, false); - continue; + aggr_len = get_frame_info(bf->bf_mpdu)->framelen; + bf->bf_state.bf_type = BUF_AMPDU; + } else { + TX_STAT_INC(txq->axq_qnum, a_aggr); } - /* setup first desc of aggregate */ 
- bf->bf_state.bf_type |= BUF_AGGR; - ath_buf_set_rate(sc, bf, aggr_len); - ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len); - - /* anchor last desc of aggregate */ - ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); - + ath_tx_fill_desc(sc, bf, txq, aggr_len); ath_tx_txqaddbuf(sc, txq, &bf_q, false); - TX_STAT_INC(txq->axq_qnum, a_aggr); - } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH && status != ATH_AGGR_BAW_CLOSED); } @@ -902,12 +1158,13 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) ath_tx_flush_tid(sc, txtid); } -bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an) +void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, + struct ath_node *an) { struct ath_atx_tid *tid; struct ath_atx_ac *ac; struct ath_txq *txq; - bool buffered = false; + bool buffered; int tidno; for (tidno = 0, tid = &an->tid[tidno]; @@ -921,8 +1178,7 @@ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an) spin_lock_bh(&txq->axq_lock); - if (!list_empty(&tid->buf_q)) - buffered = true; + buffered = !skb_queue_empty(&tid->buf_q); tid->sched = false; list_del(&tid->list); @@ -933,9 +1189,9 @@ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an) } spin_unlock_bh(&txq->axq_lock); - } - return buffered; + ieee80211_sta_set_buffered(sta, tidno, buffered); + } } void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) @@ -954,7 +1210,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) spin_lock_bh(&txq->axq_lock); ac->clear_ps_filter = true; - if (!list_empty(&tid->buf_q) && !tid->paused) { + if (!skb_queue_empty(&tid->buf_q) && !tid->paused) { ath_tx_queue_tid(txq, tid); ath_txq_schedule(sc, txq); } @@ -1004,7 +1260,6 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc, struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) { struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); struct ath9k_tx_queue_info qi; static const int subtype_txq_to_hwq[] = { [WME_AC_BE] = ATH_TXQ_AC_BE, @@ -1054,12 +1309,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) */ return NULL; } - if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) { - ath_err(common, "qnum %u out of range, max %zu!\n", - axq_qnum, ARRAY_SIZE(sc->tx.txq)); - ath9k_hw_releasetxqueue(ah, axq_qnum); - return NULL; - } if (!ATH_TXQ_SETUP(sc, axq_qnum)) { struct ath_txq *txq = &sc->tx.txq[axq_qnum]; @@ -1156,6 +1405,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status ts; memset(&ts, 0, sizeof(ts)); + ts.ts_status = ATH9K_TX_FLUSH; INIT_LIST_HEAD(&bf_head); while (!list_empty(list)) { @@ -1222,7 +1472,8 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_txq *txq; - int i, npend = 0; + int i; + u32 npend = 0; if (sc->sc_flags & SC_OP_INVALID) return true; @@ -1234,11 +1485,12 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) if (!ATH_TXQ_SETUP(sc, i)) continue; - npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum); + if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum)) + npend |= BIT(i); } if (npend) - ath_err(common, "Failed to stop TX DMA!\n"); + ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend); for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { if (!ATH_TXQ_SETUP(sc, i)) @@ -1271,7 +1523,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) 
struct ath_atx_ac *ac, *ac_tmp, *last_ac; struct ath_atx_tid *tid, *last_tid; - if (list_empty(&txq->axq_acq) || + if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) || txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) return; @@ -1298,7 +1550,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) * add tid to round-robin queue if more frames * are pending for the tid */ - if (!list_empty(&tid->buf_q)) + if (!skb_queue_empty(&tid->buf_q)) ath_tx_queue_tid(txq, tid); if (tid == last_tid || @@ -1390,12 +1642,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, } static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, - struct ath_buf *bf, struct ath_tx_control *txctl) + struct sk_buff *skb, struct ath_tx_control *txctl) { - struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); + struct ath_frame_info *fi = get_frame_info(skb); struct list_head bf_head; - - bf->bf_state.bf_type |= BUF_AMPDU; + struct ath_buf *bf; /* * Do not queue to h/w when any of the following conditions is true: @@ -1404,113 +1655,82 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, * - seqno is not within block-ack window * - h/w queue depth exceeds low water mark */ - if (!list_empty(&tid->buf_q) || tid->paused || - !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) || + if (!skb_queue_empty(&tid->buf_q) || tid->paused || + !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) || txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { /* * Add this frame to software queue for scheduling later * for aggregation. */ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); - list_add_tail(&bf->list, &tid->buf_q); - ath_tx_queue_tid(txctl->txq, tid); + __skb_queue_tail(&tid->buf_q, skb); + if (!txctl->an || !txctl->an->sleeping) + ath_tx_queue_tid(txctl->txq, tid); return; } + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + if (!bf) + return; + + bf->bf_state.bf_type = BUF_AMPDU; INIT_LIST_HEAD(&bf_head); list_add(&bf->list, &bf_head); /* Add sub-frame to BAW */ - if (!fi->retries) - ath_tx_addto_baw(sc, tid, fi->seqno); + ath_tx_addto_baw(sc, tid, bf->bf_state.seqno); /* Queue to h/w without aggregation */ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); bf->bf_lastbf = bf; - ath_buf_set_rate(sc, bf, fi->framelen); + ath_tx_fill_desc(sc, bf, txctl->txq, fi->framelen); ath_tx_txqaddbuf(sc, txctl->txq, &bf_head, false); } static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid, - struct list_head *bf_head) + struct ath_atx_tid *tid, struct sk_buff *skb) { - struct ath_frame_info *fi; + struct ath_frame_info *fi = get_frame_info(skb); + struct list_head bf_head; struct ath_buf *bf; - bf = list_first_entry(bf_head, struct ath_buf, list); - bf->bf_state.bf_type &= ~BUF_AMPDU; + bf = fi->bf; + if (!bf) + bf = ath_tx_setup_buffer(sc, txq, tid, skb); + + if (!bf) + return; + + INIT_LIST_HEAD(&bf_head); + list_add_tail(&bf->list, &bf_head); + bf->bf_state.bf_type = 0; /* update starting sequence number for subsequent ADDBA request */ if (tid) INCR(tid->seq_start, IEEE80211_SEQ_MAX); bf->bf_lastbf = bf; - fi = get_frame_info(bf->bf_mpdu); - ath_buf_set_rate(sc, bf, fi->framelen); - ath_tx_txqaddbuf(sc, txq, bf_head, false); + ath_tx_fill_desc(sc, bf, txq, fi->framelen); + ath_tx_txqaddbuf(sc, txq, &bf_head, false); TX_STAT_INC(txq->axq_qnum, queued); } -static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr; - enum ath9k_pkt_type htype; - 
__le16 fc; - - hdr = (struct ieee80211_hdr *)skb->data; - fc = hdr->frame_control; - - if (ieee80211_is_beacon(fc)) - htype = ATH9K_PKT_TYPE_BEACON; - else if (ieee80211_is_probe_resp(fc)) - htype = ATH9K_PKT_TYPE_PROBE_RESP; - else if (ieee80211_is_atim(fc)) - htype = ATH9K_PKT_TYPE_ATIM; - else if (ieee80211_is_pspoll(fc)) - htype = ATH9K_PKT_TYPE_PSPOLL; - else - htype = ATH9K_PKT_TYPE_NORMAL; - - return htype; -} - static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, int framelen) { - struct ath_softc *sc = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = tx_info->control.sta; struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ath_frame_info *fi = get_frame_info(skb); struct ath_node *an = NULL; - struct ath_atx_tid *tid; enum ath9k_key_type keytype; - u16 seqno = 0; - u8 tidno; keytype = ath9k_cmn_get_hw_crypto_keytype(skb); if (sta) an = (struct ath_node *) sta->drv_priv; - hdr = (struct ieee80211_hdr *)skb->data; - if (an && ieee80211_is_data_qos(hdr->frame_control) && - conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) { - - tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; - - /* - * Override seqno set by upper layer with the one - * in tx aggregation state. - */ - tid = ATH_AN_2_TID(an, tidno); - seqno = tid->seq_next; - hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT); - INCR(tid->seq_next, IEEE80211_SEQ_MAX); - } - memset(fi, 0, sizeof(*fi)); if (hw_key) fi->keyix = hw_key->hw_key_idx; @@ -1520,199 +1740,50 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, fi->keyix = ATH9K_TXKEYIX_INVALID; fi->keytype = keytype; fi->framelen = framelen; - fi->seqno = seqno; -} - -static int setup_tx_flags(struct sk_buff *skb) -{ - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - int flags = 0; - - flags |= ATH9K_TXDESC_INTREQ; - - if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) - flags |= ATH9K_TXDESC_NOACK; - - if (tx_info->flags & IEEE80211_TX_CTL_LDPC) - flags |= ATH9K_TXDESC_LDPC; - - return flags; -} - -/* - * rix - rate index - * pktlen - total bytes (delims + data + fcs + pads + pad delims) - * width - 0 for 20 MHz, 1 for 40 MHz - * half_gi - to use 4us v/s 3.6 us for symbol time - */ -static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen, - int width, int half_gi, bool shortPreamble) -{ - u32 nbits, nsymbits, duration, nsymbols; - int streams; - - /* find number of symbols: PLCP + data */ - streams = HT_RC_2_STREAMS(rix); - nbits = (pktlen << 3) + OFDM_PLCP_BITS; - nsymbits = bits_per_symbol[rix % 8][width] * streams; - nsymbols = (nbits + nsymbits - 1) / nsymbits; - - if (!half_gi) - duration = SYMBOL_TIME(nsymbols); - else - duration = SYMBOL_TIME_HALFGI(nsymbols); - - /* addup duration for legacy/ht training and signal fields */ - duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); - - return duration; } u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate) { struct ath_hw *ah = sc->sc_ah; struct ath9k_channel *curchan = ah->curchan; - if ((sc->sc_flags & SC_OP_ENABLE_APM) && - (curchan->channelFlags & CHANNEL_5GHZ) && - (chainmask == 0x7) && (rate < 0x90)) + if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && + (curchan->channelFlags & CHANNEL_5GHZ) && + (chainmask == 0x7) && (rate < 0x90)) return 0x3; else return chainmask; } -static void ath_buf_set_rate(struct ath_softc *sc, 
struct ath_buf *bf, int len) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath9k_11n_rate_series series[4]; - struct sk_buff *skb; - struct ieee80211_tx_info *tx_info; - struct ieee80211_tx_rate *rates; - const struct ieee80211_rate *rate; - struct ieee80211_hdr *hdr; - int i, flags = 0; - u8 rix = 0, ctsrate = 0; - bool is_pspoll; - - memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); - - skb = bf->bf_mpdu; - tx_info = IEEE80211_SKB_CB(skb); - rates = tx_info->control.rates; - hdr = (struct ieee80211_hdr *)skb->data; - is_pspoll = ieee80211_is_pspoll(hdr->frame_control); - - /* - * We check if Short Preamble is needed for the CTS rate by - * checking the BSS's global flag. - * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used. - */ - rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info); - ctsrate = rate->hw_value; - if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) - ctsrate |= rate->hw_value_short; - - for (i = 0; i < 4; i++) { - bool is_40, is_sgi, is_sp; - int phy; - - if (!rates[i].count || (rates[i].idx < 0)) - continue; - - rix = rates[i].idx; - series[i].Tries = rates[i].count; - - if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) { - series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; - flags |= ATH9K_TXDESC_RTSENA; - } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { - series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; - flags |= ATH9K_TXDESC_CTSENA; - } - - if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) - series[i].RateFlags |= ATH9K_RATESERIES_2040; - if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) - series[i].RateFlags |= ATH9K_RATESERIES_HALFGI; - - is_sgi = !!(rates[i].flags & IEEE80211_TX_RC_SHORT_GI); - is_40 = !!(rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH); - is_sp = !!(rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE); - - if (rates[i].flags & IEEE80211_TX_RC_MCS) { - /* MCS rates */ - series[i].Rate = rix | 0x80; - series[i].ChSel = ath_txchainmask_reduction(sc, - common->tx_chainmask, series[i].Rate); - series[i].PktDuration = ath_pkt_duration(sc, rix, len, - is_40, is_sgi, is_sp); - if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) - series[i].RateFlags |= ATH9K_RATESERIES_STBC; - continue; - } - - /* legacy rates */ - if ((tx_info->band == IEEE80211_BAND_2GHZ) && - !(rate->flags & IEEE80211_RATE_ERP_G)) - phy = WLAN_RC_PHY_CCK; - else - phy = WLAN_RC_PHY_OFDM; - - rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx]; - series[i].Rate = rate->hw_value; - if (rate->hw_value_short) { - if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - series[i].Rate |= rate->hw_value_short; - } else { - is_sp = false; - } - - if (bf->bf_state.bfs_paprd) - series[i].ChSel = common->tx_chainmask; - else - series[i].ChSel = ath_txchainmask_reduction(sc, - common->tx_chainmask, series[i].Rate); - - series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, - phy, rate->bitrate * 100, len, rix, is_sp); - } - - /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ - if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit)) - flags &= ~ATH9K_TXDESC_RTSENA; - - /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. 
*/ - if (flags & ATH9K_TXDESC_RTSENA) - flags &= ~ATH9K_TXDESC_CTSENA; - - /* set dur_update_en for l-sig computation except for PS-Poll frames */ - ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc, - bf->bf_lastbf->bf_desc, - !is_pspoll, ctsrate, - 0, series, 4, flags); - -} - -static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw, +/* + * Assign a descriptor (and sequence number if necessary, + * and map buffer for DMA. Frees skb on error + */ +static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid, struct sk_buff *skb) { - struct ath_softc *sc = hw->priv; - struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_frame_info *fi = get_frame_info(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ath_buf *bf; - struct ath_desc *ds; - int frm_type; + u16 seqno; bf = ath_tx_get_buffer(sc); if (!bf) { ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n"); - return NULL; + goto error; } ATH_TXBUF_RESET(bf); - bf->bf_flags = setup_tx_flags(skb); + if (tid) { + seqno = tid->seq_next; + hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); + INCR(tid->seq_next, IEEE80211_SEQ_MAX); + bf->bf_state.seqno = seqno; + } + bf->bf_mpdu = skb; bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, @@ -1723,38 +1794,26 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw, ath_err(ath9k_hw_common(sc->sc_ah), "dma_mapping_error() on TX\n"); ath_tx_return_buffer(sc, bf); - return NULL; + goto error; } - frm_type = get_hw_packet_type(skb); - - ds = bf->bf_desc; - ath9k_hw_set_desc_link(ah, ds, 0); - - ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER, - fi->keyix, fi->keytype, bf->bf_flags); - - ath9k_hw_filltxdesc(ah, ds, - skb->len, /* segment length */ - true, /* first segment */ - true, /* last segment */ - ds, /* first descriptor */ - bf->bf_buf_addr, - txq->axq_qnum); - + fi->bf = bf; return bf; + +error: + dev_kfree_skb_any(skb); + return NULL; } /* FIXME: tx power */ -static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, +static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, struct ath_tx_control *txctl) { - struct sk_buff *skb = bf->bf_mpdu; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct list_head bf_head; struct ath_atx_tid *tid = NULL; + struct ath_buf *bf; u8 tidno; spin_lock_bh(&txctl->txq->axq_lock); @@ -1772,27 +1831,21 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, * Try aggregation if it's a unicast data frame * and the destination is HT capable. 
*/ - ath_tx_send_ampdu(sc, tid, bf, txctl); + ath_tx_send_ampdu(sc, tid, skb, txctl); } else { - INIT_LIST_HEAD(&bf_head); - list_add_tail(&bf->list, &bf_head); + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); + if (!bf) + goto out; - bf->bf_state.bfs_ftype = txctl->frame_type; bf->bf_state.bfs_paprd = txctl->paprd; - if (bf->bf_state.bfs_paprd) - ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc, - bf->bf_state.bfs_paprd); - if (txctl->paprd) bf->bf_state.bfs_paprd_timestamp = jiffies; - if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) - ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true); - - ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); + ath_tx_send_normal(sc, txctl->txq, tid, skb); } +out: spin_unlock_bh(&txctl->txq->axq_lock); } @@ -1806,7 +1859,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_vif *vif = info->control.vif; struct ath_softc *sc = hw->priv; struct ath_txq *txq = txctl->txq; - struct ath_buf *bf; int padpos, padsize; int frmlen = skb->len + FCS_LEN; int q; @@ -1839,6 +1891,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, skb_push(skb, padsize); memmove(skb->data, skb->data + padsize, padpos); + hdr = (struct ieee80211_hdr *) skb->data; } if ((vif && vif->type != NL80211_IFTYPE_AP && @@ -1853,10 +1906,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, * info are no longer valid (overwritten by the ath_frame_info data. */ - bf = ath_tx_setup_buffer(hw, txctl->txq, skb); - if (unlikely(!bf)) - return -ENOMEM; - q = skb_get_queue_mapping(skb); spin_lock_bh(&txq->axq_lock); if (txq == sc->tx.txq_map[q] && @@ -1866,8 +1915,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, } spin_unlock_bh(&txq->axq_lock); - ath_tx_start_dma(sc, bf, txctl); - + ath_tx_start_dma(sc, skb, txctl); return 0; } @@ -1876,7 +1924,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, /*****************/ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, - int tx_flags, int ftype, struct ath_txq *txq) + int tx_flags, struct ath_txq *txq) { struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -1889,10 +1937,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, if (tx_flags & ATH_TX_BAR) tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; - if (!(tx_flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { + if (!(tx_flags & ATH_TX_ERROR)) /* Frame was ACKed */ tx_info->flags |= IEEE80211_TX_STAT_ACK; - } padpos = ath9k_cmn_padpos(hdr->frame_control); padsize = padpos & 3; @@ -1936,18 +1983,18 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int txok, int sendbar) { struct sk_buff *skb = bf->bf_mpdu; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); unsigned long flags; int tx_flags = 0; if (sendbar) tx_flags = ATH_TX_BAR; - if (!txok) { + if (!txok) tx_flags |= ATH_TX_ERROR; - if (bf_isxretried(bf)) - tx_flags |= ATH_TX_XRETRY; - } + if (ts->ts_status & ATH9K_TXERR_FILT) + tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE); bf->bf_buf_addr = 0; @@ -1960,9 +2007,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, else complete(&sc->paprd_complete); } else { - ath_debug_stat_tx(sc, bf, ts, txq); - ath_tx_complete(sc, skb, tx_flags, - bf->bf_state.bfs_ftype, txq); + ath_debug_stat_tx(sc, bf, ts, txq, tx_flags); + ath_tx_complete(sc, skb, tx_flags, txq); } /* At this 
point, skb (bf->bf_mpdu) is consumed...make sure we don't * accidentally reference it later. @@ -1979,7 +2025,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_status *ts, int nframes, int nbad, - int txok, bool update_rc) + int txok) { struct sk_buff *skb = bf->bf_mpdu; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -1994,19 +2040,16 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, tx_rateindex = ts->ts_rateindex; WARN_ON(tx_rateindex >= hw->max_rates); - if (ts->ts_status & ATH9K_TXERR_FILT) - tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; - if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) { + if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { tx_info->flags |= IEEE80211_TX_STAT_AMPDU; BUG_ON(nbad > nframes); - - tx_info->status.ampdu_len = nframes; - tx_info->status.ampdu_ack_len = nframes - nbad; } + tx_info->status.ampdu_len = nframes; + tx_info->status.ampdu_ack_len = nframes - nbad; if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && - (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { + (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) { /* * If an underrun error is seen assume it as an excessive * retry only if max frame trigger level has been reached @@ -2019,9 +2062,9 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, * successfully by eventually preferring slower rates. * This itself should also alleviate congestion on the bus. */ - if (ieee80211_is_data(hdr->frame_control) && - (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN | - ATH9K_TX_DELIM_UNDERRUN)) && + if (unlikely(ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN | + ATH9K_TX_DELIM_UNDERRUN)) && + ieee80211_is_data(hdr->frame_control) && ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level) tx_info->status.rates[tx_rateindex].count = hw->max_rate_tries; @@ -2052,13 +2095,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, spin_unlock_bh(&txq->axq_lock); if (!bf_isampdu(bf)) { - /* - * This frame is sent out as a single frame. - * Use hardware retry status for this frame. - */ - if (ts->ts_status & ATH9K_TXERR_XRETRY) - bf->bf_state.bf_type |= BUF_XRETRY; - ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok, true); + ath_tx_rc_status(sc, bf, ts, 1, txok ? 
0 : 1, txok); ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0); } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); @@ -2085,6 +2122,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) spin_lock_bh(&txq->axq_lock); for (;;) { + if (work_pending(&sc->hw_reset_work)) + break; + if (list_empty(&txq->axq_q)) { txq->axq_link = NULL; if (sc->sc_flags & SC_OP_TXAGGR) @@ -2172,9 +2212,8 @@ static void ath_tx_complete_poll_work(struct work_struct *work) if (needreset) { ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, "tx hung, resetting the chip\n"); - spin_lock_bh(&sc->sc_pcu_lock); - ath_reset(sc, true); - spin_unlock_bh(&sc->sc_pcu_lock); + RESET_STAT_INC(sc, RESET_TYPE_TX_HANG); + ieee80211_queue_work(sc->hw, &sc->hw_reset_work); } ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, @@ -2207,6 +2246,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) int status; for (;;) { + if (work_pending(&sc->hw_reset_work)) + break; + status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts); if (status == -EINPROGRESS) break; @@ -2361,7 +2403,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->sched = false; tid->paused = false; tid->state &= ~AGGR_CLEANUP; - INIT_LIST_HEAD(&tid->buf_q); + __skb_queue_head_init(&tid->buf_q); acno = TID_TO_WME_AC(tidno); tid->ac = &an->ac[acno]; tid->state &= ~AGGR_ADDBA_COMPLETE; |