Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c  135
1 file changed, 100 insertions(+), 35 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 7c28cb55610..704fcbcbe20 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -103,9 +103,16 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
ieee80211_tx_status(sc->hw, skb);
}
-static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
+ struct list_head *list;
+ struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+ struct ath_chanctx *ctx = avp->chanctx;
+
+ if (!ctx)
+ return;
if (tid->sched)
return;
@@ -117,7 +124,9 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
return;
ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+
+ list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+ list_add_tail(&ac->list, list);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -147,21 +156,22 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
struct sk_buff *skb)
{
- int q;
-
- q = skb_get_queue_mapping(skb);
- if (txq == sc->tx.uapsdq)
- txq = sc->tx.txq_map[q];
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath_frame_info *fi = get_frame_info(skb);
+ int hw_queue;
+ int q = fi->txq;
- if (txq != sc->tx.txq_map[q])
+ if (q < 0)
return;
+ txq = sc->tx.txq_map[q];
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
+ hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
if (txq->stopped &&
txq->pending_frames < sc->tx.txq_max_pending[q]) {
- ieee80211_wake_queue(sc->hw, q);
+ ieee80211_wake_queue(sc->hw, hw_queue);
txq->stopped = false;
}
}
@@ -626,7 +636,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
skb_queue_splice_tail(&bf_pending, &tid->retry_q);
if (!an->sleeping) {
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
tid->ac->clear_ps_filter = true;
@@ -1492,7 +1502,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ac->clear_ps_filter = true;
if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1516,7 +1526,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
if (ath_tid_has_buffered(tid)) {
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1651,7 +1661,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_link = NULL;
__skb_queue_head_init(&txq->complete_q);
INIT_LIST_HEAD(&txq->axq_q);
- INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock);
txq->axq_depth = 0;
txq->axq_ampdu_depth = 0;
@@ -1695,7 +1704,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int ath_cabq_update(struct ath_softc *sc)
{
struct ath9k_tx_queue_info qi;
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
int qnum = sc->beacon.cabq->axq_qnum;
ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1813,7 +1822,7 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
-/* For each axq_acq entry, for each tid, try to schedule packets
+/* For each acq entry, for each tid, try to schedule packets
* for transmit until ampdu_depth has reached min Q depth.
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
@@ -1821,19 +1830,31 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
+ struct list_head *ac_list;
bool sent = false;
+ if (txq->mac80211_qnum < 0)
+ return;
+
+ spin_lock_bh(&sc->chan_lock);
+ ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+ spin_unlock_bh(&sc->chan_lock);
+
if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
- list_empty(&txq->axq_acq))
+ list_empty(ac_list))
return;
+ spin_lock_bh(&sc->chan_lock);
rcu_read_lock();
- last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
- while (!list_empty(&txq->axq_acq)) {
+ last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+ while (!list_empty(ac_list)) {
bool stop = false;
- ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+ if (sc->cur_chan->stopped)
+ break;
+
+ ac = list_first_entry(ac_list, struct ath_atx_ac, list);
last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
list_del(&ac->list);
ac->sched = false;
@@ -1853,7 +1874,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
* are pending for the tid
*/
if (ath_tid_has_buffered(tid))
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
if (stop || tid == last_tid)
break;
@@ -1861,7 +1882,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
if (!list_empty(&ac->tid_q) && !ac->sched) {
ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+ list_add_tail(&ac->list, ac_list);
}
if (stop)
@@ -1872,12 +1893,27 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
break;
sent = false;
- last_ac = list_entry(txq->axq_acq.prev,
+ last_ac = list_entry(ac_list->prev,
struct ath_atx_ac, list);
}
}
rcu_read_unlock();
+ spin_unlock_bh(&sc->chan_lock);
+}
+
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+ struct ath_txq *txq;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ txq = sc->tx.txq_map[i];
+
+ spin_lock_bh(&txq->axq_lock);
+ ath_txq_schedule(sc, txq);
+ spin_unlock_bh(&txq->axq_lock);
+ }
}
/***********/
@@ -2008,6 +2044,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
an = (struct ath_node *) sta->drv_priv;
memset(fi, 0, sizeof(*fi));
+ fi->txq = -1;
if (hw_key)
fi->keyix = hw_key->hw_key_idx;
else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
@@ -2159,13 +2196,22 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
+ struct ath_frame_info *fi = get_frame_info(skb);
+ struct ath_vif *avp = NULL;
struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
- int q;
+ bool queue;
+ int q, hw_queue;
int ret;
+ if (vif)
+ avp = (void *)vif->drv_priv;
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ txctl->force_channel = true;
+
ret = ath_tx_prepare(hw, skb, txctl);
if (ret)
return ret;
@@ -2177,24 +2223,41 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
q = skb_get_queue_mapping(skb);
+ hw_queue = (info->hw_queue >= sc->hw->queues - 2) ? q : info->hw_queue;
ath_txq_lock(sc, txq);
- if (txq == sc->tx.txq_map[q] &&
- ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
- !txq->stopped) {
- ieee80211_stop_queue(sc->hw, q);
- txq->stopped = true;
+ if (txq == sc->tx.txq_map[q]) {
+ fi->txq = q;
+ if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
+ !txq->stopped) {
+ ieee80211_stop_queue(sc->hw, hw_queue);
+ txq->stopped = true;
+ }
+ }
+
+ queue = ieee80211_is_data_present(hdr->frame_control);
+
+ /* Force queueing of all frames that belong to a virtual interface on
+ * a different channel context, to ensure that they are sent on the
+ * correct channel.
+ */
+ if (((avp && avp->chanctx != sc->cur_chan) ||
+ sc->cur_chan->stopped) && !txctl->force_channel) {
+ if (!txctl->an)
+ txctl->an = &avp->mcast_node;
+ info->flags &= ~IEEE80211_TX_CTL_PS_RESPONSE;
+ queue = true;
}
- if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+ if (txctl->an && queue)
tid = ath_get_skb_tid(sc, txctl->an, skb);
- if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
+ if (info->flags & (IEEE80211_TX_CTL_PS_RESPONSE |
+ IEEE80211_TX_CTL_TX_OFFCHAN)) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
- } else if (txctl->an &&
- ieee80211_is_data_present(hdr->frame_control)) {
+ } else if (txctl->an && queue) {
WARN_ON(tid->ac->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
@@ -2207,7 +2270,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
TX_STAT_INC(txq->axq_qnum, a_queued_sw);
__skb_queue_tail(&tid->buf_q, skb);
if (!txctl->an->sleeping)
- ath_tx_queue_tid(txq, tid);
+ ath_tx_queue_tid(sc, txq, tid);
ath_txq_schedule(sc, txq);
goto out;
@@ -2253,8 +2316,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int max_duration;
max_duration =
- sc->cur_beacon_conf.beacon_interval * 1000 *
- sc->cur_beacon_conf.dtim_period / ATH_BCBUF;
+ sc->cur_chan->beacon.beacon_interval * 1000 *
+ sc->cur_chan->beacon.dtim_period / ATH_BCBUF;
do {
struct ath_frame_info *fi = get_frame_info(skb);
@@ -2569,6 +2632,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
+ ath_chanctx_event(sc, NULL,
+ ATH_CHANCTX_EVENT_BEACON_SENT);
ath9k_csa_update(sc);
continue;
}
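
The core of this change is that active TIDs are no longer queued on a per-hardware-queue axq_acq list; instead each channel context keeps one list per WME access category (ctx->acq[TID_TO_WME_AC(tidno)]), and ath_txq_schedule() walks the list belonging to sc->cur_chan under chan_lock. The sketch below is a minimal standalone model of that queueing step, not kernel code: the list handling, the mac80211-style AC numbering, and names such as tx_queue_tid(), acq_head and acq_tail are simplified assumptions for illustration only.

/* Standalone sketch (assumed names, not the kernel code): each channel
 * context owns one scheduling list per access category, and a TID is
 * appended to the list of its owning context rather than to a
 * per-hardware-queue axq_acq list. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ACS  4                      /* VO, VI, BE, BK */
#define NUM_TIDS 8

struct atx_ac {                         /* one schedule entry per (node, AC) */
	bool sched;
	struct atx_ac *next;
};

struct atx_tid {
	int tidno;
	struct atx_ac *ac;
};

struct chanctx {                        /* per-channel context: one queue per AC */
	struct atx_ac *acq_head[NUM_ACS];
	struct atx_ac *acq_tail[NUM_ACS];
};

/* TID -> AC mapping in mac80211 numbering (VO=0, VI=1, BE=2, BK=3),
 * same idea as the driver's TID_TO_WME_AC() macro. */
static int tid_to_ac(int tidno)
{
	static const int map[NUM_TIDS] = { 2, 3, 3, 2, 1, 1, 0, 0 };
	return map[tidno & (NUM_TIDS - 1)];
}

/* Counterpart of the new ath_tx_queue_tid(): bail out when the vif has
 * no channel context yet or the TID's AC is already scheduled, else
 * append the AC entry to the context's list for that access category. */
static void tx_queue_tid(struct chanctx *ctx, struct atx_tid *tid)
{
	int ac;

	if (!ctx || tid->ac->sched)
		return;

	tid->ac->sched = true;
	tid->ac->next = NULL;

	ac = tid_to_ac(tid->tidno);
	if (ctx->acq_tail[ac])
		ctx->acq_tail[ac]->next = tid->ac;
	else
		ctx->acq_head[ac] = tid->ac;
	ctx->acq_tail[ac] = tid->ac;
}

int main(void)
{
	struct chanctx ctx = { { 0 } };
	struct atx_ac ac_entry = { false, NULL };
	struct atx_tid tid = { 5, &ac_entry };          /* TID 5 maps to AC_VI */

	tx_queue_tid(&ctx, &tid);
	tx_queue_tid(&ctx, &tid);                       /* no-op: already scheduled */

	printf("TID %d queued on AC %d: %s\n", tid.tidno, tid_to_ac(tid.tidno),
	       ctx.acq_head[tid_to_ac(tid.tidno)] ? "yes" : "no");
	return 0;
}

This also reflects why a scheduler pass now only services TIDs that belong to the currently active channel context: frames destined for a vif on another context (or a stopped context) are forced onto the software queue in ath_tx_start() and remain there until that context becomes active.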