Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r-- | drivers/net/wireless/rt2x00/rt2x00queue.c | 589
1 file changed, 432 insertions, 157 deletions
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index e360d287def..ab8c16f8bca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -60,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
 	 * at least 8 bytes bytes available in headroom for IV/EIV
 	 * and 8 bytes for ICV data as tailroon.
 	 */
-	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
 		head_size += 8;
 		tail_size += 8;
 	}
@@ -86,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
-	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
+	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
 		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
 						  skb->data,
 						  skb->len,
@@ -148,19 +148,6 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
 	skb_trim(skb, frame_length);
 }
 
-void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
-{
-	unsigned int frame_length = skb->len;
-	unsigned int align = ALIGN_SIZE(skb, header_length);
-
-	if (!align)
-		return;
-
-	skb_push(skb, align);
-	memmove(skb->data, skb->data + align, frame_length);
-	skb_trim(skb, frame_length);
-}
-
 void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 {
 	unsigned int payload_length = skb->len - header_length;
@@ -199,7 +186,12 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
 
 void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
 {
-	unsigned int l2pad = L2PAD_SIZE(header_length);
+	/*
+	 * L2 padding is only present if the skb contains more than just the
+	 * IEEE 802.11 header.
+	 */
+	unsigned int l2pad = (skb->len > header_length) ?
+			     L2PAD_SIZE(header_length) : 0;
 
 	if (!l2pad)
 		return;
@@ -216,14 +208,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
 	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
 	unsigned long irqflags;
 
-	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
-	    unlikely(!tx_info->control.vif))
+	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+		return;
+
+	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
+
+	if (!test_bit(REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->cap_flags))
 		return;
 
 	/*
-	 * Hardware should insert sequence counter.
-	 * FIXME: We insert a software sequence counter first for
-	 * hardware that doesn't support hardware sequence counting.
+	 * The hardware is not able to insert a sequence number. Assign a
+	 * software generated one here.
 	 *
 	 * This is wrong because beacons are not getting sequence
 	 * numbers assigned properly.
@@ -241,7 +236,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
 
 	spin_unlock_irqrestore(&intf->seqlock, irqflags);
 
-	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 }
 
 static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
@@ -255,6 +249,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
 	unsigned int duration;
 	unsigned int residual;
 
+	/*
+	 * Determine with what IFS priority this frame should be send.
+	 * Set ifs to IFS_SIFS when the this is not the first fragment,
+	 * or this fragment came after RTS/CTS.
+	 */
+	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
+		txdesc->u.plcp.ifs = IFS_BACKOFF;
+	else
+		txdesc->u.plcp.ifs = IFS_SIFS;
+
 	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
 	data_length = entry->skb->len + 4;
 	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
@@ -263,12 +267,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
 	 * PLCP setup
 	 * Length calculation depends on OFDM/CCK rate.
 	 */
-	txdesc->signal = hwrate->plcp;
-	txdesc->service = 0x04;
+	txdesc->u.plcp.signal = hwrate->plcp;
+	txdesc->u.plcp.service = 0x04;
 
 	if (hwrate->flags & DEV_RATE_OFDM) {
-		txdesc->length_high = (data_length >> 6) & 0x3f;
-		txdesc->length_low = data_length & 0x3f;
+		txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
+		txdesc->u.plcp.length_low = data_length & 0x3f;
 	} else {
 		/*
 		 * Convert length to microseconds.
@@ -283,42 +287,113 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
 			 * Check if we need to set the Length Extension
 			 */
 			if (hwrate->bitrate == 110 && residual <= 30)
-				txdesc->service |= 0x80;
+				txdesc->u.plcp.service |= 0x80;
 		}
 
-		txdesc->length_high = (duration >> 8) & 0xff;
-		txdesc->length_low = duration & 0xff;
+		txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
+		txdesc->u.plcp.length_low = duration & 0xff;
 
 		/*
 		 * When preamble is enabled we should set the
 		 * preamble bit for the signal.
 		 */
 		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
-			txdesc->signal |= 0x08;
+			txdesc->u.plcp.signal |= 0x08;
 	}
 }
 
+static void rt2x00queue_create_tx_descriptor_ht(struct queue_entry *entry,
+						struct txentry_desc *txdesc,
+						const struct rt2x00_rate *hwrate)
+{
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+
+	if (tx_info->control.sta)
+		txdesc->u.ht.mpdu_density =
+		    tx_info->control.sta->ht_cap.ampdu_density;
+
+	txdesc->u.ht.ba_size = 7;	/* FIXME: What value is needed? */
+
+	/*
+	 * Only one STBC stream is supported for now.
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+		txdesc->u.ht.stbc = 1;
+
+	/*
+	 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
+	 * mcs rate to be used
+	 */
+	if (txrate->flags & IEEE80211_TX_RC_MCS) {
+		txdesc->u.ht.mcs = txrate->idx;
+
+		/*
+		 * MIMO PS should be set to 1 for STA's using dynamic SM PS
+		 * when using more then one tx stream (>MCS7).
+		 */
+		if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
+		    ((tx_info->control.sta->ht_cap.cap &
+		      IEEE80211_HT_CAP_SM_PS) >>
+		     IEEE80211_HT_CAP_SM_PS_SHIFT) ==
+		    WLAN_HT_CAP_SM_PS_DYNAMIC)
+			__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
+	} else {
+		txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+			txdesc->u.ht.mcs |= 0x08;
+	}
+
+	/*
+	 * This frame is eligible for an AMPDU, however, don't aggregate
+	 * frames that are intended to probe a specific tx rate.
+	 */
+	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
+	    !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+		__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+
+	/*
+	 * Set 40Mhz mode if necessary (for legacy rates this will
+	 * duplicate the frame to both channels).
+	 */
+	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
+	    txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+		__set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+		__set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+	/*
+	 * Determine IFS values
+	 * - Use TXOP_BACKOFF for management frames except beacons
+	 * - Use TXOP_SIFS for fragment bursts
+	 * - Use TXOP_HTTXOP for everything else
+	 *
+	 * Note: rt2800 devices won't use CTS protection (if used)
	 * for frames not transmitted with TXOP_HTTXOP
+	 */
+	if (ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_beacon(hdr->frame_control))
+		txdesc->u.ht.txop = TXOP_BACKOFF;
+	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+		txdesc->u.ht.txop = TXOP_SIFS;
+	else
+		txdesc->u.ht.txop = TXOP_HTTXOP;
+}
+
 static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 					     struct txentry_desc *txdesc)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
-	struct ieee80211_rate *rate =
-	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
-	const struct rt2x00_rate *hwrate;
+	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+	struct ieee80211_rate *rate;
+	const struct rt2x00_rate *hwrate = NULL;
 
 	memset(txdesc, 0, sizeof(*txdesc));
 
 	/*
-	 * Initialize information from queue
-	 */
-	txdesc->qid = entry->queue->qid;
-	txdesc->cw_min = entry->queue->cw_min;
-	txdesc->cw_max = entry->queue->cw_max;
-	txdesc->aifs = entry->queue->aifs;
-
-	/*
 	 * Header and frame information.
 	 */
 	txdesc->length = entry->skb->len;
@@ -368,42 +443,42 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
 
 	/*
 	 * Beacons and probe responses require the tsf timestamp
-	 * to be inserted into the frame, except for a frame that has been injected
-	 * through a monitor interface. This latter is needed for testing a
-	 * monitor interface.
+	 * to be inserted into the frame.
 	 */
-	if ((ieee80211_is_beacon(hdr->frame_control) ||
-	     ieee80211_is_probe_resp(hdr->frame_control)) &&
-	    (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
+	if (ieee80211_is_beacon(hdr->frame_control) ||
+	    ieee80211_is_probe_resp(hdr->frame_control))
 		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
 
-	/*
-	 * Determine with what IFS priority this frame should be send.
-	 * Set ifs to IFS_SIFS when the this is not the first fragment,
-	 * or this fragment came after RTS/CTS.
-	 */
 	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
-	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
+	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
 		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
-		txdesc->ifs = IFS_BACKOFF;
-	} else
-		txdesc->ifs = IFS_SIFS;
 
 	/*
 	 * Determine rate modulation.
 	 */
-	hwrate = rt2x00_get_rate(rate->hw_value);
-	txdesc->rate_mode = RATE_MODE_CCK;
-	if (hwrate->flags & DEV_RATE_OFDM)
-		txdesc->rate_mode = RATE_MODE_OFDM;
+	if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+		txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
+	else if (txrate->flags & IEEE80211_TX_RC_MCS)
+		txdesc->rate_mode = RATE_MODE_HT_MIX;
+	else {
+		rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
+		hwrate = rt2x00_get_rate(rate->hw_value);
+		if (hwrate->flags & DEV_RATE_OFDM)
+			txdesc->rate_mode = RATE_MODE_OFDM;
+		else
+			txdesc->rate_mode = RATE_MODE_CCK;
+	}
 
 	/*
 	 * Apply TX descriptor handling by components
 	 */
 	rt2x00crypto_create_tx_descriptor(entry, txdesc);
-	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
 	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
-	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
+
+	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+		rt2x00queue_create_tx_descriptor_ht(entry, txdesc, hwrate);
+	else
+		rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
 }
 
 static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -440,7 +515,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 	/*
 	 * Map the skb to DMA.
 	 */
-	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
 		rt2x00queue_map_txskb(entry);
 
 	return 0;
@@ -460,12 +535,9 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
 	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
 }
 
-static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
+static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
 				      struct txentry_desc *txdesc)
 {
-	struct data_queue *queue = entry->queue;
-	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-
 	/*
 	 * Check if we need to kick the queue, there are however a few rules
 	 * 1) Don't kick unless this is the last in frame in a burst.
@@ -477,7 +549,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
 	 */
 	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
-		rt2x00dev->ops->lib->kick_tx_queue(queue);
+		queue->rt2x00dev->ops->lib->kick_queue(queue);
 }
 
 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -489,8 +561,11 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	struct skb_frame_desc *skbdesc;
 	u8 rate_idx, rate_flags;
 
-	if (unlikely(rt2x00queue_full(queue)))
+	if (unlikely(rt2x00queue_full(queue))) {
+		ERROR(queue->rt2x00dev,
+		      "Dropping frame due to full tx queue %d.\n", queue->qid);
 		return -ENOBUFS;
+	}
 
 	if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
				      &entry->flags))) {
@@ -533,23 +608,23 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
+		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
 			rt2x00crypto_tx_copy_iv(skb, &txdesc);
 		else
 			rt2x00crypto_tx_remove_iv(skb, &txdesc);
 	}
 
 	/*
-	 * When DMA allocation is required we should guarentee to the
+	 * When DMA allocation is required we should guarantee to the
 	 * driver that the DMA is aligned to a 4-byte boundary.
 	 * However some drivers require L2 padding to pad the payload
 	 * rather then the header. This could be a requirement for
 	 * PCI and USB devices, while header alignment only is valid
 	 * for PCI devices.
 	 */
-	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
+	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
 		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
-	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
+	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
 		rt2x00queue_align_frame(entry->skb);
 
 	/*
@@ -565,20 +640,17 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 
 	set_bit(ENTRY_DATA_PENDING, &entry->flags);
 
-	rt2x00queue_index_inc(queue, Q_INDEX);
+	rt2x00queue_index_inc(entry, Q_INDEX);
 	rt2x00queue_write_tx_descriptor(entry, &txdesc);
-	rt2x00queue_kick_tx_queue(entry, &txdesc);
+	rt2x00queue_kick_tx_queue(queue, &txdesc);
 
 	return 0;
 }
 
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
-			      struct ieee80211_vif *vif,
-			      const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+			     struct ieee80211_vif *vif)
 {
 	struct rt2x00_intf *intf = vif_to_intf(vif);
-	struct skb_frame_desc *skbdesc;
-	struct txentry_desc txdesc;
 
 	if (unlikely(!intf->beacon))
 		return -ENOBUFS;
@@ -590,17 +662,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 	 */
 	rt2x00queue_free_skb(intf->beacon);
 
-	if (!enable_beacon) {
-		rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
-		mutex_unlock(&intf->beacon_skb_mutex);
-		return 0;
-	}
+	/*
+	 * Clear beacon (single bssid devices don't need to clear the beacon
+	 * since the beacon queue will get stopped anyway).
+	 */
+	if (rt2x00dev->ops->lib->clear_beacon)
+		rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+	mutex_unlock(&intf->beacon_skb_mutex);
+
+	return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+				     struct ieee80211_vif *vif)
+{
+	struct rt2x00_intf *intf = vif_to_intf(vif);
+	struct skb_frame_desc *skbdesc;
+	struct txentry_desc txdesc;
+
+	if (unlikely(!intf->beacon))
+		return -ENOBUFS;
+
+	/*
+	 * Clean up the beacon skb.
+	 */
+	rt2x00queue_free_skb(intf->beacon);
 
 	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
-	if (!intf->beacon->skb) {
-		mutex_unlock(&intf->beacon_skb_mutex);
+	if (!intf->beacon->skb)
 		return -ENOMEM;
-	}
 
 	/*
 	 * Copy all TX descriptor information into txdesc,
@@ -617,19 +708,33 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 	skbdesc->entry = intf->beacon;
 
 	/*
-	 * Send beacon to hardware and enable beacon genaration..
+	 * Send beacon to hardware.
 	 */
 	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
 
+	return 0;
+
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+			      struct ieee80211_vif *vif)
+{
+	struct rt2x00_intf *intf = vif_to_intf(vif);
+	int ret;
+
+	mutex_lock(&intf->beacon_skb_mutex);
+	ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
 	mutex_unlock(&intf->beacon_skb_mutex);
 
-	return 0;
+	return ret;
 }
 
-void rt2x00queue_for_each_entry(struct data_queue *queue,
+bool rt2x00queue_for_each_entry(struct data_queue *queue,
 				enum queue_index start,
 				enum queue_index end,
-				void (*fn)(struct queue_entry *entry))
+				void *data,
+				bool (*fn)(struct queue_entry *entry,
+					   void *data))
 {
 	unsigned long irqflags;
 	unsigned int index_start;
@@ -640,7 +745,7 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
 		ERROR(queue->rt2x00dev,
 		      "Entry requested from invalid index range (%d - %d)\n",
 		      start, end);
-		return;
+		return true;
 	}
 
 	/*
@@ -649,50 +754,35 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
 	 * it should not be kicked during this run, since it
 	 * is part of another TX operation.
 	 */
-	spin_lock_irqsave(&queue->lock, irqflags);
+	spin_lock_irqsave(&queue->index_lock, irqflags);
 	index_start = queue->index[start];
 	index_end = queue->index[end];
-	spin_unlock_irqrestore(&queue->lock, irqflags);
+	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 
 	/*
-	 * Start from the TX done pointer, this guarentees that we will
+	 * Start from the TX done pointer, this guarantees that we will
 	 * send out all frames in the correct order.
 	 */
 	if (index_start < index_end) {
-		for (i = index_start; i < index_end; i++)
-			fn(&queue->entries[i]);
+		for (i = index_start; i < index_end; i++) {
+			if (fn(&queue->entries[i], data))
+				return true;
+		}
 	} else {
-		for (i = index_start; i < queue->limit; i++)
-			fn(&queue->entries[i]);
+		for (i = index_start; i < queue->limit; i++) {
+			if (fn(&queue->entries[i], data))
+				return true;
+		}
 
-		for (i = 0; i < index_end; i++)
-			fn(&queue->entries[i]);
+		for (i = 0; i < index_end; i++) {
+			if (fn(&queue->entries[i], data))
+				return true;
+		}
 	}
-}
-EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
-
-struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
-					 const enum data_queue_qid queue)
-{
-	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
-	if (queue == QID_RX)
-		return rt2x00dev->rx;
-
-	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
-		return &rt2x00dev->tx[queue];
-
-	if (!rt2x00dev->bcn)
-		return NULL;
-
-	if (queue == QID_BEACON)
-		return &rt2x00dev->bcn[0];
-	else if (queue == QID_ATIM && atim)
-		return &rt2x00dev->bcn[1];
-
-	return NULL;
+
+	return false;
 }
-EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
+EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
 
 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
@@ -706,18 +796,19 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, enum queue_index index)
 		return NULL;
 	}
 
-	spin_lock_irqsave(&queue->lock, irqflags);
+	spin_lock_irqsave(&queue->index_lock, irqflags);
 
 	entry = &queue->entries[queue->index[index]];
 
-	spin_unlock_irqrestore(&queue->lock, irqflags);
+	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 
 	return entry;
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
 
-void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
 {
+	struct data_queue *queue = entry->queue;
 	unsigned long irqflags;
 
 	if (unlikely(index >= Q_INDEX_MAX)) {
@@ -726,13 +817,13 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 		return;
 	}
 
-	spin_lock_irqsave(&queue->lock, irqflags);
+	spin_lock_irqsave(&queue->index_lock, irqflags);
 
 	queue->index[index]++;
 	if (queue->index[index] >= queue->limit)
 		queue->index[index] = 0;
 
-	queue->last_action[index] = jiffies;
+	entry->last_action = jiffies;
 
 	if (index == Q_INDEX) {
 		queue->length++;
@@ -741,33 +832,218 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
 		queue->count++;
 	}
 
-	spin_unlock_irqrestore(&queue->lock, irqflags);
+	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
-static void rt2x00queue_reset(struct data_queue *queue)
+void rt2x00queue_pause_queue(struct data_queue *queue)
 {
-	unsigned long irqflags;
-	unsigned int i;
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+		return;
 
-	spin_lock_irqsave(&queue->lock, irqflags);
+	switch (queue->qid) {
+	case QID_AC_VO:
+	case QID_AC_VI:
+	case QID_AC_BE:
+	case QID_AC_BK:
+		/*
+		 * For TX queues, we have to disable the queue
+		 * inside mac80211.
+		 */
+		ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
 
-	queue->count = 0;
-	queue->length = 0;
+void rt2x00queue_unpause_queue(struct data_queue *queue)
+{
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    !test_bit(QUEUE_STARTED, &queue->flags) ||
+	    !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
+		return;
 
-	for (i = 0; i < Q_INDEX_MAX; i++) {
-		queue->index[i] = 0;
-		queue->last_action[i] = jiffies;
+	switch (queue->qid) {
+	case QID_AC_VO:
+	case QID_AC_VI:
+	case QID_AC_BE:
+	case QID_AC_BK:
+		/*
+		 * For TX queues, we have to enable the queue
+		 * inside mac80211.
+		 */
+		ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
+		break;
+	case QID_RX:
+		/*
+		 * For RX we need to kick the queue now in order to
+		 * receive frames.
+		 */
+		queue->rt2x00dev->ops->lib->kick_queue(queue);
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
+
+void rt2x00queue_start_queue(struct data_queue *queue)
+{
+	mutex_lock(&queue->status_lock);
+
+	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
+		mutex_unlock(&queue->status_lock);
+		return;
 	}
 
-	spin_unlock_irqrestore(&queue->lock, irqflags);
+	set_bit(QUEUE_PAUSED, &queue->flags);
+
+	queue->rt2x00dev->ops->lib->start_queue(queue);
+
+	rt2x00queue_unpause_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
 }
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
+
+void rt2x00queue_stop_queue(struct data_queue *queue)
+{
+	mutex_lock(&queue->status_lock);
+
+	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
+		mutex_unlock(&queue->status_lock);
+		return;
+	}
+
+	rt2x00queue_pause_queue(queue);
+
+	queue->rt2x00dev->ops->lib->stop_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
+
+void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
+{
+	bool started;
+	bool tx_queue =
+		(queue->qid == QID_AC_VO) ||
+		(queue->qid == QID_AC_VI) ||
+		(queue->qid == QID_AC_BE) ||
+		(queue->qid == QID_AC_BK);
+
+	mutex_lock(&queue->status_lock);
+
+	/*
+	 * If the queue has been started, we must stop it temporarily
+	 * to prevent any new frames to be queued on the device. If
+	 * we are not dropping the pending frames, the queue must
+	 * only be stopped in the software and not the hardware,
+	 * otherwise the queue will never become empty on its own.
+	 */
+	started = test_bit(QUEUE_STARTED, &queue->flags);
+	if (started) {
+		/*
+		 * Pause the queue
+		 */
+		rt2x00queue_pause_queue(queue);
+
+		/*
+		 * If we are not supposed to drop any pending
+		 * frames, this means we must force a start (=kick)
+		 * to the queue to make sure the hardware will
+		 * start transmitting.
+		 */
+		if (!drop && tx_queue)
+			queue->rt2x00dev->ops->lib->kick_queue(queue);
+	}
+
+	/*
+	 * Check if driver supports flushing, if that is the case we can
+	 * defer the flushing to the driver. Otherwise we must use the
+	 * alternative which just waits for the queue to become empty.
+	 */
+	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
+		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
+
+	/*
+	 * The queue flush has failed...
+	 */
+	if (unlikely(!rt2x00queue_empty(queue)))
+		WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
+
+	/*
+	 * Restore the queue to the previous status
+	 */
+	if (started)
+		rt2x00queue_unpause_queue(queue);
+
+	mutex_unlock(&queue->status_lock);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
+
+void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
+{
+	struct data_queue *queue;
+
+	/*
+	 * rt2x00queue_start_queue will call ieee80211_wake_queue
+	 * for each queue after is has been properly initialized.
+	 */
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_start_queue(queue);
+
+	rt2x00queue_start_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
 
 void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue;
 
-	txall_queue_for_each(rt2x00dev, queue)
-		rt2x00dev->ops->lib->kill_tx_queue(queue);
+	/*
+	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
+	 * as well, but we are completely shutting doing everything
+	 * now, so it is much safer to stop all TX queues at once,
+	 * and use rt2x00queue_stop_queue for cleaning up.
+	 */
+	ieee80211_stop_queues(rt2x00dev->hw);
+
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_stop_queue(queue);
+
+	rt2x00queue_stop_queue(rt2x00dev->rx);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
+
+void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
+{
+	struct data_queue *queue;
+
+	tx_queue_for_each(rt2x00dev, queue)
+		rt2x00queue_flush_queue(queue, drop);
+
+	rt2x00queue_flush_queue(rt2x00dev->rx, drop);
+}
+EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
+
+static void rt2x00queue_reset(struct data_queue *queue)
+{
+	unsigned long irqflags;
+	unsigned int i;
+
+	spin_lock_irqsave(&queue->index_lock, irqflags);
+
+	queue->count = 0;
+	queue->length = 0;
+
+	for (i = 0; i < Q_INDEX_MAX; i++)
+		queue->index[i] = 0;
+
+	spin_unlock_irqrestore(&queue->index_lock, irqflags);
 }
 
 void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -778,11 +1054,8 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
 	queue_for_each(rt2x00dev, queue) {
 		rt2x00queue_reset(queue);
 
-		for (i = 0; i < queue->limit; i++) {
+		for (i = 0; i < queue->limit; i++)
 			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
-			if (queue->qid == QID_RX)
-				rt2x00queue_index_inc(queue, Q_INDEX);
-		}
 	}
 }
 
@@ -809,8 +1082,8 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
 		return -ENOMEM;
 
 #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
-	( ((char *)(__base)) + ((__limit) * (__esize)) + \
-	    ((__index) * (__psize)) )
+	(((char *)(__base)) + ((__limit) * (__esize)) + \
+	    ((__index) * (__psize)))
 
 	for (i = 0; i < queue->limit; i++) {
 		entries[i].flags = 0;
@@ -875,8 +1148,8 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
 		if (status)
 			goto exit;
 
-	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
-		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
+	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
+		status = rt2x00queue_alloc_entries(rt2x00dev->atim,
						   rt2x00dev->ops->atim);
 		if (status)
 			goto exit;
@@ -911,7 +1184,8 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
 static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
 {
-	spin_lock_init(&queue->lock);
+	mutex_init(&queue->status_lock);
+	spin_lock_init(&queue->index_lock);
 
 	queue->rt2x00dev = rt2x00dev;
 	queue->qid = qid;
@@ -926,7 +1200,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	struct data_queue *queue;
 	enum data_queue_qid qid;
 	unsigned int req_atim =
-	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
 
 	/*
 	 * We need the following queues:
@@ -949,11 +1223,12 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	rt2x00dev->rx = queue;
 	rt2x00dev->tx = &queue[1];
 	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
+	rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
 
 	/*
 	 * Initialize queue parameters.
 	 * RX: qid = QID_RX
-	 * TX: qid = QID_AC_BE + index
+	 * TX: qid = QID_AC_VO + index
 	 * TX: cw_min: 2^5 = 32.
 	 * TX: cw_max: 2^10 = 1024.
 	 * BCN: qid = QID_BEACON
@@ -961,13 +1236,13 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	 */
 	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
 
-	qid = QID_AC_BE;
+	qid = QID_AC_VO;
 	tx_queue_for_each(rt2x00dev, queue)
 		rt2x00queue_init(rt2x00dev, queue, qid++);
 
-	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
+	rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
 	if (req_atim)
-		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);
+		rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
 
 	return 0;
 }
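
Usage note (not part of the commit above): rt2x00queue_for_each_entry() now takes an opaque data pointer and a callback that returns bool, so a walk over the queue can be aborted early by returning true. A minimal, hypothetical sketch of a caller follows; the example_* names are made up for illustration, and the Q_INDEX_DONE/Q_INDEX index names are assumed from the rt2x00 queue definitions rather than taken from this patch.

/*
 * Hypothetical callback: count entries between the done index and the
 * current TX index that are still owned by the device. Returning false
 * keeps the iteration going; returning true would abort it early.
 */
static bool example_count_pending(struct queue_entry *entry, void *data)
{
	unsigned int *pending = data;

	if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		(*pending)++;

	return false;
}

static unsigned int example_queue_pending(struct data_queue *queue)
{
	unsigned int pending = 0;

	/* Walk from the done pointer up to the TX pointer. */
	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   &pending, example_count_pending);
	return pending;
}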