Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 272
1 file changed, 178 insertions(+), 94 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4358051bfe1..29edb9fbe6f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -60,7 +60,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
* at least 8 bytes available in headroom for IV/EIV
* and 8 bytes for ICV data as tailroom.
*/
- if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
+ if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
head_size += 8;
tail_size += 8;
}
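
This hunk is part of a wider rename: capability and requirement bits move out
of the general rt2x00dev->flags word into a dedicated cap_flags word, losing
their DRIVER_/CONFIG_ prefixes on the way. A minimal sketch of the resulting
split, using only names that appear in this diff (the authoritative enum lives
in rt2x00.h, outside this file's diff):

    /* Illustrative layout only; real definitions are in rt2x00.h. */
    enum rt2x00_capability_flags {
            /* REQUIRE_* flags: constraints the driver imposes. */
            REQUIRE_DMA,
            REQUIRE_COPY_IV,
            REQUIRE_L2PAD,
            REQUIRE_SW_SEQNO,
            REQUIRE_HT_TX_DESC,
            REQUIRE_ATIM_QUEUE,

            /* CAPABILITY_* flags: features the hardware provides. */
            CAPABILITY_HW_CRYPTO,
    };

    /* Set once at probe time, then tested with test_bit() as above:
     * __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags);
     */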
@@ -86,7 +86,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = entry;
- if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
+ if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
skb->data,
skb->len,
@@ -148,19 +148,6 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
skb_trim(skb, frame_length);
}
-void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
-{
- unsigned int frame_length = skb->len;
- unsigned int align = ALIGN_SIZE(skb, header_length);
-
- if (!align)
- return;
-
- skb_push(skb, align);
- memmove(skb->data, skb->data + align, frame_length);
- skb_trim(skb, frame_length);
-}
-
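
The helper removed above used the same push/memmove/trim idiom that survives
in rt2x00queue_align_frame(), only keyed off the payload offset rather than
the frame start. A standalone userspace sketch of the idiom, assuming the
buffer has enough headroom (the kernel version operates on an skb and derives
the shift from ALIGN_SIZE()):

    #include <stdint.h>
    #include <string.h>

    /* Shift a frame toward the buffer head so it starts 4-byte aligned.
     * Mirrors the skb_push()/memmove()/skb_trim() sequence above; the
     * caller treats the return value as the new frame start. */
    static uint8_t *align_frame(uint8_t *data, size_t len, size_t headroom)
    {
            size_t align = (uintptr_t)data & 3;     /* misalignment, 0..3 */

            if (!align || headroom < align)
                    return data;

            memmove(data - align, data, len);
            return data - align;
    }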
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
unsigned int payload_length = skb->len - header_length;
@@ -213,20 +200,20 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
skb_pull(skb, l2pad);
}
-static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
- unsigned long irqflags;
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
+ if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags))
return;
/*
@@ -240,23 +227,23 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
* sequence counting per-frame, since those will override the
* sequence counter given by mac80211.
*/
- spin_lock_irqsave(&intf->seqlock, irqflags);
+ spin_lock(&intf->seqlock);
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
intf->seqno += 0x10;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
- spin_unlock_irqrestore(&intf->seqlock, irqflags);
+ spin_unlock(&intf->seqlock);
}
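
Besides the switch to explicit rt2x00dev/skb parameters, this hunk drops the
irqsave variant around the seqlock, presumably safe because after this series
the lock is no longer taken from a context that must exclude hard IRQs (an
inference; the hunk itself does not say so). The "seqno += 0x10" in the
context lines follows directly from the 802.11 Sequence Control layout:

    #include <stdint.h>

    #define IEEE80211_SCTL_FRAG     0x000F  /* fragment number: bits 0-3 */
    #define IEEE80211_SCTL_SEQ      0xFFF0  /* sequence number: bits 4-15 */

    /* The 12-bit sequence number sits in the upper bits of seq_ctrl, so
     * advancing it by one means adding 0x10 to the raw field. Host byte
     * order shown; the driver applies cpu_to_le16() as above. */
    static uint16_t apply_seqno(uint16_t seq_ctrl, uint16_t seqno)
    {
            seq_ctrl &= IEEE80211_SCTL_FRAG;        /* keep fragment bits */
            seq_ctrl |= seqno & IEEE80211_SCTL_SEQ;
            return seq_ctrl;
    }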
-static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc,
const struct rt2x00_rate *hwrate)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
unsigned int data_length;
unsigned int duration;
@@ -273,8 +260,8 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
txdesc->u.plcp.ifs = IFS_SIFS;
/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
- data_length = entry->skb->len + 4;
- data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
+ data_length = skb->len + 4;
+ data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);
/*
* PLCP setup
@@ -315,12 +302,92 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
}
}
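
For the legacy (PLCP) path the on-air length is the MPDU plus the 4-byte FCS
plus the cipher overhead. A hedged sketch of the kind of accounting
rt2x00crypto_tx_overhead() performs; the per-cipher byte counts below are the
usual 802.11 values, consistent with the 8-byte head (IV/EIV) and 8-byte tail
(ICV) reservation in the rxskb hunk above, but the real helper derives them
from the key attached to the frame:

    enum { CIPHER_NONE, CIPHER_WEP, CIPHER_TKIP, CIPHER_CCMP };

    /* Illustrative TX overhead in bytes: header expansion + trailer. */
    static unsigned int crypto_tx_overhead_sketch(int cipher)
    {
            switch (cipher) {
            case CIPHER_WEP:        return 4 + 4;   /* IV + ICV */
            case CIPHER_TKIP:       return 8 + 12;  /* IV/EIV + MIC + ICV */
            case CIPHER_CCMP:       return 8 + 8;   /* CCMP header + MIC */
            default:                return 0;
            }
    }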
-static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
+static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
+ struct txentry_desc *txdesc,
+ const struct rt2x00_rate *hwrate)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (tx_info->control.sta)
+ txdesc->u.ht.mpdu_density =
+ tx_info->control.sta->ht_cap.ampdu_density;
+
+ txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
+
+ /*
+ * Only one STBC stream is supported for now.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_STBC)
+ txdesc->u.ht.stbc = 1;
+
+ /*
+ * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
+ * mcs rate to be used
+ */
+ if (txrate->flags & IEEE80211_TX_RC_MCS) {
+ txdesc->u.ht.mcs = txrate->idx;
+
+ /*
+ * MIMO PS should be set to 1 for STAs using dynamic SM PS
+ * when using more than one tx stream (>MCS7).
+ */
+ if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
+ ((tx_info->control.sta->ht_cap.cap &
+ IEEE80211_HT_CAP_SM_PS) >>
+ IEEE80211_HT_CAP_SM_PS_SHIFT) ==
+ WLAN_HT_CAP_SM_PS_DYNAMIC)
+ __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
+ } else {
+ txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
+ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ txdesc->u.ht.mcs |= 0x08;
+ }
+
+ /*
+ * This frame is eligible for an AMPDU, however, don't aggregate
+ * frames that are intended to probe a specific tx rate.
+ */
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
+ !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+ __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+
+ /*
+ * Set 40 MHz mode if necessary (for legacy rates this will
+ * duplicate the frame to both channels).
+ */
+ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH ||
+ txrate->flags & IEEE80211_TX_RC_DUP_DATA)
+ __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
+ if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
+ __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
+
+ /*
+ * Determine IFS values
+ * - Use TXOP_BACKOFF for management frames except beacons
+ * - Use TXOP_SIFS for fragment bursts
+ * - Use TXOP_HTTXOP for everything else
+ *
+ * Note: rt2800 devices won't use CTS protection (if used)
+ * for frames not transmitted with TXOP_HTTXOP
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_beacon(hdr->frame_control))
+ txdesc->u.ht.txop = TXOP_BACKOFF;
+ else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ txdesc->u.ht.txop = TXOP_SIFS;
+ else
+ txdesc->u.ht.txop = TXOP_HTTXOP;
+}
+
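
The MIMO-PS condition in the function above packs a 2-bit field extraction
into one expression; spelled out, with the real mac80211 values for the
defines:

    #include <stdint.h>

    #define IEEE80211_HT_CAP_SM_PS          0x000C
    #define IEEE80211_HT_CAP_SM_PS_SHIFT    2
    #define WLAN_HT_CAP_SM_PS_DYNAMIC       1

    /* True when the peer advertises dynamic SM power save, i.e. it keeps
     * a single chain active and must be woken (via RTS) before it can
     * receive a multi-stream (>MCS7) transmission. */
    static int sta_uses_dynamic_smps(uint16_t ht_cap)
    {
            return ((ht_cap & IEEE80211_HT_CAP_SM_PS) >>
                    IEEE80211_HT_CAP_SM_PS_SHIFT) ==
                   WLAN_HT_CAP_SM_PS_DYNAMIC;
    }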
+static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
+ struct sk_buff *skb,
struct txentry_desc *txdesc)
{
- struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
struct ieee80211_rate *rate;
const struct rt2x00_rate *hwrate = NULL;
@@ -330,8 +397,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Header and frame information.
*/
- txdesc->length = entry->skb->len;
- txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
+ txdesc->length = skb->len;
+ txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb);
/*
* Check whether this frame is to be acked.
@@ -406,13 +473,15 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Apply TX descriptor handling by components
*/
- rt2x00crypto_create_tx_descriptor(entry, txdesc);
- rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
+ rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
+ rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
- if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
- rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
+ if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+ rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
+ hwrate);
else
- rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
+ rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc,
+ hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@@ -449,7 +518,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
/*
* Map the skb to DMA.
*/
- if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags))
rt2x00queue_map_txskb(entry);
return 0;
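
rt2x00queue_map_txskb() is the TX counterpart of the RX mapping visible at the
top of this diff. A sketch under that assumption; the real helper lives
elsewhere in this file and additionally deals with mapping failures:

    static void map_txskb_sketch(struct rt2x00_dev *rt2x00dev,
                                 struct sk_buff *skb)
    {
            struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

            /* Same dma_map_single() pattern as the rxskb hunk, with the
             * direction flipped for transmission. */
            skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data,
                                              skb->len, DMA_TO_DEVICE);
            skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
    }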
@@ -490,30 +559,18 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local)
{
struct ieee80211_tx_info *tx_info;
- struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ struct queue_entry *entry;
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
u8 rate_idx, rate_flags;
-
- if (unlikely(rt2x00queue_full(queue)))
- return -ENOBUFS;
-
- if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
- &entry->flags))) {
- ERROR(queue->rt2x00dev,
- "Arrived at non-free entry in the non-full queue %d.\n"
- "Please file bug report to %s.\n",
- queue->qid, DRV_PROJECT);
- return -EINVAL;
- }
+ int ret = 0;
/*
* Copy all TX descriptor information into txdesc,
* after that we are free to use the skb->cb array
* for our information.
*/
- entry->skb = skb;
- rt2x00queue_create_tx_descriptor(entry, &txdesc);
+ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc);
/*
* All information is retrieved from the skb->cb array,
@@ -525,7 +582,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
rate_flags = tx_info->control.rates[0].flags;
skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
- skbdesc->entry = entry;
skbdesc->tx_rate_idx = rate_idx;
skbdesc->tx_rate_flags = rate_flags;
@@ -539,7 +595,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
*/
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
- if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
+ if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
rt2x00crypto_tx_copy_iv(skb, &txdesc);
else
rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -553,10 +609,34 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
* PCI and USB devices, while header alignment is only valid
* for PCI devices.
*/
- if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
- rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
- else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
- rt2x00queue_align_frame(entry->skb);
+ if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
+ rt2x00queue_insert_l2pad(skb, txdesc.header_length);
+ else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+ rt2x00queue_align_frame(skb);
+
+ spin_lock(&queue->tx_lock);
+
+ if (unlikely(rt2x00queue_full(queue))) {
+ ERROR(queue->rt2x00dev,
+ "Dropping frame due to full tx queue %d.\n", queue->qid);
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+
+ if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA,
+ &entry->flags))) {
+ ERROR(queue->rt2x00dev,
+ "Arrived at non-free entry in the non-full queue %d.\n"
+ "Please file bug report to %s.\n",
+ queue->qid, DRV_PROJECT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skbdesc->entry = entry;
+ entry->skb = skb;
/*
* It could be possible that the queue was corrupted and this
@@ -566,16 +646,19 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
entry->skb = NULL;
- return -EIO;
+ ret = -EIO;
+ goto out;
}
set_bit(ENTRY_DATA_PENDING, &entry->flags);
- rt2x00queue_index_inc(queue, Q_INDEX);
+ rt2x00queue_index_inc(entry, Q_INDEX);
rt2x00queue_write_tx_descriptor(entry, &txdesc);
rt2x00queue_kick_tx_queue(queue, &txdesc);
- return 0;
+out:
+ spin_unlock(&queue->tx_lock);
+ return ret;
}
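
The net effect of the rework in this function: descriptor construction and the
IV/L2pad mangling now run without any lock held, and only the short
claim-and-advance section is serialized. Condensed shape of the new hot path,
paraphrasing the hunks above:

    /*
     * rt2x00queue_write_tx_frame(), after this patch:
     *
     *      build txdesc from skb                   (lockless, no entry yet)
     *      copy/strip IV, insert L2pad or align    (still lockless)
     *      spin_lock(&queue->tx_lock);
     *              queue full?  -> -ENOBUFS        (checked under the lock)
     *              entry = get_entry(Q_INDEX);     (claimed under the lock)
     *              write tx data, index_inc(entry, Q_INDEX)
     *      spin_unlock(&queue->tx_lock);
     *
     * Two concurrent submitters can no longer both pass the full-check
     * and collide on the same Q_INDEX entry; the old code could only
     * detect that collision after the fact via ENTRY_OWNER_DEVICE_DATA.
     */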
int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
@@ -629,7 +712,7 @@ int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
* after that we are free to use the skb->cb array
* for our information.
*/
- rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
+ rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc);
/*
* Fill in skb descriptor
@@ -660,10 +743,12 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
return ret;
}
-void rt2x00queue_for_each_entry(struct data_queue *queue,
+bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- void (*fn)(struct queue_entry *entry))
+ void *data,
+ bool (*fn)(struct queue_entry *entry,
+ void *data))
{
unsigned long irqflags;
unsigned int index_start;
@@ -674,7 +759,7 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
ERROR(queue->rt2x00dev,
"Entry requested from invalid index range (%d - %d)\n",
start, end);
- return;
+ return true;
}
/*
@@ -693,15 +778,23 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
* send out all frames in the correct order.
*/
if (index_start < index_end) {
- for (i = index_start; i < index_end; i++)
- fn(&queue->entries[i]);
+ for (i = index_start; i < index_end; i++) {
+ if (fn(&queue->entries[i], data))
+ return true;
+ }
} else {
- for (i = index_start; i < queue->limit; i++)
- fn(&queue->entries[i]);
+ for (i = index_start; i < queue->limit; i++) {
+ if (fn(&queue->entries[i], data))
+ return true;
+ }
- for (i = 0; i < index_end; i++)
- fn(&queue->entries[i]);
+ for (i = 0; i < index_end; i++) {
+ if (fn(&queue->entries[i], data))
+ return true;
+ }
}
+
+ return false;
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
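
With the new signature a callback can abort the walk early and thread state
through the iteration. A hypothetical caller, using only flag and index names
from this driver (the helper itself is illustrative, not part of this patch):

    /* Count consecutive pending entries, stopping at the first idle one. */
    static bool entry_is_pending(struct queue_entry *entry, void *data)
    {
            unsigned int *count = data;

            if (!test_bit(ENTRY_DATA_PENDING, &entry->flags))
                    return true;            /* stop the iteration */

            (*count)++;
            return false;                   /* keep walking */
    }

    /*
     * unsigned int pending = 0;
     * rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
     *                            &pending, entry_is_pending);
     */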
@@ -727,8 +820,9 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
-void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
+void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
{
+ struct data_queue *queue = entry->queue;
unsigned long irqflags;
if (unlikely(index >= Q_INDEX_MAX)) {
@@ -743,7 +837,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
if (queue->index[index] >= queue->limit)
queue->index[index] = 0;
- queue->last_action[index] = jiffies;
+ entry->last_action = jiffies;
if (index == Q_INDEX) {
queue->length++;
@@ -848,7 +942,6 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
- unsigned int i;
bool started;
bool tx_queue =
(queue->qid == QID_AC_VO) ||
@@ -883,20 +976,12 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
}
/*
- * Check if driver supports flushing, we can only guarantee
- * full support for flushing if the driver is able
- * to cancel all pending frames (drop = true).
+ * Check if driver supports flushing, if that is the case we can
+ * defer the flushing to the driver. Otherwise we must use the
+ * alternative which just waits for the queue to become empty.
*/
- if (drop && queue->rt2x00dev->ops->lib->flush_queue)
- queue->rt2x00dev->ops->lib->flush_queue(queue);
-
- /*
- * When we don't want to drop any frames, or when
- * the driver doesn't fully flush the queue correcly,
- * we must wait for the queue to become empty.
- */
- for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
- msleep(10);
+ if (likely(queue->rt2x00dev->ops->lib->flush_queue))
+ queue->rt2x00dev->ops->lib->flush_queue(queue, drop);
/*
* The queue flush has failed...
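
Because the core no longer polls for an empty queue itself, drivers are now
expected to provide flush_queue(); without it the queue is not flushed at all
and the failure path below triggers. A minimal sketch of such a callback,
reusing the wait loop this hunk deletes (the cancel helper is hypothetical):

    static void example_flush_queue(struct data_queue *queue, bool drop)
    {
            unsigned int i;

            if (drop)
                    example_cancel_pending_frames(queue);   /* hypothetical */

            /* The wait loop formerly in rt2x00queue_flush_queue(). */
            for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
                    msleep(10);
    }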
@@ -969,10 +1054,8 @@ static void rt2x00queue_reset(struct data_queue *queue)
queue->count = 0;
queue->length = 0;
- for (i = 0; i < Q_INDEX_MAX; i++) {
+ for (i = 0; i < Q_INDEX_MAX; i++)
queue->index[i] = 0;
- queue->last_action[i] = jiffies;
- }
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
@@ -1079,7 +1162,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
if (status)
goto exit;
- if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
+ if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
status = rt2x00queue_alloc_entries(rt2x00dev->atim,
rt2x00dev->ops->atim);
if (status)
@@ -1116,6 +1199,7 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
struct data_queue *queue, enum data_queue_qid qid)
{
mutex_init(&queue->status_lock);
+ spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->index_lock);
queue->rt2x00dev = rt2x00dev;
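
Lock roles after this patch, as inferred from the hunks above (the patch
itself does not spell this out):

    /*
     *      queue->tx_lock    - serializes frame submission in
     *                          rt2x00queue_write_tx_frame()
     *      queue->index_lock - protects queue->index[], length and count;
     *                          taken in rt2x00queue_index_inc() from both
     *                          the submission and completion paths
     *
     * index_lock nests inside tx_lock: rt2x00queue_index_inc() is called
     * with tx_lock held on the TX path, never the other way around.
     */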
@@ -1131,7 +1215,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
enum data_queue_qid qid;
unsigned int req_atim =
- !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
+ !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
/*
* We need the following queues: