Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-tx.c')
 drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 105 ++++++++++++++++-----------
 1 file changed, 56 insertions(+), 49 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 4afae144658..342de780a36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -98,9 +98,9 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
/**
* iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
-void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
+static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
int write_ptr = txq->q.write_ptr;
@@ -112,21 +112,19 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
- if (txq_id != priv->cmd_queue) {
- sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
- sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += WEP_IV_LEN + WEP_ICV_LEN;
- break;
- }
+ sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+ sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += WEP_IV_LEN + WEP_ICV_LEN;
+ break;
}
bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
@@ -138,8 +136,8 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+ struct iwl_tx_queue *txq)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
int txq_id = txq->q.id;
@@ -539,7 +537,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct iwl_tx_cmd *tx_cmd;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int txq_id;
- dma_addr_t phys_addr;
+ dma_addr_t phys_addr = 0;
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
u16 len, firstlen, secondlen;
@@ -566,7 +564,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
+ goto drop_unlock_priv;
}
fc = hdr->frame_control;
@@ -591,7 +589,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
- goto drop_unlock;
+ goto drop_unlock_priv;
}
}
@@ -635,10 +633,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (ieee80211_is_data_qos(fc)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
+
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
+ goto drop_unlock_sta;
+
seq_number = priv->stations[sta_id].tid[tid].seq_number;
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl = hdr->seq_ctrl &
@@ -656,18 +654,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq = &priv->txq[txq_id];
q = &txq->q;
- if (unlikely(iwl_queue_space(q) < q->high_mark)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
-
- spin_unlock(&priv->sta_lock);
+ if (unlikely(iwl_queue_space(q) < q->high_mark))
+ goto drop_unlock_sta;
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -731,12 +719,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txcmd_phys = pci_map_single(priv->pci_dev,
&out_cmd->hdr, firstlen,
PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
+ goto drop_unlock_sta;
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, firstlen, 1, 0);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
@@ -751,10 +737,30 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (secondlen > 0) {
phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
secondlen, PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+ pci_unmap_single(priv->pci_dev,
+ dma_unmap_addr(out_meta, mapping),
+ dma_unmap_len(out_meta, len),
+ PCI_DMA_BIDIRECTIONAL);
+ goto drop_unlock_sta;
+ }
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ priv->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ priv->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&priv->sta_lock);
+
+ /* Attach buffers to TFD */
+ priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+ txcmd_phys, firstlen, 1, 0);
+ if (secondlen > 0)
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, secondlen,
0, 0);
- }
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -773,8 +779,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up entry for this TFD in Tx byte-count array */
if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
+ iwlagn_txq_update_byte_cnt_tbl(priv, txq,
+ le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
firstlen, PCI_DMA_BIDIRECTIONAL);
@@ -820,7 +826,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
return 0;
-drop_unlock:
+drop_unlock_sta:
+ spin_unlock(&priv->sta_lock);
+drop_unlock_priv:
spin_unlock_irqrestore(&priv->lock, flags);
return -1;
}
@@ -1253,8 +1261,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
tx_info->skb = NULL;
- if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
- priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+ iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
}
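
The substance of this patch is twofold: iwlagn_tx_skb() now checks pci_dma_mapping_error() after each pci_map_single() and only attaches buffers to the TFD once both mappings have succeeded (unwinding the first mapping if the second fails), and the single drop_unlock label is split into drop_unlock_sta / drop_unlock_priv so each error path releases exactly the locks it still holds. The sketch below is an illustrative, self-contained kernel-style rendering of that pattern, not the driver code itself; struct toy_priv, its rfkill field, and toy_tx() are made-up names standing in for the real iwlwifi structures.

/*
 * Illustrative sketch only -- mirrors the error handling added by this
 * patch: map both DMA buffers and check each mapping before touching the
 * hardware descriptor, and use a two-label unwind ladder so every failure
 * path drops exactly the locks it still holds.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

struct toy_priv {
	struct pci_dev *pci_dev;
	spinlock_t lock;	/* plays the role of priv->lock */
	spinlock_t sta_lock;	/* plays the role of priv->sta_lock */
	bool rfkill;		/* toy stand-in for iwl_is_rfkill() */
};

static int toy_tx(struct toy_priv *priv, void *cmd, size_t cmd_len,
		  void *payload, size_t payload_len)
{
	dma_addr_t cmd_phys, payload_phys = 0;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (priv->rfkill)
		goto drop_unlock_priv;	/* only priv->lock is held here */

	spin_lock(&priv->sta_lock);

	/* First mapping: Tx command + MAC header (bidirectional). */
	cmd_phys = pci_map_single(priv->pci_dev, cmd, cmd_len,
				  PCI_DMA_BIDIRECTIONAL);
	if (unlikely(pci_dma_mapping_error(priv->pci_dev, cmd_phys)))
		goto drop_unlock_sta;

	/* Second mapping: frame payload (device-bound only). */
	if (payload_len) {
		payload_phys = pci_map_single(priv->pci_dev, payload,
					      payload_len, PCI_DMA_TODEVICE);
		if (unlikely(pci_dma_mapping_error(priv->pci_dev,
						   payload_phys))) {
			/* Unwind the first mapping before bailing out. */
			pci_unmap_single(priv->pci_dev, cmd_phys, cmd_len,
					 PCI_DMA_BIDIRECTIONAL);
			goto drop_unlock_sta;
		}
	}

	/*
	 * Both mappings succeeded: only now would the buffers be attached
	 * to the TFD (the real code calls txq_attach_buf_to_tfd() here).
	 */
	spin_unlock(&priv->sta_lock);
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;

drop_unlock_sta:
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}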