Diffstat (limited to 'drivers/net/wireless/mwifiex/wmm.c')
-rw-r--r-- | drivers/net/wireless/mwifiex/wmm.c | 208
1 file changed, 74 insertions, 134 deletions
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 32adc878041..4be3d33ceae 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -191,9 +191,6 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
 		}
 		list_add_tail(&ra_list->list,
 			      &priv->wmm.tid_tbl_ptr[i].ra_list);
-
-		if (!priv->wmm.tid_tbl_ptr[i].ra_list_curr)
-			priv->wmm.tid_tbl_ptr[i].ra_list_curr = ra_list;
 	}
 }
 
@@ -424,7 +421,6 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
 			priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
 			priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
 			priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
-			priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
 		}
 
 		priv->aggr_prio_tbl[6].amsdu
@@ -436,10 +432,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
 			= priv->aggr_prio_tbl[7].ampdu_user
 			= BA_STREAM_NOT_ALLOWED;
 
-		priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT;
-		priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
-		priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
-
+		mwifiex_set_ba_params(priv);
 		mwifiex_reset_11n_rx_seq_num(priv);
 
 		atomic_set(&priv->wmm.tx_pkts_queued, 0);
@@ -533,8 +526,6 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
 		}
 
 		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
-
-		priv->wmm.tid_tbl_ptr[i].ra_list_curr = NULL;
 	}
 }
 
@@ -688,13 +679,13 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
 	ra_list->total_pkts_size += skb->len;
 	ra_list->pkt_count++;
 
-	atomic_inc(&priv->wmm.tx_pkts_queued);
-
 	if (atomic_read(&priv->wmm.highest_queued_prio) <
 						tos_to_tid_inv[tid_down])
 		atomic_set(&priv->wmm.highest_queued_prio,
 			   tos_to_tid_inv[tid_down]);
 
+	atomic_inc(&priv->wmm.tx_pkts_queued);
+
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
 
@@ -886,128 +877,65 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 					     struct mwifiex_private **priv, int *tid)
 {
 	struct mwifiex_private *priv_tmp;
-	struct mwifiex_ra_list_tbl *ptr, *head;
-	struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
+	struct mwifiex_ra_list_tbl *ptr;
 	struct mwifiex_tid_tbl *tid_ptr;
 	atomic_t *hqp;
-	int is_list_empty;
-	unsigned long flags;
+	unsigned long flags_bss, flags_ra;
 	int i, j;
 
+	/* check the BSS with highest priority first */
 	for (j = adapter->priv_num - 1; j >= 0; --j) {
 		spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
-				  flags);
-		is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
-					   .bss_prio_head);
-		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
-				       flags);
-		if (is_list_empty)
-			continue;
+				  flags_bss);
 
-		if (adapter->bss_prio_tbl[j].bss_prio_cur ==
-		    (struct mwifiex_bss_prio_node *)
-		    &adapter->bss_prio_tbl[j].bss_prio_head) {
-			adapter->bss_prio_tbl[j].bss_prio_cur =
-				list_first_entry(&adapter->bss_prio_tbl[j]
-						 .bss_prio_head,
-						 struct mwifiex_bss_prio_node,
-						 list);
-		}
+		/* iterate over BSS with the equal priority */
+		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
+				    &adapter->bss_prio_tbl[j].bss_prio_head,
+				    list) {
 
-		bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
-		bssprio_head = bssprio_node;
+			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-		do {
-			priv_tmp = bssprio_node->priv;
-			hqp = &priv_tmp->wmm.highest_queued_prio;
+			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+				continue;
 
+			/* iterate over the WMM queues of the BSS */
+			hqp = &priv_tmp->wmm.highest_queued_prio;
 			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
 
+				spin_lock_irqsave(&priv_tmp->wmm.
+						  ra_list_spinlock, flags_ra);
+
 				tid_ptr = &(priv_tmp)->wmm.
 					tid_tbl_ptr[tos_to_tid[i]];
 
-				/* For non-STA ra_list_curr may be NULL */
-				if (!tid_ptr->ra_list_curr)
-					continue;
-
-				spin_lock_irqsave(&tid_ptr->tid_tbl_lock,
-						  flags);
-				is_list_empty =
-					list_empty(&adapter->bss_prio_tbl[j]
-						   .bss_prio_head);
-				spin_unlock_irqrestore(&tid_ptr->tid_tbl_lock,
-						       flags);
-				if (is_list_empty)
-					continue;
-
-				/*
-				 * Always choose the next ra we transmitted
-				 * last time, this way we pick the ra's in
-				 * round robin fashion.
-				 */
-				ptr = list_first_entry(
-					&tid_ptr->ra_list_curr->list,
-					struct mwifiex_ra_list_tbl,
-					list);
-
-				head = ptr;
-				if (ptr == (struct mwifiex_ra_list_tbl *)
-				    &tid_ptr->ra_list) {
-					/* Get next ra */
-					ptr = list_first_entry(&ptr->list,
-					      struct mwifiex_ra_list_tbl, list);
-					head = ptr;
-				}
-
-				do {
-					is_list_empty =
-						skb_queue_empty(&ptr->skb_head);
+				/* iterate over receiver addresses */
+				list_for_each_entry(ptr, &tid_ptr->ra_list,
+						    list) {
 
-					if (!is_list_empty)
+					if (!skb_queue_empty(&ptr->skb_head))
+						/* holds both locks */
 						goto found;
+				}
 
-					/* Get next ra */
-					ptr = list_first_entry(&ptr->list,
-						struct mwifiex_ra_list_tbl,
-						list);
-					if (ptr ==
-					    (struct mwifiex_ra_list_tbl *)
-					    &tid_ptr->ra_list)
-						ptr = list_first_entry(
-							&ptr->list,
-							struct mwifiex_ra_list_tbl,
-							list);
-				} while (ptr != head);
+				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
+						       flags_ra);
 			}
+		}
 
-			/* No packet at any TID for this priv. Mark as such
-			 * to skip checking TIDs for this priv (until pkt is
-			 * added).
-			 */
-			atomic_set(hqp, NO_PKT_PRIO_TID);
-
-			/* Get next bss priority node */
-			bssprio_node = list_first_entry(&bssprio_node->list,
-						struct mwifiex_bss_prio_node,
-						list);
-
-			if (bssprio_node ==
-			    (struct mwifiex_bss_prio_node *)
-			    &adapter->bss_prio_tbl[j].bss_prio_head)
-				/* Get next bss priority node */
-				bssprio_node = list_first_entry(
-						&bssprio_node->list,
-						struct mwifiex_bss_prio_node,
-						list);
-		} while (bssprio_node != bssprio_head);
+		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
+				       flags_bss);
 	}
+
 	return NULL;
 
 found:
-	spin_lock_irqsave(&priv_tmp->wmm.ra_list_spinlock, flags);
+	/* holds bss_prio_lock / ra_list_spinlock */
 	if (atomic_read(hqp) > i)
 		atomic_set(hqp, i);
-	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags);
+	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
+	spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
+			       flags_bss);
 
 	*priv = priv_tmp;
 	*tid = tos_to_tid[i];
@@ -1015,6 +943,42 @@ found:
 	return ptr;
 }
 
+/* This functions rotates ra and bss lists so packets are picked round robin.
+ *
+ * After a packet is successfully transmitted, rotate the ra list, so the ra
+ * next to the one transmitted, will come first in the list. This way we pick
+ * the ra' in a round robin fashion. Same applies to bss nodes of equal
+ * priority.
+ *
+ * Function also increments wmm.packets_out counter.
+ */
+void mwifiex_rotate_priolists(struct mwifiex_private *priv,
+			      struct mwifiex_ra_list_tbl *ra,
+			      int tid)
+{
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
+	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
+	unsigned long flags;
+
+	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+	/*
+	 * dirty trick: we remove 'head' temporarily and reinsert it after
+	 * curr bss node. imagine list to stay fixed while head is moved
+	 */
+	list_move(&tbl[priv->bss_priority].bss_prio_head,
+		  &tbl[priv->bss_priority].bss_prio_cur->list);
+	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+
+	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
+		priv->wmm.packets_out[tid]++;
+		/* same as above */
+		list_move(&tid_ptr->ra_list, &ra->list);
+	}
+	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
 /*
  * This function checks if 11n aggregation is possible.
  */
@@ -1101,20 +1065,8 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
 		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
 				       ra_list_flags);
 	} else {
-		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
-		if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
-			priv->wmm.packets_out[ptr_index]++;
-			priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
-		}
-		adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
-			list_first_entry(
-				&adapter->bss_prio_tbl[priv->bss_priority]
-				.bss_prio_cur->list,
-				struct mwifiex_bss_prio_node,
-				list);
+		mwifiex_rotate_priolists(priv, ptr, ptr_index);
 		atomic_dec(&priv->wmm.tx_pkts_queued);
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
 	}
 }
 
@@ -1218,20 +1170,8 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
 		break;
 	}
 	if (ret != -EBUSY) {
-		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
-		if (mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
-			priv->wmm.packets_out[ptr_index]++;
-			priv->wmm.tid_tbl_ptr[ptr_index].ra_list_curr = ptr;
-		}
-		adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
-			list_first_entry(
-				&adapter->bss_prio_tbl[priv->bss_priority]
-				.bss_prio_cur->list,
-				struct mwifiex_bss_prio_node,
-				list);
+		mwifiex_rotate_priolists(priv, ptr, ptr_index);
 		atomic_dec(&priv->wmm.tx_pkts_queued);
-		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
-				       ra_list_flags);
 	}
 }
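
The core of the change is the list_move() rotation in mwifiex_rotate_priolists(): instead of tracking a ra_list_curr cursor, the list head itself is detached and reinserted right after the node that was just serviced, so the next list_for_each_entry() pass naturally begins with the following entry. The stand-alone C sketch below illustrates that trick in userspace; it only mimics the semantics of the kernel's list_move() on a circular doubly linked list, and the struct and helper names (ra_node, list_add_after, and so on) are illustrative, not taken from the driver.

/* Userspace sketch (not driver code) of the list_move() rotation trick:
 * the list head is detached and reinserted right after the node that was
 * just serviced, so the next traversal starts at the following node,
 * giving round-robin order. Names below are illustrative only.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_after(struct list_head *e, struct list_head *pos)
{
	e->next = pos->next;
	e->prev = pos;
	pos->next->prev = e;
	pos->next = e;
}

/* same semantics as the kernel's list_move(list, head): remove 'list'
 * from wherever it is and reinsert it right after 'head' */
static void list_move(struct list_head *list, struct list_head *head)
{
	list_del_entry(list);
	list_add_after(list, head);
}

struct ra_node {
	struct list_head list;	/* must be first so the cast below is valid */
	char name;
};

int main(void)
{
	struct list_head ra_list;	/* list head, like tid_ptr->ra_list */
	struct ra_node a = { .name = 'A' };
	struct ra_node b = { .name = 'B' };
	struct ra_node c = { .name = 'C' };
	struct list_head *p;

	list_init(&ra_list);
	list_add_after(&a.list, ra_list.prev);	/* tail insert: A */
	list_add_after(&b.list, ra_list.prev);	/* tail insert: B */
	list_add_after(&c.list, ra_list.prev);	/* tail insert: C */

	/* traversal before rotation: A B C */
	for (p = ra_list.next; p != &ra_list; p = p->next)
		printf("%c ", ((struct ra_node *)p)->name);
	printf("\n");

	/* pretend 'B' was just transmitted: move the head behind it */
	list_move(&ra_list, &b.list);

	/* traversal now starts with the node after B: C A B */
	for (p = ra_list.next; p != &ra_list; p = p->next)
		printf("%c ", ((struct ra_node *)p)->name);
	printf("\n");

	return 0;
}

In the patch itself the same one-line list_move() rotates both the per-priority bss_prio list and the per-TID ra_list, each under its own spinlock, which is what lets mwifiex_send_single_packet() and mwifiex_send_processed_packet() drop their open-coded bss_prio_cur and ra_list_curr bookkeeping.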