author    | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 21:04:47 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-20 21:04:47 -0700
commit    | 3b59bf081622b6446db77ad06c93fe23677bc533 (patch)
tree      | 3f4bb5a27c90cc86994a1f6d3c53fbf9208003cb /drivers/net/wireless/ath/ath6kl/htc.c
parent    | e45836fafe157df137a837093037f741ad8f4c90 (diff)
parent    | bbdb32cb5b73597386913d052165423b9d736145 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking merge from David Miller:
"1) Move ixgbe driver over to purely page based buffering on receive.
From Alexander Duyck.
2) Add receive packet steering support to e1000e, from Bruce Allan.
3) Convert TCP MD5 support over to RCU, from Eric Dumazet.
4) Reduce cpu usage in handling out-of-order TCP packets on modern
systems, also from Eric Dumazet.
5) Support the IP{,V6}_UNICAST_IF socket options, making the wine
folks happy, from Erich Hoover (a usage sketch follows this list).
6) Support VLAN trunking from guests in hyperv driver, from Haiyang
Zhang.
7) Support byte-queue-limits in r8169, from Igor Maravic.
8) Code intended to support IP_RECVTOS in IP_PKTOPTIONS existed but
was never properly implemented; Jiri Benc fixed that.
9) 64-bit statistics support in r8169 and 8139too, from Junchang Wang.
10) Support kernel side dump filtering by ctmark in netfilter
ctnetlink, from Pablo Neira Ayuso.
11) Support byte-queue-limits in gianfar driver, from Paul Gortmaker.
12) Add new peek socket options to assist with socket migration, from
Pavel Emelyanov.
13) Add sch_plug packet scheduler whose queue is controlled by
userland daemons using explicit freeze and release commands. From
Shriram Rajagopalan.
14) Fix FCOE checksum offload handling on transmit, from Yi Zou."
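
Regarding item 5: IP_UNICAST_IF pins outgoing unicast traffic from a socket to one interface without binding the socket's address. The sketch below is an illustrative userspace example, not code from this merge; "eth0" is only a placeholder interface name, the fallback #define mirrors the value used in linux/in.h, and the IPv4 variant takes the interface index in network byte order.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_UNICAST_IF
#define IP_UNICAST_IF 50	/* value from linux/in.h; older headers may lack it */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	unsigned int ifindex = if_nametoindex("eth0");	/* placeholder interface */
	uint32_t val;

	if (fd < 0 || ifindex == 0) {
		perror("socket/if_nametoindex");
		return 1;
	}

	val = htonl(ifindex);	/* interface index, network byte order */
	if (setsockopt(fd, IPPROTO_IP, IP_UNICAST_IF, &val, sizeof(val)) < 0) {
		perror("setsockopt(IP_UNICAST_IF)");
		return 1;
	}

	/* subsequent connect()/sendto() on fd will use the chosen interface */
	close(fd);
	return 0;
}

The IPv6 counterpart, IPV6_UNICAST_IF, is set the same way at the IPPROTO_IPV6 level.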
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1846 commits)
Fix pppol2tp getsockname()
Remove printk from rds_sendmsg
ipv6: fix incorrent ipv6 ipsec packet fragment
cpsw: Hook up default ndo_change_mtu.
net: qmi_wwan: fix build error due to cdc-wdm dependecy
netdev: driver: ethernet: Add TI CPSW driver
netdev: driver: ethernet: add cpsw address lookup engine support
phy: add am79c874 PHY support
mlx4_core: fix race on comm channel
bonding: send igmp report for its master
fs_enet: Add MPC5125 FEC support and PHY interface selection
net: bpf_jit: fix BPF_S_LDX_B_MSH compilation
net: update the usage of CHECKSUM_UNNECESSARY
fcoe: use CHECKSUM_UNNECESSARY instead of CHECKSUM_PARTIAL on tx
net: do not do gso for CHECKSUM_UNNECESSARY in netif_needs_gso
ixgbe: Fix issues with SR-IOV loopback when flow control is disabled
net/hyperv: Fix the code handling tx busy
ixgbe: fix namespace issues when FCoE/DCB is not enabled
rtlwifi: Remove unused ETH_ADDR_LEN defines
igbvf: Use ETH_ALEN
...
Fix up fairly trivial conflicts in drivers/isdn/gigaset/interface.c and
drivers/net/usb/{Kconfig,qmi_wwan.c} as per David.
Diffstat (limited to 'drivers/net/wireless/ath/ath6kl/htc.c')
-rw-r--r-- | drivers/net/wireless/ath/ath6kl/htc.c | 213
1 file changed, 155 insertions, 58 deletions
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f3b63ca25c7..4849d99cce7 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +23,9 @@
 
 #define CALC_TXRX_PADDED_LEN(dev, len)  (__ALIGN_MASK((len), (dev)->block_mask))
 
+/* threshold to re-enable Tx bundling for an AC*/
+#define TX_RESUME_BUNDLE_THRESHOLD	1500
+
 /* Functions for Tx credit handling */
 static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
 				  struct htc_endpoint_credit_dist *ep_dist,
@@ -168,31 +172,29 @@ static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
 static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
 				 struct list_head *epdist_list)
 {
-	struct htc_endpoint_credit_dist *cur_dist_list;
+	struct htc_endpoint_credit_dist *cur_list;
 
-	list_for_each_entry(cur_dist_list, epdist_list, list) {
-		if (cur_dist_list->endpoint == ENDPOINT_0)
+	list_for_each_entry(cur_list, epdist_list, list) {
+		if (cur_list->endpoint == ENDPOINT_0)
 			continue;
 
-		if (cur_dist_list->cred_to_dist > 0) {
-			cur_dist_list->credits +=
-					cur_dist_list->cred_to_dist;
-			cur_dist_list->cred_to_dist = 0;
-			if (cur_dist_list->credits >
-			    cur_dist_list->cred_assngd)
+		if (cur_list->cred_to_dist > 0) {
+			cur_list->credits += cur_list->cred_to_dist;
+			cur_list->cred_to_dist = 0;
+
+			if (cur_list->credits > cur_list->cred_assngd)
 				ath6kl_credit_reduce(cred_info,
-						     cur_dist_list,
-						     cur_dist_list->cred_assngd);
+						     cur_list,
+						     cur_list->cred_assngd);
 
-			if (cur_dist_list->credits >
-			    cur_dist_list->cred_norm)
-				ath6kl_credit_reduce(cred_info, cur_dist_list,
-						     cur_dist_list->cred_norm);
+			if (cur_list->credits > cur_list->cred_norm)
+				ath6kl_credit_reduce(cred_info, cur_list,
+						     cur_list->cred_norm);
 
-			if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
-				if (cur_dist_list->txq_depth == 0)
+			if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
+				if (cur_list->txq_depth == 0)
 					ath6kl_credit_reduce(cred_info,
-							     cur_dist_list, 0);
+							     cur_list, 0);
 			}
 		}
 	}
@@ -460,8 +462,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
 	INIT_LIST_HEAD(&tx_compq);
 
 	ath6kl_dbg(ATH6KL_DBG_HTC,
-		"htc tx scat complete len %d entries %d\n",
-		scat_req->len, scat_req->scat_entries);
+		   "htc tx scat complete len %d entries %d\n",
+		   scat_req->len, scat_req->scat_entries);
 
 	if (scat_req->status)
 		ath6kl_err("send scatter req failed: %d\n", scat_req->status);
@@ -599,8 +601,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
 					      list);
 
 		ath6kl_dbg(ATH6KL_DBG_HTC,
-			"htc tx got packet 0x%p queue depth %d\n",
-			packet, get_queue_depth(&endpoint->txq));
+			   "htc tx got packet 0x%p queue depth %d\n",
+			   packet, get_queue_depth(&endpoint->txq));
 
 		len = CALC_TXRX_PADDED_LEN(target,
 					   packet->act_len + HTC_HDR_LENGTH);
@@ -670,6 +672,7 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
 	struct htc_packet *packet;
 	int i, len, rem_scat, cred_pad;
 	int status = 0;
+	u8 flags;
 
 	rem_scat = target->max_tx_bndl_sz;
 
@@ -696,9 +699,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
 		scat_req->scat_list[i].packet = packet;
 
 		/* prepare packet and flag message as part of a send bundle */
-		ath6kl_htc_tx_prep_pkt(packet,
-				packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE,
-				cred_pad, packet->info.tx.seqno);
+		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
+		ath6kl_htc_tx_prep_pkt(packet, flags,
+				       cred_pad, packet->info.tx.seqno);
 		/* Make sure the buffer is 4-byte aligned */
 		ath6kl_htc_tx_buf_align(&packet->buf,
 					packet->act_len + HTC_HDR_LENGTH);
@@ -744,6 +747,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
 	struct hif_scatter_req *scat_req = NULL;
 	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
 	int status;
+	u32 txb_mask;
+	u8 ac = WMM_NUM_AC;
+
+	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+	    (WMI_CONTROL_SVC != endpoint->svc_id))
+		ac = target->dev->ar->ep2ac_map[endpoint->eid];
 
 	while (true) {
 		status = 0;
@@ -759,10 +768,35 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
 		if (!scat_req) {
 			/* no scatter resources  */
 			ath6kl_dbg(ATH6KL_DBG_HTC,
-				"htc tx no more scatter resources\n");
+				   "htc tx no more scatter resources\n");
 			break;
 		}
 
+		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
+			if (WMM_AC_BE == ac)
+				/*
+				 * BE, BK have priorities and bit
+				 * positions reversed
+				 */
+				txb_mask = (1 << WMM_AC_BK);
+			else
+				/*
+				 * any AC with priority lower than
+				 * itself
+				 */
+				txb_mask = ((1 << ac) - 1);
+
+			/*
+			 * when the scatter request resources drop below a
+			 * certain threshold, disable Tx bundling for all
+			 * AC's with priority lower than the current requesting
+			 * AC. Otherwise re-enable Tx bundling for them
+			 */
+			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
+				target->tx_bndl_mask &= ~txb_mask;
+			else
+				target->tx_bndl_mask |= txb_mask;
+		}
+
 		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
 			   n_scat);
@@ -806,6 +840,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
 	struct htc_packet *packet;
 	int bundle_sent;
 	int n_pkts_bundle;
+	u8 ac = WMM_NUM_AC;
 
 	spin_lock_bh(&target->tx_lock);
 
@@ -823,6 +858,10 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
 	 */
 	INIT_LIST_HEAD(&txq);
 
+	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) ||
+	    (WMI_CONTROL_SVC != endpoint->svc_id))
+		ac = target->dev->ar->ep2ac_map[endpoint->eid];
+
 	while (true) {
 
 		if (list_empty(&endpoint->txq))
@@ -840,15 +879,18 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
 		while (true) {
 
 			/* try to send a bundle on each pass */
-			if ((target->tx_bndl_enable) &&
+			if ((target->tx_bndl_mask) &&
 			    (get_queue_depth(&txq) >=
 			    HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
 				int temp1 = 0, temp2 = 0;
 
-				ath6kl_htc_tx_bundle(endpoint, &txq,
-						     &temp1, &temp2);
-				bundle_sent += temp1;
-				n_pkts_bundle += temp2;
+				/* check if bundling is enabled for an AC */
+				if (target->tx_bndl_mask & (1 << ac)) {
+					ath6kl_htc_tx_bundle(endpoint, &txq,
+							     &temp1, &temp2);
+					bundle_sent += temp1;
+					n_pkts_bundle += temp2;
+				}
 			}
 
 			if (list_empty(&txq))
@@ -867,6 +909,26 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
 
 		endpoint->ep_st.tx_bundles += bundle_sent;
 		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;
+
+		/*
+		 * if an AC has bundling disabled and no tx bundling
+		 * has occured continously for a certain number of TX,
+		 * enable tx bundling for this AC
+		 */
+		if (!bundle_sent) {
+			if (!(target->tx_bndl_mask & (1 << ac)) &&
+			    (ac < WMM_NUM_AC)) {
+				if (++target->ac_tx_count[ac] >=
+				    TX_RESUME_BUNDLE_THRESHOLD) {
+					target->ac_tx_count[ac] = 0;
+					target->tx_bndl_mask |= (1 << ac);
+				}
+			}
+		} else {
+			/* tx bundling will reset the counter */
+			if (ac < WMM_NUM_AC)
+				target->ac_tx_count[ac] = 0;
+		}
 	}
 
 	endpoint->tx_proc_cnt = 0;
@@ -979,8 +1041,8 @@ static int htc_setup_tx_complete(struct htc_target *target)
 		memcpy(&setup_comp_ext->flags, &flags,
 		       sizeof(setup_comp_ext->flags));
 		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
-				sizeof(struct htc_setup_comp_ext_msg),
-				ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+				 sizeof(struct htc_setup_comp_ext_msg),
+				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
 
 	} else {
 		struct htc_setup_comp_msg *setup_comp;
@@ -988,8 +1050,8 @@ static int htc_setup_tx_complete(struct htc_target *target)
 		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
 		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
 		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
-				sizeof(struct htc_setup_comp_msg),
-				ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
+				 sizeof(struct htc_setup_comp_msg),
+				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
 	}
 
 	/* we want synchronous operation */
@@ -1088,9 +1150,9 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
 		packet->status = -ECANCELED;
 		list_del(&packet->list);
 		ath6kl_dbg(ATH6KL_DBG_HTC,
-			"htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
-			packet, packet->act_len,
-			packet->endpoint, packet->info.tx.tag);
+			   "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
+			   packet, packet->act_len,
+			   packet->endpoint, packet->info.tx.tag);
 
 		INIT_LIST_HEAD(&container);
 		list_add_tail(&packet->list, &container);
@@ -1490,7 +1552,7 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
 
 	if (packets->act_len > 0) {
 		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
-			packets->act_len + HTC_HDR_LENGTH);
+			   packets->act_len + HTC_HDR_LENGTH);
 
 		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
 				"htc rx unexpected endpoint 0 message", "",
@@ -1609,8 +1671,8 @@ static int htc_parse_trailer(struct htc_target *target,
 		}
 
 		lk_ahd = (struct htc_lookahead_report *) record_buf;
-		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
-		    && next_lk_ahds) {
+		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
+		    next_lk_ahds) {
 
 			ath6kl_dbg(ATH6KL_DBG_HTC,
 				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
@@ -2038,13 +2100,13 @@ fail_rx:
 	list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
 		list_del(&packet->list);
 		htc_reclaim_rxbuf(target, packet,
-				&target->endpoint[packet->endpoint]);
+				  &target->endpoint[packet->endpoint]);
 	}
 
 	list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
 		list_del(&packet->list);
 		htc_reclaim_rxbuf(target, packet,
-				&target->endpoint[packet->endpoint]);
+				  &target->endpoint[packet->endpoint]);
 	}
 
 	return status;
@@ -2062,6 +2124,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
 	enum htc_endpoint_id id;
 	int n_fetched = 0;
 
+	INIT_LIST_HEAD(&comp_pktq);
 	*num_pkts = 0;
 
 	/*
@@ -2175,11 +2238,11 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
 	u32 look_ahead;
 
 	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
-			       HTC_TARGET_RESPONSE_TIMEOUT))
+				       HTC_TARGET_RESPONSE_TIMEOUT))
 		return NULL;
 
 	ath6kl_dbg(ATH6KL_DBG_HTC,
-		"htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
+		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
 
 	htc_hdr = (struct htc_frame_hdr *)&look_ahead;
 
@@ -2244,7 +2307,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
 	depth = get_queue_depth(pkt_queue);
 
 	ath6kl_dbg(ATH6KL_DBG_HTC,
-		"htc rx add multiple ep id %d cnt %d len %d\n",
+		   "htc rx add multiple ep id %d cnt %d len %d\n",
 		first_pkt->endpoint, depth, first_pkt->buf_len);
 
 	endpoint = &target->endpoint[first_pkt->endpoint];
@@ -2270,8 +2333,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
 	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
 		if (target->ep_waiting == first_pkt->endpoint) {
 			ath6kl_dbg(ATH6KL_DBG_HTC,
-				"htc rx blocked on ep %d, unblocking\n",
-				target->ep_waiting);
+				   "htc rx blocked on ep %d, unblocking\n",
+				   target->ep_waiting);
 			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
 			target->ep_waiting = ENDPOINT_MAX;
 			rx_unblock = true;
@@ -2308,7 +2371,21 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
 			   "htc rx flush pkt 0x%p len %d ep %d\n",
 			   packet, packet->buf_len, packet->endpoint);
 
-		dev_kfree_skb(packet->pkt_cntxt);
+		/*
+		 * packets in rx_bufq of endpoint 0 have originally
+		 * been queued from target->free_ctrl_rxbuf where
+		 * packet and packet->buf_start are allocated
+		 * separately using kmalloc(). For other endpoint
+		 * rx_bufq, it is allocated as skb where packet is
+		 * skb->head. Take care of this difference while freeing
+		 * the memory.
+		 */
+		if (packet->endpoint == ENDPOINT_0) {
+			kfree(packet->buf_start);
+			kfree(packet);
+		} else {
+			dev_kfree_skb(packet->pkt_cntxt);
+		}
 		spin_lock_bh(&target->rx_lock);
 	}
 	spin_unlock_bh(&target->rx_lock);
@@ -2327,6 +2404,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
 	enum htc_endpoint_id assigned_ep = ENDPOINT_MAX;
 	unsigned int max_msg_sz = 0;
 	int status = 0;
+	u16 msg_id;
 
 	ath6kl_dbg(ATH6KL_DBG_HTC,
 		   "htc connect service target 0x%p service id 0x%x\n",
@@ -2370,9 +2448,10 @@ int ath6kl_htc_conn_service(struct htc_target *target,
 	}
 
 	resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf;
+	msg_id = le16_to_cpu(resp_msg->msg_id);
 
-	if ((le16_to_cpu(resp_msg->msg_id) != HTC_MSG_CONN_SVC_RESP_ID)
-	    || (rx_pkt->act_len < sizeof(*resp_msg))) {
+	if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) ||
+	    (rx_pkt->act_len < sizeof(*resp_msg))) {
 		status = -ENOMEM;
 		goto fail_tx;
 	}
@@ -2419,6 +2498,15 @@ int ath6kl_htc_conn_service(struct htc_target *target,
 	endpoint->cred_dist.endpoint = assigned_ep;
 	endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
 
+	switch (endpoint->svc_id) {
+	case WMI_DATA_BK_SVC:
+		endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3;
+		break;
+	default:
+		endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
+		break;
+	}
+
 	if (conn_req->max_rxmsg_sz) {
 		/*
 		 * Override cred_per_msg calculation, this optimizes
@@ -2516,7 +2604,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
 		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);
 
 	if (target->max_tx_bndl_sz)
-		target->tx_bndl_enable = true;
+		/* tx_bndl_mask is enabled per AC, each has 1 bit */
+		target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;
 
 	if (target->max_rx_bndl_sz)
 		target->rx_bndl_enable = true;
@@ -2531,7 +2620,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
 		 * padding will spill into the next credit buffer
 		 * which is fatal.
 		 */
-		target->tx_bndl_enable = false;
+		target->tx_bndl_mask = 0;
 	}
 }
 
@@ -2543,6 +2632,12 @@ int ath6kl_htc_wait_target(struct htc_target *target)
 	struct htc_service_connect_resp resp;
 	int status;
 
+	/* FIXME: remove once USB support is implemented */
+	if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
+		ath6kl_err("HTC doesn't support USB yet. Patience!\n");
+		return -EOPNOTSUPP;
+	}
+
 	/* we should be getting 1 control message that the target is ready */
 	packet = htc_wait_for_ctrl_msg(target);
 
@@ -2582,8 +2677,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
 	}
 
 	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
-		  (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
-		  target->htc_tgt_ver);
+		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
+		   target->htc_tgt_ver);
 
 	if (target->msg_per_bndl_max > 0)
 		htc_setup_msg_bndl(target);
@@ -2772,17 +2867,19 @@ void ath6kl_htc_cleanup(struct htc_target *target)
 {
 	struct htc_packet *packet, *tmp_packet;
 
-	ath6kl_hif_cleanup_scatter(target->dev->ar);
+	/* FIXME: remove check once USB support is implemented */
+	if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
+		ath6kl_hif_cleanup_scatter(target->dev->ar);
 
 	list_for_each_entry_safe(packet, tmp_packet,
-				&target->free_ctrl_txbuf, list) {
+				 &target->free_ctrl_txbuf, list) {
 		list_del(&packet->list);
 		kfree(packet->buf_start);
 		kfree(packet);
 	}
 
 	list_for_each_entry_safe(packet, tmp_packet,
-				&target->free_ctrl_rxbuf, list) {
+				 &target->free_ctrl_rxbuf, list) {
 		list_del(&packet->list);
 		kfree(packet->buf_start);
 		kfree(packet);