author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 11:47:58 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-04 11:47:58 -0700
commit    | 6ba74014c1ab0e37af7de6f64b4eccbbae3cb9e7 (patch)
tree      | 8f3892fc44f1e403675a6d7e88fda5c70e56ee4c /drivers/net/wireless/ath/ath5k/base.c
parent    | 5abd9ccced7a726c817dd6b5b96bc933859138d1 (diff)
parent    | 3ff1c25927e3af61c6bf0e4ed959504058ae4565 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1443 commits)
phy/marvell: add 88ec048 support
igb: Program MDICNFG register prior to PHY init
e1000e: correct MAC-PHY interconnect register offset for 82579
hso: Add new product ID
can: Add driver for esd CAN-USB/2 device
l2tp: fix export of header file for userspace
can-raw: Fix skb_orphan_try handling
Revert "net: remove zap_completion_queue"
net: cleanup inclusion
phy/marvell: add 88e1121 interface mode support
u32: negative offset fix
net: Fix a typo from "dev" to "ndev"
igb: Use irq_synchronize per vector when using MSI-X
ixgbevf: fix null pointer dereference due to filter being set for VLAN 0
e1000e: Fix irq_synchronize in MSI-X case
e1000e: register pm_qos request on hardware activation
ip_fragment: fix subtracting PPPOE_SES_HLEN from mtu twice
net: Add getsockopt support for TCP thin-streams
cxgb4: update driver version
cxgb4: add new PCI IDs
...
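Of the commits listed above, the TCP thin-streams change is directly visible to userspace: it adds getsockopt() support for socket options that previously could only be set. Below is a minimal, hedged sketch of querying one of them from an application; the option names come from <linux/tcp.h>, and the numeric fallback values are assumptions for older libc headers rather than something taken from this merge.

/*
 * Hedged sketch: read back a TCP thin-stream option whose getsockopt
 * support is added by the merge above. The fallback values for the
 * option macros are assumptions for old userspace headers.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* assumed value */
#endif
#ifndef TCP_THIN_DUPACK
#define TCP_THIN_DUPACK 17		/* assumed value */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = 1;
	socklen_t len = sizeof(val);

	if (fd < 0)
		return 1;

	/* Enable the option, then read it back via the new getsockopt path. */
	setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS, &val, sizeof(val));
	if (getsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS, &val, &len) == 0)
		printf("TCP_THIN_LINEAR_TIMEOUTS = %d\n", val);

	close(fd);
	return 0;
}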
Manually fix up conflicts in:
 - drivers/net/e1000e/netdev.c: due to pm_qos registration
   infrastructure changes
 - drivers/net/phy/marvell.c: conflict between adding 88ec048 support
   and cleaning up the IDs
 - drivers/net/wireless/ipw2x00/ipw2100.c: trivial ipw2100_pm_qos_req
   conflict (registration change vs marking it static)
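The e1000e and ipw2100 items above both trace back to the pm_qos registration rework, where the request object moves from being allocated by the pm_qos core to being embedded in (or owned by) the caller. The sketch below only illustrates that shape change; the header, type, and function names (pm_qos_params.h, pm_qos_request_list, pm_qos_add_request, PM_QOS_CPU_DMA_LATENCY) are recalled from the 2.6.36-era API and should be treated as assumptions, not verified signatures.

/*
 * Hedged sketch of the pm_qos registration change behind the conflicts
 * above. Names and signatures are assumptions; check
 * include/linux/pm_qos_params.h in the tree before relying on them.
 *
 * Old shape (roughly): the core allocated the request and returned a handle:
 *
 *	static struct pm_qos_request_list *my_qos_req;
 *	my_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
 *					PM_QOS_DEFAULT_VALUE);
 *
 * New shape (roughly): the driver embeds the request object and passes it in.
 */
#include <linux/pm_qos_params.h>

static struct pm_qos_request_list my_qos_req;	/* hypothetical driver object */

static void my_qos_register(void)
{
	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void my_qos_unregister(void)
{
	pm_qos_remove_request(&my_qos_req);
}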
Diffstat (limited to 'drivers/net/wireless/ath/ath5k/base.c')
-rw-r--r-- | drivers/net/wireless/ath/ath5k/base.c | 438
1 file changed, 245 insertions(+), 193 deletions(-)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 648972df369..0d5de2574dd 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -311,7 +311,8 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
 static int ath5k_txbuf_setup(struct ath5k_softc *sc,
 		struct ath5k_buf *bf,
 		struct ath5k_txq *txq, int padsize);
-static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
+
+static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
 		struct ath5k_buf *bf)
 {
 	BUG_ON(!bf);
@@ -321,9 +322,11 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
 			PCI_DMA_TODEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
+	bf->skbaddr = 0;
+	bf->desc->ds_data = 0;
 }
 
-static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
+static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
 		struct ath5k_buf *bf)
 {
 	struct ath5k_hw *ah = sc->ah;
@@ -336,6 +339,8 @@ static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
 			PCI_DMA_FROMDEVICE);
 	dev_kfree_skb_any(bf->skb);
 	bf->skb = NULL;
+	bf->skbaddr = 0;
+	bf->desc->ds_data = 0;
 }
 
 
@@ -352,7 +357,6 @@ static void ath5k_txq_release(struct ath5k_softc *sc);
 static int ath5k_rx_start(struct ath5k_softc *sc);
 static void ath5k_rx_stop(struct ath5k_softc *sc);
 static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
-		struct ath5k_desc *ds,
 		struct sk_buff *skb,
 		struct ath5k_rx_status *rs);
 static void ath5k_tasklet_rx(unsigned long data);
@@ -384,7 +388,7 @@ static int ath5k_init(struct ath5k_softc *sc);
 static int ath5k_stop_locked(struct ath5k_softc *sc);
 static int ath5k_stop_hw(struct ath5k_softc *sc);
 static irqreturn_t ath5k_intr(int irq, void *dev_id);
-static void ath5k_tasklet_reset(unsigned long data);
+static void ath5k_reset_work(struct work_struct *work);
 
 static void ath5k_tasklet_calibrate(unsigned long data);
 
@@ -578,7 +582,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
 	spin_lock_init(&sc->block);
 
 	/* Set private data */
-	pci_set_drvdata(pdev, hw);
+	pci_set_drvdata(pdev, sc);
 
 	/* Setup interrupt handler */
 	ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
@@ -694,25 +698,23 @@ err:
 static void __devexit
 ath5k_pci_remove(struct pci_dev *pdev)
 {
-	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_softc *sc = pci_get_drvdata(pdev);
 
 	ath5k_debug_finish_device(sc);
-	ath5k_detach(pdev, hw);
+	ath5k_detach(pdev, sc->hw);
 	ath5k_hw_detach(sc->ah);
 	kfree(sc->ah);
 	free_irq(pdev->irq, sc);
 	pci_iounmap(pdev, sc->iobase);
 	pci_release_region(pdev, 0);
 	pci_disable_device(pdev);
-	ieee80211_free_hw(hw);
+	ieee80211_free_hw(sc->hw);
 }
 
 #ifdef CONFIG_PM_SLEEP
 static int ath5k_pci_suspend(struct device *dev)
 {
-	struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev));
-	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
 
 	ath5k_led_off(sc);
 	return 0;
@@ -721,8 +723,7 @@ static int ath5k_pci_suspend(struct device *dev)
 static int ath5k_pci_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
-	struct ath5k_softc *sc = hw->priv;
+	struct ath5k_softc *sc = pci_get_drvdata(pdev);
 
 	/*
 	 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -768,7 +769,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 	 * return false w/o doing anything.  MAC's that do
 	 * support it will return true w/o doing anything.
 	 */
-	ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
+
 	if (ret < 0)
 		goto err;
 	if (ret > 0)
@@ -829,11 +831,12 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 
 	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
 	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
-	tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
 	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
 	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
 	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
 
+	INIT_WORK(&sc->reset_work, ath5k_reset_work);
+
 	ret = ath5k_eeprom_read_mac(ah, mac);
 	if (ret) {
 		ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
@@ -864,6 +867,8 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 
 	ath5k_init_leds(sc);
 
+	ath5k_sysfs_register(sc);
+
 	return 0;
 err_queues:
 	ath5k_txq_release(sc);
@@ -899,6 +904,7 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
 	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
 	ath5k_unregister_leds(sc);
 
+	ath5k_sysfs_unregister(sc);
 	/*
 	 * NB: can't reclaim these until after ieee80211_ifdetach
 	 * returns because we'll get called back to reclaim node
@@ -1111,8 +1117,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
 static int
 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
 {
-	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
-		sc->curchan->center_freq, chan->center_freq);
+	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+		  "channel set, resetting (%u -> %u MHz)\n",
+		  sc->curchan->center_freq, chan->center_freq);
 
 	/*
 	 * To switch channels clear any pending DMA operations;
@@ -1228,21 +1235,23 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 	 * not get overrun under high load (as can happen with a
 	 * 5212 when ANI processing enables PHY error frames).
 	 *
-	 * To insure the last descriptor is self-linked we create
+	 * To ensure the last descriptor is self-linked we create
 	 * each descriptor as self-linked and add it to the end.  As
 	 * each additional descriptor is added the previous self-linked
-	 * entry is ``fixed'' naturally.  This should be safe even
+	 * entry is "fixed" naturally.  This should be safe even
 	 * if DMA is happening.  When processing RX interrupts we
 	 * never remove/process the last, self-linked, entry on the
-	 * descriptor list.  This insures the hardware always has
+	 * descriptor list.  This ensures the hardware always has
 	 * someplace to write a new frame.
 	 */
 	ds = bf->desc;
 	ds->ds_link = bf->daddr;	/* link to self */
 	ds->ds_data = bf->skbaddr;
-	ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
-	if (ret)
+	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
+	if (ret) {
+		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
 		return ret;
+	}
 
 	if (sc->rxlink != NULL)
 		*sc->rxlink = bf->daddr;
@@ -1347,7 +1356,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 		mrr_tries[i] = info->control.rates[i + 1].count;
 	}
 
-	ah->ah_setup_mrr_tx_desc(ah, ds,
+	ath5k_hw_setup_mrr_tx_desc(ah, ds,
 		mrr_rate[0], mrr_tries[0],
 		mrr_rate[1], mrr_tries[1],
 		mrr_rate[2], mrr_tries[2]);
@@ -1443,17 +1452,20 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
 {
 	struct ath5k_buf *bf;
 
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 	list_for_each_entry(bf, &sc->txbuf, list)
-		ath5k_txbuf_free(sc, bf);
+		ath5k_txbuf_free_skb(sc, bf);
 	list_for_each_entry(bf, &sc->rxbuf, list)
-		ath5k_rxbuf_free(sc, bf);
+		ath5k_rxbuf_free_skb(sc, bf);
 
 	/* Free memory associated with all descriptors */
 	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
+	sc->desc = NULL;
+	sc->desc_daddr = 0;
 
 	kfree(sc->bufptr);
 	sc->bufptr = NULL;
+	sc->bbuf = NULL;
 }
 
@@ -1602,7 +1614,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
 		ath5k_debug_printtxbuf(sc, bf);
 
-		ath5k_txbuf_free(sc, bf);
+		ath5k_txbuf_free_skb(sc, bf);
 
 		spin_lock_bh(&sc->txbuflock);
 		list_move_tail(&bf->list, &sc->txbuf);
@@ -1716,13 +1728,11 @@ ath5k_rx_stop(struct ath5k_softc *sc)
 	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */
 
 	ath5k_debug_printrxbuffs(sc, ah);
-
-	sc->rxlink = NULL;		/* just in case */
 }
 
 static unsigned int
-ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
-		struct sk_buff *skb, struct ath5k_rx_status *rs)
+ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
+		   struct ath5k_rx_status *rs)
 {
 	struct ath5k_hw *ah = sc->ah;
 	struct ath_common *common = ath5k_hw_common(ah);
@@ -1889,9 +1899,138 @@ static int ath5k_remove_padding(struct sk_buff *skb)
 }
 
 static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
+		    struct ath5k_rx_status *rs)
 {
 	struct ieee80211_rx_status *rxs;
+
+	/* The MAC header is padded to have 32-bit boundary if the
+	 * packet payload is non-zero. The general calculation for
+	 * padsize would take into account odd header lengths:
+	 * padsize = (4 - hdrlen % 4) % 4; However, since only
+	 * even-length headers are used, padding can only be 0 or 2
+	 * bytes and we can optimize this a bit. In addition, we must
+	 * not try to remove padding from short control frames that do
+	 * not have payload. */
+	ath5k_remove_padding(skb);
+
+	rxs = IEEE80211_SKB_RXCB(skb);
+
+	rxs->flag = 0;
+	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
+		rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+	/*
+	 * always extend the mac timestamp, since this information is
+	 * also needed for proper IBSS merging.
+	 *
+	 * XXX: it might be too late to do it here, since rs_tstamp is
+	 * 15bit only. that means TSF extension has to be done within
+	 * 32768usec (about 32ms). it might be necessary to move this to
+	 * the interrupt handler, like it is done in madwifi.
+	 *
+	 * Unfortunately we don't know when the hardware takes the rx
+	 * timestamp (beginning of phy frame, data frame, end of rx?).
+	 * The only thing we know is that it is hardware specific...
+	 * On AR5213 it seems the rx timestamp is at the end of the
+	 * frame, but i'm not sure.
+	 *
+	 * NOTE: mac80211 defines mactime at the beginning of the first
+	 * data symbol. Since we don't have any time references it's
+	 * impossible to comply to that. This affects IBSS merge only
+	 * right now, so it's not too bad...
+	 */
+	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
+	rxs->flag |= RX_FLAG_TSFT;
+
+	rxs->freq = sc->curchan->center_freq;
+	rxs->band = sc->curband->band;
+
+	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
+
+	rxs->antenna = rs->rs_antenna;
+
+	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
+		sc->stats.antenna_rx[rs->rs_antenna]++;
+	else
+		sc->stats.antenna_rx[0]++; /* invalid */
+
+	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
+	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
+
+	if (rxs->rate_idx >= 0 && rs->rs_rate ==
+	    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+		rxs->flag |= RX_FLAG_SHORTPRE;
+
+	ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
+
+	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
+
+	/* check beacons in IBSS mode */
+	if (sc->opmode == NL80211_IFTYPE_ADHOC)
+		ath5k_check_ibss_tsf(sc, skb, rxs);
+
+	ieee80211_rx(sc->hw, skb);
+}
+
+/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
+ *
+ * Check if we want to further process this frame or not. Also update
+ * statistics. Return true if we want this frame, false if not.
+ */
+static bool
+ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
+{
+	sc->stats.rx_all_count++;
+
+	if (unlikely(rs->rs_status)) {
+		if (rs->rs_status & AR5K_RXERR_CRC)
+			sc->stats.rxerr_crc++;
+		if (rs->rs_status & AR5K_RXERR_FIFO)
+			sc->stats.rxerr_fifo++;
+		if (rs->rs_status & AR5K_RXERR_PHY) {
+			sc->stats.rxerr_phy++;
+			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
+				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
+			return false;
+		}
+		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
+			/*
+			 * Decrypt error.  If the error occurred
+			 * because there was no hardware key, then
+			 * let the frame through so the upper layers
+			 * can process it.  This is necessary for 5210
+			 * parts which have no way to setup a ``clear''
+			 * key cache entry.
+			 *
+			 * XXX do key cache faulting
+			 */
+			sc->stats.rxerr_decrypt++;
+			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
+			    !(rs->rs_status & AR5K_RXERR_CRC))
+				return true;
+		}
+		if (rs->rs_status & AR5K_RXERR_MIC) {
+			sc->stats.rxerr_mic++;
+			return true;
+		}
+
+		/* let crypto-error packets fall through in MNTR */
+		if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
+		    sc->opmode != NL80211_IFTYPE_MONITOR)
+			return false;
+	}
+
+	if (unlikely(rs->rs_more)) {
+		sc->stats.rxerr_jumbo++;
+		return false;
+	}
+	return true;
+}
+
+static void
+ath5k_tasklet_rx(unsigned long data)
+{
 	struct ath5k_rx_status rs = {};
 	struct sk_buff *skb, *next_skb;
 	dma_addr_t next_skb_addr;
@@ -1901,7 +2040,6 @@ ath5k_tasklet_rx(unsigned long data)
 	struct ath5k_buf *bf;
 	struct ath5k_desc *ds;
 	int ret;
-	int rx_flag;
 
 	spin_lock(&sc->rxbuflock);
 	if (list_empty(&sc->rxbuf)) {
@@ -1909,8 +2047,6 @@ ath5k_tasklet_rx(unsigned long data)
 		goto unlock;
 	}
 	do {
-		rx_flag = 0;
-
 		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
 		BUG_ON(bf->skb == NULL);
 		skb = bf->skb;
@@ -1926,137 +2062,30 @@ ath5k_tasklet_rx(unsigned long data)
 		else if (unlikely(ret)) {
 			ATH5K_ERR(sc, "error in processing rx descriptor\n");
 			sc->stats.rxerr_proc++;
-			spin_unlock(&sc->rxbuflock);
-			return;
+			break;
 		}
 
-		sc->stats.rx_all_count++;
-
-		if (unlikely(rs.rs_status)) {
-			if (rs.rs_status & AR5K_RXERR_CRC)
-				sc->stats.rxerr_crc++;
-			if (rs.rs_status & AR5K_RXERR_FIFO)
-				sc->stats.rxerr_fifo++;
-			if (rs.rs_status & AR5K_RXERR_PHY) {
-				sc->stats.rxerr_phy++;
-				if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
-					sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
-				goto next;
-			}
-			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
-				/*
-				 * Decrypt error.  If the error occurred
-				 * because there was no hardware key, then
-				 * let the frame through so the upper layers
-				 * can process it.  This is necessary for 5210
-				 * parts which have no way to setup a ``clear''
-				 * key cache entry.
-				 *
-				 * XXX do key cache faulting
-				 */
-				sc->stats.rxerr_decrypt++;
-				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
-				    !(rs.rs_status & AR5K_RXERR_CRC))
-					goto accept;
-			}
-			if (rs.rs_status & AR5K_RXERR_MIC) {
-				rx_flag |= RX_FLAG_MMIC_ERROR;
-				sc->stats.rxerr_mic++;
-				goto accept;
-			}
+		if (ath5k_receive_frame_ok(sc, &rs)) {
+			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
 
-			/* let crypto-error packets fall through in MNTR */
-			if ((rs.rs_status &
-				~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
-			    sc->opmode != NL80211_IFTYPE_MONITOR)
+			/*
+			 * If we can't replace bf->skb with a new skb under
+			 * memory pressure, just skip this packet
+			 */
+			if (!next_skb)
 				goto next;
-		}
 
-		if (unlikely(rs.rs_more)) {
-			sc->stats.rxerr_jumbo++;
-			goto next;
+			pci_unmap_single(sc->pdev, bf->skbaddr,
+					 common->rx_bufsize,
+					 PCI_DMA_FROMDEVICE);
 
-		}
-accept:
-		next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
+			skb_put(skb, rs.rs_datalen);
 
-		/*
-		 * If we can't replace bf->skb with a new skb under memory
-		 * pressure, just skip this packet
-		 */
-		if (!next_skb)
-			goto next;
-
-		pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
-				PCI_DMA_FROMDEVICE);
-		skb_put(skb, rs.rs_datalen);
-
-		/* The MAC header is padded to have 32-bit boundary if the
-		 * packet payload is non-zero. The general calculation for
-		 * padsize would take into account odd header lengths:
-		 * padsize = (4 - hdrlen % 4) % 4; However, since only
-		 * even-length headers are used, padding can only be 0 or 2
-		 * bytes and we can optimize this a bit. In addition, we must
-		 * not try to remove padding from short control frames that do
-		 * not have payload. */
-		ath5k_remove_padding(skb);
-
-		rxs = IEEE80211_SKB_RXCB(skb);
-
-		/*
-		 * always extend the mac timestamp, since this information is
-		 * also needed for proper IBSS merging.
-		 *
-		 * XXX: it might be too late to do it here, since rs_tstamp is
-		 * 15bit only. that means TSF extension has to be done within
-		 * 32768usec (about 32ms). it might be necessary to move this to
-		 * the interrupt handler, like it is done in madwifi.
-		 *
-		 * Unfortunately we don't know when the hardware takes the rx
-		 * timestamp (beginning of phy frame, data frame, end of rx?).
-		 * The only thing we know is that it is hardware specific...
-		 * On AR5213 it seems the rx timestamp is at the end of the
-		 * frame, but i'm not sure.
-		 *
-		 * NOTE: mac80211 defines mactime at the beginning of the first
-		 * data symbol. Since we don't have any time references it's
-		 * impossible to comply to that. This affects IBSS merge only
-		 * right now, so it's not too bad...
-		 */
-		rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
-		rxs->flag = rx_flag | RX_FLAG_TSFT;
-
-		rxs->freq = sc->curchan->center_freq;
-		rxs->band = sc->curband->band;
-
-		rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
-
-		rxs->antenna = rs.rs_antenna;
-
-		if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
-			sc->stats.antenna_rx[rs.rs_antenna]++;
-		else
-			sc->stats.antenna_rx[0]++; /* invalid */
-
-		rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
-		rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
+			ath5k_receive_frame(sc, skb, &rs);
 
-		if (rxs->rate_idx >= 0 && rs.rs_rate ==
-		    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
-			rxs->flag |= RX_FLAG_SHORTPRE;
-
-		ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
-
-		ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
-
-		/* check beacons in IBSS mode */
-		if (sc->opmode == NL80211_IFTYPE_ADHOC)
-			ath5k_check_ibss_tsf(sc, skb, rxs);
-
-		ieee80211_rx(sc->hw, skb);
-
-		bf->skb = next_skb;
-		bf->skbaddr = next_skb_addr;
+			bf->skb = next_skb;
+			bf->skbaddr = next_skb_addr;
+		}
 next:
 		list_move_tail(&bf->list, &sc->rxbuf);
 	} while (ath5k_rxbuf_setup(sc, bf) == 0);
@@ -2065,8 +2094,6 @@ unlock:
 }
 
-
-
 /*************\
 * TX Handling *
 \*************/
@@ -2266,8 +2293,8 @@ err_unmap:
  * frame contents are done as needed and the slot time is
  * also adjusted based on current state.
  *
- * This is called from software irq context (beacontq or restq
- * tasklets) or user context from ath5k_beacon_config.
+ * This is called from software irq context (beacontq tasklets)
+ * or user context from ath5k_beacon_config.
  */
 static void
 ath5k_beacon_send(struct ath5k_softc *sc)
@@ -2298,7 +2325,9 @@ ath5k_beacon_send(struct ath5k_softc *sc)
 		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
 			"stuck beacon time (%u missed)\n",
 			sc->bmisscount);
-		tasklet_schedule(&sc->restq);
+		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+			  "stuck beacon, resetting\n");
+		ieee80211_queue_work(sc->hw, &sc->reset_work);
 	}
 	return;
 }
@@ -2602,12 +2631,20 @@ ath5k_stop_locked(struct ath5k_softc *sc)
 	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
 		ath5k_rx_stop(sc);
 		ath5k_hw_phy_disable(ah);
-	} else
-		sc->rxlink = NULL;
+	}
 
 	return 0;
 }
 
+static void stop_tasklets(struct ath5k_softc *sc)
+{
+	tasklet_kill(&sc->rxtq);
+	tasklet_kill(&sc->txtq);
+	tasklet_kill(&sc->calib);
+	tasklet_kill(&sc->beacontq);
+	tasklet_kill(&sc->ani_tasklet);
+}
+
 /*
  * Stop the device, grabbing the top-level lock to protect
  * against concurrent entry through ath5k_init (which can happen
@@ -2647,17 +2684,12 @@ ath5k_stop_hw(struct ath5k_softc *sc)
 			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
 				"putting device to sleep\n");
 	}
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 
 	mmiowb();
 	mutex_unlock(&sc->lock);
 
-	tasklet_kill(&sc->rxtq);
-	tasklet_kill(&sc->txtq);
-	tasklet_kill(&sc->restq);
-	tasklet_kill(&sc->calib);
-	tasklet_kill(&sc->beacontq);
-	tasklet_kill(&sc->ani_tasklet);
+	stop_tasklets(sc);
 
 	ath5k_rfkill_hw_stop(sc->ah);
@@ -2705,7 +2737,9 @@ ath5k_intr(int irq, void *dev_id)
 			 * Fatal errors are unrecoverable.
 			 * Typically these are caused by DMA errors.
 			 */
-			tasklet_schedule(&sc->restq);
+			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+				  "fatal int, resetting\n");
+			ieee80211_queue_work(sc->hw, &sc->reset_work);
 		} else if (unlikely(status & AR5K_INT_RXORN)) {
 			/*
 			 * Receive buffers are full. Either the bus is busy or
@@ -2717,8 +2751,11 @@ ath5k_intr(int irq, void *dev_id)
 			 * this guess is copied from the HAL.
 			 */
 			sc->stats.rxorn_intr++;
-			if (ah->ah_mac_srev < AR5K_SREV_AR5212)
-				tasklet_schedule(&sc->restq);
+			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
+				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
+					  "rx overrun, resetting\n");
+				ieee80211_queue_work(sc->hw, &sc->reset_work);
+			}
 			else
 				tasklet_schedule(&sc->rxtq);
 		} else {
@@ -2731,7 +2768,7 @@ ath5k_intr(int irq, void *dev_id)
 				 * RXE bit is written, but it doesn't work at
 				 * least on older hardware revs.
 				 */
-				sc->rxlink = NULL;
+				sc->stats.rxeol_intr++;
 			}
 			if (status & AR5K_INT_TXURN) {
 				/* bump tx trigger level */
@@ -2764,14 +2801,6 @@ ath5k_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void
-ath5k_tasklet_reset(unsigned long data)
-{
-	struct ath5k_softc *sc = (void *)data;
-
-	ath5k_reset(sc, sc->curchan);
-}
-
 /*
  * Periodically recalibrate the PHY to account
  * for temperature/environment changes.
@@ -2785,10 +2814,6 @@ ath5k_tasklet_calibrate(unsigned long data)
 	/* Only full calibration for now */
 	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
 
-	/* Stop queues so that calibration
-	 * doesn't interfere with tx */
-	ieee80211_stop_queues(sc->hw);
-
 	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
 		ieee80211_frequency_to_channel(sc->curchan->center_freq),
 		sc->curchan->hw_value);
@@ -2799,15 +2824,23 @@ ath5k_tasklet_calibrate(unsigned long data)
 		 * to load new gain values.
 		 */
 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
-		ath5k_reset(sc, sc->curchan);
+		ieee80211_queue_work(sc->hw, &sc->reset_work);
 	}
 	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
 		ATH5K_ERR(sc, "calibration of channel %u failed\n",
 			ieee80211_frequency_to_channel(
 				sc->curchan->center_freq));
 
-	/* Wake queues */
-	ieee80211_wake_queues(sc->hw);
+	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
+	 * doesn't. We stop the queues so that calibration doesn't interfere
+	 * with TX and don't run it as often */
+	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
+		ah->ah_cal_next_nf = jiffies +
+			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
+		ieee80211_stop_queues(sc->hw);
+		ath5k_hw_update_noise_floor(ah);
+		ieee80211_wake_queues(sc->hw);
+	}
 
 	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
 }
@@ -2895,6 +2928,8 @@ drop_packet:
 /*
  * Reset the hardware.  If chan is not NULL, then also pause rx/tx
  * and change to the given channel.
+ *
+ * This should be called with sc->lock.
  */
 static int
 ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
@@ -2904,8 +2939,11 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
 
 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
 
+	ath5k_hw_set_imr(ah, 0);
+	synchronize_irq(sc->pdev->irq);
+	stop_tasklets(sc);
+
 	if (chan) {
-		ath5k_hw_set_imr(ah, 0);
 		ath5k_txq_cleanup(sc);
 		ath5k_rx_stop(sc);
 
@@ -2926,6 +2964,10 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
 
 	ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
 
+	ah->ah_cal_next_full = jiffies;
+	ah->ah_cal_next_ani = jiffies;
+	ah->ah_cal_next_nf = jiffies;
+
 	/*
 	 * Change channels and update the h/w rate map if we're switching;
 	 * e.g. 11a to 11b/g.
@@ -2947,6 +2989,16 @@ err:
 	return ret;
 }
 
+static void ath5k_reset_work(struct work_struct *work)
+{
+	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
+		reset_work);
+
+	mutex_lock(&sc->lock);
+	ath5k_reset(sc, sc->curchan);
+	mutex_unlock(&sc->lock);
+}
+
 static int ath5k_start(struct ieee80211_hw *hw)
 {
 	return ath5k_init(hw->priv);
@@ -3360,7 +3412,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 	ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
 
-	ath5k_txbuf_free(sc, sc->bbuf);
+	ath5k_txbuf_free_skb(sc, sc->bbuf);
 	sc->bbuf->skb = skb;
 	ret = ath5k_beacon_setup(sc, sc->bbuf);
 	if (ret)
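A recurring theme in the ath5k hunks above is moving the reset path out of a tasklet (atomic context) into a work item (process context), so that ath5k_reset() can run under sc->lock while the interrupt and tasklet paths merely queue the work (here via ieee80211_queue_work()). The following is a minimal, self-contained sketch of that tasklet-to-workqueue pattern under generic names; my_dev, my_reset_work, and my_request_reset are illustrative, not the driver's symbols.

/*
 * Generic sketch of the tasklet-to-work conversion used for the reset
 * path above: work items run in process context, so the handler may
 * sleep and take a mutex, which a tasklet must never do.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_dev {
	struct mutex lock;
	struct work_struct reset_work;
};

static void my_reset_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, reset_work);

	mutex_lock(&dev->lock);		/* legal here, not in a tasklet */
	/* ... perform the actual hardware reset under the lock ... */
	mutex_unlock(&dev->lock);
}

static void my_dev_init(struct my_dev *dev)
{
	mutex_init(&dev->lock);
	INIT_WORK(&dev->reset_work, my_reset_work);
}

/* Callable from hard-irq or tasklet context: just queue the work. */
static void my_request_reset(struct my_dev *dev)
{
	schedule_work(&dev->reset_work);
}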