author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-12 18:07:07 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-12 18:07:07 -0800
commit     6be35c700f742e911ecedd07fcc43d4439922334
tree       ca9f37214d204465fcc2d79c82efd291e357c53c /drivers/net/ethernet/realtek/8139cp.c
parent     e37aa63e87bd581f9be5555ed0ba83f5295c92fc
parent     520dfe3a3645257bf83660f672c47f8558f3d4c4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
1) Allow the bridge multicast database to be dumped, monitored, and
changed via netlink. From Cong Wang.
2) RFC 5961 TCP blind data injection attack mitigation, from Eric
Dumazet.
3) Networking user namespace support from Eric W. Biederman.
4) tuntap/virtio-net multiqueue support by Jason Wang.
5) Support for checksum offload of encapsulated packets (basically,
tunneled traffic can still be checksummed by HW). From Joseph
Gasparakis.
6) Allow BPF filter access to VLAN tags, from Eric Dumazet and
Daniel Borkmann.
7) Bridge port parameters over netlink and BPDU blocking support
from Stephen Hemminger.
8) Improve data access patterns during inet socket demux by rearranging
socket layout, from Eric Dumazet.
9) TIPC protocol updates and cleanups from Ying Xue, Paul Gortmaker, and
Jon Maloy.
10) Update TCP socket hash sizing to be more in line with current day
realities. The existing heuristics were chosen a decade ago.
From Eric Dumazet.
11) Fix races, queue bloat, and excessive wakeups in ATM and
associated drivers, from Krzysztof Mazur and David Woodhouse.
12) Support DOVE (Distributed Overlay Virtual Ethernet) extensions
in VXLAN driver, from David Stevens.
13) Add "oops_only" mode to netconsole, from Amerigo Wang.
14) Support setting and querying the VEB/VEPA bridge mode via PF_BRIDGE;
also allow DCB netlink to work in namespaces other than the initial
namespace. From John Fastabend.
15) Support PTP in the Tigon3 driver, from Matt Carlson.
16) tun/vhost zero copy fixes and improvements, plus turn it on
by default, from Michael S. Tsirkin.
17) Support per-association statistics in SCTP, from Michele
Baldessari.
And many, many driver updates, cleanups, and improvements, too
numerous to mention individually.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits)
net/mlx4_en: Add support for destination MAC in steering rules
net/mlx4_en: Use generic etherdevice.h functions.
net: ethtool: Add destination MAC address to flow steering API
bridge: add support of adding and deleting mdb entries
bridge: notify mdb changes via netlink
ndisc: Unexport ndisc_{build,send}_skb().
uapi: add missing netconf.h to export list
pkt_sched: avoid requeues if possible
solos-pci: fix double-free of TX skb in DMA mode
bnx2: Fix accidental reversions.
bna: Driver Version Updated to 3.1.2.1
bna: Firmware update
bna: Add RX State
bna: Rx Page Based Allocation
bna: TX Intr Coalescing Fix
bna: Tx and Rx Optimizations
bna: Code Cleanup and Enhancements
ath9k: check pdata variable before dereferencing it
ath5k: RX timestamp is reported at end of frame
ath9k_htc: RX timestamp is reported at end of frame
...
Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--   drivers/net/ethernet/realtek/8139cp.c   75
1 file changed, 44 insertions, 31 deletions
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 609125a249d..cb6fc5a743c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp)
 {
 	unsigned tx_head = cp->tx_head;
 	unsigned tx_tail = cp->tx_tail;
+	unsigned bytes_compl = 0, pkts_compl = 0;
 
 	while (tx_tail != tx_head) {
 		struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp)
 			 le32_to_cpu(txd->opts1) & 0xffff,
 			 PCI_DMA_TODEVICE);
 
+		bytes_compl += skb->len;
+		pkts_compl++;
+
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
 				netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp)
 
 	cp->tx_tail = tx_tail;
 
+	netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
 		netif_wake_queue(cp->dev);
 }
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		wmb();
 	}
 	cp->tx_head = entry;
+
+	netdev_sent_queue(dev, skb->len);
 	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
 		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_private *cp)
 
 	cp->rx_tail = 0;
 	cp->tx_head = cp->tx_tail = 0;
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_reset_hw (struct cp_private *cp)
@@ -957,8 +966,38 @@ static void cp_reset_hw (struct cp_private *cp)
 
 static inline void cp_start_hw (struct cp_private *cp)
 {
+	dma_addr_t ring_dma;
+
 	cpw16(CpCmd, cp->cpcmd);
+
+	/*
+	 * These (at least TxRingAddr) need to be configured after the
+	 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
+	 * (C+ Command Register) recommends that these and more be configured
+	 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
+	 * it's been observed that the TxRingAddr is actually reset to garbage
+	 * when C+ mode Tx is enabled in CpCmd.
+	 */
+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	/*
+	 * Strictly speaking, the datasheet says this should be enabled
+	 * *before* setting the descriptor addresses. But what, then, would
+	 * prevent it from doing DMA to random unconfigured addresses?
+	 * This variant appears to work fine.
+	 */
 	cpw8(Cmd, RxOn | TxOn);
+
+	netdev_reset_queue(cp->dev);
 }
 
 static void cp_enable_irq(struct cp_private *cp)
@@ -969,7 +1008,6 @@ static void cp_enable_irq(struct cp_private *cp)
 static void cp_init_hw (struct cp_private *cp)
 {
 	struct net_device *dev = cp->dev;
-	dma_addr_t ring_dma;
 
 	cp_reset_hw(cp);
 
@@ -992,17 +1030,6 @@ static void cp_init_hw (struct cp_private *cp)
 
 	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);
 
 	cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1197,18 +1224,16 @@ static void cp_tx_timeout(struct net_device *dev)
 	cp_clean_rings(cp);
 	rc = cp_init_rings(cp);
 	cp_start_hw(cp);
+	cp_enable_irq(cp);
 
 	netif_wake_queue(dev);
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 
-#ifdef BROKEN
 static int cp_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct cp_private *cp = netdev_priv(dev);
-	int rc;
-	unsigned long flags;
 
 	/* check for invalid MTU, according to hardware limits */
 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
@@ -1221,22 +1246,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
 		return 0;
 	}
 
-	spin_lock_irqsave(&cp->lock, flags);
-
-	cp_stop_hw(cp);			/* stop h/w and free rings */
-	cp_clean_rings(cp);
-
+	/* network IS up, close it, reset MTU, and come up again. */
+	cp_close(dev);
 	dev->mtu = new_mtu;
-	cp_set_rxbufsize(cp);		/* set new rx buf size */
-
-	rc = cp_init_rings(cp);		/* realloc and restart h/w */
-	cp_start_hw(cp);
-
-	spin_unlock_irqrestore(&cp->lock, flags);
-
-	return rc;
+	cp_set_rxbufsize(cp);
+	return cp_open(dev);
 }
-#endif /* BROKEN */
 
 static const char mii_2_8139_map[8] = {
 	BasicModeCtrl,
@@ -1812,9 +1827,7 @@ static const struct net_device_ops cp_netdev_ops = {
 	.ndo_start_xmit		= cp_start_xmit,
 	.ndo_tx_timeout		= cp_tx_timeout,
 	.ndo_set_features	= cp_set_features,
-#ifdef BROKEN
 	.ndo_change_mtu		= cp_change_mtu,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cp_poll_controller,
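The driver change above is essentially the byte queue limits (BQL) hookup for 8139cp: netdev_sent_queue() is called in the xmit path with the number of bytes handed to hardware, netdev_completed_queue() reports completed packets and bytes from the TX completion handler, and netdev_reset_queue() clears the accounting whenever the TX ring is torn down or restarted. A minimal sketch of that pattern follows, assuming a hypothetical driver; the my_* names and ring details are illustrative, and only the three netdev_*_queue() calls are the real API from linux/netdevice.h.

/*
 * Sketch of the BQL accounting pattern used in the diff above.
 * "my_priv" and the my_* helpers are hypothetical; only
 * netdev_sent_queue(), netdev_completed_queue() and
 * netdev_reset_queue() are the real in-kernel interfaces.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct net_device *dev;
	/* ... TX ring state ... */
};

/* xmit path: tell BQL how many bytes were queued to hardware */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... map the skb and fill a TX descriptor ... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

/* TX completion path: report completed packets/bytes in one batch */
static void my_tx_complete(struct my_priv *priv)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* for each descriptor the hardware has finished with:
	 *     bytes_compl += skb->len; pkts_compl++; free the skb;
	 */

	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
}

/* ring teardown/restart: BQL state must match the now-empty ring */
static void my_stop_or_restart(struct my_priv *priv)
{
	netdev_reset_queue(priv->dev);
}

BQL uses these counters to bound how many bytes may sit in the hardware TX queue, which is why the reset calls in cp_stop_hw() and cp_start_hw() matter: stale accounting after a ring reset would otherwise throttle or stall the queue.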