author    Ben Hutchings <bhutchings@solarflare.com>    2010-12-10 01:24:16 +0000
committer Ben Hutchings <bhutchings@solarflare.com>    2010-12-10 19:53:46 +0000
commit    c04bfc6b223662c42a77727342c1df7d39e686a2 (patch)
tree      96f8623e13366b677f8437ba678f617231942d58 /drivers/net/sfc/efx.c
parent    6ecfd0c70c05531b2850649d0cec46833cd6c381 (diff)
sfc: Remove ancient support for nesting of TX stop
Long before this driver went into mainline, it had support for multiple TX
queues per port, with lockless TX enabled. Since Linux did not know anything
of this, filling up any hardware TX queue would stop the core TX queue and
multiple hardware TX queues could fill up before the scheduler reacted. Thus
it was necessary to keep a count of how many TX queues were stopped and to
wake the core TX queue only when all had free space again.

The driver also previously (ab)used the per-hardware-queue stopped flag as a
counter to deal with various things that can inhibit TX, but it no longer
does that.

Remove the per-channel tx_stop_count, tx_stop_lock and per-hardware-queue
stopped count and just use the networking core queue state directly.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
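For orientation, here is a minimal, self-contained C sketch (illustration only, not efx code; names such as fake_channel, old_stop_queue and new_stop_queue are invented) contrasting the old nested-stop counting with the direct per-queue state this patch switches to. In the real driver the direct form is provided by netif_tx_stop_queue()/netif_tx_wake_queue() and the netif_tx_*_all_queues() helpers in the networking core.

/*
 * Illustrative sketch only -- not efx code. Models why a nesting counter was
 * needed while several hardware TX queues shared one core TX queue, and why a
 * plain per-queue flag suffices once each hardware queue has its own core queue.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Old scheme: all hardware TX queues of a channel share one core queue. */
struct fake_channel {
	atomic_int tx_stop_count;	/* number of hardware queues currently full */
	bool core_queue_stopped;	/* stand-in for the shared core TX queue state */
};

static void old_stop_queue(struct fake_channel *ch)
{
	/* Only the first stopper really stops the core queue; later ones just nest. */
	if (atomic_fetch_add(&ch->tx_stop_count, 1) == 0)
		ch->core_queue_stopped = true;
}

static void old_wake_queue(struct fake_channel *ch)
{
	/* Only the last waker may restart the core queue. */
	if (atomic_fetch_sub(&ch->tx_stop_count, 1) == 1)
		ch->core_queue_stopped = false;
}

/* New scheme: one core queue per hardware queue, so stop/wake is direct
 * (netif_tx_stop_queue()/netif_tx_wake_queue() in the real driver). */
static void new_stop_queue(bool *core_txq_stopped) { *core_txq_stopped = true; }
static void new_wake_queue(bool *core_txq_stopped) { *core_txq_stopped = false; }

int main(void)
{
	struct fake_channel ch = { .core_queue_stopped = false };
	atomic_init(&ch.tx_stop_count, 0);

	old_stop_queue(&ch);		/* first hardware queue fills up */
	old_stop_queue(&ch);		/* a second one fills up as well */
	old_wake_queue(&ch);		/* core queue must stay stopped ... */
	printf("old scheme, one waker: stopped=%d\n", ch.core_queue_stopped);
	old_wake_queue(&ch);		/* ... until the last queue has room again */
	printf("old scheme, all woken: stopped=%d\n", ch.core_queue_stopped);

	bool core_txq_stopped = false;
	new_stop_queue(&core_txq_stopped);
	new_wake_queue(&core_txq_stopped);
	printf("new scheme:            stopped=%d\n", core_txq_stopped);
	return 0;
}

The counter was only needed because several hardware queues fed one core queue; once each hardware TX queue is bound to its own core queue (see the core_txq assignment in the diff below), the core queue state alone is sufficient.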
Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--  drivers/net/sfc/efx.c | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 2166c1d0a53..711449c6e67 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -461,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
 		}
 	}
 
-	spin_lock_init(&channel->tx_stop_lock);
-	atomic_set(&channel->tx_stop_count, 1);
-
 	rx_queue = &channel->rx_queue;
 	rx_queue->efx = efx;
 	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -1406,11 +1403,11 @@ static void efx_start_all(struct efx_nic *efx)
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	efx_for_each_channel(channel, efx) {
-		if (efx_dev_registered(efx))
-			efx_wake_queue(channel);
+	if (efx_dev_registered(efx))
+		netif_tx_wake_all_queues(efx->net_dev);
+
+	efx_for_each_channel(channel, efx)
 		efx_start_channel(channel);
-	}
 
 	if (efx->legacy_irq)
 		efx->legacy_irq_enabled = true;
@@ -1498,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Stop the kernel transmit interface late, so the watchdog
 	 * timer isn't ticking over the flush */
 	if (efx_dev_registered(efx)) {
-		struct efx_channel *channel;
-		efx_for_each_channel(channel, efx)
-			efx_stop_queue(channel);
+		netif_tx_stop_all_queues(efx->net_dev);
 		netif_tx_lock_bh(efx->net_dev);
 		netif_tx_unlock_bh(efx->net_dev);
 	}
@@ -1896,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
 static int efx_register_netdev(struct efx_nic *efx)
 {
 	struct net_device *net_dev = efx->net_dev;
+	struct efx_channel *channel;
 	int rc;
 
 	net_dev->watchdog_timeo = 5 * HZ;
@@ -1918,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
 	if (rc)
 		goto fail_locked;
 
+	efx_for_each_channel(channel, efx) {
+		struct efx_tx_queue *tx_queue;
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			tx_queue->core_txq = netdev_get_tx_queue(
+				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
+		}
+	}
+
 	/* Always start with carrier off; PHY events will detect the link */
 	netif_carrier_off(efx->net_dev);
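A note on the last hunk: each channel owns EFX_TXQ_TYPES hardware TX queues which now map onto a single core netdev TX queue, so the core queue index is obtained by integer division of the hardware queue number. A throwaway standalone sketch of that arithmetic (the value 2 for EFX_TXQ_TYPES is assumed here purely for illustration, not taken from this patch):

/* Hypothetical illustration of the tx_queue->queue / EFX_TXQ_TYPES mapping
 * set up in efx_register_netdev() above; standalone, not driver code. */
#include <stdio.h>

#define EFX_TXQ_TYPES 2		/* assumed value, for the sketch only */

int main(void)
{
	for (int hw_queue = 0; hw_queue < 8; hw_queue++)
		printf("hardware TX queue %d -> core TX queue %d\n",
		       hw_queue, hw_queue / EFX_TXQ_TYPES);
	return 0;
}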