Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c            | 304
-rw-r--r--  drivers/net/sfc/efx.h            |   9
-rw-r--r--  drivers/net/sfc/ethtool.c        | 167
-rw-r--r--  drivers/net/sfc/falcon.c         | 200
-rw-r--r--  drivers/net/sfc/falcon_boards.c  |  35
-rw-r--r--  drivers/net/sfc/falcon_xmac.c    |   5
-rw-r--r--  drivers/net/sfc/io.h             |  37
-rw-r--r--  drivers/net/sfc/mcdi.c           |  98
-rw-r--r--  drivers/net/sfc/mcdi_mac.c       |   8
-rw-r--r--  drivers/net/sfc/mcdi_phy.c       |  41
-rw-r--r--  drivers/net/sfc/mdio_10g.c       |  39
-rw-r--r--  drivers/net/sfc/mdio_10g.h       |   3
-rw-r--r--  drivers/net/sfc/mtd.c            |  23
-rw-r--r--  drivers/net/sfc/net_driver.h     |  88
-rw-r--r--  drivers/net/sfc/nic.c            | 553
-rw-r--r--  drivers/net/sfc/nic.h            |   9
-rw-r--r--  drivers/net/sfc/qt202x_phy.c     |  42
-rw-r--r--  drivers/net/sfc/rx.c             | 469
-rw-r--r--  drivers/net/sfc/selftest.c       | 154
-rw-r--r--  drivers/net/sfc/siena.c          |  68
-rw-r--r--  drivers/net/sfc/tenxpress.c      |  12
-rw-r--r--  drivers/net/sfc/tx.c             |  41
-rw-r--r--  drivers/net/sfc/workarounds.h    |   2
23 files changed, 1527 insertions(+), 880 deletions(-)
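
Most of this patch converts the driver's private EFX_LOG()/EFX_ERR()/EFX_TRACE() macros to the kernel's netif_dbg()/netif_err()/netif_vdbg() helpers, which gate each message on a per-device msg_enable bitmap of NETIF_MSG_* flags, settable via the new "debug" module parameter and ethtool's get/set_msglevel. A minimal sketch of that gating follows; it is illustrative, not the exact <linux/netdevice.h> definitions.

/* Sketch: how netif_*() logging gates on msg_enable.  struct
 * sketch_priv stands in for struct efx_nic, whose msg_enable field
 * this patch adds. */
#include <linux/netdevice.h>

struct sketch_priv {
	u32 msg_enable;		/* bitmap of NETIF_MSG_* flags */
};

static void sketch_log(struct sketch_priv *priv, struct net_device *dev)
{
	/* Emitted only if NETIF_MSG_PROBE is set in priv->msg_enable;
	 * netif_msg_probe(priv) tests exactly that bit. */
	netif_dbg(priv, probe, dev, "creating NIC\n");

	/* Same gating, at KERN_ERR severity. */
	netif_err(priv, hw, dev, "hardware fault detected\n");
}
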
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 15646052723..ba674c5ca29 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -27,6 +27,7 @@
#include "nic.h"
#include "mcdi.h"
+#include "workarounds.h"
/**************************************************************************
*
@@ -92,13 +93,6 @@ const char *efx_reset_type_names[] = {
#define EFX_MAX_MTU (9 * 1024)
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -195,6 +189,13 @@ module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
"Threshold score for increasing IRQ moderation");
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
/**************************************************************************
*
* Utility functions and prototypes
@@ -278,16 +279,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
{
struct efx_channel *channel =
container_of(napi, struct efx_channel, napi_str);
+ struct efx_nic *efx = channel->efx;
int spent;
- EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
- channel->channel, raw_smp_processor_id());
+ netif_vdbg(efx, intr, efx->net_dev,
+ "channel %d NAPI poll executing on CPU %d\n",
+ channel->channel, raw_smp_processor_id());
spent = efx_process_channel(channel, budget);
if (spent < budget) {
- struct efx_nic *efx = channel->efx;
-
if (channel->channel < efx->n_rx_channels &&
efx->irq_rx_adaptive &&
unlikely(++channel->irq_count == 1000)) {
@@ -363,7 +364,8 @@ void efx_process_channel_now(struct efx_channel *channel)
*/
static int efx_probe_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "chan %d create event queue\n", channel->channel);
return efx_nic_probe_eventq(channel);
}
@@ -371,7 +373,8 @@ static int efx_probe_eventq(struct efx_channel *channel)
/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
channel->eventq_read_ptr = 0;
@@ -380,14 +383,16 @@ static void efx_init_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
}
static void efx_remove_eventq(struct efx_channel *channel)
{
- EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "chan %d remove event queue\n", channel->channel);
efx_nic_remove_eventq(channel);
}
@@ -404,7 +409,8 @@ static int efx_probe_channel(struct efx_channel *channel)
struct efx_rx_queue *rx_queue;
int rc;
- EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
+ netif_dbg(channel->efx, probe, channel->efx->net_dev,
+ "creating channel %d\n", channel->channel);
rc = efx_probe_eventq(channel);
if (rc)
@@ -474,12 +480,15 @@ static void efx_init_channels(struct efx_nic *efx)
*/
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_hash_size +
efx->type->rx_buffer_padding);
- efx->rx_buffer_order = get_order(efx->rx_buffer_len);
+ efx->rx_buffer_order = get_order(efx->rx_buffer_len +
+ sizeof(struct efx_rx_page_state));
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "init chan %d\n", channel->channel);
efx_init_eventq(channel);
@@ -506,7 +515,8 @@ static void efx_start_channel(struct efx_channel *channel)
{
struct efx_rx_queue *rx_queue;
- EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
+ netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+ "starting chan %d\n", channel->channel);
/* The interrupt handler for this channel may set work_pending
* as soon as we enable it. Make sure it's cleared before
@@ -515,11 +525,11 @@ static void efx_start_channel(struct efx_channel *channel)
channel->enabled = true;
smp_wmb();
- napi_enable(&channel->napi_str);
-
- /* Load up RX descriptors */
+ /* Fill the queues before enabling NAPI */
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fast_push_rx_descriptors(rx_queue);
+
+ napi_enable(&channel->napi_str);
}
/* This disables event queue processing and packet transmission.
@@ -528,21 +538,14 @@ static void efx_start_channel(struct efx_channel *channel)
*/
static void efx_stop_channel(struct efx_channel *channel)
{
- struct efx_rx_queue *rx_queue;
-
if (!channel->enabled)
return;
- EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
+ netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
+ "stop chan %d\n", channel->channel);
channel->enabled = false;
napi_disable(&channel->napi_str);
-
- /* Ensure that any worker threads have exited or will be no-ops */
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- spin_lock_bh(&rx_queue->add_lock);
- spin_unlock_bh(&rx_queue->add_lock);
- }
}
static void efx_fini_channels(struct efx_nic *efx)
@@ -556,13 +559,24 @@ static void efx_fini_channels(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
rc = efx_nic_flush_queues(efx);
- if (rc)
- EFX_ERR(efx, "failed to flush queues\n");
- else
- EFX_LOG(efx, "successfully flushed all queues\n");
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset. */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
efx_for_each_channel(channel, efx) {
- EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "shut down chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
@@ -577,7 +591,8 @@ static void efx_remove_channel(struct efx_channel *channel)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
+ netif_dbg(channel->efx, drv, channel->efx->net_dev,
+ "destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
@@ -586,9 +601,9 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_remove_eventq(channel);
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
- queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+ mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
/**************************************************************************
@@ -628,12 +643,13 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up) {
- EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
- link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
+ netif_info(efx, link, efx->net_dev,
+ "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ link_state->speed, link_state->fd ? "full" : "half",
+ efx->net_dev->mtu,
+ (efx->promiscuous ? " [PROMISC]" : ""));
} else {
- EFX_INFO(efx, "link down\n");
+ netif_info(efx, link, efx->net_dev, "link down\n");
}
}
@@ -737,7 +753,7 @@ static int efx_probe_port(struct efx_nic *efx)
{
int rc;
- EFX_LOG(efx, "create port\n");
+ netif_dbg(efx, probe, efx->net_dev, "create port\n");
if (phy_flash_cfg)
efx->phy_mode = PHY_MODE_SPECIAL;
@@ -751,15 +767,16 @@ static int efx_probe_port(struct efx_nic *efx)
if (is_valid_ether_addr(efx->mac_address)) {
memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
} else {
- EFX_ERR(efx, "invalid MAC address %pM\n",
- efx->mac_address);
+ netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
+ efx->mac_address);
if (!allow_bad_hwaddr) {
rc = -EINVAL;
goto err;
}
random_ether_addr(efx->net_dev->dev_addr);
- EFX_INFO(efx, "using locally-generated MAC %pM\n",
- efx->net_dev->dev_addr);
+ netif_info(efx, probe, efx->net_dev,
+ "using locally-generated MAC %pM\n",
+ efx->net_dev->dev_addr);
}
return 0;
@@ -773,7 +790,7 @@ static int efx_init_port(struct efx_nic *efx)
{
int rc;
- EFX_LOG(efx, "init port\n");
+ netif_dbg(efx, drv, efx->net_dev, "init port\n");
mutex_lock(&efx->mac_lock);
@@ -804,7 +821,7 @@ fail1:
static void efx_start_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "start port\n");
+ netif_dbg(efx, ifup, efx->net_dev, "start port\n");
BUG_ON(efx->port_enabled);
mutex_lock(&efx->mac_lock);
@@ -821,7 +838,7 @@ static void efx_start_port(struct efx_nic *efx)
/* Prevent efx_mac_work() and efx_monitor() from working */
static void efx_stop_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "stop port\n");
+ netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
mutex_lock(&efx->mac_lock);
efx->port_enabled = false;
@@ -836,7 +853,7 @@ static void efx_stop_port(struct efx_nic *efx)
static void efx_fini_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "shut down port\n");
+ netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
if (!efx->port_initialized)
return;
@@ -850,7 +867,7 @@ static void efx_fini_port(struct efx_nic *efx)
static void efx_remove_port(struct efx_nic *efx)
{
- EFX_LOG(efx, "destroying port\n");
+ netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
efx->type->remove_port(efx);
}
@@ -868,11 +885,12 @@ static int efx_init_io(struct efx_nic *efx)
dma_addr_t dma_mask = efx->type->max_dma_mask;
int rc;
- EFX_LOG(efx, "initialising I/O\n");
+ netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
rc = pci_enable_device(pci_dev);
if (rc) {
- EFX_ERR(efx, "failed to enable PCI device\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to enable PCI device\n");
goto fail1;
}
@@ -890,39 +908,45 @@ static int efx_init_io(struct efx_nic *efx)
dma_mask >>= 1;
}
if (rc) {
- EFX_ERR(efx, "could not find a suitable DMA mask\n");
+ netif_err(efx, probe, efx->net_dev,
+ "could not find a suitable DMA mask\n");
goto fail2;
}
- EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
+ netif_dbg(efx, probe, efx->net_dev,
+ "using DMA mask %llx\n", (unsigned long long) dma_mask);
rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
if (rc) {
/* pci_set_consistent_dma_mask() is not *allowed* to
* fail with a mask that pci_set_dma_mask() accepted,
* but just in case...
*/
- EFX_ERR(efx, "failed to set consistent DMA mask\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to set consistent DMA mask\n");
goto fail2;
}
efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
if (rc) {
- EFX_ERR(efx, "request for memory BAR failed\n");
+ netif_err(efx, probe, efx->net_dev,
+ "request for memory BAR failed\n");
rc = -EIO;
goto fail3;
}
efx->membase = ioremap_nocache(efx->membase_phys,
efx->type->mem_map_size);
if (!efx->membase) {
- EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
+ netif_err(efx, probe, efx->net_dev,
+ "could not map memory BAR at %llx+%x\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size);
rc = -ENOMEM;
goto fail4;
}
- EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
+ netif_dbg(efx, probe, efx->net_dev,
+ "memory BAR at %llx+%x (virtual %p)\n",
+ (unsigned long long)efx->membase_phys,
+ efx->type->mem_map_size, efx->membase);
return 0;
@@ -938,7 +962,7 @@ static int efx_init_io(struct efx_nic *efx)
static void efx_fini_io(struct efx_nic *efx)
{
- EFX_LOG(efx, "shutting down I/O\n");
+ netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
if (efx->membase) {
iounmap(efx->membase);
@@ -1002,9 +1026,11 @@ static void efx_probe_interrupts(struct efx_nic *efx)
xentries[i].entry = i;
rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
if (rc > 0) {
- EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
- " available (%d < %d).\n", rc, n_channels);
- EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Insufficient MSI-X vectors"
+ " available (%d < %d).\n", rc, n_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "WARNING: Performance may be reduced.\n");
EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
rc = pci_enable_msix(efx->pci_dev, xentries,
@@ -1028,7 +1054,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
} else {
/* Fall back to single channel MSI */
efx->interrupt_mode = EFX_INT_MODE_MSI;
- EFX_ERR(efx, "could not enable MSI-X\n");
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
}
}
@@ -1041,7 +1068,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
if (rc == 0) {
efx->channel[0].irq = efx->pci_dev->irq;
} else {
- EFX_ERR(efx, "could not enable MSI\n");
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI\n");
efx->interrupt_mode = EFX_INT_MODE_LEGACY;
}
}
@@ -1093,9 +1121,10 @@ static void efx_set_channels(struct efx_nic *efx)
static int efx_probe_nic(struct efx_nic *efx)
{
+ size_t i;
int rc;
- EFX_LOG(efx, "creating NIC\n");
+ netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
/* Carry out hardware-type specific initialisation */
rc = efx->type->probe(efx);
@@ -1106,6 +1135,11 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
efx_probe_interrupts(efx);
+ if (efx->n_channels > 1)
+ get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] = i % efx->n_rx_channels;
+
efx_set_channels(efx);
efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
@@ -1117,7 +1151,7 @@ static int efx_probe_nic(struct efx_nic *efx)
static void efx_remove_nic(struct efx_nic *efx)
{
- EFX_LOG(efx, "destroying NIC\n");
+ netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
efx_remove_interrupts(efx);
efx->type->remove(efx);
@@ -1137,14 +1171,14 @@ static int efx_probe_all(struct efx_nic *efx)
/* Create NIC */
rc = efx_probe_nic(efx);
if (rc) {
- EFX_ERR(efx, "failed to create NIC\n");
+ netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
goto fail1;
}
/* Create port */
rc = efx_probe_port(efx);
if (rc) {
- EFX_ERR(efx, "failed to create port\n");
+ netif_err(efx, probe, efx->net_dev, "failed to create port\n");
goto fail2;
}
@@ -1152,8 +1186,9 @@ static int efx_probe_all(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
rc = efx_probe_channel(channel);
if (rc) {
- EFX_ERR(efx, "failed to create channel %d\n",
- channel->channel);
+ netif_err(efx, probe, efx->net_dev,
+ "failed to create channel %d\n",
+ channel->channel);
goto fail3;
}
}
@@ -1233,15 +1268,8 @@ static void efx_start_all(struct efx_nic *efx)
* since we're holding the rtnl_lock at this point. */
static void efx_flush_all(struct efx_nic *efx)
{
- struct efx_rx_queue *rx_queue;
-
/* Make sure the hardware monitor is stopped */
cancel_delayed_work_sync(&efx->monitor_work);
-
- /* Ensure that all RX slow refills are complete. */
- efx_for_each_rx_queue(rx_queue, efx)
- cancel_delayed_work_sync(&rx_queue->work);
-
/* Stop scheduled port reconfigurations */
cancel_work_sync(&efx->mac_work);
}
@@ -1356,8 +1384,9 @@ static void efx_monitor(struct work_struct *data)
struct efx_nic *efx = container_of(data, struct efx_nic,
monitor_work.work);
- EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
- raw_smp_processor_id());
+ netif_vdbg(efx, timer, efx->net_dev,
+ "hardware monitor executing on CPU %d\n",
+ raw_smp_processor_id());
BUG_ON(efx->type->monitor == NULL);
/* If the mac_lock is already held then it is likely a port
@@ -1464,8 +1493,8 @@ static int efx_net_open(struct net_device *net_dev)
struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx);
- EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
- raw_smp_processor_id());
+ netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+ raw_smp_processor_id());
if (efx->state == STATE_DISABLED)
return -EIO;
@@ -1490,8 +1519,8 @@ static int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
- raw_smp_processor_id());
+ netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+ raw_smp_processor_id());
if (efx->state != STATE_DISABLED) {
/* Stop the device and flush all the channels */
@@ -1504,11 +1533,10 @@ static int efx_net_stop(struct net_device *net_dev)
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
+static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_mac_stats *mac_stats = &efx->mac_stats;
- struct net_device_stats *stats = &net_dev->stats;
spin_lock_bh(&efx->stats_lock);
efx->type->update_stats(efx);
@@ -1530,11 +1558,8 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
stats->tx_window_errors = mac_stats->tx_late_collision;
stats->rx_errors = (stats->rx_length_errors +
- stats->rx_over_errors +
stats->rx_crc_errors +
stats->rx_frame_errors +
- stats->rx_fifo_errors +
- stats->rx_missed_errors +
mac_stats->rx_symbol_error);
stats->tx_errors = (stats->tx_window_errors +
mac_stats->tx_bad);
@@ -1547,8 +1572,9 @@ static void efx_watchdog(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
- efx->port_enabled);
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX stuck with port_enabled=%d: resetting channels\n",
+ efx->port_enabled);
efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
@@ -1567,7 +1593,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
efx_stop_all(efx);
- EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_fini_channels(efx);
@@ -1593,8 +1619,9 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) {
- EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
- new_addr);
+ netif_err(efx, drv, efx->net_dev,
+ "invalid ethernet MAC address requested: %pM\n",
+ new_addr);
return -EINVAL;
}
@@ -1645,7 +1672,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
- .ndo_get_stats = efx_net_stats,
+ .ndo_get_stats64 = efx_net_stats,
.ndo_tx_timeout = efx_watchdog,
.ndo_start_xmit = efx_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
@@ -1697,7 +1724,6 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
- SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
/* Clear MAC statistics */
@@ -1722,7 +1748,8 @@ static int efx_register_netdev(struct efx_nic *efx)
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
if (rc) {
- EFX_ERR(efx, "failed to init net dev attributes\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
goto fail_registered;
}
@@ -1730,7 +1757,7 @@ static int efx_register_netdev(struct efx_nic *efx)
fail_locked:
rtnl_unlock();
- EFX_ERR(efx, "could not register net dev\n");
+ netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc;
fail_registered:
@@ -1795,7 +1822,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
rc = efx->type->init(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise NIC\n");
+ netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
goto fail;
}
@@ -1807,7 +1834,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (rc)
goto fail;
if (efx->phy_op->reconfigure(efx))
- EFX_ERR(efx, "could not restore PHY settings\n");
+ netif_err(efx, drv, efx->net_dev,
+ "could not restore PHY settings\n");
}
efx->mac_op->reconfigure(efx);
@@ -1840,13 +1868,14 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
int rc, rc2;
bool disabled;
- EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
efx_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
- EFX_ERR(efx, "failed to reset hardware\n");
+ netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
goto out;
}
@@ -1871,10 +1900,10 @@ out:
if (disabled) {
dev_close(efx->net_dev);
- EFX_ERR(efx, "has been disabled\n");
+ netif_err(efx, drv, efx->net_dev, "has been disabled\n");
efx->state = STATE_DISABLED;
} else {
- EFX_LOG(efx, "reset complete\n");
+ netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
}
return rc;
}
@@ -1886,10 +1915,14 @@ static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+ if (efx->reset_pending == RESET_TYPE_NONE)
+ return;
+
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flag set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
- EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
+ netif_info(efx, drv, efx->net_dev,
+ "scheduled reset quenched. NIC not RUNNING\n");
return;
}
@@ -1903,7 +1936,8 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
enum reset_type method;
if (efx->reset_pending != RESET_TYPE_NONE) {
- EFX_INFO(efx, "quenching already scheduled reset\n");
+ netif_info(efx, drv, efx->net_dev,
+ "quenching already scheduled reset\n");
return;
}
@@ -1927,10 +1961,12 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
}
if (method != type)
- EFX_LOG(efx, "scheduling %s reset for %s\n",
- RESET_TYPE(method), RESET_TYPE(type));
+ netif_dbg(efx, drv, efx->net_dev,
+ "scheduling %s reset for %s\n",
+ RESET_TYPE(method), RESET_TYPE(type));
else
- EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
+ netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+ RESET_TYPE(method));
efx->reset_pending = method;
@@ -2017,6 +2053,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
INIT_WORK(&efx->reset_work, efx_reset_work);
INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
efx->pci_dev = pci_dev;
+ efx->msg_enable = debug;
efx->state = STATE_INIT;
efx->reset_pending = RESET_TYPE_NONE;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
@@ -2052,8 +2089,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
rx_queue->queue = i;
rx_queue->channel = &efx->channel[0]; /* for safety */
rx_queue->buffer = NULL;
- spin_lock_init(&rx_queue->add_lock);
- INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+ setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+ (unsigned long)rx_queue);
}
efx->type = type;
@@ -2136,7 +2173,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_pci_remove_main(efx);
efx_fini_io(efx);
- EFX_LOG(efx, "shutdown successful\n");
+ netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
pci_set_drvdata(pci_dev, NULL);
efx_fini_struct(efx);
@@ -2161,13 +2198,15 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx->type->init(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise NIC\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise NIC\n");
goto fail3;
}
rc = efx_init_port(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise port\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise port\n");
goto fail4;
}
@@ -2223,11 +2262,13 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
NETIF_F_HIGHDMA | NETIF_F_TSO);
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
+ SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev);
if (rc)
goto fail1;
- EFX_INFO(efx, "Solarflare Communications NIC detected\n");
+ netif_info(efx, probe, efx->net_dev,
+ "Solarflare Communications NIC detected\n");
/* Set up basic I/O (BAR mappings etc) */
rc = efx_init_io(efx);
@@ -2265,7 +2306,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
}
if (rc) {
- EFX_ERR(efx, "Could not reset NIC\n");
+ netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
goto fail4;
}
@@ -2277,7 +2318,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail5;
- EFX_LOG(efx, "initialisation successful\n");
+ netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
rtnl_lock();
efx_mtd_probe(efx); /* allowed to fail */
@@ -2293,7 +2334,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
efx_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
- EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
+ netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
}
@@ -2332,6 +2373,9 @@ static int efx_pm_thaw(struct device *dev)
efx->type->resume_wol(efx);
+ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+ queue_work(reset_workqueue, &efx->reset_work);
+
return 0;
}
@@ -2394,7 +2438,7 @@ static struct dev_pm_ops efx_pm_ops = {
};
static struct pci_driver efx_pci_driver = {
- .name = EFX_DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
@@ -2421,11 +2465,6 @@ static int __init efx_init_module(void)
if (rc)
goto err_notifier;
- refill_workqueue = create_workqueue("sfc_refill");
- if (!refill_workqueue) {
- rc = -ENOMEM;
- goto err_refill;
- }
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
rc = -ENOMEM;
@@ -2441,8 +2480,6 @@ static int __init efx_init_module(void)
err_pci:
destroy_workqueue(reset_workqueue);
err_reset:
- destroy_workqueue(refill_workqueue);
- err_refill:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
return rc;
@@ -2454,7 +2491,6 @@ static void __exit efx_exit_module(void)
pci_unregister_driver(&efx_pci_driver);
destroy_workqueue(reset_workqueue);
- destroy_workqueue(refill_workqueue);
unregister_netdevice_notifier(&efx_netdev_notifier);
}
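
The efx.c changes above also replace the per-CPU "sfc_refill" workqueue with a per-queue timer: efx_init_struct() arms it with setup_timer() and efx_schedule_slow_fill() rearms it with mod_timer(). A condensed sketch of the pattern, with the callback body reduced to a placeholder (the real efx_rx_slow_fill() lives in rx.c):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct sketch_rx_queue {
	struct timer_list slow_fill;
};

static void sketch_rx_slow_fill(unsigned long context)
{
	struct sketch_rx_queue *rx_queue = (struct sketch_rx_queue *)context;

	/* Placeholder: the driver kicks the event queue here so that
	 * efx_fast_push_rx_descriptors() runs again in NAPI context. */
	(void)rx_queue;
}

static void sketch_init(struct sketch_rx_queue *rx_queue)
{
	setup_timer(&rx_queue->slow_fill, sketch_rx_slow_fill,
		    (unsigned long)rx_queue);
}

static void sketch_schedule_slow_fill(struct sketch_rx_queue *rx_queue)
{
	/* One-shot retry 100 ms out; mod_timer() safely re-arms the
	 * timer whether or not it is already pending, so callers need
	 * no locking. */
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}
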
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c5304..060dc952a0f 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_work(struct work_struct *data);
+extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, bool checksummed);
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard);
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_RXQ_SIZE 1024
#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
@@ -106,8 +106,9 @@ extern unsigned int efx_monitor_interval;
static inline void efx_schedule_channel(struct efx_channel *channel)
{
- EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
- channel->channel, raw_smp_processor_id());
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d scheduling NAPI poll on CPU%d\n",
+ channel->channel, raw_smp_processor_id());
channel->work_pending = true;
napi_schedule(&channel->napi_str);
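
EFX_RXQ_SIZE above is a power of two, so the companion EFX_RXQ_MASK converts a free-running descriptor count into a ring offset with a single AND instead of a modulo. A one-line illustration of the idiom:

/* With a power-of-two ring, count & mask == count % size, so a
 * monotonically increasing added_count wraps around for free. */
static inline unsigned int sketch_rxq_slot(unsigned int added_count)
{
	return added_count & EFX_RXQ_MASK;	/* EFX_RXQ_MASK = 1023 */
}
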
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 22026bfbc4c..fd19d6ab97a 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -218,8 +218,8 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
/* GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
- EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
- " setting\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
}
@@ -234,7 +234,7 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
- strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
siena_print_fwver(efx, info->fw_version,
@@ -242,6 +242,32 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+ return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+ struct ethtool_regs *regs, void *buf)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ regs->version = efx->type->revision;
+ efx_nic_get_regs(efx, buf);
+}
+
+static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ return efx->msg_enable;
+}
+
+static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ efx->msg_enable = msg_enable;
+}
+
/**
* efx_fill_test - fill in an individual self-test entry
* @test_index: Index of the test
@@ -443,12 +469,13 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
struct efx_mac_stats *mac_stats = &efx->mac_stats;
struct efx_ethtool_stat *stat;
struct efx_channel *channel;
+ struct rtnl_link_stats64 temp;
int i;
EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
/* Update MAC and NIC statistics */
- dev_get_stats(net_dev);
+ dev_get_stats(net_dev, &temp);
/* Fill detailed statistics buffer */
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -520,6 +547,14 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
return efx->rx_checksum_enabled;
}
+static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ u32 supported = efx->type->offload_features & ETH_FLAG_RXHASH;
+
+ return ethtool_op_set_flags(net_dev, data, supported);
+}
+
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
@@ -539,7 +574,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!already_up) {
rc = dev_open(efx->net_dev);
if (rc) {
- EFX_ERR(efx, "failed opening device.\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed opening device.\n");
goto fail2;
}
}
@@ -551,9 +587,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!already_up)
dev_close(efx->net_dev);
- EFX_LOG(efx, "%s %sline self-tests\n",
- rc == 0 ? "passed" : "failed",
- (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+ netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+ rc == 0 ? "passed" : "failed",
+ (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
fail2:
fail1:
@@ -679,8 +715,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
return -EOPNOTSUPP;
if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
- EFX_ERR(efx, "invalid coalescing setting. "
- "Only rx/tx_coalesce_usecs_irq are supported\n");
+ netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. "
+ "Only rx/tx_coalesce_usecs_irq are supported\n");
return -EOPNOTSUPP;
}
@@ -692,8 +728,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
efx_for_each_tx_queue(tx_queue, efx) {
if ((tx_queue->channel->channel < efx->n_rx_channels) &&
tx_usecs) {
- EFX_ERR(efx, "Channel is shared. "
- "Only RX coalescing may be set\n");
+ netif_err(efx, drv, efx->net_dev, "Channel is shared. "
+ "Only RX coalescing may be set\n");
return -EOPNOTSUPP;
}
}
@@ -721,13 +757,15 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(pause->autoneg ? EFX_FC_AUTO : 0));
if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
- EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "Flow control unsupported: tx ON rx OFF\n");
rc = -EINVAL;
goto out;
}
if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
- EFX_LOG(efx, "Autonegotiation is disabled\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "Autonegotiation is disabled\n");
rc = -EINVAL;
goto out;
}
@@ -758,8 +796,9 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx->phy_op->reconfigure(efx);
if (rc) {
- EFX_ERR(efx, "Unable to advertise requested flow "
- "control setting\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to advertise requested flow "
+ "control setting\n");
goto out;
}
}
@@ -830,10 +869,101 @@ extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, method);
}
+static int
+efx_ethtool_get_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info, void *rules __always_unused)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = efx->n_rx_channels;
+ return 0;
+
+ case ETHTOOL_GRXFH: {
+ unsigned min_revision = 0;
+
+ info->data = 0;
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case IPV4_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_FALCON_B0;
+ break;
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through */
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ min_revision = EFX_REV_SIENA_A0;
+ break;
+ default:
+ break;
+ }
+ if (efx_nic_rev(efx) < min_revision)
+ info->data = 0;
+ return 0;
+ }
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ indir->size = ARRAY_SIZE(efx->rx_indir_table);
+ memcpy(indir->ring_index, efx->rx_indir_table,
+ copy_size * sizeof(indir->ring_index[0]));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ size_t i;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
+ return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ if (indir->ring_index[i] >= efx->n_rx_channels)
+ return -EINVAL;
+
+ memcpy(efx->rx_indir_table, indir->ring_index,
+ sizeof(efx->rx_indir_table));
+ efx_nic_push_rx_indir_table(efx);
+ return 0;
+}
+
const struct ethtool_ops efx_ethtool_ops = {
.get_settings = efx_ethtool_get_settings,
.set_settings = efx_ethtool_set_settings,
.get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_regs_len = efx_ethtool_get_regs_len,
+ .get_regs = efx_ethtool_get_regs,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
.nway_reset = efx_ethtool_nway_reset,
.get_link = efx_ethtool_get_link,
.get_eeprom_len = efx_ethtool_get_eeprom_len,
@@ -854,7 +984,7 @@ const struct ethtool_ops efx_ethtool_ops = {
/* Need to enable/disable TSO-IPv6 too */
.set_tso = efx_ethtool_set_tso,
.get_flags = ethtool_op_get_flags,
- .set_flags = ethtool_op_set_flags,
+ .set_flags = efx_ethtool_set_flags,
.get_sset_count = efx_ethtool_get_sset_count,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
@@ -863,4 +993,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_wol = efx_ethtool_get_wol,
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
+ .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
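
The new get/set_rxfh_indir hooks expose efx->rx_indir_table, the RSS indirection table mapping receive-hash bits to an RX queue; efx_probe_nic() (in the efx.c hunks above) fills it round-robin by default. A sketch of the default fill and of the validation set_rxfh_indir performs; the 128-entry size here is an assumption standing in for ARRAY_SIZE(efx->rx_indir_table).

#include <linux/types.h>
#include <linux/errno.h>

#define SKETCH_INDIR_ENTRIES 128	/* assumed table size */

static void sketch_fill_indir(u32 *table, unsigned int n_rx_channels)
{
	unsigned int i;

	/* Round-robin default, as in efx_probe_nic(). */
	for (i = 0; i < SKETCH_INDIR_ENTRIES; i++)
		table[i] = i % n_rx_channels;
}

static int sketch_check_indir(const u32 *table, unsigned int n_rx_channels)
{
	unsigned int i;

	/* Reject the whole table if any entry names a nonexistent
	 * queue, mirroring efx_ethtool_set_rxfh_indir(). */
	for (i = 0; i < SKETCH_INDIR_ENTRIES; i++)
		if (table[i] >= n_rx_channels)
			return -EINVAL;
	return 0;
}
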
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 655b697b45b..4f9d33f3cca 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -167,13 +167,15 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
* exit without having touched the hardware.
*/
if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
- EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
- raw_smp_processor_id());
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d not for me\n", irq,
+ raw_smp_processor_id());
return IRQ_NONE;
}
efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
@@ -239,7 +241,8 @@ static int falcon_spi_wait(struct efx_nic *efx)
if (!falcon_spi_poll(efx))
return 0;
if (time_after_eq(jiffies, timeout)) {
- EFX_ERR(efx, "timed out waiting for SPI\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for SPI\n");
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
@@ -333,9 +336,10 @@ falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
if (!(status & SPI_STATUS_NRDY))
return 0;
if (time_after_eq(jiffies, timeout)) {
- EFX_ERR(efx, "SPI write timeout on device %d"
- " last status=0x%02x\n",
- spi->device_id, status);
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
return -ETIMEDOUT;
}
schedule_timeout_uninterruptible(1);
@@ -469,7 +473,8 @@ static void falcon_reset_macs(struct efx_nic *efx)
udelay(10);
}
- EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XMAC core reset\n");
}
}
@@ -492,12 +497,13 @@ static void falcon_reset_macs(struct efx_nic *efx)
if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
!EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
- EFX_LOG(efx, "Completed MAC reset after %d loops\n",
- count);
+ netif_dbg(efx, hw, efx->net_dev,
+ "Completed MAC reset after %d loops\n",
+ count);
break;
}
if (count > 20) {
- EFX_ERR(efx, "MAC reset failed\n");
+ netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
break;
}
count++;
@@ -548,7 +554,9 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
- int link_speed;
+ int link_speed, isolate;
+
+ isolate = (efx->reset_pending != RESET_TYPE_NONE);
switch (link_state->speed) {
case 10000: link_speed = 3; break;
@@ -570,7 +578,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
* discarded. */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
- !link_state->up);
+ !link_state->up || isolate);
}
efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
@@ -584,7 +592,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
/* Unisolate the MAC -> RX */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
@@ -625,7 +633,8 @@ static void falcon_stats_complete(struct efx_nic *efx)
rmb(); /* read the done flag before the stats */
efx->mac_op->update_stats(efx);
} else {
- EFX_ERR(efx, "timed out waiting for statistics\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for statistics\n");
}
}
@@ -715,16 +724,17 @@ static int falcon_gmii_wait(struct efx_nic *efx)
if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
- EFX_ERR(efx, "error from GMII access "
- EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(md_stat));
+ netif_err(efx, hw, efx->net_dev,
+ "error from GMII access "
+ EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(md_stat));
return -EIO;
}
return 0;
}
udelay(10);
}
- EFX_ERR(efx, "timed out waiting for GMII\n");
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
return -ETIMEDOUT;
}
@@ -736,7 +746,8 @@ static int falcon_mdio_write(struct net_device *net_dev,
efx_oword_t reg;
int rc;
- EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing MDIO %d register %d.%d with 0x%04x\n",
prtad, devad, addr, value);
mutex_lock(&efx->mdio_lock);
@@ -810,8 +821,9 @@ static int falcon_mdio_read(struct net_device *net_dev,
if (rc == 0) {
efx_reado(efx, &reg, FR_AB_MD_RXD);
rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
- EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
- prtad, devad, addr, rc);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got %04x\n",
+ prtad, devad, addr, rc);
} else {
/* Abort the read operation */
EFX_POPULATE_OWORD_2(reg,
@@ -819,8 +831,9 @@ static int falcon_mdio_read(struct net_device *net_dev,
FRF_AB_MD_GC, 1);
efx_writeo(efx, &reg, FR_AB_MD_CS);
- EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
- prtad, devad, addr, rc);
+ netif_dbg(efx, hw, efx->net_dev,
+ "read from MDIO %d register %d.%d, got error %d\n",
+ prtad, devad, addr, rc);
}
out:
@@ -871,7 +884,8 @@ static void falcon_switch_mac(struct efx_nic *efx)
falcon_clock_mac(efx);
- EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
+ netif_dbg(efx, hw, efx->net_dev, "selected %cMAC\n",
+ EFX_IS10G(efx) ? 'X' : 'G');
/* Not all macs support a mac-level link state */
efx->xmac_poll_required = false;
falcon_reset_macs(efx);
@@ -895,8 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->phy_op = &falcon_qt202x_phy_ops;
break;
default:
- EFX_ERR(efx, "Unknown PHY type %d\n",
- efx->phy_type);
+ netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
+ efx->phy_type);
return -ENODEV;
}
@@ -924,10 +938,11 @@ static int falcon_probe_port(struct efx_nic *efx)
FALCON_MAC_STATS_SIZE);
if (rc)
return rc;
- EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
return 0;
}
@@ -967,8 +982,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
mutex_unlock(&efx->spi_lock);
if (rc) {
- EFX_ERR(efx, "Failed to read %s\n",
- efx->spi_flash ? "flash" : "EEPROM");
+ netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
+ efx->spi_flash ? "flash" : "EEPROM");
rc = -EIO;
goto out;
}
@@ -978,11 +993,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
rc = -EINVAL;
if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
- EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM bad magic 0x%x\n", magic_num);
goto out;
}
if (struct_ver < 2) {
- EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has ancient version 0x%x\n", struct_ver);
goto out;
} else if (struct_ver < 4) {
word = &nvconfig->board_magic_num;
@@ -995,7 +1012,8 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
csum += le16_to_cpu(*word);
if (~csum & 0xffff) {
- EFX_ERR(efx, "NVRAM has incorrect checksum\n");
+ netif_err(efx, hw, efx->net_dev,
+ "NVRAM has incorrect checksum\n");
goto out;
}
@@ -1073,22 +1091,25 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
efx_oword_t glb_ctl_reg_ker;
int rc;
- EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
+ netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
+ RESET_TYPE(method));
/* Initiate device reset */
if (method == RESET_TYPE_WORLD) {
rc = pci_save_state(efx->pci_dev);
if (rc) {
- EFX_ERR(efx, "failed to backup PCI state of primary "
- "function prior to hardware reset\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of primary "
+ "function prior to hardware reset\n");
goto fail1;
}
if (efx_nic_is_dual_func(efx)) {
rc = pci_save_state(nic_data->pci_dev2);
if (rc) {
- EFX_ERR(efx, "failed to backup PCI state of "
- "secondary function prior to "
- "hardware reset\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to backup PCI state of "
+ "secondary function prior to "
+ "hardware reset\n");
goto fail2;
}
}
@@ -1113,7 +1134,7 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
}
efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
- EFX_LOG(efx, "waiting for hardware reset\n");
+ netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
schedule_timeout_uninterruptible(HZ / 20);
/* Restore PCI configuration if needed */
@@ -1121,28 +1142,32 @@ static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
if (efx_nic_is_dual_func(efx)) {
rc = pci_restore_state(nic_data->pci_dev2);
if (rc) {
- EFX_ERR(efx, "failed to restore PCI config for "
- "the secondary function\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PCI config for "
+ "the secondary function\n");
goto fail3;
}
}
rc = pci_restore_state(efx->pci_dev);
if (rc) {
- EFX_ERR(efx, "failed to restore PCI config for the "
- "primary function\n");
+ netif_err(efx, drv, efx->net_dev,
+ "failed to restore PCI config for the "
+ "primary function\n");
goto fail4;
}
- EFX_LOG(efx, "successfully restored PCI config\n");
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully restored PCI config\n");
}
/* Assert that reset complete */
efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
rc = -ETIMEDOUT;
- EFX_ERR(efx, "timed out waiting for hardware reset\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for hardware reset\n");
goto fail5;
}
- EFX_LOG(efx, "hardware reset complete\n");
+ netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
return 0;
@@ -1165,8 +1190,9 @@ static void falcon_monitor(struct efx_nic *efx)
rc = falcon_board(efx)->type->monitor(efx);
if (rc) {
- EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
- (rc == -ERANGE) ? "reported fault" : "failed");
+ netif_err(efx, hw, efx->net_dev,
+ "Board sensor %s; shutting down PHY\n",
+ (rc == -ERANGE) ? "reported fault" : "failed");
efx->phy_mode |= PHY_MODE_LOW_POWER;
rc = __efx_reconfigure_port(efx);
WARN_ON(rc);
@@ -1217,7 +1243,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
/* Wait for SRAM reset to complete */
count = 0;
do {
- EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
+ netif_dbg(efx, hw, efx->net_dev,
+ "waiting for SRAM reset (attempt %d)...\n", count);
/* SRAM reset is slow; expect around 16ms */
schedule_timeout_uninterruptible(HZ / 50);
@@ -1225,13 +1252,14 @@ static int falcon_reset_sram(struct efx_nic *efx)
/* Check for reset complete */
efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
- EFX_LOG(efx, "SRAM reset complete\n");
+ netif_dbg(efx, hw, efx->net_dev,
+ "SRAM reset complete\n");
return 0;
}
} while (++count < 20); /* wait up to 0.4 sec */
- EFX_ERR(efx, "timed out waiting for SRAM reset\n");
+ netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
return -ETIMEDOUT;
}
@@ -1290,7 +1318,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
rc = falcon_read_nvram(efx, nvconfig);
if (rc == -EINVAL) {
- EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mdio.prtad = MDIO_PRTAD_NONE;
board_rev = 0;
@@ -1324,7 +1353,8 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
/* Read the MAC addresses */
memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
- EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
+ netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
+ efx->phy_type, efx->mdio.prtad);
rc = falcon_probe_board(efx, board_rev);
if (rc)
@@ -1353,14 +1383,16 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
- EFX_LOG(efx, "Booted from %s\n",
- boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
+ netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
+ boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
+ "flash" : "EEPROM");
} else {
/* Disable VPD and set clock dividers to safe
* values for initial programming. */
boot_dev = -1;
- EFX_LOG(efx, "Booted from internal ASIC settings;"
- " setting SPI config\n");
+ netif_dbg(efx, probe, efx->net_dev,
+ "Booted from internal ASIC settings;"
+ " setting SPI config\n");
EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
/* 125 MHz / 7 ~= 20 MHz */
FRF_AB_EE_SF_CLOCK_DIV, 7,
@@ -1394,7 +1426,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
if (efx_nic_fpga_ver(efx) != 0) {
- EFX_ERR(efx, "Falcon FPGA not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon FPGA not supported\n");
goto fail1;
}
@@ -1404,16 +1437,19 @@ static int falcon_probe_nic(struct efx_nic *efx)
u8 pci_rev = efx->pci_dev->revision;
if ((pci_rev == 0xff) || (pci_rev == 0)) {
- EFX_ERR(efx, "Falcon rev A0 not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A0 not supported\n");
goto fail1;
}
efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
- EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 1G not supported\n");
goto fail1;
}
if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
- EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Falcon rev A1 PCI-X not supported\n");
goto fail1;
}
@@ -1427,7 +1463,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
}
if (!nic_data->pci_dev2) {
- EFX_ERR(efx, "failed to find secondary function\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to find secondary function\n");
rc = -ENODEV;
goto fail2;
}
@@ -1436,7 +1473,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
/* Now we can reset the NIC */
rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
- EFX_ERR(efx, "failed to reset NIC\n");
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
}
@@ -1446,9 +1483,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
- EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
- (u64)efx->irq_status.dma_addr,
- efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (u64)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (u64)virt_to_phys(efx->irq_status.addr));
falcon_probe_spi_devices(efx);
@@ -1472,7 +1511,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = falcon_board(efx)->type->init(efx);
if (rc) {
- EFX_ERR(efx, "failed to initialise board\n");
+ netif_err(efx, probe, efx->net_dev,
+ "failed to initialise board\n");
goto fail6;
}
@@ -1542,6 +1582,13 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
+
+ /* Enable hash insertion. This is broken for the
+ * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
+ * IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
}
/* Always enable XOFF signal from RX FIFO. We enable
* or disable transmission of pause frames at the MAC. */
@@ -1615,8 +1662,12 @@ static int falcon_init_nic(struct efx_nic *efx)
falcon_init_rx_cfg(efx);
- /* Set destination of both TX and RX Flush events */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
+ /* Set destination of both TX and RX Flush events */
EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
@@ -1821,6 +1872,7 @@ struct efx_nic_type falcon_b0_nic_type = {
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -1828,7 +1880,7 @@ struct efx_nic_type falcon_b0_nic_type = {
* channels */
.tx_dc_base = 0x130000,
.rx_dc_base = 0x100000,
- .offload_features = NETIF_F_IP_CSUM,
+ .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH,
.reset_world_flags = ETH_RESET_IRQ,
};
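
falcon_init_rx_cfg() above selects the Toeplitz hash (FRF_BZ_RX_HASH_ALG=1), since the native 'Falcon' hash is noted as broken, and falcon_init_nic() loads the IPv4 key from efx->rx_hash_key. For reference, a sketch of the textbook Microsoft-RSS Toeplitz computation the hardware implements; this is the standard algorithm, not the NIC's internal implementation.

#include <linux/types.h>

/* Toeplitz hash over `len` input bytes (e.g. the 12-byte
 * src-ip/dst-ip/src-port/dst-port tuple for TCP/IPv4); `key` must
 * supply at least len + 4 bytes. */
static u32 sketch_toeplitz(const u8 *key, const u8 *data, unsigned int len)
{
	u32 hash = 0;
	/* 32-bit window over the key bit-string, initially bits 0..31 */
	u32 window = ((u32)key[0] << 24) | ((u32)key[1] << 16) |
		     ((u32)key[2] << 8) | key[3];
	unsigned int byte, bit;

	for (byte = 0; byte < len; byte++) {
		for (bit = 0; bit < 8; bit++) {
			/* XOR in the current window for each set input
			 * bit, then slide the window one key bit. */
			if (data[byte] & (0x80 >> bit))
				hash ^= window;
			window <<= 1;
			if (key[byte + 4] & (0x80 >> bit))
				window |= 1;
		}
	}
	return hash;
}

The low-order bits of the result index rx_indir_table to choose the RX queue, and the full 32-bit value reaches the driver in the per-packet prefix that the new rx_buffer_hash_size field (0x10 bytes on Falcon B0) accounts for.
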
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index c7a933a3292..3d950c2cf20 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -106,12 +106,17 @@ static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
alarms1 &= mask;
alarms2 &= mask >> 8;
if (alarms1 || alarms2) {
- EFX_ERR(efx,
- "LM87 detected a hardware failure (status %02x:%02x)"
- "%s%s\n",
- alarms1, alarms2,
- (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
- (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
+ netif_err(efx, hw, efx->net_dev,
+ "LM87 detected a hardware failure (status %02x:%02x)"
+ "%s%s%s\n",
+ alarms1, alarms2,
+ (alarms1 & LM87_ALARM_TEMP_INT) ?
+ "; board is overheating" : "",
+ (alarms1 & LM87_ALARM_TEMP_EXT1) ?
+ "; controller is overheating" : "",
+ (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
+ || alarms2) ?
+ "; electrical fault" : "");
return -ERANGE;
}
@@ -243,7 +248,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
(0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
(0 << P0_EN_1V0X_LBN));
if (rc != out) {
- EFX_INFO(efx, "power-cycling PHY\n");
+ netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n");
rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
if (rc)
goto fail_on;
@@ -269,7 +274,8 @@ static int sfe4001_poweron(struct efx_nic *efx)
if (rc)
goto fail_on;
- EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
+ netif_info(efx, hw, efx->net_dev,
+ "waiting for DSP boot (attempt %d)...\n", i);
/* In flash config mode, DSP does not turn on AFE, so
* just wait 1 second.
@@ -291,7 +297,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
}
}
- EFX_INFO(efx, "timed out waiting for DSP boot\n");
+ netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n");
rc = -ETIMEDOUT;
fail_on:
sfe4001_poweroff(efx);
@@ -377,7 +383,7 @@ static void sfe4001_fini(struct efx_nic *efx)
{
struct falcon_board *board = falcon_board(efx);
- EFX_INFO(efx, "%s\n", __func__);
+ netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
sfe4001_poweroff(efx);
@@ -461,7 +467,7 @@ static int sfe4001_init(struct efx_nic *efx)
if (rc)
goto fail_on;
- EFX_INFO(efx, "PHY is powered on\n");
+ netif_info(efx, hw, efx->net_dev, "PHY is powered on\n");
return 0;
fail_on:
@@ -493,7 +499,7 @@ static int sfn4111t_check_hw(struct efx_nic *efx)
static void sfn4111t_fini(struct efx_nic *efx)
{
- EFX_INFO(efx, "%s\n", __func__);
+ netif_info(efx, drv, efx->net_dev, "%s\n", __func__);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
i2c_unregister_device(falcon_board(efx)->hwmon_client);
@@ -742,13 +748,14 @@ int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
board->type = &board_types[i];
if (board->type) {
- EFX_INFO(efx, "board is %s rev %c%d\n",
+ netif_info(efx, probe, efx->net_dev, "board is %s rev %c%d\n",
(efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
? board->type->ref_model : board->type->gen_type,
'A' + board->major, board->minor);
return 0;
} else {
- EFX_ERR(efx, "unknown board type %d\n", type_id);
+ netif_err(efx, probe, efx->net_dev, "unknown board type %d\n",
+ type_id);
return -ENODEV;
}
}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index c84a2ce2ccb..bae656dd2c4 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -81,7 +81,8 @@ int falcon_reset_xaui(struct efx_nic *efx)
}
udelay(10);
}
- EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
return -ETIMEDOUT;
}
@@ -256,7 +257,7 @@ static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
falcon_stop_nic_stats(efx);
while (!mac_up && tries) {
- EFX_LOG(efx, "bashing xaui\n");
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
falcon_reset_xaui(efx);
udelay(200);
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index b89177c27f4..85a99fe8743 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -78,8 +78,9 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
{
unsigned long flags __attribute__ ((unused));
- EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
- EFX_OWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing register %x with " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
@@ -105,8 +106,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
- EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
- addr, EFX_QWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+ addr, EFX_QWORD_VAL(*value));
spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
@@ -129,8 +131,9 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
- EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
- reg, EFX_DWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "writing partial register %x with "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
/* No lock required */
_efx_writed(efx, value->u32[0], reg);
@@ -155,8 +158,9 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
value->u32[3] = _efx_readd(efx, reg + 12);
spin_unlock_irqrestore(&efx->biu_lock, flags);
- EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
- EFX_OWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+ EFX_OWORD_VAL(*value));
}
/* Read an 8-byte SRAM entry through supplied mapping,
@@ -177,8 +181,9 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
- EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
- addr, EFX_QWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+ addr, EFX_QWORD_VAL(*value));
}
/* Read dword from register that allows partial writes (sic) */
@@ -186,8 +191,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
unsigned int reg)
{
value->u32[0] = _efx_readd(efx, reg);
- EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
- reg, EFX_DWORD_VAL(*value));
+ netif_vdbg(efx, hw, efx->net_dev,
+ "read from register %x, got "EFX_DWORD_FMT"\n",
+ reg, EFX_DWORD_VAL(*value));
}
/* Write to a register forming part of a table */
@@ -211,6 +217,13 @@ static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
}
+/* Read from a dword register forming part of a table */
+static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
+ unsigned int reg, unsigned int index)
+{
+ efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
+}
+
/* Page-mapped register block size */
#define EFX_PAGE_BLOCK_SIZE 0x2000
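
A hedged usage sketch for the efx_readd_table() helper added above, assuming the surrounding driver declarations are in scope: it dumps one row of a table whose rows are dword-sized and dword-spaced. Note the asymmetry with efx_writed_table(), which steps by sizeof(efx_oword_t) because it serves tables of oword-sized rows written one dword at a time.

static inline void efx_dump_table_row(struct efx_nic *efx,
				      unsigned int reg, unsigned int index)
{
	efx_dword_t dword;

	efx_readd_table(efx, &dword, reg, index);
	netif_vdbg(efx, hw, efx->net_dev,
		   "table %x row %u holds "EFX_DWORD_FMT"\n",
		   reg, index, EFX_DWORD_VAL(dword));
}
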
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 93cc3c1b945..3912b8fed91 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -168,11 +168,12 @@ static int efx_mcdi_poll(struct efx_nic *efx)
error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
if (error && mcdi->resplen == 0) {
- EFX_ERR(efx, "MC rebooted\n");
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
rc = EIO;
} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
- EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
- respseq, mcdi->seqno);
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
efx_readd(efx, &reg, pdu + 4);
@@ -303,8 +304,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
/* The request has been cancelled */
--mcdi->credits;
else
- EFX_ERR(efx, "MC response mismatch tx seq 0x%x rx "
- "seq 0x%x\n", seqno, mcdi->seqno);
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx "
+ "seq 0x%x\n", seqno, mcdi->seqno);
} else {
mcdi->resprc = errno;
mcdi->resplen = datalen;
@@ -352,8 +354,9 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
++mcdi->credits;
spin_unlock_bh(&mcdi->iface_lock);
- EFX_ERR(efx, "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
} else {
size_t resplen;
@@ -374,11 +377,13 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) {
- EFX_ERR(efx, "MC fatal error %d\n", -rc);
+ netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
+ -rc);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else
- EFX_ERR(efx, "MC command 0x%x inlen %d failed rc=%d\n",
- cmd, (int)inlen, -rc);
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d failed rc=%d\n",
+ cmd, (int)inlen, -rc);
}
efx_mcdi_release(mcdi);
@@ -534,8 +539,9 @@ static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
state_txt = sensor_status_names[state];
- EFX_ERR(efx, "Sensor %d (%s) reports condition '%s' for raw value %d\n",
- monitor, name, state_txt, value);
+ netif_err(efx, hw, efx->net_dev,
+ "Sensor %d (%s) reports condition '%s' for raw value %d\n",
+ monitor, name, state_txt, value);
}
/* Called from falcon_process_eventq for MCDI events */
@@ -548,12 +554,13 @@ void efx_mcdi_process_event(struct efx_channel *channel,
switch (code) {
case MCDI_EVENT_CODE_BADSSERT:
- EFX_ERR(efx, "MC watchdog or assertion failure at 0x%x\n", data);
+ netif_err(efx, hw, efx->net_dev,
+ "MC watchdog or assertion failure at 0x%x\n", data);
efx_mcdi_ev_death(efx, EINTR);
break;
case MCDI_EVENT_CODE_PMNOTICE:
- EFX_INFO(efx, "MCDI PM event.\n");
+ netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
break;
case MCDI_EVENT_CODE_CMDDONE:
@@ -570,10 +577,11 @@ void efx_mcdi_process_event(struct efx_channel *channel,
efx_mcdi_sensor_event(efx, event);
break;
case MCDI_EVENT_CODE_SCHEDERR:
- EFX_INFO(efx, "MC Scheduler error address=0x%x\n", data);
+ netif_info(efx, hw, efx->net_dev,
+ "MC Scheduler error address=0x%x\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
- EFX_INFO(efx, "MC Reboot\n");
+ netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, EIO);
break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
@@ -581,7 +589,8 @@ void efx_mcdi_process_event(struct efx_channel *channel,
break;
default:
- EFX_ERR(efx, "Unknown MCDI event 0x%x\n", code);
+ netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
+ code);
}
}
@@ -627,7 +636,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -657,7 +666,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -695,7 +704,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d len=%d\n", __func__, rc, (int)outlen);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+ __func__, rc, (int)outlen);
return rc;
}
@@ -724,7 +734,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -749,8 +759,8 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n",
- __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return rc;
}
@@ -781,7 +791,7 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -802,7 +812,7 @@ int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -827,7 +837,7 @@ int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -853,7 +863,7 @@ int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -877,7 +887,7 @@ int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -898,7 +908,7 @@ int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -948,9 +958,10 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
return 0;
fail2:
- EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
+ netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+ __func__, type);
fail1:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -994,14 +1005,15 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
? "watchdog reset"
: "unknown assertion";
- EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
- MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
- MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+ netif_err(efx, hw, efx->net_dev,
+ "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
for (index = 1; index < 32; index++) {
- EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
MCDI_DWORD2(outbuf, ofst));
ofst += sizeof(efx_dword_t);
}
@@ -1050,14 +1062,16 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
NULL, 0, NULL);
if (rc)
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
}
int efx_mcdi_reset_port(struct efx_nic *efx)
{
int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return rc;
}
@@ -1075,7 +1089,7 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
return 0;
if (rc == 0)
rc = -EIO;
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1108,7 +1122,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
fail:
*id_out = -1;
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1143,7 +1157,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
fail:
*id_out = -1;
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1163,7 +1177,7 @@ int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -1179,7 +1193,7 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
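
The MCDI poll and completion paths above both treat two sequence numbers as matching when they agree modulo the width of the sequence field, so the comparison stays correct across wrap-around. A minimal sketch of that check (the SEQ_MASK value here is assumed; the real one is defined in mcdi.c):

#include <stdbool.h>

#define SEQ_MASK 0x0f	/* assumed field width */

static bool mcdi_seq_matches(unsigned int respseq, unsigned int seqno)
{
	return ((respseq ^ seqno) & SEQ_MASK) == 0;
}
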
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 39182631ac9..f88f4bf986f 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -69,8 +69,8 @@ static int efx_mcdi_get_mac_faults(struct efx_nic *efx, u32 *faults)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n",
- __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return rc;
}
@@ -110,8 +110,8 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
return 0;
fail:
- EFX_ERR(efx, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
return rc;
}
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 6032c0e1f1f..0121e71702b 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -20,7 +20,7 @@
#include "nic.h"
#include "selftest.h"
-struct efx_mcdi_phy_cfg {
+struct efx_mcdi_phy_data {
u32 flags;
u32 type;
u32 supported_cap;
@@ -35,7 +35,7 @@ struct efx_mcdi_phy_cfg {
};
static int
-efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
+efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
size_t outlen;
@@ -71,7 +71,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -97,7 +97,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -122,7 +122,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -150,7 +150,7 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -178,7 +178,7 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
return 0;
fail:
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
@@ -259,7 +259,7 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
enum efx_phy_mode mode, supported;
u32 flags;
@@ -307,7 +307,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_data;
+ struct efx_mcdi_phy_data *phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
u32 caps;
int rc;
@@ -395,6 +395,7 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
return 0;
@@ -405,7 +406,7 @@ fail:
int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
ethtool_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
@@ -446,7 +447,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
*/
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
	/* The link partner capabilities are only relevant if the
@@ -465,8 +466,8 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
rmtadv |= ADVERTISED_Asym_Pause;
if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
- EFX_ERR(efx, "warning: link partner doesn't support "
- "pause frames");
+ netif_err(efx, link, efx->net_dev,
+			  "warning: link partner doesn't support pause frames\n");
}
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
@@ -482,7 +483,8 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc) {
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
efx->link_state.up = false;
} else {
efx_mcdi_phy_decode_link(
@@ -505,7 +507,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
int rc;
@@ -525,7 +527,8 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
outbuf, sizeof(outbuf), NULL);
if (rc) {
- EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
return;
}
ecmd->lp_advertising =
@@ -535,7 +538,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps;
int rc;
@@ -674,7 +677,7 @@ out:
static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
unsigned flags)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 mode;
int rc;
@@ -712,7 +715,7 @@ static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
{
- struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
if (index == 0)
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 0548fcbbdcd..eeaf0bd64bd 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -63,7 +63,8 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
/* Read MMD STATUS2 to check it is responding. */
status = efx_mdio_read(efx, mmd, MDIO_STAT2);
if ((status & MDIO_STAT2_DEVPRST) != MDIO_STAT2_DEVPRST_VAL) {
- EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
+ netif_err(efx, hw, efx->net_dev,
+ "PHY MMD %d not responding.\n", mmd);
return -EIO;
}
}
@@ -72,12 +73,14 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal)
status = efx_mdio_read(efx, mmd, MDIO_STAT1);
if (status & MDIO_STAT1_FAULT) {
if (fault_fatal) {
- EFX_ERR(efx, "PHY MMD %d reporting fatal"
- " fault: status %x\n", mmd, status);
+ netif_err(efx, hw, efx->net_dev,
+ "PHY MMD %d reporting fatal"
+ " fault: status %x\n", mmd, status);
return -EIO;
} else {
- EFX_LOG(efx, "PHY MMD %d reporting status"
- " %x (expected)\n", mmd, status);
+ netif_dbg(efx, hw, efx->net_dev,
+ "PHY MMD %d reporting status"
+ " %x (expected)\n", mmd, status);
}
}
return 0;
@@ -103,8 +106,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
if (mask & 1) {
stat = efx_mdio_read(efx, mmd, MDIO_CTRL1);
if (stat < 0) {
- EFX_ERR(efx, "failed to read status of"
- " MMD %d\n", mmd);
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read status of"
+ " MMD %d\n", mmd);
return -EIO;
}
if (stat & MDIO_CTRL1_RESET)
@@ -119,8 +123,9 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask)
msleep(spintime);
}
if (in_reset != 0) {
- EFX_ERR(efx, "not all MMDs came out of reset in time."
- " MMDs still in reset: %x\n", in_reset);
+ netif_err(efx, hw, efx->net_dev,
+ "not all MMDs came out of reset in time."
+ " MMDs still in reset: %x\n", in_reset);
rc = -ETIMEDOUT;
}
return rc;
@@ -142,16 +147,18 @@ int efx_mdio_check_mmds(struct efx_nic *efx,
devs1 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS1);
devs2 = efx_mdio_read(efx, probe_mmd, MDIO_DEVS2);
if (devs1 < 0 || devs2 < 0) {
- EFX_ERR(efx, "failed to read devices present\n");
+ netif_err(efx, hw, efx->net_dev,
+ "failed to read devices present\n");
return -EIO;
}
devices = devs1 | (devs2 << 16);
if ((devices & mmd_mask) != mmd_mask) {
- EFX_ERR(efx, "required MMDs not present: got %x, "
- "wanted %x\n", devices, mmd_mask);
+ netif_err(efx, hw, efx->net_dev,
+ "required MMDs not present: got %x, wanted %x\n",
+ devices, mmd_mask);
return -ENODEV;
}
- EFX_TRACE(efx, "Devices present: %x\n", devices);
+ netif_vdbg(efx, hw, efx->net_dev, "Devices present: %x\n", devices);
/* Check all required MMDs are responding and happy. */
while (mmd_mask) {
@@ -219,7 +226,7 @@ static void efx_mdio_set_mmd_lpower(struct efx_nic *efx,
{
int stat = efx_mdio_read(efx, mmd, MDIO_STAT1);
- EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n",
+ netif_vdbg(efx, drv, efx->net_dev, "Setting low power mode for MMD %d to %d\n",
mmd, lpower);
if (stat & MDIO_STAT1_LPOWERABLE) {
@@ -349,8 +356,8 @@ int efx_mdio_test_alive(struct efx_nic *efx)
if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
(physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
- efx->mdio.prtad);
+ netif_err(efx, hw, efx->net_dev,
+ "no MDIO PHY present with ID %d\n", efx->mdio.prtad);
rc = -EINVAL;
} else {
rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f89e7192960..75791d3d496 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -51,7 +51,8 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
sync = !!(lane_status & MDIO_PHYXS_LNSTAT_ALIGN);
if (!sync)
- EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
+ netif_dbg(efx, hw, efx->net_dev, "XGXS lane status: %x\n",
+ lane_status);
return sync;
}
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index f3ac7f30b5e..02e54b4f701 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
-#define EFX_DRIVER_NAME "sfc_mtd"
#include "net_driver.h"
#include "spi.h"
#include "efx.h"
@@ -71,8 +70,10 @@ static int siena_mtd_probe(struct efx_nic *efx);
/* SPI utilities */
-static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
+static int
+efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
+ struct efx_mtd *efx_mtd = part->mtd.priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
u8 status;
@@ -92,7 +93,7 @@ static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
if (signal_pending(current))
return -EINTR;
}
- EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
+ pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
return -ETIMEDOUT;
}
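
efx_spi_slow_wait() is an instance of the poll-until-ready-or-deadline pattern. A minimal user-space sketch, with spi_ready() as a hypothetical stand-in for reading the SPI status register and a 10 ms poll interval likewise assumed:

#include <errno.h>
#include <stdbool.h>
#include <time.h>

static int slow_wait(bool (*spi_ready)(void), int timeout_ms)
{
	struct timespec delay = { 0, 10 * 1000 * 1000 }; /* 10 ms */
	int waited_ms;

	for (waited_ms = 0; waited_ms < timeout_ms; waited_ms += 10) {
		if (spi_ready())
			return 0;
		nanosleep(&delay, NULL);
	}
	return -ETIMEDOUT;
}
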
@@ -131,8 +132,10 @@ efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
return 0;
}
-static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
+static int
+efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
+ struct efx_mtd *efx_mtd = part->mtd.priv;
const struct efx_spi_device *spi = efx_mtd->spi;
struct efx_nic *efx = efx_mtd->efx;
unsigned pos, block_len;
@@ -156,7 +159,7 @@ static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
NULL, 0);
if (rc)
return rc;
- rc = efx_spi_slow_wait(efx_mtd, false);
+ rc = efx_spi_slow_wait(part, false);
/* Verify the entire region has been wiped */
memset(empty, 0xff, sizeof(empty));
@@ -198,13 +201,14 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd)
{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
int rc;
rc = efx_mtd->ops->sync(mtd);
if (rc)
- EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
+ pr_err("%s: %s sync failed (%d)\n",
+ part->name, efx_mtd->name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -338,7 +342,7 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
rc = mutex_lock_interruptible(&efx->spi_lock);
if (rc)
return rc;
- rc = efx_spi_erase(efx_mtd, part->offset + start, len);
+ rc = efx_spi_erase(part, part->offset + start, len);
mutex_unlock(&efx->spi_lock);
return rc;
}
@@ -363,12 +367,13 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
static int falcon_mtd_sync(struct mtd_info *mtd)
{
+ struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
struct efx_mtd *efx_mtd = mtd->priv;
struct efx_nic *efx = efx_mtd->efx;
int rc;
mutex_lock(&efx->spi_lock);
- rc = efx_spi_slow_wait(efx_mtd, true);
+ rc = efx_spi_slow_wait(part, true);
mutex_unlock(&efx->spi_lock);
return rc;
}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 4762c91cb58..64e7caa4bbb 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -13,11 +13,16 @@
#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H
+#if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG)
+#define DEBUG
+#endif
+
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -34,9 +39,7 @@
* Build definitions
*
**************************************************************************/
-#ifndef EFX_DRIVER_NAME
-#define EFX_DRIVER_NAME "sfc"
-#endif
+
#define EFX_DRIVER_VERSION "3.0"
#ifdef EFX_ENABLE_DEBUG
@@ -47,35 +50,6 @@
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif
-/* Un-rate-limited logging */
-#define EFX_ERR(efx, fmt, args...) \
-dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
-
-#define EFX_INFO(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
-
-#ifdef EFX_ENABLE_DEBUG
-#define EFX_LOG(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
-#else
-#define EFX_LOG(efx, fmt, args...) \
-dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
-#endif
-
-#define EFX_TRACE(efx, fmt, args...) do {} while (0)
-
-#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
-
-/* Rate-limited logging */
-#define EFX_ERR_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
-
-#define EFX_INFO_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
-
-#define EFX_LOG_RL(efx, fmt, args...) \
-do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
-
/**************************************************************************
*
* Efx data structures
@@ -221,7 +195,6 @@ struct efx_tx_queue {
* If both this and skb are %NULL, the buffer slot is currently free.
* @data: Pointer to ethernet header
* @len: Buffer length, in bytes.
- * @unmap_addr: DMA address to unmap
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
@@ -229,7 +202,24 @@ struct efx_rx_buffer {
struct page *page;
char *data;
unsigned int len;
- dma_addr_t unmap_addr;
+};
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @refcnt: Number of struct efx_rx_buffer's referencing this page.
+ * When refcnt falls to zero, the page is unmapped for dma
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+ unsigned refcnt;
+ dma_addr_t dma_addr;
+
+ unsigned int __pad[0] ____cacheline_aligned;
};
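
/* Standalone sketch of the recycling scheme the struct above
 * supports; unmap_page() is a hypothetical stand-in for
 * pci_unmap_page(). State sits at the start of the shared page,
 * and the DMA mapping is released only when the last rx buffer
 * referencing the page is freed. */
struct rx_page_state {
	unsigned int refcnt;
	unsigned long dma_addr;
};

static void rx_buffer_free(struct rx_page_state *state,
			   void (*unmap_page)(unsigned long dma_addr))
{
	if (--state->refcnt == 0)
		unmap_page(state->dma_addr);
}
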
/**
@@ -242,10 +232,6 @@ struct efx_rx_buffer {
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @add_lock: Receive queue descriptor add spin lock.
- * This lock must be held in order to add buffers to the RX
- * descriptor ring (rxd and buffer) and to update added_count (but
- * not removed_count).
* @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill)
@@ -259,12 +245,7 @@ struct efx_rx_buffer {
* overflow was observed. It should never be set.
* @alloc_page_count: RX allocation strategy counter.
* @alloc_skb_count: RX allocation strategy counter.
- * @work: Descriptor push work thread
- * @buf_page: Page for next RX buffer.
- * We can use a single page for multiple RX buffers. This tracks
- * the remaining space in the allocation.
- * @buf_dma_addr: Page's DMA address.
- * @buf_data: Page's host address.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
* @flushed: Use when handling queue flushing
*/
struct efx_rx_queue {
@@ -277,7 +258,6 @@ struct efx_rx_queue {
int added_count;
int notified_count;
int removed_count;
- spinlock_t add_lock;
unsigned int max_fill;
unsigned int fast_fill_trigger;
unsigned int fast_fill_limit;
@@ -285,12 +265,9 @@ struct efx_rx_queue {
unsigned int min_overfill;
unsigned int alloc_page_count;
unsigned int alloc_skb_count;
- struct delayed_work work;
+ struct timer_list slow_fill;
unsigned int slow_fill_count;
- struct page *buf_page;
- dma_addr_t buf_dma_addr;
- char *buf_data;
enum efx_flush_state flushed;
};
@@ -336,7 +313,7 @@ enum efx_rx_alloc_method {
* @eventq: Event queue buffer
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @eventq_magic: Event queue magic value for driver-generated test events
+ * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -367,7 +344,7 @@ struct efx_channel {
struct efx_special_buffer eventq;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int eventq_magic;
+ unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
@@ -658,6 +635,7 @@ union efx_multicast_hash {
* @interrupt_mode: Interrupt mode
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
* @state: Device state flag. Serialised by the rtnl_lock.
* @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
* @tx_queue: TX DMA queues
@@ -669,6 +647,7 @@ union efx_multicast_hash {
* @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_indir_table: Indirection table for RSS
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
@@ -740,6 +719,7 @@ struct efx_nic {
enum efx_int_mode interrupt_mode;
bool irq_rx_adaptive;
unsigned int irq_rx_moderation;
+ u32 msg_enable;
enum nic_state state;
enum reset_type reset_pending;
@@ -754,6 +734,8 @@ struct efx_nic {
unsigned n_tx_channels;
unsigned int rx_buffer_len;
unsigned int rx_buffer_order;
+ u8 rx_hash_key[40];
+ u32 rx_indir_table[128];
unsigned int_error_count;
unsigned long int_error_expire;
@@ -866,7 +848,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_padding: Padding added to each RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX buffer
+ * @rx_buffer_padding: Size of padding at end of RX buffer
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
@@ -910,6 +893,7 @@ struct efx_nic_type {
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
+ unsigned int rx_buffer_hash_size;
unsigned int rx_buffer_padding;
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
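
The new msg_enable field is what the netif_* calls throughout this patch test against. A simplified user-space stand-in for that gating follows; the MSG_HW bit and the macro name are hypothetical, and the real macros in <linux/netdevice.h> test netif_msg_<class>(priv) instead.

#include <stdio.h>

#define MSG_HW (1u << 0)	/* hypothetical class bit */

struct fake_dev {
	unsigned int msg_enable;	/* counterpart of efx->msg_enable */
};

#define fake_netif_hw_dbg(dev, ...)			\
	do {						\
		if ((dev)->msg_enable & MSG_HW)		\
			printf(__VA_ARGS__);		\
	} while (0)

With msg_enable initialised from a module-load bitmap, a whole message class can be silenced at run time without recompiling.
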
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 5d3aaec5855..f595d920c7c 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,6 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ (0x00010100 + (_channel)->channel)
+
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel) \
+ (0x00010200 + (_channel)->channel)
+
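
/* Standalone illustration (not driver code) of the scheme above:
 * the high bits encode the event type and the low byte the
 * originating channel, so a generated event matches exactly one
 * (type, channel) pair in the dispatcher. */
#include <assert.h>

#define MAGIC_TEST(ch) (0x00010100 + (ch))
#define MAGIC_FILL(ch) (0x00010200 + (ch))

int main(void)
{
	assert(MAGIC_FILL(3) != MAGIC_TEST(3)); /* types distinct */
	assert(MAGIC_FILL(3) != MAGIC_FILL(4)); /* channels distinct */
	return 0;
}
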
/**************************************************************************
*
* Solarstorm hardware access
@@ -171,9 +179,10 @@ int efx_nic_test_registers(struct efx_nic *efx,
return 0;
fail:
- EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
- " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
- EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
return -EIO;
}
@@ -206,8 +215,9 @@ efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
dma_addr = buffer->dma_addr + (i * 4096);
- EFX_LOG(efx, "mapping special buffer %d at %llx\n",
- index, (unsigned long long)dma_addr);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
EFX_POPULATE_QWORD_3(buf_desc,
FRF_AZ_BUF_ADR_REGION, 0,
FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
@@ -227,8 +237,8 @@ efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
if (!buffer->entries)
return;
- EFX_LOG(efx, "unmapping special buffers %d-%d\n",
- buffer->index, buffer->index + buffer->entries - 1);
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
EFX_POPULATE_OWORD_4(buf_tbl_upd,
FRF_AZ_BUF_UPD_CMD, 0,
@@ -268,11 +278,12 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
- EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
return 0;
}
@@ -283,11 +294,12 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
if (!buffer->addr)
return;
- EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, buffer->len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->dma_addr, buffer->len,
+ buffer->addr, (u64)virt_to_phys(buffer->addr));
pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
buffer->dma_addr);
@@ -547,9 +559,10 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
- EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
- rx_queue->queue, rx_queue->rxd.index,
- rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ rx_queue->queue, rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->flushed = FLUSH_NONE;
@@ -686,9 +699,10 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
EFX_WORKAROUND_10727(efx)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else {
- EFX_ERR(efx, "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
}
return tx_packets;
@@ -751,20 +765,21 @@ static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
* to a FIFO overflow.
*/
#ifdef EFX_ENABLE_DEBUG
- if (rx_ev_other_err) {
- EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- rx_queue->queue, EFX_QWORD_VAL(*event),
- rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
- rx_ev_ip_hdr_chksum_err ?
- " [IP_HDR_CHKSUM_ERR]" : "",
- rx_ev_tcp_udp_chksum_err ?
- " [TCP_UDP_CHKSUM_ERR]" : "",
- rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
- rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
- rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "");
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ rx_queue->queue, EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
}
@@ -778,8 +793,9 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
expected = rx_queue->removed_count & EFX_RXQ_MASK;
dropped = (index - expected) & EFX_RXQ_MASK;
- EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
- dropped, index, expected);
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
@@ -850,6 +866,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
checksummed, discard);
}
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned code;
+
+ code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+ ++channel->magic_count;
+ else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+ else
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+}
+
/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
@@ -873,8 +909,9 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
- EFX_ERR(efx, "channel %d seen global RX_RESET "
- "event. Resetting.\n", channel->channel);
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen global RX_RESET event. Resetting.\n",
+ channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
@@ -883,9 +920,10 @@ efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
}
if (!handled)
- EFX_ERR(efx, "channel %d unknown global event "
- EFX_QWORD_FMT "\n", channel->channel,
- EFX_QWORD_VAL(*event));
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown global event "
+ EFX_QWORD_FMT "\n", channel->channel,
+ EFX_QWORD_VAL(*event));
}
static void
@@ -900,31 +938,35 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
switch (ev_sub_code) {
case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
- EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
- EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
- EFX_LOG(efx, "channel %d EVQ %d initialised\n",
- channel->channel, ev_sub_data);
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_SRM_UPD_DONE_EV:
- EFX_TRACE(efx, "channel %d SRAM update done\n",
- channel->channel);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
break;
case FSE_AZ_WAKE_UP_EV:
- EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AZ_TIMER_EV:
- EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
- channel->channel, ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
break;
case FSE_AA_RX_RECOVER_EV:
- EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
@@ -933,19 +975,22 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
- EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
- EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
break;
default:
- EFX_TRACE(efx, "channel %d unknown driver event code %d "
- "data %04x\n", channel->channel, ev_sub_code,
- ev_sub_data);
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
break;
}
}
@@ -968,8 +1013,9 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
/* End of events */
break;
- EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(event));
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
@@ -993,11 +1039,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
}
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
- channel->eventq_magic = EFX_QWORD_FIELD(
- event, FSF_AZ_DRV_GEN_EV_MAGIC);
- EFX_LOG(channel->efx, "channel %d received generated "
- "event "EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(event));
+ efx_handle_generated_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
efx_handle_global_event(channel, &event);
@@ -1009,9 +1051,10 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
efx_mcdi_process_event(channel, &event);
break;
default:
- EFX_ERR(channel->efx, "channel %d unknown event type %d"
- " (data " EFX_QWORD_FMT ")\n", channel->channel,
- ev_code, EFX_QWORD_VAL(event));
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
}
}
@@ -1036,9 +1079,10 @@ void efx_nic_init_eventq(struct efx_channel *channel)
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
- channel->channel, channel->eventq.index,
- channel->eventq.index + channel->eventq.entries - 1);
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
EFX_POPULATE_OWORD_3(reg,
@@ -1088,12 +1132,20 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
}
-/* Generates a test event on the event queue. A subsequent call to
- * process_eventq() should pick up the event and place the value of
- * "magic" into channel->eventq_magic;
- */
-void efx_nic_generate_test_event(struct efx_channel *channel, unsigned int magic)
+void efx_nic_generate_test_event(struct efx_channel *channel)
{
+ unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+ efx_qword_t test_event;
+
+ EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+ unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
efx_qword_t test_event;
EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
@@ -1208,20 +1260,19 @@ int efx_nic_flush_queues(struct efx_nic *efx)
* leading to a reset, or fake up success anyway */
efx_for_each_tx_queue(tx_queue, efx) {
if (tx_queue->flushed != FLUSH_DONE)
- EFX_ERR(efx, "tx queue %d flush command timed out\n",
- tx_queue->queue);
+ netif_err(efx, hw, efx->net_dev,
+ "tx queue %d flush command timed out\n",
+ tx_queue->queue);
tx_queue->flushed = FLUSH_DONE;
}
efx_for_each_rx_queue(rx_queue, efx) {
if (rx_queue->flushed != FLUSH_DONE)
- EFX_ERR(efx, "rx queue %d flush command timed out\n",
- rx_queue->queue);
+ netif_err(efx, hw, efx->net_dev,
+ "rx queue %d flush command timed out\n",
+ rx_queue->queue);
rx_queue->flushed = FLUSH_DONE;
}
- if (EFX_WORKAROUND_7803(efx))
- return 0;
-
return -ETIMEDOUT;
}
@@ -1290,10 +1341,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
- EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
- EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
- EFX_OWORD_VAL(fatal_intr),
- error ? "disabling bus mastering" : "no recognised error");
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
@@ -1301,8 +1352,9 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
- EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
- EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
}
/* Disable both devices */
@@ -1319,11 +1371,13 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
jiffies + EFX_INT_ERROR_EXPIRE * HZ;
}
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
- EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
- EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
- "NIC will be disabled\n");
+ netif_err(efx, hw, efx->net_dev,
+			  "SYSTEM ERROR - max number of errors seen. "
+ "NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
@@ -1386,8 +1440,9 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
if (result == IRQ_HANDLED) {
efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
}
return result;
@@ -1408,8 +1463,9 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
int syserr;
efx->last_irq_cpu = raw_smp_processor_id();
- EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Check to see if we have a serious error condition */
if (channel->channel == efx->fatal_irq_level) {
@@ -1428,22 +1484,21 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
-static void efx_setup_rss_indir_table(struct efx_nic *efx)
+void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
- int i = 0;
- unsigned long offset;
+ size_t i = 0;
efx_dword_t dword;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return;
- for (offset = FR_BZ_RX_INDIRECTION_TBL;
- offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
- offset += 0x10) {
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- i % efx->n_rx_channels);
- efx_writed(efx, &dword, offset);
- i++;
+ efx->rx_indir_table[i]);
+ efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
}
}
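
The rewritten function now pushes efx->rx_indir_table instead of computing entries inline. A sketch of how the table might be initialised to mirror the round-robin spread of the removed loop (an assumption; the actual setup lives elsewhere in the patch):

static void efx_init_rx_indir_table(struct efx_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;
	efx_nic_push_rx_indir_table(efx);
}
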
@@ -1465,8 +1520,9 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
efx->name, efx);
if (rc) {
- EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
- efx->pci_dev->irq);
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook legacy IRQ %d\n",
+ efx->pci_dev->irq);
goto fail1;
}
return 0;
@@ -1478,7 +1534,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
IRQF_PROBE_SHARED, /* Not shared */
channel->name, channel);
if (rc) {
- EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
+ netif_err(efx, drv, efx->net_dev,
+ "failed to hook IRQ %d\n", channel->irq);
goto fail2;
}
}
@@ -1576,7 +1633,7 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
- efx_setup_rss_indir_table(efx);
+ efx_nic_push_rx_indir_table(efx);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
@@ -1598,3 +1655,269 @@ void efx_nic_init_common(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}
+
+/* Register dump */
+
+#define REGISTER_REVISION_A 1
+#define REGISTER_REVISION_B 2
+#define REGISTER_REVISION_C 3
+#define REGISTER_REVISION_Z 3 /* latest revision */
+
+struct efx_nic_reg {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+};
+
+#define REGISTER(name, min_rev, max_rev) { \
+ FR_ ## min_rev ## max_rev ## _ ## name, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \
+}
+#define REGISTER_AA(name) REGISTER(name, A, A)
+#define REGISTER_AB(name) REGISTER(name, A, B)
+#define REGISTER_AZ(name) REGISTER(name, A, Z)
+#define REGISTER_BB(name) REGISTER(name, B, B)
+#define REGISTER_BZ(name) REGISTER(name, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, C, Z)
+
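/* Expansion example for the macros above: REGISTER_AZ(RX_CFG)
 * becomes REGISTER(RX_CFG, A, Z), i.e.
 *
 *	{ FR_AZ_RX_CFG, REGISTER_REVISION_A, REGISTER_REVISION_Z }
 *
 * which packs down to { FR_AZ_RX_CFG, 1, 3 } in the 24/2/2-bit
 * fields of struct efx_nic_reg. */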
+static const struct efx_nic_reg efx_nic_regs[] = {
+ REGISTER_AZ(ADR_REGION),
+ REGISTER_AZ(INT_EN_KER),
+ REGISTER_BZ(INT_EN_CHAR),
+ REGISTER_AZ(INT_ADR_KER),
+ REGISTER_BZ(INT_ADR_CHAR),
+ /* INT_ACK_KER is WO */
+ /* INT_ISR0 is RC */
+ REGISTER_AZ(HW_INIT),
+ REGISTER_CZ(USR_EV_CFG),
+ REGISTER_AB(EE_SPI_HCMD),
+ REGISTER_AB(EE_SPI_HADR),
+ REGISTER_AB(EE_SPI_HDATA),
+ REGISTER_AB(EE_BASE_PAGE),
+ REGISTER_AB(EE_VPD_CFG0),
+ /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+ /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+ /* PCIE_CORE_INDIRECT is indirect */
+ REGISTER_AB(NIC_STAT),
+ REGISTER_AB(GPIO_CTL),
+ REGISTER_AB(GLB_CTL),
+ /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+ REGISTER_BZ(DP_CTRL),
+ REGISTER_AZ(MEM_STAT),
+ REGISTER_AZ(CS_DEBUG),
+ REGISTER_AZ(ALTERA_BUILD),
+ REGISTER_AZ(CSR_SPARE),
+ REGISTER_AB(PCIE_SD_CTL0123),
+ REGISTER_AB(PCIE_SD_CTL45),
+ REGISTER_AB(PCIE_PCS_CTL_STAT),
+ /* DEBUG_DATA_OUT is not used */
+ /* DRV_EV is WO */
+ REGISTER_AZ(EVQ_CTL),
+ REGISTER_AZ(EVQ_CNT1),
+ REGISTER_AZ(EVQ_CNT2),
+ REGISTER_AZ(BUF_TBL_CFG),
+ REGISTER_AZ(SRM_RX_DC_CFG),
+ REGISTER_AZ(SRM_TX_DC_CFG),
+ REGISTER_AZ(SRM_CFG),
+ /* BUF_TBL_UPD is WO */
+ REGISTER_AZ(SRM_UPD_EVQ),
+ REGISTER_AZ(SRAM_PARITY),
+ REGISTER_AZ(RX_CFG),
+ REGISTER_BZ(RX_FILTER_CTL),
+ /* RX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(RX_DC_CFG),
+ REGISTER_AZ(RX_DC_PF_WM),
+ REGISTER_BZ(RX_RSS_TKEY),
+ /* RX_NODESC_DROP is RC */
+ REGISTER_AA(RX_SELF_RST),
+ /* RX_DEBUG, RX_PUSH_DROP are not used */
+ REGISTER_CZ(RX_RSS_IPV6_REG1),
+ REGISTER_CZ(RX_RSS_IPV6_REG2),
+ REGISTER_CZ(RX_RSS_IPV6_REG3),
+ /* TX_FLUSH_DESCQ is WO */
+ REGISTER_AZ(TX_DC_CFG),
+ REGISTER_AA(TX_CHKSM_CFG),
+ REGISTER_AZ(TX_CFG),
+ /* TX_PUSH_DROP is not used */
+ REGISTER_AZ(TX_RESERVED),
+ REGISTER_BZ(TX_PACE),
+ /* TX_PACE_DROP_QID is RC */
+ REGISTER_BB(TX_VLAN),
+ REGISTER_BZ(TX_IPFIL_PORTEN),
+ REGISTER_AB(MD_TXD),
+ REGISTER_AB(MD_RXD),
+ REGISTER_AB(MD_CS),
+ REGISTER_AB(MD_PHY_ADR),
+ REGISTER_AB(MD_ID),
+ /* MD_STAT is RC */
+ REGISTER_AB(MAC_STAT_DMA),
+ REGISTER_AB(MAC_CTRL),
+ REGISTER_BB(GEN_MODE),
+ REGISTER_AB(MAC_MC_HASH_REG0),
+ REGISTER_AB(MAC_MC_HASH_REG1),
+ REGISTER_AB(GM_CFG1),
+ REGISTER_AB(GM_CFG2),
+ /* GM_IPG and GM_HD are not used */
+ REGISTER_AB(GM_MAX_FLEN),
+ /* GM_TEST is not used */
+ REGISTER_AB(GM_ADR1),
+ REGISTER_AB(GM_ADR2),
+ REGISTER_AB(GMF_CFG0),
+ REGISTER_AB(GMF_CFG1),
+ REGISTER_AB(GMF_CFG2),
+ REGISTER_AB(GMF_CFG3),
+ REGISTER_AB(GMF_CFG4),
+ REGISTER_AB(GMF_CFG5),
+ REGISTER_BB(TX_SRC_MAC_CTL),
+ REGISTER_AB(XM_ADR_LO),
+ REGISTER_AB(XM_ADR_HI),
+ REGISTER_AB(XM_GLB_CFG),
+ REGISTER_AB(XM_TX_CFG),
+ REGISTER_AB(XM_RX_CFG),
+ REGISTER_AB(XM_MGT_INT_MASK),
+ REGISTER_AB(XM_FC),
+ REGISTER_AB(XM_PAUSE_TIME),
+ REGISTER_AB(XM_TX_PARAM),
+ REGISTER_AB(XM_RX_PARAM),
+ /* XM_MGT_INT_MSK (note no 'A') is RC */
+ REGISTER_AB(XX_PWR_RST),
+ REGISTER_AB(XX_SD_CTL),
+ REGISTER_AB(XX_TXDRV_CTL),
+ /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+ /* XX_CORE_STAT is partly RC */
+};
+
+struct efx_nic_reg_table {
+ u32 offset:24;
+ u32 min_revision:2, max_revision:2;
+ u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+ offset, \
+ REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \
+ step, rows \
+}
+#define REGISTER_TABLE(name, min_rev, max_rev) \
+ REGISTER_TABLE_DIMENSIONS( \
+ name, FR_ ## min_rev ## max_rev ## _ ## name, \
+ min_rev, max_rev, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+ FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+#define REGISTER_TABLE_BB_CZ(name) \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_BB_ ## name ## _ROWS), \
+ REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \
+ FR_BZ_ ## name ## _STEP, \
+ FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+ /* DRIVER is not used */
+ /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+ REGISTER_TABLE_BB(TX_IPFIL_TBL),
+ REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+ REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+ REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+ REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+ /* The register buffer is allocated with slab, so we can't
+ * reasonably read all of the buffer table (up to 8MB!).
+ * However this driver will only use a few entries. Reading
+ * 1K entries allows for some expansion of queue count and
+ * size before we need to change the version. */
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+ A, A, 8, 1024),
+ REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+ B, Z, 8, 1024),
+ /* RX_FILTER_TBL{0,1} is huge and not used by this driver */
+ REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_BB_CZ(TIMER_TBL),
+ REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+ REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+ /* TX_FILTER_TBL0 is huge and not used by this driver */
+ REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+ REGISTER_TABLE_CZ(MC_TREG_SMEM),
+ /* MSIX_PBA_TABLE is not mapped */
+	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+ size_t len = 0;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++)
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision)
+ len += sizeof(efx_oword_t);
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++)
+ if (efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision)
+ len += table->rows * min_t(size_t, table->step, 16);
+
+ return len;
+}
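
A worked example of the sizing rule above: every matching single register contributes sizeof(efx_oword_t) = 16 bytes, while a table contributes rows * min(step, 16). The 1024-row BUF_FULL_TBL entries with step 8 therefore add 8 KiB each, and a step-32 interleaved table adds only 16 bytes per row, since efx_nic_get_regs() reads one 128-bit register per row in that case.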
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+ const struct efx_nic_reg *reg;
+ const struct efx_nic_reg_table *table;
+
+ for (reg = efx_nic_regs;
+ reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+ reg++) {
+ if (efx->type->revision >= reg->min_revision &&
+ efx->type->revision <= reg->max_revision) {
+ efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+ buf += sizeof(efx_oword_t);
+ }
+ }
+
+ for (table = efx_nic_reg_tables;
+ table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+ table++) {
+ size_t size, i;
+
+ if (!(efx->type->revision >= table->min_revision &&
+ efx->type->revision <= table->max_revision))
+ continue;
+
+ size = min_t(size_t, table->step, 16);
+
+ for (i = 0; i < table->rows; i++) {
+ switch (table->step) {
+ case 4: /* 32-bit register or SRAM */
+ efx_readd_table(efx, buf, table->offset, i);
+ break;
+ case 8: /* 64-bit SRAM */
+ efx_sram_readq(efx,
+ efx->membase + table->offset,
+ buf, i);
+ break;
+ case 16: /* 128-bit register */
+ efx_reado_table(efx, buf, table->offset, i);
+ break;
+ case 32: /* 128-bit register, interleaved */
+ efx_reado_table(efx, buf, table->offset, 2 * i);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ buf += size;
+ }
+ }
+}
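
These two helpers are shaped for the ethtool register-dump interface; the matching glue lands in ethtool.c in this patch but is not shown in this hunk. A hedged sketch of what that wrapper plausibly looks like:

static int efx_ethtool_get_regs_len(struct net_device *net_dev)
{
	return efx_nic_get_regs_len(netdev_priv(net_dev));
}

static void efx_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}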
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index bbc2c0c2f84..0438dc98722 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -142,7 +142,6 @@ struct siena_nic_data {
u32 fw_build;
struct efx_mcdi_iface mcdi;
int wol_filter_id;
- u8 ipv6_rss_key[40];
};
extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -190,8 +189,8 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_generate_test_event(struct efx_channel *channel,
- unsigned int magic);
+extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
extern void efx_nic_generate_interrupt(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
@@ -208,6 +207,7 @@ extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx);
+extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len);
@@ -222,6 +222,9 @@ extern int efx_nic_test_registers(struct efx_nic *efx,
const struct efx_nic_register_test *regs,
size_t n_regs);
+extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
+extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
/**************************************************************************
*
* Falcon MAC stats
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e077bef08a5..68813d1d85f 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -91,9 +91,10 @@ static int qt2025c_wait_heartbeat(struct efx_nic *efx)
if (time_after(jiffies, timeout)) {
/* Some cables have EEPROMs that conflict with the
* PHY's on-board EEPROM so it cannot load firmware */
- EFX_ERR(efx, "If an SFP+ direct attach cable is"
- " connected, please check that it complies"
- " with the SFP+ specification\n");
+ netif_err(efx, hw, efx->net_dev,
+ "If an SFP+ direct attach cable is"
+ " connected, please check that it complies"
+ " with the SFP+ specification\n");
return -ETIMEDOUT;
}
msleep(QT2025C_HEARTB_WAIT);
@@ -145,7 +146,8 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
/* Bug 17689: occasionally heartbeat starts but firmware status
* code never progresses beyond 0x00. Try again, once, after
* restarting execution of the firmware image. */
- EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+ netif_dbg(efx, hw, efx->net_dev,
+ "bashing QT2025C microcontroller\n");
qt2025c_restart_firmware(efx);
rc = qt2025c_wait_heartbeat(efx);
if (rc != 0)
@@ -165,11 +167,12 @@ static void qt2025c_firmware_id(struct efx_nic *efx)
for (i = 0; i < sizeof(firmware_id); i++)
firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
PCS_FW_PRODUCT_CODE_1 + i);
- EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
- (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
- firmware_id[3] >> 4, firmware_id[3] & 0xf,
- firmware_id[4], firmware_id[5],
- firmware_id[6], firmware_id[7], firmware_id[8]);
+ netif_info(efx, probe, efx->net_dev,
+ "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+ (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+ firmware_id[3] >> 4, firmware_id[3] & 0xf,
+ firmware_id[4], firmware_id[5],
+ firmware_id[6], firmware_id[7], firmware_id[8]);
phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
((firmware_id[3] & 0x0f) << 16) |
(firmware_id[4] << 8) | firmware_id[5];
@@ -198,7 +201,7 @@ static void qt2025c_bug17190_workaround(struct efx_nic *efx)
}
if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
- EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+ netif_dbg(efx, hw, efx->net_dev, "bashing QT2025C PMA/PMD\n");
efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
MDIO_PMA_CTRL1_LOOPBACK, true);
msleep(100);
@@ -231,7 +234,8 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
reg = efx_mdio_read(efx, 1, 0xc319);
if ((reg & 0x0038) == phy_op_mode)
return 0;
- EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+ netif_dbg(efx, hw, efx->net_dev, "Switching PHY to mode 0x%04x\n",
+ phy_op_mode);
/* This sequence replicates the register writes configured in the boot
* EEPROM (including the differences between board revisions), except
@@ -287,8 +291,9 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
/* Wait for the microcontroller to be ready again */
rc = qt2025c_wait_reset(efx);
if (rc < 0) {
- EFX_ERR(efx, "PHY microcontroller reset during mode switch "
- "timed out\n");
+ netif_err(efx, hw, efx->net_dev,
+ "PHY microcontroller reset during mode switch "
+ "timed out\n");
return rc;
}
@@ -324,7 +329,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
return 0;
fail:
- EFX_ERR(efx, "PHY reset timed out\n");
+ netif_err(efx, hw, efx->net_dev, "PHY reset timed out\n");
return rc;
}
@@ -353,14 +358,15 @@ static int qt202x_phy_init(struct efx_nic *efx)
rc = qt202x_reset_phy(efx);
if (rc) {
- EFX_ERR(efx, "PHY init failed\n");
+ netif_err(efx, probe, efx->net_dev, "PHY init failed\n");
return rc;
}
devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
- EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
- devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
- efx_mdio_id_rev(devid));
+ netif_info(efx, probe, efx->net_dev,
+ "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
+ devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
+ efx_mdio_id_rev(devid));
if (efx->phy_type == PHY_TYPE_QT2025C)
qt2025c_firmware_id(efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f5..799c461ce7b 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -25,6 +25,9 @@
/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH 8
+/* Maximum size of a buffer sharing a page */
+#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
@@ -98,155 +101,151 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
return PAGE_SIZE << efx->rx_buffer_order;
}
+static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
+ return __le32_to_cpup((const __le32 *)(buf->data - 4));
+#else
+ const u8 *data = (const u8 *)(buf->data - 4);
+ return ((u32)data[0] |
+ (u32)data[1] << 8 |
+ (u32)data[2] << 16 |
+ (u32)data[3] << 24);
+#endif
+}
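
Reading the hash at buf->data - 4 only makes sense alongside the rx_buffer_hash_size handling later in this patch: Siena prepends a 16-byte prefix to each received frame (.rx_buffer_hash_size = 0x10 in siena.c below), __efx_rx_packet() advances rx_buf->data past that prefix, and the Toeplitz hash occupies its last 4 bytes. An inferred picture of the resulting layout (the first 12 prefix bytes are not interpreted here):

	[ 12 bytes of prefix | 4-byte RSS hash | Ethernet frame ... ]
	                                       ^ rx_buf->data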
/**
- * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
*
* @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
*
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one. Return a negative error code or 0
+ * on success. May fail having inserted fewer than EFX_RX_BATCH
+ * buffers.
*/
-static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct net_device *net_dev = efx->net_dev;
+ struct efx_rx_buffer *rx_buf;
int skb_len = efx->rx_buffer_len;
+ unsigned index, count;
- rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!rx_buf->skb))
- return -ENOMEM;
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ rx_buf = efx_rx_buffer(rx_queue, index);
- /* Adjust the SKB for padding and checksum */
- skb_reserve(rx_buf->skb, NET_IP_ALIGN);
- rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->data = (char *)rx_buf->skb->data;
- rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+ rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+ if (unlikely(!rx_buf->skb))
+ return -ENOMEM;
+ rx_buf->page = NULL;
- rx_buf->dma_addr = pci_map_single(efx->pci_dev,
- rx_buf->data, rx_buf->len,
- PCI_DMA_FROMDEVICE);
+ /* Adjust the SKB for padding and checksum */
+ skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+ rx_buf->len = skb_len - NET_IP_ALIGN;
+ rx_buf->data = (char *)rx_buf->skb->data;
+ rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ rx_buf->data, rx_buf->len,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+ rx_buf->dma_addr))) {
+ dev_kfree_skb_any(rx_buf->skb);
+ rx_buf->skb = NULL;
+ return -EIO;
+ }
- if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
- dev_kfree_skb_any(rx_buf->skb);
- rx_buf->skb = NULL;
- return -EIO;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_skb_count;
}
return 0;
}
/**
- * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
- * @rx_buf: RX buffer structure to populate
*
- * This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information. Return a negative error code or 0 on success.
+ * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
+ * and populates a struct efx_rx_buffer for each one. Return a negative error
+ * code or 0 on success. If a single page can be split between two buffers,
+ * then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- int bytes, space, offset;
-
- bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-
- /* If there is space left in the previously allocated page,
- * then use it. Otherwise allocate a new one */
- rx_buf->page = rx_queue->buf_page;
- if (rx_buf->page == NULL) {
- dma_addr_t dma_addr;
-
- rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
- efx->rx_buffer_order);
- if (unlikely(rx_buf->page == NULL))
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+
+ /* We can split a page between two buffers */
+ BUILD_BUG_ON(EFX_RX_BATCH & 1);
+
+ for (count = 0; count < EFX_RX_BATCH; ++count) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
return -ENOMEM;
-
- dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
- 0, efx_rx_buf_size(efx),
+ dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
-
if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
- __free_pages(rx_buf->page, efx->rx_buffer_order);
- rx_buf->page = NULL;
+ __free_pages(page, efx->rx_buffer_order);
return -EIO;
}
-
- rx_queue->buf_page = rx_buf->page;
- rx_queue->buf_dma_addr = dma_addr;
- rx_queue->buf_data = (page_address(rx_buf->page) +
- EFX_PAGE_IP_ALIGN);
- }
-
- rx_buf->len = bytes;
- rx_buf->data = rx_queue->buf_data;
- offset = efx_rx_buf_offset(rx_buf);
- rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
-
- /* Try to pack multiple buffers per page */
- if (efx->rx_buffer_order == 0) {
- /* The next buffer starts on the next 512 byte boundary */
- rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
- offset += ((bytes + 0x1ff) & ~0x1ff);
-
- space = efx_rx_buf_size(efx) - offset;
- if (space >= bytes) {
- /* Refs dropped on kernel releasing each skb */
- get_page(rx_queue->buf_page);
- goto out;
+ page_addr = page_address(page);
+ state = page_addr;
+ state->refcnt = 0;
+ state->dma_addr = dma_addr;
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->skb = NULL;
+ rx_buf->page = page;
+ rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ ++rx_queue->added_count;
+ ++rx_queue->alloc_page_count;
+ ++state->refcnt;
+
+ if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
+ /* Use the second half of the page */
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
}
}
- /* This is the final RX buffer for this page, so mark it for
- * unmapping */
- rx_queue->buf_page = NULL;
- rx_buf->unmap_addr = rx_queue->buf_dma_addr;
-
- out:
return 0;
}
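
The per-page bookkeeping above relies on struct efx_rx_page_state, which this patch stores at the start of every receive page (its definition sits earlier in rx.c, outside this hunk). Reconstructed from the usage here, a minimal sketch of the fields it must carry:

struct efx_rx_page_state {
	unsigned refcnt;	/* RX buffers still referencing this page */
	dma_addr_t dma_addr;	/* DMA mapping of the whole page */
};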
-/* This allocates memory for a new receive buffer, maps it for DMA,
- * and populates a struct efx_rx_buffer with the relevant
- * information.
- */
-static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *new_rx_buf)
-{
- int rc = 0;
-
- if (rx_queue->channel->rx_alloc_push_pages) {
- new_rx_buf->skb = NULL;
- rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
- rx_queue->alloc_page_count++;
- } else {
- new_rx_buf->page = NULL;
- rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
- rx_queue->alloc_skb_count++;
- }
-
- if (unlikely(rc < 0))
- EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
- rx_queue->queue, rc);
- return rc;
-}
-
static void efx_unmap_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf)
{
if (rx_buf->page) {
+ struct efx_rx_page_state *state;
+
EFX_BUG_ON_PARANOID(rx_buf->skb);
- if (rx_buf->unmap_addr) {
- pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+
+ state = page_address(rx_buf->page);
+ if (--state->refcnt == 0) {
+ pci_unmap_page(efx->pci_dev,
+ state->dma_addr,
efx_rx_buf_size(efx),
PCI_DMA_FROMDEVICE);
- rx_buf->unmap_addr = 0;
}
} else if (likely(rx_buf->skb)) {
pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
@@ -273,31 +272,84 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
efx_free_rx_buffer(rx_queue->efx, rx_buf);
}
+/* Attempt to resurrect the other receive buffer that used to share this page,
+ * which had previously been passed up to the kernel and freed. */
+static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_rx_page_state *state = page_address(rx_buf->page);
+ struct efx_rx_buffer *new_buf;
+ unsigned fill_level, index;
+
+ /* +1 because efx_rx_packet() incremented removed_count. +1 because
+ * we'd like to insert an additional descriptor whilst leaving
+ * EFX_RXD_HEAD_ROOM for the non-recycle path */
+ fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
+ if (unlikely(fill_level >= EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM)) {
+ /* We could place "state" on a list, and drain the list in
+ * efx_fast_push_rx_descriptors(). For now, this will do. */
+ return;
+ }
+
+ ++state->refcnt;
+ get_page(rx_buf->page);
+
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ new_buf = efx_rx_buffer(rx_queue, index);
+ new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
+ new_buf->skb = NULL;
+ new_buf->page = rx_buf->page;
+ new_buf->data = (void *)
+ ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
+ new_buf->len = rx_buf->len;
+ ++rx_queue->added_count;
+}
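
The XOR with PAGE_SIZE >> 1 works because this path is only taken when two buffers share one page (the caller checks rx_buffer_len <= EFX_RX_HALF_PAGE), so flipping that single address bit maps a buffer onto the matching offset in the other half of the same page. A worked example with 4 KiB pages, where page_dma is an illustrative page mapping:

	/* PAGE_SIZE >> 1 == 0x800 on 4 KiB pages */
	dma_addr_t first  = page_dma + 0x040;		/* first half */
	dma_addr_t second = first ^ (PAGE_SIZE >> 1);	/* page_dma + 0x840 */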
+
+/* Recycle the given rx buffer directly back into the rx_queue. There is
+ * always room to add this buffer, because we've just popped a buffer. */
+static void efx_recycle_rx_buffer(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+ struct efx_rx_buffer *new_buf;
+ unsigned index;
+
+ if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+ page_count(rx_buf->page) == 1)
+ efx_resurrect_rx_buffer(rx_queue, rx_buf);
+
+ index = rx_queue->added_count & EFX_RXQ_MASK;
+ new_buf = efx_rx_buffer(rx_queue, index);
+
+ memcpy(new_buf, rx_buf, sizeof(*new_buf));
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
+ ++rx_queue->added_count;
+}
+
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
- * @retry: Recheck the fill level
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
*/
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
- int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_rx_buffer *rx_buf;
- unsigned fill_level, index;
- int i, space, rc = 0;
+ struct efx_channel *channel = rx_queue->channel;
+ unsigned fill_level;
+ int space, rc = 0;
- /* Calculate current fill level. Do this outside the lock,
- * because most of the time we'll end up not wanting to do the
- * fill anyway.
- */
+ /* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
- /* Don't fill if we don't need to */
if (fill_level >= rx_queue->fast_fill_trigger)
- return 0;
+ goto out;
/* Record minimum fill level */
if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,99 +357,47 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
rx_queue->min_fill = fill_level;
}
- /* Acquire RX add lock. If this lock is contended, then a fast
- * fill must already be in progress (e.g. in the refill
- * tasklet), so we don't need to do anything
- */
- if (!spin_trylock_bh(&rx_queue->add_lock))
- return -1;
-
- retry:
- /* Recalculate current fill level now that we have the lock */
- fill_level = (rx_queue->added_count - rx_queue->removed_count);
- EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
space = rx_queue->fast_fill_limit - fill_level;
if (space < EFX_RX_BATCH)
- goto out_unlock;
+ goto out;
- EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
- " level %d to level %d using %s allocation\n",
- rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
- rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filling descriptor ring from"
+ " level %d to level %d using %s allocation\n",
+ rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+ channel->rx_alloc_push_pages ? "page" : "skb");
do {
- for (i = 0; i < EFX_RX_BATCH; ++i) {
- index = rx_queue->added_count & EFX_RXQ_MASK;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rc = efx_init_rx_buffer(rx_queue, rx_buf);
- if (unlikely(rc))
- goto out;
- ++rx_queue->added_count;
+ if (channel->rx_alloc_push_pages)
+ rc = efx_init_rx_buffers_page(rx_queue);
+ else
+ rc = efx_init_rx_buffers_skb(rx_queue);
+ if (unlikely(rc)) {
+ /* Ensure that we don't leave the rx queue empty */
+ if (rx_queue->added_count == rx_queue->removed_count)
+ efx_schedule_slow_fill(rx_queue);
+ goto out;
}
} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
- EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
- "to level %d\n", rx_queue->queue,
- rx_queue->added_count - rx_queue->removed_count);
+ netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+ "RX queue %d fast-filled descriptor ring "
+ "to level %d\n", rx_queue->queue,
+ rx_queue->added_count - rx_queue->removed_count);
out:
- /* Send write pointer to card. */
- efx_nic_notify_rx_desc(rx_queue);
-
- /* If the fast fill is running inside from the refill tasklet, then
- * for SMP systems it may be running on a different CPU to
- * RX event processing, which means that the fill level may now be
- * out of date. */
- if (unlikely(retry && (rc == 0)))
- goto retry;
-
- out_unlock:
- spin_unlock_bh(&rx_queue->add_lock);
-
- return rc;
-}
-
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue: RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
-{
- int rc;
-
- rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
- if (unlikely(rc)) {
- /* Schedule the work item to run immediately. The hope is
- * that work is immediately pending to free some memory
- * (e.g. an RX event or TX completion)
- */
- efx_schedule_slow_fill(rx_queue, 0);
- }
+ if (rx_queue->notified_count != rx_queue->added_count)
+ efx_nic_notify_rx_desc(rx_queue);
}
-void efx_rx_work(struct work_struct *data)
+void efx_rx_slow_fill(unsigned long context)
{
- struct efx_rx_queue *rx_queue;
- int rc;
-
- rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
- if (unlikely(!rx_queue->channel->enabled))
- return;
-
- EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
- "%d\n", rx_queue->queue, raw_smp_processor_id());
+ struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+ struct efx_channel *channel = rx_queue->channel;
+ /* Post an event to cause NAPI to run and refill the queue */
+ efx_nic_generate_fill_event(channel);
++rx_queue->slow_fill_count;
- /* Push new RX descriptors, allowing at least 1 jiffy for
- * the kernel to free some more memory. */
- rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
- if (rc)
- efx_schedule_slow_fill(rx_queue, 1);
}
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
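
efx_rx_slow_fill() is now a timer callback rather than a work item: it cannot allocate memory itself, so it generates a fill event that forces NAPI to run and re-enter efx_fast_push_rx_descriptors(). The timer setup and the reworked efx_schedule_slow_fill() live in efx.c in this patch; a hedged sketch of the expected shape, using the timer API of this kernel era (the 100 ms retry interval is an assumption):

	/* At queue init time: */
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}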
@@ -417,10 +417,12 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*discard = true;
if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
- EFX_ERR_RL(efx, " RX queue %d seriously overlength "
- "RX event (0x%x > 0x%x+0x%x). Leaking\n",
- rx_queue->queue, len, max_len,
- efx->type->rx_buffer_padding);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d seriously overlength "
+ "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+ rx_queue->queue, len, max_len,
+ efx->type->rx_buffer_padding);
/* If this buffer was skb-allocated, then the meta
* data at the end of the skb will be trashed. So
* we have no choice but to leak the fragment.
@@ -428,8 +430,11 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
*leak_packet = (rx_buf->skb != NULL);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
- EFX_ERR_RL(efx, " RX queue %d overlength RX event "
- "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
+ if (net_ratelimit())
+ netif_err(efx, rx_err, efx->net_dev,
+ " RX queue %d overlength RX event "
+ "(0x%x > 0x%x)\n",
+ rx_queue->queue, len, max_len);
}
rx_queue->channel->n_rx_overlength++;
@@ -449,6 +454,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
/* Pass the skb/page into the LRO engine */
if (rx_buf->page) {
+ struct efx_nic *efx = channel->efx;
struct page *page = rx_buf->page;
struct sk_buff *skb;
@@ -461,6 +467,9 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
return;
}
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(rx_buf);
+
skb_shinfo(skb)->frags[0].page = page;
skb_shinfo(skb)->frags[0].page_offset =
efx_rx_buf_offset(rx_buf);
@@ -498,6 +507,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
unsigned int len, bool checksummed, bool discard)
{
struct efx_nic *efx = rx_queue->efx;
+ struct efx_channel *channel = rx_queue->channel;
struct efx_rx_buffer *rx_buf;
bool leak_packet = false;
@@ -516,21 +526,23 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
efx_rx_packet__check_len(rx_queue, rx_buf, len,
&discard, &leak_packet);
- EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
- rx_queue->queue, index,
- (unsigned long long)rx_buf->dma_addr, len,
- (checksummed ? " [SUMMED]" : ""),
- (discard ? " [DISCARD]" : ""));
+ netif_vdbg(efx, rx_status, efx->net_dev,
+ "RX queue %d received id %x at %llx+%x %s%s\n",
+ rx_queue->queue, index,
+ (unsigned long long)rx_buf->dma_addr, len,
+ (checksummed ? " [SUMMED]" : ""),
+ (discard ? " [DISCARD]" : ""));
/* Discard packet, if instructed to do so */
if (unlikely(discard)) {
if (unlikely(leak_packet))
- rx_queue->channel->n_skbuff_leaks++;
+ channel->n_skbuff_leaks++;
else
- /* We haven't called efx_unmap_rx_buffer yet,
- * so fini the entire rx_buffer here */
- efx_fini_rx_buffer(rx_queue, rx_buf);
- return;
+ efx_recycle_rx_buffer(channel, rx_buf);
+
+ /* Don't hold off the previous receive */
+ rx_buf = NULL;
+ goto out;
}
/* Release card resources - assumes all RX buffers consumed in-order
@@ -547,6 +559,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
* prefetched into cache.
*/
rx_buf->len = len;
+out:
if (rx_queue->channel->rx_pkt)
__efx_rx_packet(rx_queue->channel,
rx_queue->channel->rx_pkt,
@@ -562,6 +575,9 @@ void __efx_rx_packet(struct efx_channel *channel,
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
+ rx_buf->data += efx->type->rx_buffer_hash_size;
+ rx_buf->len -= efx->type->rx_buffer_hash_size;
+
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
@@ -574,8 +590,12 @@ void __efx_rx_packet(struct efx_channel *channel,
if (rx_buf->skb) {
prefetch(skb_shinfo(rx_buf->skb));
+ skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
skb_put(rx_buf->skb, rx_buf->len);
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+
/* Move past the ethernet header. rx_buf->data still points
* at the ethernet header */
rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
@@ -633,7 +653,8 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
unsigned int rxq_size;
int rc;
- EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
+ netif_dbg(efx, probe, efx->net_dev,
+ "creating RX queue %d\n", rx_queue->queue);
/* Allocate RX buffers */
rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
@@ -653,7 +674,8 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
unsigned int max_fill, trigger, limit;
- EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "initialising RX queue %d\n", rx_queue->queue);
/* Initialise ptr fields */
rx_queue->added_count = 0;
@@ -680,8 +702,10 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
int i;
struct efx_rx_buffer *rx_buf;
- EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "shutting down RX queue %d\n", rx_queue->queue);
+ del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers NB start at index 0 not current HW ptr */
@@ -691,21 +715,12 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
-
- /* For a page that is part-way through splitting into RX buffers */
- if (rx_queue->buf_page != NULL) {
- pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
- efx_rx_buf_size(rx_queue->efx),
- PCI_DMA_FROMDEVICE);
- __free_pages(rx_queue->buf_page,
- rx_queue->efx->rx_buffer_order);
- rx_queue->buf_page = NULL;
- }
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
- EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
+ netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+ "destroying RX queue %d\n", rx_queue->queue);
efx_nic_remove_rx(rx_queue);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 371e86cc090..85f015f005d 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -38,7 +38,7 @@ struct efx_loopback_payload {
struct udphdr udp;
__be16 iteration;
const char msg[64];
-} __attribute__ ((packed));
+} __packed;
/* Loopback test source MAC address */
static const unsigned char payload_source[ETH_ALEN] = {
@@ -123,7 +123,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
{
struct efx_channel *channel;
- EFX_LOG(efx, "testing interrupts\n");
+ netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
/* Reset interrupt flag */
@@ -142,16 +142,17 @@ static int efx_test_interrupts(struct efx_nic *efx,
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
- EFX_LOG(efx, "waiting for test interrupt\n");
+ netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
schedule_timeout_uninterruptible(HZ / 10);
if (efx->last_irq_cpu >= 0)
goto success;
- EFX_ERR(efx, "timed out waiting for interrupt\n");
+ netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
return -ETIMEDOUT;
success:
- EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
+ netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+ INT_MODE(efx),
efx->last_irq_cpu);
tests->interrupt = 1;
return 0;
@@ -161,23 +162,18 @@ static int efx_test_interrupts(struct efx_nic *efx,
static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
- unsigned int magic, count;
-
- /* Channel specific code, limited to 20 bits */
- magic = (0x00010150 + channel->channel);
- EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
- channel->channel, magic);
+ struct efx_nic *efx = channel->efx;
+ unsigned int magic_count, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- /* Reset flag and zero magic word */
+ magic_count = channel->magic_count;
channel->efx->last_irq_cpu = -1;
- channel->eventq_magic = 0;
smp_wmb();
- efx_nic_generate_test_event(channel, magic);
+ efx_nic_generate_test_event(channel);
/* Wait for arrival of interrupt */
count = 0;
@@ -187,33 +183,36 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
if (channel->work_pending)
efx_process_channel_now(channel);
- if (channel->eventq_magic == magic)
+ if (channel->magic_count != magic_count)
goto eventq_ok;
} while (++count < 2);
- EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n",
- channel->channel);
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d timed out waiting for event queue\n",
+ channel->channel);
/* See if interrupt arrived */
if (channel->efx->last_irq_cpu >= 0) {
- EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d "
- "during event queue test\n", channel->channel,
- raw_smp_processor_id());
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d saw interrupt on CPU%d "
+ "during event queue test\n", channel->channel,
+ raw_smp_processor_id());
tests->eventq_int[channel->channel] = 1;
}
/* Check to see if event was received even if interrupt wasn't */
efx_process_channel_now(channel);
- if (channel->eventq_magic == magic) {
- EFX_ERR(channel->efx, "channel %d event was generated, but "
- "failed to trigger an interrupt\n", channel->channel);
+ if (channel->magic_count != magic_count) {
+ netif_err(efx, drv, efx->net_dev,
+ "channel %d event was generated, but "
+ "failed to trigger an interrupt\n", channel->channel);
tests->eventq_dma[channel->channel] = 1;
}
return -ETIMEDOUT;
eventq_ok:
- EFX_LOG(channel->efx, "channel %d event queue passed\n",
- channel->channel);
+ netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n",
+ channel->channel);
tests->eventq_dma[channel->channel] = 1;
tests->eventq_int[channel->channel] = 1;
tests->eventq_poll[channel->channel] = 1;
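
The test no longer matches a magic payload; it only checks that channel->magic_count advanced. That implies the generated-event handler in nic.c (changed elsewhere in this patch) recognises the per-channel test and fill events and reacts roughly like this sketch, in which the macro names are assumptions:

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel))
		++channel->magic_count;
	else if (magic == EFX_CHANNEL_MAGIC_FILL(channel))
		/* the slow-fill timer's event: top up the RX ring */
		efx_fast_push_rx_descriptors(rx_queue);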
@@ -266,51 +265,57 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
/* Check that header exists */
if (pkt_len < sizeof(received->header)) {
- EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
- "test\n", pkt_len, LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw runt RX packet (length %d) in %s loopback "
+ "test\n", pkt_len, LOOPBACK_MODE(efx));
goto err;
}
/* Check that the ethernet header exists */
if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
- EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw non-loopback RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check packet length */
if (pkt_len != sizeof(*payload)) {
- EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in "
- "%s loopback test\n", pkt_len, (int)sizeof(*payload),
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw incorrect RX packet length %d (wanted %d) in "
+ "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check that IP header matches */
if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
- EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted IP header in %s loopback test\n",
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check that msg and padding matches */
if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
- EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw corrupted RX packet in %s loopback test\n",
+ LOOPBACK_MODE(efx));
goto err;
}
/* Check that iteration matches */
if (received->iteration != payload->iteration) {
- EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in "
- "%s loopback test\n", ntohs(received->iteration),
- ntohs(payload->iteration), LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "saw RX packet from iteration %d (wanted %d) in "
+ "%s loopback test\n", ntohs(received->iteration),
+ ntohs(payload->iteration), LOOPBACK_MODE(efx));
goto err;
}
/* Increase correct RX count */
- EFX_TRACE(efx, "got loopback RX in %s loopback test\n",
- LOOPBACK_MODE(efx));
+ netif_vdbg(efx, drv, efx->net_dev,
+ "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
atomic_inc(&state->rx_good);
return;
@@ -318,10 +323,10 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
err:
#ifdef EFX_ENABLE_DEBUG
if (atomic_read(&state->rx_bad) == 0) {
- EFX_ERR(efx, "received packet:\n");
+ netif_err(efx, drv, efx->net_dev, "received packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
buf_ptr, pkt_len, 0);
- EFX_ERR(efx, "expected packet:\n");
+ netif_err(efx, drv, efx->net_dev, "expected packet:\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
&state->payload, sizeof(state->payload), 0);
}
@@ -402,9 +407,11 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
netif_tx_unlock_bh(efx->net_dev);
if (rc != NETDEV_TX_OK) {
- EFX_ERR(efx, "TX queue %d could not transmit packet %d "
- "of %d in %s loopback test\n", tx_queue->queue,
- i + 1, state->packet_count, LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d could not transmit packet %d of "
+ "%d in %s loopback test\n", tx_queue->queue,
+ i + 1, state->packet_count,
+ LOOPBACK_MODE(efx));
/* Defer cleaning up the other skbs for the caller */
kfree_skb(skb);
@@ -460,20 +467,22 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
/* Don't free the skbs; they will be picked up on TX
* overflow or channel teardown.
*/
- EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d "
- "TX completion events in %s loopback test\n",
- tx_queue->queue, tx_done, state->packet_count,
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "TX completion events in %s loopback test\n",
+ tx_queue->queue, tx_done, state->packet_count,
+ LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Allow to fall through so we see the RX errors as well */
}
/* We may always be up to a flush away from our desired packet total */
if (rx_good != state->packet_count) {
- EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d "
- "received packets in %s loopback test\n",
- tx_queue->queue, rx_good, state->packet_count,
- LOOPBACK_MODE(efx));
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d saw only %d out of an expected %d "
+ "received packets in %s loopback test\n",
+ tx_queue->queue, rx_good, state->packet_count,
+ LOOPBACK_MODE(efx));
rc = -ETIMEDOUT;
/* Fall through */
}
@@ -505,9 +514,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
return -ENOMEM;
state->flush = false;
- EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
- "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
- state->packet_count);
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d testing %s loopback with %d packets\n",
+ tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
efx_iterate_state(efx);
begin_rc = efx_begin_loopback(tx_queue);
@@ -531,9 +541,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
}
}
- EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length "
- "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
- state->packet_count);
+ netif_dbg(efx, drv, efx->net_dev,
+ "TX queue %d passed %s loopback test with a burst length "
+ "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+ state->packet_count);
return 0;
}
@@ -545,7 +556,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
static int efx_wait_for_link(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
- int count;
+ int count, link_up_count = 0;
bool link_up;
for (count = 0; count < 40; count++) {
@@ -567,8 +578,12 @@ static int efx_wait_for_link(struct efx_nic *efx)
link_up = !efx->mac_op->check_fault(efx);
mutex_unlock(&efx->mac_lock);
- if (link_up)
- return 0;
+ if (link_up) {
+ if (++link_up_count == 2)
+ return 0;
+ } else {
+ link_up_count = 0;
+ }
}
return -ETIMEDOUT;
@@ -604,15 +619,17 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
rc = __efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
- EFX_ERR(efx, "unable to move into %s loopback\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "unable to move into %s loopback\n",
+ LOOPBACK_MODE(efx));
goto out;
}
rc = efx_wait_for_link(efx);
if (rc) {
- EFX_ERR(efx, "loopback %s never came up\n",
- LOOPBACK_MODE(efx));
+ netif_err(efx, drv, efx->net_dev,
+ "loopback %s never came up\n",
+ LOOPBACK_MODE(efx));
goto out;
}
@@ -720,7 +737,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
rc_reset = rc;
if (rc_reset) {
- EFX_ERR(efx, "Unable to recover from chip test\n");
+ netif_err(efx, drv, efx->net_dev,
+ "Unable to recover from chip test\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f2b1e618075..3fab030f8ab 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -118,10 +118,11 @@ static int siena_probe_port(struct efx_nic *efx)
MC_CMD_MAC_NSTATS * sizeof(u64));
if (rc)
return rc;
- EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
@@ -216,7 +217,8 @@ static int siena_probe_nic(struct efx_nic *efx)
efx->nic_data = nic_data;
if (efx_nic_fpga_ver(efx) != 0) {
- EFX_ERR(efx, "Siena FPGA not supported\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
@@ -233,8 +235,8 @@ static int siena_probe_nic(struct efx_nic *efx)
rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build);
if (rc) {
- EFX_ERR(efx, "Failed to read MCPU firmware version - "
- "rc %d\n", rc);
+ netif_err(efx, probe, efx->net_dev,
+ "Failed to read MCPU firmware version - rc %d\n", rc);
goto fail1; /* MCPU absent? */
}
@@ -242,17 +244,19 @@ static int siena_probe_nic(struct efx_nic *efx)
* filter settings. We must do this before we reset the NIC */
rc = efx_mcdi_drv_attach(efx, true, &already_attached);
if (rc) {
- EFX_ERR(efx, "Unable to register driver with MCPU\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
goto fail2;
}
if (already_attached)
/* Not a fatal error */
- EFX_ERR(efx, "Host already registered with MCPU\n");
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
/* Now we can reset the NIC */
rc = siena_reset_hw(efx, RESET_TYPE_ALL);
if (rc) {
- EFX_ERR(efx, "failed to reset NIC\n");
+ netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
}
@@ -264,24 +268,23 @@ static int siena_probe_nic(struct efx_nic *efx)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
- EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
- (unsigned long long)efx->irq_status.dma_addr,
- efx->irq_status.addr,
- (unsigned long long)virt_to_phys(efx->irq_status.addr));
+ netif_dbg(efx, probe, efx->net_dev,
+ "INT_KER at %llx (virt %p phys %llx)\n",
+ (unsigned long long)efx->irq_status.dma_addr,
+ efx->irq_status.addr,
+ (unsigned long long)virt_to_phys(efx->irq_status.addr));
/* Read in the non-volatile configuration */
rc = siena_probe_nvconfig(efx);
if (rc == -EINVAL) {
- EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
+ netif_err(efx, probe, efx->net_dev,
+ "NVRAM is invalid therefore using defaults\n");
efx->phy_type = PHY_TYPE_NONE;
efx->mdio.prtad = MDIO_PRTAD_NONE;
} else if (rc) {
goto fail5;
}
- get_random_bytes(&nic_data->ipv6_rss_key,
- sizeof(nic_data->ipv6_rss_key));
-
return 0;
fail5:
@@ -301,7 +304,6 @@ fail1:
*/
static int siena_init_nic(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
efx_oword_t temp;
int rc;
@@ -326,25 +328,36 @@ static int siena_init_nic(struct efx_nic *efx)
efx_reado(efx, &temp, FR_AZ_RX_CFG);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
+ /* Enable hash insertion. This is broken for the 'Falcon' hash
+ * if IPv6 hashing is also enabled, so also select Toeplitz
+ * TCP/IPv4 and IPv4 hashes. */
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
+ /* Set hash key for IPv4 */
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
+ efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
+
/* Enable IPv6 RSS */
- BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
+ BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
- memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
+ memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
- memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
+ memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
- memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
+ memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
/* No MCDI operation has been defined to set thresholds */
- EFX_ERR(efx, "ignoring RX flow control thresholds\n");
+ netif_err(efx, hw, efx->net_dev,
+ "ignoring RX flow control thresholds\n");
/* Enable event logging */
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
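
With nic_data->ipv6_rss_key removed, IPv4 and IPv6 RSS now share the single efx->rx_hash_key. The writes above imply this split of the key (an oword is 16 bytes, and the BUILD_BUG_ON pins TKEY_HI_WIDTH/8 as the tail): bytes 0-15 feed both FR_BZ_RX_RSS_TKEY and IPV6_REG1, bytes 16-31 feed IPV6_REG2, and the remaining bytes land in the low bits of IPV6_REG3. The key itself is presumably seeded once in common probe code, replacing the get_random_bytes() call deleted above:

	/* Sketch of the assumed replacement seeding: */
	get_random_bytes(efx->rx_hash_key, sizeof(efx->rx_hash_key));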
@@ -565,7 +578,8 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
return 0;
fail:
- EFX_ERR(efx, "%s failed: type=%d rc=%d\n", __func__, type, rc);
+ netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
+ __func__, type, rc);
return rc;
}
@@ -628,6 +642,7 @@ struct efx_nic_type siena_a0_nic_type = {
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
@@ -635,6 +650,7 @@ struct efx_nic_type siena_a0_nic_type = {
* channels */
.tx_dc_base = 0x88000,
.rx_dc_base = 0x68000,
- .offload_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH),
.reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT,
};
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f21efe7bd31..6791be90c2f 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -228,7 +228,8 @@ int sft9001_wait_boot(struct efx_nic *efx)
boot_stat = efx_mdio_read(efx, MDIO_MMD_PCS,
PCS_BOOT_STATUS_REG);
if (boot_stat >= 0) {
- EFX_LOG(efx, "PHY boot status = %#x\n", boot_stat);
+ netif_dbg(efx, hw, efx->net_dev,
+ "PHY boot status = %#x\n", boot_stat);
switch (boot_stat &
((1 << PCS_BOOT_FATAL_ERROR_LBN) |
(3 << PCS_BOOT_PROGRESS_LBN) |
@@ -463,10 +464,11 @@ static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
} else {
reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
- EFX_ERR(efx, "appears to be plugged into a port"
- " that is not 10GBASE-T capable. The PHY"
- " supports 10GBASE-T ONLY, so no link can"
- " be established\n");
+ netif_err(efx, link, efx->net_dev,
+ "appears to be plugged into a port"
+ " that is not 10GBASE-T capable. The PHY"
+ " supports 10GBASE-T ONLY, so no link can"
+ " be established\n");
}
efx_mdio_write(efx, MDIO_MMD_PMAPMD,
PMA_PMD_LED_OVERR_REG, reg);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 6bb12a87ef2..c6942da2c99 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -42,7 +42,7 @@ void efx_stop_queue(struct efx_channel *channel)
return;
spin_lock_bh(&channel->tx_stop_lock);
- EFX_TRACE(efx, "stop TX queue\n");
+ netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
atomic_inc(&channel->tx_stop_count);
netif_tx_stop_queue(
@@ -64,7 +64,7 @@ void efx_wake_queue(struct efx_channel *channel)
local_bh_disable();
if (atomic_dec_and_lock(&channel->tx_stop_count,
&channel->tx_stop_lock)) {
- EFX_TRACE(efx, "waking TX queue\n");
+ netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
netif_tx_wake_queue(
netdev_get_tx_queue(
efx->net_dev,
@@ -94,8 +94,9 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
if (buffer->skb) {
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
- EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
- "complete\n", tx_queue->queue, read_ptr);
+ netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+ "TX queue %d transmission id %x complete\n",
+ tx_queue->queue, tx_queue->read_count);
}
}
@@ -300,9 +301,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
pci_err:
- EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d "
- "fragments for DMA\n", tx_queue->queue, skb->len,
- skb_shinfo(skb)->nr_frags + 1);
+ netif_err(efx, tx_err, efx->net_dev,
+ " TX queue %d could not map skb with %d bytes %d "
+ "fragments for DMA\n", tx_queue->queue, skb->len,
+ skb_shinfo(skb)->nr_frags + 1);
/* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb);
@@ -354,9 +356,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
if (unlikely(buffer->len == 0)) {
- EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
- "completion id %x\n", tx_queue->queue,
- read_ptr);
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX queue %d spurious TX completion id %x\n",
+ tx_queue->queue, read_ptr);
efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
@@ -431,7 +433,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
unsigned int txq_size;
int i, rc;
- EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
+ netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d\n",
+ tx_queue->queue);
/* Allocate software ring */
txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
@@ -456,7 +459,8 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
- EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "initialising TX queue %d\n", tx_queue->queue);
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
@@ -488,7 +492,8 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
- EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
/* Flush TX queue, remove descriptor ring */
efx_nic_fini_tx(tx_queue);
@@ -507,7 +512,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
- EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
kfree(tx_queue->buffer);
@@ -639,8 +645,8 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
if (base_kva == NULL) {
- EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
- " headers\n");
+ netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
+ "Unable to allocate page for TSO headers\n");
return -ENOMEM;
}
@@ -1124,7 +1130,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
return NETDEV_TX_OK;
mem_err:
- EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
+ netif_err(efx, tx_err, efx->net_dev,
+ "Out of memory for TSO headers, or PCI mapping error\n");
dev_kfree_skb_any(skb);
goto unwind;
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 518f7fc9147..782e45a613d 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -54,7 +54,7 @@
/* Increase filter depth to avoid RX_RESET */
#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
/* Flushes may never complete */
-#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
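
Widening EFX_WORKAROUND_7803 from FALCON_A to FALCON_AB means flushes must now be assumed able to hang on Falcon B-series as well as A-series boards. The revision gates are defined earlier in this header; as an assumption, they follow the usual pattern of comparing efx_nic_rev() against the last affected revision:

/* Assumed shape of the gates used above (not shown in this hunk): */
#define EFX_WORKAROUND_FALCON_A(efx)  (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)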