Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  711
1 file changed, 198 insertions(+), 513 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f88c822e57a..e1ceb37ef12 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
- buffer_info->length,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp);
}
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
" %04X %p %016llX %p%s\n", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- (u64)buffer_info->dma,
- buffer_info->length,
+ (u64)dma_unmap_addr(buffer_info, dma),
+ dma_unmap_len(buffer_info, len),
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1, buffer_info->skb->data,
- buffer_info->length, true);
+ dma_unmap_len(buffer_info, len),
+ true);
}
}
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
{
struct igb_ring *ring;
int i;
- int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
adapter->tx_ring[i] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
for (i = 0; i < adapter->num_rx_queues; i++) {
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
- adapter->node);
- if (!ring)
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
if (!ring)
goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
- ring->numa_node = adapter->node;
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
adapter->rx_ring[i] = ring;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
igb_cache_ring_register(adapter);
return 0;
err:
- /* Restore the adapter's original node */
- adapter->node = orig_node;
igb_free_queues(adapter);
return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
struct igb_q_vector *q_vector;
struct e1000_hw *hw = &adapter->hw;
int v_idx;
- int orig_node = adapter->node;
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- if ((adapter->num_q_vectors == (adapter->num_rx_queues +
- adapter->num_tx_queues)) &&
- (adapter->num_rx_queues == v_idx))
- adapter->node = orig_node;
- if (orig_node == -1) {
- int cur_node = next_online_node(adapter->node);
- if (cur_node == MAX_NUMNODES)
- cur_node = first_online_node;
- adapter->node = cur_node;
- }
- q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
- adapter->node);
- if (!q_vector)
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
+ q_vector = kzalloc(sizeof(struct igb_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector;
}
- /* Restore the adapter's original node */
- adapter->node = orig_node;
return 0;
err_out:
- /* Restore the adapter's original node */
- adapter->node = orig_node;
igb_free_q_vectors(adapter);
return -ENOMEM;
}
@@ -1751,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter)
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+#ifdef CONFIG_IGB_PTP
+ /* Re-enable PTP, where applicable. */
+ igb_ptp_reset(adapter);
+#endif /* CONFIG_IGB_PTP */
+
igb_get_phy_info(hw);
}
@@ -2180,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
}
#endif
+
#ifdef CONFIG_IGB_PTP
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
-#endif
+#endif /* CONFIG_IGB_PTP */
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2259,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev)
pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_PTP
-	igb_ptp_remove(adapter);
-#endif
+	igb_ptp_stop(adapter);
+#endif /* CONFIG_IGB_PTP */
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2294,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
/* disable iov and allow time for transactions to clear */
- if (!igb_check_vf_assignment(adapter)) {
+ if (igb_vfs_are_assigned(adapter)) {
+ dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ } else {
pci_disable_sriov(pdev);
msleep(500);
- } else {
- dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
}
kfree(adapter->vf_data);
@@ -2338,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- int old_vfs = igb_find_enabled_vfs(adapter);
+ int old_vfs = pci_num_vf(adapter->pdev);
int i;
/* Virtualization features not supported on i210 family. */
@@ -2418,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
VLAN_HLEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- adapter->node = -1;
-
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
@@ -2666,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
- int orig_node = dev_to_node(dev);
int size;
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
- if (!tx_ring->tx_buffer_info)
- tx_ring->tx_buffer_info = vzalloc(size);
+
+ tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
@@ -2680,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- set_dev_node(dev, tx_ring->numa_node);
tx_ring->desc = dma_alloc_coherent(dev,
tx_ring->size,
&tx_ring->dma,
GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!tx_ring->desc)
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
-
if (!tx_ring->desc)
goto err;
@@ -2702,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
err:
vfree(tx_ring->tx_buffer_info);
- dev_err(dev,
- "Unable to allocate memory for the transmit descriptor ring\n");
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
return -ENOMEM;
}
@@ -2820,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- int orig_node = dev_to_node(dev);
- int size, desc_len;
+ int size;
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
- if (!rx_ring->rx_buffer_info)
- rx_ring->rx_buffer_info = vzalloc(size);
+
+ rx_ring->rx_buffer_info = vzalloc(size);
if (!rx_ring->rx_buffer_info)
goto err;
- desc_len = sizeof(union e1000_adv_rx_desc);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * desc_len;
+ rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- set_dev_node(dev, rx_ring->numa_node);
rx_ring->desc = dma_alloc_coherent(dev,
rx_ring->size,
&rx_ring->dma,
GFP_KERNEL);
- set_dev_node(dev, orig_node);
- if (!rx_ring->desc)
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
-
if (!rx_ring->desc)
goto err;
@@ -2859,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
err:
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
- dev_err(dev, "Unable to allocate memory for the receive descriptor"
- " ring\n");
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
return -ENOMEM;
}
@@ -2898,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0, shift2 = 0;
- union e1000_reta {
- u32 dword;
- u8 bytes[4];
- } reta;
- static const u8 rsshash[40] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
- 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
- 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
- 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+ u32 j, num_rx_queues, shift = 0;
+ static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+ 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+ 0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+ 0xFA01ACBE };
/* Fill out hash function seeds */
- for (j = 0; j < 10; j++) {
- u32 rsskey = rsshash[(j * 4)];
- rsskey |= rsshash[(j * 4) + 1] << 8;
- rsskey |= rsshash[(j * 4) + 2] << 16;
- rsskey |= rsshash[(j * 4) + 3] << 24;
- array_wr32(E1000_RSSRK(0), j, rsskey);
- }
+ for (j = 0; j < 10; j++)
+ wr32(E1000_RSSRK(j), rsskey[j]);
num_rx_queues = adapter->rss_queues;
- if (adapter->vfs_allocated_count) {
- /* 82575 and 82576 supports 2 RSS queues for VMDq */
- switch (hw->mac.type) {
- case e1000_i350:
- case e1000_82580:
- num_rx_queues = 1;
- shift = 0;
- break;
- case e1000_82576:
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count) {
shift = 3;
num_rx_queues = 2;
- break;
- case e1000_82575:
- shift = 2;
- shift2 = 6;
- default:
- break;
}
- } else {
- if (hw->mac.type == e1000_82575)
- shift = 6;
+ break;
+ default:
+ break;
}
- for (j = 0; j < (32 * 4); j++) {
- reta.bytes[j & 3] = (j % num_rx_queues) << shift;
- if (shift2)
- reta.bytes[j & 3] |= num_rx_queues << shift2;
- if ((j & 3) == 3)
- wr32(E1000_RETA(j >> 2), reta.dword);
+ /*
+ * Populate the indirection table 4 entries at a time. To do this
+ * we are generating the results for n and n+2 and then interleaving
+ * those with the results for n+1 and n+3.
+ */
+ for (j = 0; j < 32; j++) {
+ /* first pass generates n and n+2 */
+ u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+ u32 reta = (base & 0x07800780) >> (7 - shift);
+
+ /* second pass generates n+1 and n+3 */
+ base += 0x00010001 * num_rx_queues;
+ reta |= (base & 0x07800780) << (1 + shift);
+
+ wr32(E1000_RETA(j), reta);
}
/*
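
A hypothetical standalone sanity check (not driver code) of the RETA packing above: it recomputes each of the 128 indirection entries the straightforward way, as an even spread (n * num_rx_queues) / 128, and compares it with the interleaved bit-twiddled value. The values num_rx_queues = 4 and shift = 0 are assumed here (e.g. a non-SR-IOV adapter with 4 RSS queues).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t num_rx_queues = 4, shift = 0;
	uint32_t j, k;

	for (j = 0; j < 32; j++) {
		/* first pass generates entries n and n+2 (bytes 0 and 2) */
		uint32_t base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
		uint32_t reta = (base & 0x07800780) >> (7 - shift);

		/* second pass generates n+1 and n+3 (bytes 1 and 3) */
		base += 0x00010001 * num_rx_queues;
		reta |= (base & 0x07800780) << (1 + shift);

		for (k = 0; k < 4; k++) {
			uint32_t n = 4 * j + k;
			uint32_t entry = (reta >> (8 * k)) & 0xFF;
			uint32_t expect = ((n * num_rx_queues) / 128) << shift;

			if (entry != expect)
				printf("mismatch at entry %u: %u != %u\n",
				       n, entry, expect);
		}
	}
	return 0;
}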
@@ -3184,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
#endif
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+#ifdef CONFIG_IGB_PTP
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
+#endif /* CONFIG_IGB_PTP */
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3269,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
{
if (tx_buffer->skb) {
dev_kfree_skb_any(tx_buffer->skb);
- if (tx_buffer->dma)
+ if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
- } else if (tx_buffer->dma) {
+ } else if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
}
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
- tx_buffer->dma = 0;
+ dma_unmap_len_set(tx_buffer, len, 0);
/* buffer_info must be completely set up in the transmit path */
}
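
The dma_unmap_addr()/dma_unmap_len() accessors adopted throughout this patch come from the kernel's DMA map-state helpers in <linux/dma-mapping.h>: the backing fields only exist when CONFIG_NEED_DMA_MAP_STATE is set and compile away entirely otherwise. A minimal sketch of the pattern, using a hypothetical struct (the real igb_tx_buffer is declared in igb.h):

#include <linux/dma-mapping.h>

/* hypothetical example struct; the two macro-declared fields vanish on
 * architectures that do not need unmap state */
struct example_tx_buffer {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void example_unmap(struct device *dev, struct example_tx_buffer *buf)
{
	/* a non-zero stored length doubles as the "is mapped" flag,
	 * which is why the driver clears it with dma_unmap_len_set() */
	if (dma_unmap_len(buf, len)) {
		dma_unmap_single(dev,
				 dma_unmap_addr(buf, dma),
				 dma_unmap_len(buf, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(buf, len, 0);
	}
}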
@@ -4229,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
if (tx_flags & IGB_TX_FLAGS_VLAN)
cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+#ifdef CONFIG_IGB_PTP
/* set timestamp bit if present */
- if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+ if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
+#endif /* CONFIG_IGB_PTP */
/* set segmentation bits for TSO */
if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4275,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
const u8 hdr_len)
{
struct sk_buff *skb = first->skb;
- struct igb_tx_buffer *tx_buffer_info;
+ struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
dma_addr_t dma;
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4296,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
goto dma_error;
/* record length, and DMA address */
- first->length = size;
- first->dma = dma;
+ dma_unmap_len_set(first, len, size);
+ dma_unmap_addr_set(first, dma, dma);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
for (;;) {
@@ -4339,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_buffer_info->length = size;
- tx_buffer_info->dma = dma;
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4392,9 +4324,9 @@ dma_error:
/* clear dma mappings for failed tx_buffer_info map */
for (;;) {
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
- if (tx_buffer_info == first)
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer == first)
break;
if (i == 0)
i = tx_ring->count;
@@ -4440,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
+#ifdef CONFIG_IGB_PTP
+ struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
+#endif /* CONFIG_IGB_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4462,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+#ifdef CONFIG_IGB_PTP
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
+
+ adapter->ptp_tx_skb = skb_get(skb);
+ if (adapter->hw.mac.type == e1000_82576)
+ schedule_work(&adapter->ptp_tx_work);
}
+#endif /* CONFIG_IGB_PTP */
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
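
Only one skb may now await a Tx timestamp at a time: adapter->ptp_tx_skb acts as the guard, and skb_get() holds a reference until the deferred work reads the latch registers. The worker itself lives in igb_ptp.c; a plausible sketch, mirroring the igb_tx_hwtstamp() logic removed further down in this diff:

/*
 * Hypothetical sketch of the deferred Tx timestamp worker; the real
 * implementation is in igb_ptp.c.
 */
static void example_ptp_tx_work(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work, struct igb_adapter,
						   ptp_tx_work);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	if (!adapter->ptp_tx_skb)
		return;

	/* VALID set means the TXSTMP registers latched a fresh stamp */
	if (!(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);

	/* drop the reference taken with skb_get() in the xmit path */
	dev_kfree_skb_any(adapter->ptp_tx_skb);
	adapter->ptp_tx_skb = NULL;
}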
@@ -4661,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter,
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
+ u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
- ring->rx_stats.drops += rqdpc_tmp;
- net_stats->rx_fifo_errors += rqdpc_tmp;
+ if (rqdpc) {
+ ring->rx_stats.drops += rqdpc;
+ net_stats->rx_fifo_errors += rqdpc;
+ }
do {
start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
@@ -4755,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
reg = rd32(E1000_CTRL_EXT);
if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
adapter->stats.rxerrc += rd32(E1000_RXERRC);
- adapter->stats.tncrs += rd32(E1000_TNCRS);
+
+ /* this stat has invalid values on i210/i211 */
+ if ((hw->mac.type != e1000_i210) &&
+ (hw->mac.type != e1000_i211))
+ adapter->stats.tncrs += rd32(E1000_TNCRS);
}
adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -4852,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef CONFIG_IGB_PTP
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* CONFIG_IGB_PTP */
+
wr32(E1000_EIMS, adapter->eims_other);
return IRQ_HANDLED;
@@ -5002,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn;
eth_random_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- device_id = IGB_82576_VF_DEV_ID;
- /* VF Stride for 82576 is 2 */
- thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
- (pdev->devfn & 1);
- break;
- case e1000_i350:
- device_id = IGB_I350_VF_DEV_ID;
- /* VF Stride for I350 is 4 */
- thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
- (pdev->devfn & 3);
- break;
- default:
- device_id = 0;
- thisvf_devfn = 0;
- break;
- }
-
- pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(hw->vendor_id,
- device_id, pvfdev);
- }
-
- if (pvfdev)
- adapter->vf_data[vf].vfdev = pvfdev;
- else
- dev_err(&pdev->dev,
- "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
- return pvfdev != NULL;
+ return 0;
}
-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- u16 vf_stride;
- unsigned int device_id;
- int vfs_found = 0;
+ struct pci_dev *vfdev;
+ int dev_id;
switch (adapter->hw.mac.type) {
case e1000_82576:
- device_id = IGB_82576_VF_DEV_ID;
- /* VF Stride for 82576 is 2 */
- vf_stride = 2;
+ dev_id = IGB_82576_VF_DEV_ID;
break;
case e1000_i350:
- device_id = IGB_I350_VF_DEV_ID;
- /* VF Stride for I350 is 4 */
- vf_stride = 4;
+ dev_id = IGB_I350_VF_DEV_ID;
break;
default:
- device_id = 0;
- vf_stride = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn &&
- (pvfdev->bus->number >= pdev->bus->number))
- vfs_found++;
- vf_devfn += vf_stride;
- pvfdev = pci_get_device(hw->vendor_id,
- device_id, pvfdev);
+ return false;
}
- return vfs_found;
-}
-
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
- int i;
- for (i = 0; i < adapter->vfs_allocated_count; i++) {
- if (adapter->vf_data[i].vfdev) {
- if (adapter->vf_data[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
return true;
}
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
}
+
return false;
}
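
pci_get_device() iterates with reference counting: each call puts the reference on the device passed in and takes one on the device it returns, so the loop above needs no explicit pci_dev_put() as long as it runs to completion. The idiom in isolation (dev_id as in the function above):

struct pci_dev *vfdev = NULL;

while ((vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev))) {
	/* inspect vfdev; note that breaking out early keeps the last
	 * reference, so strictly an early exit should be paired with
	 * pci_dev_put(vfdev) */
}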
@@ -5643,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef CONFIG_IGB_PTP
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* CONFIG_IGB_PTP */
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5684,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+#ifdef CONFIG_IGB_PTP
+ if (icr & E1000_ICR_TS) {
+ u32 tsicr = rd32(E1000_TSICR);
+
+ if (tsicr & E1000_TSICR_TXTS) {
+ /* acknowledge the interrupt */
+ wr32(E1000_TSICR, E1000_TSICR_TXTS);
+ /* retrieve hardware timestamp */
+ schedule_work(&adapter->ptp_tx_work);
+ }
+ }
+#endif /* CONFIG_IGB_PTP */
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5743,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget)
return 0;
}
-#ifdef CONFIG_IGB_PTP
-/**
- * igb_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: pointer to q_vector containing needed info
- * @buffer: pointer to igb_tx_buffer structure
- *
- * If we were asked to do hardware stamping and such a time stamp is
- * available, then it must have been for this skb here because we only
- * allow only one such packet into the queue.
- */
-static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
- struct igb_tx_buffer *buffer_info)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- struct skb_shared_hwtstamps shhwtstamps;
- u64 regval;
-
- /* if skb does not support hw timestamp or TX stamp not valid exit */
- if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
- !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
- return;
-
- regval = rd32(E1000_TXSTMPL);
- regval |= (u64)rd32(E1000_TXSTMPH) << 32;
-
- igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
- skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
-}
-
-#endif
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
@@ -5785,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
struct igb_tx_buffer *tx_buffer;
- union e1000_adv_tx_desc *tx_desc, *eop_desc;
+ union e1000_adv_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
@@ -5797,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_desc = IGB_TX_DESC(tx_ring, i);
i -= tx_ring->count;
- for (; budget; budget--) {
- eop_desc = tx_buffer->next_to_watch;
-
- /* prevent any other reads prior to eop_desc */
- rmb();
+ do {
+ union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
/* if next_to_watch is not set then there is no work pending */
if (!eop_desc)
break;
+ /* prevent any other reads prior to eop_desc */
+ rmb();
+
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
break;
@@ -5818,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
-#ifdef CONFIG_IGB_PTP
- /* retrieve hardware timestamp */
- igb_tx_hwtstamp(q_vector, tx_buffer);
-
-#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
- tx_buffer->skb = NULL;
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ /* clear tx_buffer data */
+ tx_buffer->skb = NULL;
+ dma_unmap_len_set(tx_buffer, len, 0);
+
/* clear last DMA location and unmap remaining buffers */
while (tx_desc != eop_desc) {
- tx_buffer->dma = 0;
-
tx_buffer++;
tx_desc++;
i++;
@@ -5847,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
/* unmap any remaining paged data */
- if (tx_buffer->dma) {
+ if (dma_unmap_len(tx_buffer, len)) {
dma_unmap_page(tx_ring->dev,
- tx_buffer->dma,
- tx_buffer->length,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
}
}
- /* clear last DMA location */
- tx_buffer->dma = 0;
-
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
tx_desc++;
@@ -5867,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_buffer = tx_ring->tx_buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, 0);
}
- }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
@@ -5883,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
struct e1000_hw *hw = &adapter->hw;
- eop_desc = tx_buffer->next_to_watch;
-
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
- if (eop_desc &&
+ if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
!(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5912,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
tx_ring->next_to_use,
tx_ring->next_to_clean,
tx_buffer->time_stamp,
- eop_desc,
+ tx_buffer->next_to_watch,
jiffies,
- eop_desc->wb.status);
+ tx_buffer->next_to_watch->wb.status);
netif_stop_subqueue(tx_ring->netdev,
tx_ring->queue_index);
@@ -5994,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-#ifdef CONFIG_IGB_PTP
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
- u64 regval;
-
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
-
- /*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
- *
- * If nothing went wrong, then it should have a shared tx_flags that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
-
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
- }
-
- igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
-}
-
-#endif
static void igb_rx_vlan(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6146,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
}
#ifdef CONFIG_IGB_PTP
- igb_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif
+ igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
+#endif /* CONFIG_IGB_PTP */
igb_rx_hash(rx_ring, rx_desc, skb);
igb_rx_checksum(rx_ring, rx_desc, skb);
igb_rx_vlan(rx_ring, rx_desc, skb);
@@ -6341,181 +6195,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
/**
- * igb_hwtstamp_ioctl - control hardware time stamping
- * @netdev:
- * @ifreq:
- * @cmd:
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- **/
-static int igb_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
- u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
- u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_cfg = 0;
- bool is_l4 = false;
- bool is_l2 = false;
- u32 regval;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- tsync_tx_ctl = 0;
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- tsync_rx_ctl = 0;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
- is_l4 = true;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- is_l2 = true;
- is_l4 = true;
- break;
- default:
- return -ERANGE;
- }
-
- if (hw->mac.type == e1000_82575) {
- if (tsync_rx_ctl | tsync_tx_ctl)
- return -EINVAL;
- return 0;
- }
-
- /*
- * Per-packet timestamping only works if all packets are
- * timestamped, so enable timestamping in all packets as
- * long as one rx filter was configured.
- */
- if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
- tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- }
-
- /* enable/disable TX */
- regval = rd32(E1000_TSYNCTXCTL);
- regval &= ~E1000_TSYNCTXCTL_ENABLED;
- regval |= tsync_tx_ctl;
- wr32(E1000_TSYNCTXCTL, regval);
-
- /* enable/disable RX */
- regval = rd32(E1000_TSYNCRXCTL);
- regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
- regval |= tsync_rx_ctl;
- wr32(E1000_TSYNCRXCTL, regval);
-
- /* define which PTP packets are time stamped */
- wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
-
- /* define ethertype filter for timestamped packets */
- if (is_l2)
- wr32(E1000_ETQF(3),
- (E1000_ETQF_FILTER_ENABLE | /* enable filter */
- E1000_ETQF_1588 | /* enable timestamping */
- ETH_P_1588)); /* 1588 eth protocol type */
- else
- wr32(E1000_ETQF(3), 0);
-
-#define PTP_PORT 319
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IPPROTO_UDP /* UDP */
- | E1000_FTQF_VF_BP /* VF not compared */
- | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
- | E1000_FTQF_MASK); /* mask all inputs */
- ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
- wr32(E1000_IMIR(3), htons(PTP_PORT));
- wr32(E1000_IMIREXT(3),
- (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
- if (hw->mac.type == e1000_82576) {
- /* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_PORT));
- ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
- }
- wr32(E1000_FTQF(3), ftqf);
- } else {
- wr32(E1000_FTQF(3), E1000_FTQF_MASK);
- }
- wrfl();
-
- adapter->hwtstamp_config = config;
-
- /* clear TX/RX time stamp registers, just to be sure */
- regval = rd32(E1000_TXSTMPH);
- regval = rd32(E1000_RXSTMPH);
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-/**
* igb_ioctl -
* @netdev:
* @ifreq:
@@ -6528,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+#ifdef CONFIG_IGB_PTP
case SIOCSHWTSTAMP:
- return igb_hwtstamp_ioctl(netdev, ifr, cmd);
+ return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
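
For context, the userspace half of the SIOCSHWTSTAMP request now serviced by igb_ptp_hwtstamp_ioctl(): the standard <linux/net_tstamp.h> interface, shown as a hypothetical helper (fd is any socket bound to the interface's address family):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* hypothetical helper: enable Tx + Rx hardware timestamping on ifname */
static int enable_hw_tstamp(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the driver may rewrite cfg.rx_filter to what it actually set */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}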
@@ -6667,6 +6348,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
return 0;
err_inval: