Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c   |  39
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c      |   4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c          |  10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c        |  41
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c         |  36
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c            |  31
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c       |  16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c         |  29
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h         |   5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h        |   8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c       |  70
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c          |  25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h           |  25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c     |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      | 501
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |   1
16 files changed, 545 insertions(+), 299 deletions(-)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 736a7d987db..9089d00f142 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 1; MDI => 0 */
+ if ((hw->media_type == e1000_media_type_copper) &&
+ netif_carrier_ok(netdev))
+ ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
+ ETH_TP_MDI_X :
+ ETH_TP_MDI);
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->mdix;
return 0;
}
@@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ /*
+ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
}
}
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->mdix = AUTO_ALL_MODES;
+ else
+ hw->mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
/* reset the link */
if (netif_running(adapter->netdev)) {
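
Note: the eth_tp_mdix_ctrl plumbing above maps the ethtool constants
(ETH_TP_MDI = 1, ETH_TP_MDI_X = 2, ETH_TP_MDI_AUTO = 3) onto the driver's
internal hw->mdix field, where AUTO_ALL_MODES (0) means automatic crossover.
A minimal userspace sketch of exercising this path through the SIOCETHTOOL
ioctl might look as follows; the helper name, interface handling and error
paths are illustrative, not part of the patch:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* hedged sketch: force MDI-X on an interface via eth_tp_mdix_ctrl */
	static int force_mdix(const char *ifname)
	{
		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return -1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&ecmd;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* read current settings */
			return -1;

		ecmd.cmd = ETHTOOL_SSET;
		ecmd.autoneg = AUTONEG_ENABLE;		/* driver rejects forcing MDI with autoneg off */
		ecmd.eth_tp_mdix_ctrl = ETH_TP_MDI_X;	/* 2 => force crossover */
		return ioctl(fd, SIOCETHTOOL, &ifr);
	}
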
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3bfbb8df898..0ae2fcfa512 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4939,6 +4939,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ hw->mdix = AUTO_ALL_MODES;
+
return 0;
err_inval:
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade957f..080c89093fe 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext, eecd;
+ u32 ctrl, ctrl_ext, eecd, tctl;
s32 ret_val;
/*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
- ew32(TCTL, E1000_TCTL_PSP);
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
e1e_flush();
usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
- * If the partner code word is null, stop forcing
- * and restart auto negotiation.
*/
- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+ if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
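
Note: two independent fixes land in 82571.c. The reset path now clears only
the transmit-enable bit instead of overwriting TCTL wholesale, and the serdes
link check no longer treats a null partner code word (!(rxcw & E1000_RXCW_CW))
as a reason to restart autonegotiation. The TCTL change matters because the
old write left only the padding bit set and zeroed every other field; the
read-modify-write preserves them:

	u32 tctl;

	/* before: clobbers all of TCTL, leaving only pad-short-packets set */
	ew32(TCTL, E1000_TCTL_PSP);

	/* after: stop the transmitter, keep the rest of the configuration */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
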
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 0349e2478df..2e76f06720f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
return 0;
}
@@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
return 0;
err_inval:
@@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev,
return -EINVAL;
}
+ /*
+ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev,
hw->fc.requested_mode = e1000_fc_default;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->state);
return -EINVAL;
}
}
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /*
+ * fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
/* reset the link */
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
- } else {
+ } else
e1000e_reset(adapter);
- }
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b245310f1..46c3b1f9ff8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+ struct e1000_buffer *bi)
+{
+ int i;
+ struct e1000_ps_page *ps_page;
+
+ for (i = 0; i < adapter->rx_ps_pages; i++) {
+ ps_page = &bi->ps_pages[i];
+
+ if (ps_page->page) {
+ pr_info("packet dump for ps_page %d:\n", i);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, page_address(ps_page->page),
+ PAGE_SIZE, true);
+ }
+ }
+}
+
/*
* e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
(unsigned long long)buffer_info->time_stamp,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
+ 16, 1, buffer_info->skb->data,
+ buffer_info->skb->len, true);
}
/* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(buffer_info->dma),
- adapter->rx_ps_bsize0, true);
+ e1000e_dump_ps_pages(adapter,
+ buffer_info);
}
}
break;
@@ -444,12 +460,12 @@ rx_ring_summary:
(unsigned long long)buffer_info->dma,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter))
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16,
1,
- phys_to_virt
- (buffer_info->dma),
+ buffer_info->skb->data,
adapter->rx_buffer_len,
true);
}
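
Note: the dump fixes above all follow one rule: a dma_addr_t returned by the
DMA API is a bus address, not a physical address, so phys_to_virt() on it is
only accidentally correct on machines without an IOMMU or swiotlb. The debug
paths now dump from kernel virtual addresses the driver already holds. A
condensed sketch of the pattern, with names taken from the surrounding code:

	/* wrong: a DMA handle may not be a physical address at all */
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1,
		       phys_to_virt(buffer_info->dma), len, true);

	/* right: use the CPU mapping that backed the buffer when mapped */
	if (buffer_info->skb)
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1,
			       buffer_info->skb->data,
			       buffer_info->skb->len, true);

	/* right, for packet-split pages: page_address() gives the mapping */
	if (ps_page->page)
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1,
			       page_address(ps_page->page), PAGE_SIZE, true);
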
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b860d4f7ea2..fc62a3f3a5b 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82577 PHY Diagnostics Status */
#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
@@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
+ /* Set MDI/MDIX mode */
+ ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ return ret_val;
+ phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
+ /*
+ * Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data);
+ if (ret_val)
+ return ret_val;
+
return e1000_set_master_slave_mode(hw);
}
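
Note: the renamed I82577_PHY_CTRL2_* definitions make the two-bit MDI/MDI-X
field explicit: bit 9 (0x0200) forces MDI-X, bit 10 (0x0400) enables
automatic crossover, and clearing both forces plain MDI. The switch on
hw->phy.mdix above therefore encodes:

	/* hw->phy.mdix   bits written into PHY_CTRL_2     resulting mode
	 *      1         (neither bit)                    forced MDI
	 *      2         MANUAL_MDIX    (0x0200)          forced MDI-X
	 *   0/other      AUTO_MDI_MDIX  (0x0400)          auto (default)
	 */
	phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;	/* clear bits 9-10 first */
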
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaac48c..ba994fb4cec 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
+ /*
+ * Check for invalid size
+ */
+ if ((hw->mac.type == e1000_82576) && (size > 15)) {
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ size = 15;
+ }
+
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8;
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
} else
nvm->type = e1000_nvm_flash_hw;
- /*
- * Check for invalid size
- */
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
- size = 15;
- }
-
/* NVM Function Pointers */
switch (hw->mac.type) {
case e1000_82580:
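
Note: moving the 82576 size check is an ordering fix: nvm->word_size = 1 << size
must see the clamped exponent, otherwise an invalid size field read from the
EEPROM produced an oversized word_size even though size itself was corrected
later. Reduced to its essence:

	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* clamp before the exponent is consumed; 1 << 15 words == 32K words */
	if ((hw->mac.type == e1000_82576) && (size > 15)) {
		pr_notice("The NVM size is not valid, defaulting to 32K\n");
		size = 15;
	}

	nvm->word_size = 1 << size;
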
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 7be98b6f105..3404bc79f4c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+ if (ret_val)
+ goto out;
+
+ /* Set MDI/MDIX mode */
+ ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+ if (ret_val)
+ goto out;
+ phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+ /*
+ * Options:
+ * 0 - Auto (default)
+ * 1 - MDI mode
+ * 2 - MDI-X mode
+ */
+ switch (hw->phy.mdix) {
+ case 1:
+ break;
+ case 2:
+ phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+ break;
+ case 0:
+ default:
+ phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+ break;
+ }
+ ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
out:
return ret_val;
@@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
if (ret_val)
goto out;
- phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
- phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
+ phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 34e40619f16..6ac3299bfcb 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -111,8 +111,9 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
/* I82580 PHY Control 2 */
-#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400
-#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
+#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600
/* I82580 PHY Diagnostics Status */
#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 10efcd88dca..28394bea525 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -156,8 +156,12 @@
: (0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
: (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
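
Note: on 82576 and later, the per-queue DCA control registers for queues 4 and
up sit in a second block with a 0x40 stride, which the old (_n << 8) macros
could not reach. Worked through for queue 5 (an illustrative queue number):

	/* new: E1000_RXCTL(5) = 0x0C014 + (5 * 0x40) = 0x0C154 */
	/* old: 0x02814 + (5 << 8)                    = 0x02D14, wrong block */

For queues 0-3 both forms agree, since (_n << 8) and ((_n) * 0x100) are the
same computation.
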
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84cad0e..be02168f130 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -198,6 +198,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ /* MDI-X => 2; MDI => 1; Invalid => 0 */
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
+ ETH_TP_MDI;
+ else
+ ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+ if (hw->phy.mdix == AUTO_ALL_MODES)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ else
+ ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+
return 0;
}
@@ -209,11 +222,27 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
if (igb_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev, "Cannot change link "
- "characteristics when SoL/IDER is active.\n");
+ dev_err(&adapter->pdev->dev,
+ "Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
+ /*
+ * MDI setting is only allowed when autoneg enabled because
+ * some hardware doesn't allow MDI setting when speed or
+ * duplex is forced.
+ */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
+
+ if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+ (ecmd->autoneg != AUTONEG_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+ return -EINVAL;
+ }
+ }
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
@@ -227,12 +256,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
hw->fc.requested_mode = e1000_fc_default;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
+ /* calling this overrides forced MDI setting */
if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
}
+ /* MDI-X => 2; MDI => 1; Auto => 3 */
+ if (ecmd->eth_tp_mdix_ctrl) {
+ /*
+ * fix up the value for auto (3 => 0) as zero is mapped
+ * internally to auto
+ */
+ if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
+ hw->phy.mdix = AUTO_ALL_MODES;
+ else
+ hw->phy.mdix = ecmd->eth_tp_mdix_ctrl;
+ }
+
/* reset the link */
if (netif_running(adapter->netdev)) {
igb_down(adapter);
@@ -1089,8 +1131,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
if (val != (_test[pat] & write & mask)) {
- dev_err(&adapter->pdev->dev, "pattern test reg %04X "
- "failed: got 0x%08X expected 0x%08X\n",
+ dev_err(&adapter->pdev->dev,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
return 1;
@@ -1108,8 +1150,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
- dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
- " got 0x%08X expected 0x%08X\n", reg,
+ dev_err(&adapter->pdev->dev,
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
(val & mask), (write & mask));
*data = reg;
return 1;
@@ -1171,8 +1213,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_STATUS, toggle);
after = rd32(E1000_STATUS) & toggle;
if (value != after) {
- dev_err(&adapter->pdev->dev, "failed STATUS register test "
- "got: 0x%08X expected: 0x%08X\n", after, value);
+ dev_err(&adapter->pdev->dev,
+ "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
@@ -1497,6 +1540,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
break;
}
+ /* add small delay to avoid loopback test failure */
+ msleep(50);
+
/* force 1000, set loopback */
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
@@ -1777,16 +1823,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
* sessions are active */
if (igb_check_reset_block(&adapter->hw)) {
dev_err(&adapter->pdev->dev,
- "Cannot do PHY loopback test "
- "when SoL/IDER is active.\n");
+ "Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
}
if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i210)) {
+ || (adapter->hw.mac.type == e1000_i211)) {
dev_err(&adapter->pdev->dev,
- "Loopback test not supported "
- "on this part at this time.\n");
+ "Loopback test not supported on this part at this time.\n");
*data = 0;
goto out;
}
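
Note: buried among the log-message reflows is a real fix: the loopback guard
compared against e1000_i210 twice, so i211 parts were never excluded. The
reduced shape of the bug:

	/* before: the second arm repeats the first, i211 slips through */
	if ((adapter->hw.mac.type == e1000_i210)
	    || (adapter->hw.mac.type == e1000_i210))
		goto out;

	/* after: each arm names a distinct part */
	if ((adapter->hw.mac.type == e1000_i210)
	    || (adapter->hw.mac.type == e1000_i211))
		goto out;
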
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1050411e7ca..73cc273ef98 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
+ 16, 1, buffer_info->skb->data,
buffer_info->length, true);
}
}
@@ -547,18 +547,17 @@ rx_ring_summary:
(u64)buffer_info->dma,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter)) {
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->dma && buffer_info->skb) {
print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1,
- phys_to_virt(buffer_info->dma),
- IGB_RX_HDR_LEN, true);
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ IGB_RX_HDR_LEN, true);
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
- phys_to_virt(
- buffer_info->page_dma +
- buffer_info->page_offset),
+ page_address(buffer_info->page) +
+ buffer_info->page_offset,
PAGE_SIZE/2, true);
}
}
@@ -6235,7 +6234,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
if (!page) {
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
bi->page = page;
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
@@ -6676,6 +6675,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
default:
goto err_inval;
}
+
+ /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
+ adapter->hw.phy.mdix = AUTO_ALL_MODES;
+
return 0;
err_inval:
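
Note: the allocation change in igb_alloc_mapped_page swaps
alloc_page(GFP_ATOMIC | __GFP_COLD) for __skb_alloc_page(GFP_ATOMIC, bi->skb),
a helper of this kernel generation that also propagates pfmemalloc state: if
the page had to come from emergency reserves, the skb is flagged so the stack
can drop it unless it serves swap-over-network traffic. A rough sketch of what
the helper adds, simplified from memory of the era's skbuff.h (details may
differ; rx_alloc_page is a hypothetical name, not the real helper):

	static inline struct page *rx_alloc_page(gfp_t gfp, struct sk_buff *skb)
	{
		/* __GFP_MEMALLOC lets atomic Rx allocations dip into reserves */
		struct page *page = alloc_pages(gfp | __GFP_COLD | __GFP_MEMALLOC, 0);

		if (skb && page && page->pfmemalloc)
			skb->pfmemalloc = true;	/* mark skb as reserve-backed */
		return page;
	}
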
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index b9623e9ea89..bffcf1f2357 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -78,6 +78,9 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
+#define IXGBE_RXBUFFER_2K 2048
+#define IXGBE_RXBUFFER_3K 3072
+#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
@@ -104,6 +107,7 @@
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
+#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -293,16 +297,25 @@ struct ixgbe_ring_feature {
* this is twice the size of a half page we need to double the page order
* for FCoE enabled Rx queues.
*/
-#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
- return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
+#ifdef IXGBE_FCOE
+ if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+ return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
+ IXGBE_RXBUFFER_3K;
+#endif
+ return IXGBE_RXBUFFER_2K;
}
-#else
-#define ixgbe_rx_pg_order(_ring) 0
+
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+#ifdef IXGBE_FCOE
+ if (test_bit(__IXGBE_RX_FCOE, &ring->state))
+ return (PAGE_SIZE < 8192) ? 1 : 0;
#endif
+ return 0;
+}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
-#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
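
Note: with the new helpers the Rx buffer geometry is fixed at 2K for normal
rings and only grows for FCoE, instead of always being half of a possibly
order-1 page. Worked through for the interesting configurations (illustrative
arithmetic only):

	/* 4K pages, FCoE ring:   bufsz = 4096, pg_order = 1
	 *   -> one order-1 (8K) page holds two 4K buffers
	 * 4K pages, normal ring: bufsz = 2048, pg_order = 0
	 *   -> one 4K page holds two 2K buffers
	 * 64K pages, FCoE ring:  bufsz = 3072, pg_order = 0
	 *   -> successive 3K buffers walk up a single page
	 */
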
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 50fc137501d..18bf08c9d7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
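
Note: the KX4/KR hunk is a missing-braces fix: the unbraced outer if guarded
only the first inner if, so the KR_SUPP test ran for every requested speed and
could advertise KR even when 10G wasn't asked for. The shape of the bug, with
indentation showing what the compiler actually saw:

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
			autoc |= IXGBE_AUTOC_KX4_SUPP;
	if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&	/* ran unconditionally */
	    (hw->phy.smart_speed_active == false))
		autoc |= IXGBE_AUTOC_KR_SUPP;
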
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c709eae58c6..fa0d6e1561c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1141,8 +1141,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
/* alloc new page for storage */
if (likely(!page)) {
- page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
- ixgbe_rx_pg_order(rx_ring));
+ page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
+ bi->skb, ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
@@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
}
bi->dma = dma;
- bi->page_offset ^= ixgbe_rx_bufsz(rx_ring);
+ bi->page_offset = 0;
return true;
}
@@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return max_len;
}
-static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- __le32 rsc_enabled;
- u32 rsc_cnt;
-
- if (!ring_is_rsc_enabled(rx_ring))
- return;
-
- rsc_enabled = rx_desc->wb.lower.lo_dword.data &
- cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
-
- /* If this is an RSC frame rsc_cnt should be non-zero */
- if (!rsc_enabled)
- return;
-
- rsc_cnt = le32_to_cpu(rsc_enabled);
- rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
-
- IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
-}
-
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
struct sk_buff *skb)
{
@@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
prefetch(IXGBE_RX_DESC(rx_ring, ntc));
- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
- return false;
+ /* update RSC append count if present */
+ if (ring_is_rsc_enabled(rx_ring)) {
+ __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
+ cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
+
+ if (unlikely(rsc_enabled)) {
+ u32 rsc_cnt = le32_to_cpu(rsc_enabled);
- /* append_cnt indicates packet is RSC, if so fetch nextp */
- if (IXGBE_CB(skb)->append_cnt) {
- ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
- ntc &= IXGBE_RXDADV_NEXTP_MASK;
- ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+ rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
+ IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
+
+ /* update ntc based on RSC value */
+ ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
+ ntc &= IXGBE_RXDADV_NEXTP_MASK;
+ ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
+ }
}
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ return false;
+
/* place skb in next buffer to be received */
rx_ring->rx_buffer_info[ntc].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
@@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
}
/**
+ * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an ixgbe specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
+
+ /*
+ * it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb. The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+ struct sk_buff *skb)
+{
+ /* if the page was released unmap it, else just sync our portion */
+ if (unlikely(IXGBE_CB(skb)->page_released)) {
+ dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+ ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+ IXGBE_CB(skb)->page_released = false;
+ } else {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ IXGBE_CB(skb)->dma,
+ frag->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+ IXGBE_CB(skb)->dma = 0;
+}
+
+/**
* ixgbe_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
@@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
struct net_device *netdev = rx_ring->netdev;
- unsigned char *va;
- unsigned int pull_len;
-
- /* if the page was released unmap it, else just sync our portion */
- if (unlikely(IXGBE_CB(skb)->page_released)) {
- dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
- IXGBE_CB(skb)->page_released = false;
- } else {
- dma_sync_single_range_for_cpu(rx_ring->dev,
- IXGBE_CB(skb)->dma,
- frag->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- }
- IXGBE_CB(skb)->dma = 0;
/* verify that the packet does not have any known errors */
if (unlikely(ixgbe_test_staterr(rx_desc,
@@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
return true;
}
- /*
- * it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /*
- * we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = skb_frag_size(frag);
- if (pull_len > IXGBE_RX_HDR_SIZE)
- pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
-
- /*
- * if we sucked the frag empty then we should free it,
- * if there are other frags here something is screwed up in hardware
- */
- if (skb_frag_size(frag) == 0) {
- BUG_ON(skb_shinfo(skb)->nr_frags != 1);
- skb_shinfo(skb)->nr_frags = 0;
- __skb_frag_unref(frag);
- skb->truesize -= ixgbe_rx_bufsz(rx_ring);
- }
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ ixgbe_pull_tail(rx_ring, skb);
#ifdef IXGBE_FCOE
/* do not attempt to pad FCoE Frames as this will disrupt DDP */
@@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
}
/**
- * ixgbe_can_reuse_page - determine if we can reuse a page
- * @rx_buffer: pointer to rx_buffer containing the page we want to reuse
- *
- * Returns true if page can be reused in another Rx buffer
- **/
-static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer)
-{
- struct page *page = rx_buffer->page;
-
- /* if we are only owner of page and it is local we can reuse it */
- return likely(page_count(page) == 1) &&
- likely(page_to_nid(page) == numa_node_id());
-}
-
-/**
* ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused
*
- * Syncronizes page for reuse by the adapter
+ * Synchronizes page for reuse by the adapter
**/
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *old_buff)
{
struct ixgbe_rx_buffer *new_buff;
u16 nta = rx_ring->next_to_alloc;
- u16 bufsz = ixgbe_rx_bufsz(rx_ring);
new_buff = &rx_ring->rx_buffer_info[nta];
@@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
/* transfer page from old buffer to new buffer */
new_buff->page = old_buff->page;
new_buff->dma = old_buff->dma;
-
- /* flip page offset to other buffer and store to new_buff */
- new_buff->page_offset = old_buff->page_offset ^ bufsz;
+ new_buff->page_offset = old_buff->page_offset;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
- new_buff->page_offset, bufsz,
+ new_buff->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
-
- /* bump ref count on page before it is given to the stack */
- get_page(new_buff->page);
}
/**
@@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
* @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
*
- * This function is based on skb_add_rx_frag. I would have used that
- * function however it doesn't handle the truesize case correctly since we
- * are allocating more memory than might be used for a single receive.
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
**/
-static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *rx_buffer,
- struct sk_buff *skb, int size)
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer->page, rx_buffer->page_offset,
- size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += ixgbe_rx_bufsz(rx_ring);
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+ ixgbe_rx_bufsz(rx_ring);
+#endif
+
+ if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+
+ /*
+ * since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
+
+static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbe_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IXGBE_RX_HDR_SIZE);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
+ /*
+ * Delay unmapping of the first packet. It carries the
+ * header information, HW may still access the header
+ * after the writeback. Only unmap it when EOP is
+ * reached
+ */
+ if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
+ goto dma_sync;
+
+ IXGBE_CB(skb)->dma = rx_buffer->dma;
+ } else {
+ if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+ ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ ixgbe_rx_bufsz(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+
+ /* pull page into skb */
+ if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+ } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ /* the page has been released from the ring */
+ IXGBE_CB(skb)->page_released = true;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ ixgbe_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->skb = NULL;
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
/**
@@ -1658,11 +1790,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
do {
- struct ixgbe_rx_buffer *rx_buffer;
union ixgbe_adv_rx_desc *rx_desc;
struct sk_buff *skb;
- struct page *page;
- u16 ntc;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -1670,9 +1799,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
cleaned_count = 0;
}
- ntc = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
- rx_buffer = &rx_ring->rx_buffer_info[ntc];
+ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
break;
@@ -1684,75 +1811,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
rmb();
- page = rx_buffer->page;
- prefetchw(page);
-
- skb = rx_buffer->skb;
+ /* retrieve a buffer from the ring */
+ skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
-
- /* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
-#endif
-
- /* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IXGBE_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- break;
- }
-
- /*
- * we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
-
- /*
- * Delay unmapping of the first packet. It carries the
- * header information, HW may still access the header
- * after the writeback. Only unmap it when EOP is
- * reached
- */
- IXGBE_CB(skb)->dma = rx_buffer->dma;
- } else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- ixgbe_rx_bufsz(rx_ring),
- DMA_FROM_DEVICE);
- }
-
- /* pull page into skb */
- ixgbe_add_rx_frag(rx_ring, rx_buffer, skb,
- le16_to_cpu(rx_desc->wb.upper.length));
-
- if (ixgbe_can_reuse_page(rx_buffer)) {
- /* hand second half of page back to the ring */
- ixgbe_reuse_rx_page(rx_ring, rx_buffer);
- } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
- /* the page has been released from the ring */
- IXGBE_CB(skb)->page_released = true;
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buffer->dma,
- ixgbe_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->skb = NULL;
- rx_buffer->dma = 0;
- rx_buffer->page = NULL;
-
- ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb);
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
cleaned_count++;
@@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
-#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
- srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-#endif
/* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
* total size of max desc * buf_len is not greater
* than 65536
*/
-#if (PAGE_SIZE <= 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (PAGE_SIZE <= 16384)
- rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#else
- rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
-#endif
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}
@@ -4130,27 +4184,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
- * @rx_ring: ring to setup
- *
- * On many IA platforms the L1 cache has a critical stride of 4K, this
- * results in each receive buffer starting in the same cache set. To help
- * reduce the pressure on this cache set we can interleave the offsets so
- * that only every other buffer will be in the same cache set.
- **/
-static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info;
- u16 i;
-
- for (i = 0; i < rx_ring->count; i += 2) {
- rx_buffer[0].page_offset = 0;
- rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring);
- rx_buffer = &rx_buffer[2];
- }
-}
-
-/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
**/
@@ -4195,8 +4228,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
memset(rx_ring->rx_buffer_info, 0, size);
- ixgbe_init_rx_page_offset(rx_ring);
-
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -4646,8 +4677,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
- ixgbe_init_rx_page_offset(rx_ring);
-
return 0;
err:
vfree(rx_ring->rx_buffer_info);
@@ -5874,9 +5903,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
- return;
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
+ if (unlikely(skb->no_fcs))
+ first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
+ return;
+ }
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -5938,7 +5970,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_IFCS |
IXGBE_ADVTXD_DCMD_DEXT);
/* set HW vlan bit if vlan is present */
@@ -5958,6 +5989,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
#endif
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */
+ if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
+ cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+
return cmd_type;
}
@@ -6063,8 +6098,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
if (likely(!data_len))
break;
- if (unlikely(skb->no_fcs))
- cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS));
tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
i++;
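
Note: the bulk of the ixgbe_main.c rework centralizes Rx buffer handling in
ixgbe_fetch_rx_buffer() and makes half-page reuse explicit in
ixgbe_add_rx_frag(). The notable micro-optimization is the reference count on
sub-8K pages: the ring keeps one half of the page while the stack holds the
other, and since the driver has just verified it is the sole owner
(page_count(page) == 1), it can set the count straight to 2 rather than pay
for an atomic increment. Condensed from the PAGE_SIZE < 8192 branch above:

	/* hand the current half-page to the stack as a frag */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			rx_buffer->page_offset, size, truesize);

	if (page_to_nid(page) != numa_node_id())
		return false;		/* never recycle remote pages */
	if (page_count(page) != 1)
		return false;		/* someone else still holds a ref */

	rx_buffer->page_offset ^= truesize;	/* flip to the other half */
	atomic_set(&page->_count, 2);	/* sole owner: skip the atomic inc */
	return true;			/* ring keeps one ref, skb the other */

A related cleanup moves the skb->no_fcs test out of the per-descriptor loop in
ixgbe_tx_map() and into a per-packet IXGBE_TX_FLAGS_NO_IFCS flag, so
ixgbe_tx_cmd_type() decides frame-checksum insertion once.
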
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3f9841d619a..60ef6458741 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -352,7 +352,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
-
bi->skb = skb;
}
if (!bi->dma) {