Diffstat (limited to 'drivers/net/igb')
 drivers/net/igb/Makefile        |    4
 drivers/net/igb/e1000_82575.c   |  141
 drivers/net/igb/e1000_82575.h   |   64
 drivers/net/igb/e1000_defines.h |  134
 drivers/net/igb/e1000_hw.h      |  202
 drivers/net/igb/e1000_mac.c     |   66
 drivers/net/igb/e1000_mac.h     |    9
 drivers/net/igb/e1000_mbx.c     |  447
 drivers/net/igb/e1000_mbx.h     |   77
 drivers/net/igb/e1000_nvm.c     |   44
 drivers/net/igb/e1000_phy.c     |  352
 drivers/net/igb/e1000_phy.h     |    3
 drivers/net/igb/e1000_regs.h    |  103
 drivers/net/igb/igb.h           |   92
 drivers/net/igb/igb_ethtool.c   |  206
 drivers/net/igb/igb_main.c      | 1891
 16 files changed, 2580 insertions(+), 1255 deletions(-)
diff --git a/drivers/net/igb/Makefile b/drivers/net/igb/Makefile
index 1927b3fd6f0..8372cb9a8c1 100644
--- a/drivers/net/igb/Makefile
+++ b/drivers/net/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2007 Intel Corporation.
+# Copyright(c) 1999 - 2009 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -33,5 +33,5 @@
obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
- e1000_mac.o e1000_nvm.o e1000_phy.o
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 13ca73f96ec..efd9be21488 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 - 2008 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -62,17 +62,12 @@ static bool igb_sgmii_active_82575(struct e1000_hw *);
static s32 igb_reset_init_script_82575(struct e1000_hw *);
static s32 igb_read_mac_addr_82575(struct e1000_hw *);
-
-struct e1000_dev_spec_82575 {
- bool sgmii_active;
-};
-
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
struct e1000_nvm_info *nvm = &hw->nvm;
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 *dev_spec;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
u32 eecd;
s32 ret_val;
u16 size;
@@ -85,8 +80,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->type = e1000_82575;
break;
case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_NS:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
mac->type = e1000_82576;
break;
default:
@@ -94,17 +91,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* MAC initialization */
- hw->dev_spec_size = sizeof(struct e1000_dev_spec_82575);
-
- /* Device-specific structure allocation */
- hw->dev_spec = kzalloc(hw->dev_spec_size, GFP_KERNEL);
-
- if (!hw->dev_spec)
- return -ENOMEM;
-
- dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
-
/* Set media type */
/*
* The 82575 uses bits 22:23 for link mode. The mode can be changed
@@ -195,13 +181,13 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* PHY function pointers */
if (igb_sgmii_active_82575(hw)) {
- phy->ops.reset_phy = igb_phy_hw_reset_sgmii_82575;
- phy->ops.read_phy_reg = igb_read_phy_reg_sgmii_82575;
- phy->ops.write_phy_reg = igb_write_phy_reg_sgmii_82575;
+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
} else {
- phy->ops.reset_phy = igb_phy_hw_reset;
- phy->ops.read_phy_reg = igb_read_phy_reg_igp;
- phy->ops.write_phy_reg = igb_write_phy_reg_igp;
+ phy->ops.reset = igb_phy_hw_reset;
+ phy->ops.read_reg = igb_read_phy_reg_igp;
+ phy->ops.write_reg = igb_write_phy_reg_igp;
}
/* Set phy->phy_addr and phy->id. */
@@ -229,6 +215,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
return -E1000_ERR_PHY;
}
+ /* if 82576 then initialize mailbox parameters */
+ if (mac->type == e1000_82576)
+ igb_init_mbx_params_pf(hw);
+
return 0;
}
@@ -451,7 +441,7 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
* SFP documentation requires the following to configure the SFP module
* to work on SGMII. No further documentation is given.
*/
- ret_val = hw->phy.ops.write_phy_reg(hw, 0x1B, 0x8084);
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
if (ret_val)
goto out;
@@ -480,28 +470,28 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
s32 ret_val;
u16 data;
- ret_val = phy->ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
if (ret_val)
goto out;
if (active) {
data |= IGP02E1000_PM_D0_LPLU;
- ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
data);
if (ret_val)
goto out;
/* When LPLU is enabled, we should disable SmartSpeed */
- ret_val = phy->ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
goto out;
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
- ret_val = phy->ops.write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
data);
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
@@ -510,24 +500,24 @@ static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
- ret_val = phy->ops.read_phy_reg(hw,
+ ret_val = phy->ops.read_reg(hw,
IGP01E1000_PHY_PORT_CONFIG, &data);
if (ret_val)
goto out;
data |= IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_phy_reg(hw,
+ ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
goto out;
} else if (phy->smart_speed == e1000_smart_speed_off) {
- ret_val = phy->ops.read_phy_reg(hw,
+ ret_val = phy->ops.read_reg(hw,
IGP01E1000_PHY_PORT_CONFIG, &data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = phy->ops.write_phy_reg(hw,
+ ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
goto out;
@@ -803,7 +793,7 @@ static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
}
/**
- * igb_update_mc_addr_list_82575 - Update Multicast addresses
+ * igb_update_mc_addr_list - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
@@ -815,9 +805,9 @@ static void igb_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
-void igb_update_mc_addr_list_82575(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count,
+ u32 rar_used_count, u32 rar_count)
{
u32 hash_value;
u32 i;
@@ -1051,7 +1041,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
* depending on user settings.
*/
hw_dbg("Forcing Speed and Duplex\n");
- ret_val = igb_phy_force_speed_duplex(hw);
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
hw_dbg("Error Forcing Speed and Duplex\n");
goto out;
@@ -1110,6 +1100,13 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw)
E1000_CTRL_SWDPIN1;
wr32(E1000_CTRL, reg);
+ /* Power on phy for 82576 fiber adapters */
+ if (hw->mac.type == e1000_82576) {
+ reg = rd32(E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP7_DATA;
+ wr32(E1000_CTRL_EXT, reg);
+ }
+
/* Set switch control to serdes energy detect */
reg = rd32(E1000_CONNSW);
reg |= E1000_CONNSW_ENRGSRC;
@@ -1227,20 +1224,12 @@ out:
**/
static bool igb_sgmii_active_82575(struct e1000_hw *hw)
{
- struct e1000_dev_spec_82575 *dev_spec;
- bool ret_val;
-
- if (hw->mac.type != e1000_82575) {
- ret_val = false;
- goto out;
- }
-
- dev_spec = (struct e1000_dev_spec_82575 *)hw->dev_spec;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
- ret_val = dev_spec->sgmii_active;
+ if (hw->mac.type != e1000_82575 && hw->mac.type != e1000_82576)
+ return false;
-out:
- return ret_val;
+ return dev_spec->sgmii_active;
}
/**
@@ -1430,6 +1419,44 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
rd32(E1000_MPC);
}
+/**
+ * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables L2 switch loopback functionality.
+ **/
+void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 dtxswc = rd32(E1000_DTXSWC);
+
+ if (enable)
+ dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+ else
+ dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+
+ wr32(E1000_DTXSWC, dtxswc);
+}
+
+/**
+ * igb_vmdq_set_replication_pf - enable or disable vmdq replication
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ *
+ * enables/disables replication of packets across multiple pools.
+ **/
+void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+ u32 vt_ctl = rd32(E1000_VT_CTL);
+
+ if (enable)
+ vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+ else
+ vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+ wr32(E1000_VT_CTL, vt_ctl);
+}
+
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.reset_hw = igb_reset_hw_82575,
.init_hw = igb_init_hw_82575,
@@ -1440,16 +1467,16 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
};
static struct e1000_phy_operations e1000_phy_ops_82575 = {
- .acquire_phy = igb_acquire_phy_82575,
+ .acquire = igb_acquire_phy_82575,
.get_cfg_done = igb_get_cfg_done_82575,
- .release_phy = igb_release_phy_82575,
+ .release = igb_release_phy_82575,
};
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
- .acquire_nvm = igb_acquire_nvm_82575,
- .read_nvm = igb_read_nvm_eerd,
- .release_nvm = igb_release_nvm_82575,
- .write_nvm = igb_write_nvm_spi,
+ .acquire = igb_acquire_nvm_82575,
+ .read = igb_read_nvm_eerd,
+ .release = igb_release_nvm_82575,
+ .write = igb_write_nvm_spi,
};
const struct e1000_info e1000_82575_info = {
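The two VMDq helpers added above are new exports for the PF; a minimal sketch of how a caller might drive them when VFs come up (igb_example_enable_vmdq() is illustrative, not part of the patch):

/* Sketch only: a PF bringing VFs up would enable both features together. */
static void igb_example_enable_vmdq(struct e1000_hw *hw, bool vfs_active)
{
	/* mirror VM-to-VM traffic back through the on-chip L2 switch */
	igb_vmdq_set_loopback_pf(hw, vfs_active);
	/* replicate multicast/broadcast frames across the enabled pools */
	igb_vmdq_set_replication_pf(hw, vfs_active);
}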
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index c1928b5efe1..eaf97705036 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 - 2008 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,7 +28,7 @@
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
-void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32);
+void igb_update_mc_addr_list(struct e1000_hw*, u8*, u32, u32, u32);
extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
@@ -40,8 +40,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define E1000_SRRCTL_DROP_EN 0x80000000
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
+#define E1000_MRQC_ENABLE_VMDQ 0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
@@ -58,9 +61,6 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
E1000_EICR_RX_QUEUE2 | \
E1000_EICR_RX_QUEUE3)
-#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
-#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
-
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
/* Receive Descriptor - Advanced */
@@ -95,12 +95,6 @@ union e1000_adv_rx_desc {
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
-/* RSS Hash results */
-
-/* RSS Packet Types as indicated in the receive descriptor */
-#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
-#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
-
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
@@ -116,6 +110,7 @@ union e1000_adv_tx_desc {
};
/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */
#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
@@ -149,11 +144,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
-#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
-
-#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
-#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
@@ -170,4 +162,44 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
+#define MAX_NUM_VFS 8
+
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
+
+#define E1000_VLVF_ARRAY_SIZE 32
+#define E1000_VLVF_VLANID_MASK 0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT 12
+#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN 0x00100000
+#define E1000_VLVF_VLANID_ENABLE 0x80000000
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define ALL_QUEUES 0xFFFF
+
+void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
+void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+
#endif
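A hedged sketch of how the new VLVF defines compose into a single VLAN/pool filter entry; igb_example_vlvf_entry() is illustrative and assumes pool < MAX_NUM_VFS:

/* Sketch only: build a VLVF register value enabling `vid` for `pool`. */
static u32 igb_example_vlvf_entry(u16 vid, u8 pool)
{
	u32 reg = vid & E1000_VLVF_VLANID_MASK;

	/* one bit per pool in the 8-bit POOLSEL field */
	reg |= (1 << (E1000_VLVF_POOLSEL_SHIFT + pool)) &
	       E1000_VLVF_POOLSEL_MASK;
	/* mark the entry valid */
	reg |= E1000_VLVF_VLANID_ENABLE;

	return reg;
}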
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 40d03426c12..ad2d319d0f8 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 - 2008 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -42,33 +42,11 @@
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
-#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
-#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
-#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
-#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
-#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
-#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
-#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
-#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
-
-/* Wake Up Status */
-
-/* Wake Up Packet Length */
-
-/* Four Flexible Filters are supported */
-#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
-
-/* Each Flexible Filter is at most 128 (0x80) bytes in length */
-#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128
-
/* Extended Device Control */
-#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */
-#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */
-#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
-#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */
-#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
@@ -103,13 +81,7 @@
#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
-#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
-#define E1000_RXD_ERR_CE 0x01 /* CRC Error */
-#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
-#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
-#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
-#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_STAT_TS 0x10000 /* Pkt was time stamped */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
@@ -119,14 +91,6 @@
#define E1000_RXDEXT_STATERR_IPE 0x40000000
#define E1000_RXDEXT_STATERR_RXE 0x80000000
-/* mask to determine if packets should be dropped due to frame errors */
-#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
- E1000_RXD_ERR_CE | \
- E1000_RXD_ERR_SE | \
- E1000_RXD_ERR_SEQ | \
- E1000_RXD_ERR_CXE | \
- E1000_RXD_ERR_RXE)
-
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
E1000_RXDEXT_STATERR_CE | \
@@ -145,16 +109,11 @@
/* Management Control */
#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */
#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */
-#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
/* Enable Neighbor Discovery Filtering */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
/* Enable MAC address filtering */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
-/* Enable MNG packets to host memory */
-#define E1000_MANC_EN_MNG2HOST 0x00200000
-/* Enable IP address filtering */
-
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
@@ -162,14 +121,11 @@
#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */
#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */
#define E1000_RCTL_LPE 0x00000020 /* long packet enable */
-#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
-#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
-#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
@@ -226,11 +182,7 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
-#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
-#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */
-#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
@@ -308,9 +260,7 @@
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
/* LED Control */
-#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F
#define E1000_LEDCTL_LED0_MODE_SHIFT 0
-#define E1000_LEDCTL_LED0_IVRT 0x00000040
#define E1000_LEDCTL_LED0_BLINK 0x00000080
#define E1000_LEDCTL_MODE_LED_ON 0xE
@@ -357,12 +307,7 @@
#define MAX_JUMBO_FRAME_SIZE 0x3F00
-/* Extended Configuration Control and Size */
-#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040
-
/* PBA constants */
-#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
-#define E1000_PBA_24K 0x0018
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */
@@ -378,41 +323,15 @@
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
-#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
-#define E1000_ICR_RXO 0x00000040 /* rx overrun */
#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
-#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */
-#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */
-#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */
-#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */
-#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */
-#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */
-#define E1000_ICR_TXD_LOW 0x00008000
-#define E1000_ICR_SRPD 0x00010000
-#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */
-#define E1000_ICR_MNG 0x00040000 /* Manageability event */
-#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */
+#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
/* If this bit asserted, the driver should claim the interrupt */
#define E1000_ICR_INT_ASSERTED 0x80000000
-/* queue 0 Rx descriptor FIFO parity error */
-#define E1000_ICR_RXD_FIFO_PAR0 0x00100000
-/* queue 0 Tx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR0 0x00200000
-/* host arb read buffer parity error */
-#define E1000_ICR_HOST_ARB_PAR 0x00400000
-#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */
-/* queue 1 Rx descriptor FIFO parity error */
-#define E1000_ICR_RXD_FIFO_PAR1 0x01000000
-/* queue 1 Tx descriptor FIFO parity error */
-#define E1000_ICR_TXD_FIFO_PAR1 0x02000000
-/* FW changed the status of DISSW bit in the FWSM */
-#define E1000_ICR_DSW 0x00000020
/* LAN connected device generates an interrupt */
-#define E1000_ICR_PHYINT 0x00001000
-#define E1000_ICR_EPRST 0x00100000 /* ME handware reset occurs */
+#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */
/* Extended Interrupt Cause Read */
#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
@@ -423,7 +342,6 @@
#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */
#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */
#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */
-#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
/* TCP Timer */
@@ -441,17 +359,19 @@
E1000_IMS_TXDW | \
E1000_IMS_RXDMT0 | \
E1000_IMS_RXSEQ | \
- E1000_IMS_LSC)
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
+#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
+#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
/* Extended Interrupt Mask Set */
-#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
/* Interrupt Cause Set */
@@ -481,6 +401,10 @@
* manageability enabled, allowing us room for 15 multicast addresses.
*/
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_POOL_MASK 0x03FC0000
+#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
#define E1000_ERR_NVM 1
@@ -490,10 +414,10 @@
#define E1000_ERR_MAC_INIT 5
#define E1000_ERR_RESET 9
#define E1000_ERR_MASTER_REQUESTS_PENDING 10
-#define E1000_ERR_HOST_INTERFACE_COMMAND 11
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX 15
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
@@ -510,30 +434,9 @@
/* Flow Control */
#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
-/* Transmit Configuration Word */
-#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
-
-/* Receive Configuration Word */
-
-/* PCI Express Control */
-#define E1000_GCR_RXD_NO_SNOOP 0x00000001
-#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
-#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004
-#define E1000_GCR_TXD_NO_SNOOP 0x00000008
-#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010
-#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020
-
-#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \
- E1000_GCR_RXDSCW_NO_SNOOP | \
- E1000_GCR_RXDSCR_NO_SNOOP | \
- E1000_GCR_TXD_NO_SNOOP | \
- E1000_GCR_TXDSCW_NO_SNOOP | \
- E1000_GCR_TXDSCR_NO_SNOOP)
-
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
@@ -609,6 +512,7 @@
#define NVM_ID_LED_SETTINGS 0x0004
/* For SERDES output amplitude adjustment. */
#define NVM_INIT_CONTROL2_REG 0x000F
+#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
@@ -663,10 +567,8 @@
#define IGP_LED3_MODE 0x07000000
/* PCI/PCI-X/PCI-EX Config space */
-#define PCI_HEADER_TYPE_REGISTER 0x0E
#define PCIE_LINK_STATUS 0x12
-#define PCI_HEADER_TYPE_MULTIFUNC 0x80
#define PCIE_LINK_WIDTH_MASK 0x3F0
#define PCIE_LINK_WIDTH_SHIFT 4
@@ -763,4 +665,8 @@
#define E1000_GEN_CTL_ADDRESS_SHIFT 8
#define E1000_GEN_POLL_TIMEOUT 640
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
#endif
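The three VFTA defines added at the bottom carve a 12-bit VLAN id into a dword index and a bit position; a minimal sketch of the arithmetic (the same math appears in igb_vfta_set() later in this diff):

/* Sketch only: locate the VFTA dword and bit for a given VLAN id. */
static void igb_example_vfta_locate(u32 vid, u32 *index, u32 *mask)
{
	/* dword index into the 128-entry table */
	*index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
	/* bit within that dword */
	*mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}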
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 99504a600a8..68aac20c31c 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,7 +32,6 @@
#include <linux/delay.h>
#include <linux/io.h>
-#include "e1000_mac.h"
#include "e1000_regs.h"
#include "e1000_defines.h"
@@ -41,6 +40,8 @@ struct e1000_hw;
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
+#define E1000_DEV_ID_82576_NS 0x150A
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
@@ -144,144 +145,6 @@ enum e1000_fc_type {
e1000_fc_default = 0xFF
};
-
-/* Receive Descriptor */
-struct e1000_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 special;
-};
-
-/* Receive Descriptor - Extended */
-union e1000_rx_desc_extended {
- struct {
- __le64 buffer_addr;
- __le64 reserved;
- } read;
- struct {
- struct {
- __le32 mrq; /* Multiple Rx Queues */
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length;
- __le16 vlan; /* VLAN tag */
- } upper;
- } wb; /* writeback */
-};
-
-#define MAX_PS_BUFFERS 4
-/* Receive Descriptor - Packet Split */
-union e1000_rx_desc_packet_split {
- struct {
- /* one buffer for protocol header(s), three data buffers */
- __le64 buffer_addr[MAX_PS_BUFFERS];
- } read;
- struct {
- struct {
- __le32 mrq; /* Multiple Rx Queues */
- union {
- __le32 rss; /* RSS Hash */
- struct {
- __le16 ip_id; /* IP id */
- __le16 csum; /* Packet Checksum */
- } csum_ip;
- } hi_dword;
- } lower;
- struct {
- __le32 status_error; /* ext status/error */
- __le16 length0; /* length of buffer 0 */
- __le16 vlan; /* VLAN tag */
- } middle;
- struct {
- __le16 header_status;
- __le16 length[3]; /* length of buffers 1-3 */
- } upper;
- __le64 reserved;
- } wb; /* writeback */
-};
-
-/* Transmit Descriptor */
-struct e1000_tx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 cso; /* Checksum offset */
- u8 cmd; /* Descriptor control */
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 css; /* Checksum start */
- __le16 special;
- } fields;
- } upper;
-};
-
-/* Offload Context Descriptor */
-struct e1000_context_desc {
- union {
- __le32 ip_config;
- struct {
- u8 ipcss; /* IP checksum start */
- u8 ipcso; /* IP checksum offset */
- __le16 ipcse; /* IP checksum end */
- } ip_fields;
- } lower_setup;
- union {
- __le32 tcp_config;
- struct {
- u8 tucss; /* TCP checksum start */
- u8 tucso; /* TCP checksum offset */
- __le16 tucse; /* TCP checksum end */
- } tcp_fields;
- } upper_setup;
- __le32 cmd_and_length;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 hdr_len; /* Header length */
- __le16 mss; /* Maximum segment size */
- } fields;
- } tcp_seg_setup;
-};
-
-/* Offload data descriptor */
-struct e1000_data_desc {
- __le64 buffer_addr; /* Address of the descriptor's buffer address */
- union {
- __le32 data;
- struct {
- __le16 length; /* Data buffer length */
- u8 typ_len_ext;
- u8 cmd;
- } flags;
- } lower;
- union {
- __le32 data;
- struct {
- u8 status; /* Descriptor status */
- u8 popts; /* Packet Options */
- __le16 special;
- } fields;
- } upper;
-};
-
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
u64 crcerrs;
@@ -359,6 +222,7 @@ struct e1000_hw_stats {
u64 lenerrs;
u64 scvpc;
u64 hrmpc;
+ u64 doosync;
};
struct e1000_phy_stats {
@@ -409,6 +273,7 @@ struct e1000_host_mng_command_info {
#include "e1000_mac.h"
#include "e1000_phy.h"
#include "e1000_nvm.h"
+#include "e1000_mbx.h"
struct e1000_mac_operations {
s32 (*check_for_link)(struct e1000_hw *);
@@ -422,25 +287,25 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire_phy)(struct e1000_hw *);
+ s32 (*acquire)(struct e1000_hw *);
s32 (*check_reset_block)(struct e1000_hw *);
s32 (*force_speed_duplex)(struct e1000_hw *);
s32 (*get_cfg_done)(struct e1000_hw *hw);
s32 (*get_cable_length)(struct e1000_hw *);
s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
- void (*release_phy)(struct e1000_hw *);
- s32 (*reset_phy)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*reset)(struct e1000_hw *);
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
};
struct e1000_nvm_operations {
- s32 (*acquire_nvm)(struct e1000_hw *);
- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
- void (*release_nvm)(struct e1000_hw *);
- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ void (*release)(struct e1000_hw *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
};
struct e1000_info {
@@ -483,7 +348,6 @@ struct e1000_mac_info {
bool asf_firmware_present;
bool autoneg;
bool autoneg_failed;
- bool disable_av;
bool disable_hw_init_bits;
bool get_link_status;
bool ifs_params_forced;
@@ -565,9 +429,40 @@ struct e1000_fc_info {
enum e1000_fc_type original_type;
};
+struct e1000_mbx_operations {
+ s32 (*init_params)(struct e1000_hw *hw);
+ s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct e1000_hw *, u16);
+ s32 (*check_for_ack)(struct e1000_hw *, u16);
+ s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct e1000_mbx_info {
+ struct e1000_mbx_operations ops;
+ struct e1000_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+ bool sgmii_active;
+};
+
struct e1000_hw {
void *back;
- void *dev_spec;
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
@@ -578,9 +473,12 @@ struct e1000_hw {
struct e1000_phy_info phy;
struct e1000_nvm_info nvm;
struct e1000_bus_info bus;
+ struct e1000_mbx_info mbx;
struct e1000_host_mng_dhcp_cookie mng_cookie;
- u32 dev_spec_size;
+ union {
+ struct e1000_dev_spec_82575 _82575;
+ } dev_spec;
u16 device_id;
u16 subsystem_vendor_id;
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 97f0049a5d6..f4c315b5a90 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -37,19 +37,6 @@
static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
-/**
- * igb_remove_device - Free device specific structure
- * @hw: pointer to the HW structure
- *
- * If a device specific structure was allocated, this function will
- * free it.
- **/
-void igb_remove_device(struct e1000_hw *hw)
-{
- /* Freeing the dev_spec member of e1000_hw structure */
- kfree(hw->dev_spec);
-}
-
static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct igb_adapter *adapter = hw->back;
@@ -131,6 +118,37 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
+ * igb_vfta_set - enable or disable vlan in VLAN filter table
+ * @hw: pointer to the HW structure
+ * @vid: VLAN id to add or remove
+ * @add: if true add filter, if false remove
+ *
+ * Sets or clears a bit in the VLAN filter table array based on VLAN id
+ * and if we are adding or removing the filter
+ **/
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+{
+ u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+ u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ u32 vfta = array_rd32(E1000_VFTA, index);
+ s32 ret_val = 0;
+
+ /* bit was set/cleared before we started */
+ if ((!!(vfta & mask)) == add) {
+ ret_val = -E1000_ERR_CONFIG;
+ } else {
+ if (add)
+ vfta |= mask;
+ else
+ vfta &= ~mask;
+ }
+
+ igb_write_vfta(hw, index, vfta);
+
+ return ret_val;
+}
+
+/**
* igb_check_alt_mac_addr - Check for alternate MAC addr
* @hw: pointer to the HW structure
*
@@ -148,7 +166,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
- ret_val = hw->nvm.ops.read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&nvm_alt_mac_addr_offset);
if (ret_val) {
hw_dbg("NVM Read Error\n");
@@ -165,7 +183,7 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
for (i = 0; i < ETH_ALEN; i += 2) {
offset = nvm_alt_mac_addr_offset + (i >> 1);
- ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
@@ -213,7 +231,8 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
- if (!hw->mac.disable_av)
+ /* If the MAC address is zero, there is no need to set the AV bit */
+ if (rar_low || rar_high)
rar_high |= E1000_RAH_AV;
wr32(E1000_RAL(index), rar_low);
@@ -588,8 +607,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
- ret_val = hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL2_REG, 1,
- &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
@@ -720,11 +738,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
&mii_status_reg);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS,
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
&mii_status_reg);
if (ret_val)
goto out;
@@ -742,11 +760,11 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
* Page Ability Register (Address 5) to determine how
* flow control was negotiated.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
+ ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_LP_ABILITY,
+ ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
&mii_nway_lp_ability_reg);
if (ret_val)
goto out;
@@ -1041,7 +1059,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
s32 ret_val;
- ret_val = hw->nvm.ops.read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
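The newly exported igb_vfta_set() gives callers one add/remove entry point into the VLAN filter table; a hedged sketch of a caller (the VLAN-hook context is assumed, not part of this patch):

/* Sketch only: how a netdev VLAN-add path might drive igb_vfta_set(). */
static void igb_example_vlan_add(struct e1000_hw *hw, u32 vid)
{
	/* -E1000_ERR_CONFIG only means the bit already had that value */
	if (igb_vfta_set(hw, vid, true) == -E1000_ERR_CONFIG)
		hw_dbg("VLAN %u was already in the VFTA\n", vid);
}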
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index cbee6af7d91..a34de526963 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -58,12 +58,12 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
void igb_clear_vfta(struct e1000_hw *hw);
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
void igb_config_collision_dist(struct e1000_hw *hw);
void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
void igb_put_hw_semaphore(struct e1000_hw *hw);
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
-void igb_remove_device(struct e1000_hw *hw);
void igb_reset_adaptive(struct e1000_hw *hw);
void igb_update_adaptive(struct e1000_hw *hw);
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
@@ -83,13 +83,8 @@ enum e1000_mng_mode {
#define E1000_FWSM_MODE_MASK 0xE
#define E1000_FWSM_MODE_SHIFT 1
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C 0x02
-
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
extern u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
new file mode 100644
index 00000000000..fe71c7ddaa0
--- /dev/null
+++ b/drivers/net/igb/e1000_mbx.c
@@ -0,0 +1,447 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_mbx.h"
+
+/**
+ * igb_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read a message from the buffer
+ **/
+s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = -E1000_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * igb_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!mbx->ops.check_for_msg)
+ goto out;
+
+ while (mbx->ops.check_for_msg(hw, mbx_id)) {
+ if (!countdown)
+ break;
+ countdown--;
+ udelay(mbx->usec_delay);
+ }
+out:
+ return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ * igb_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!mbx->ops.check_for_ack)
+ goto out;
+
+ while (mbx->ops.check_for_ack(hw, mbx_id)) {
+ if (!countdown)
+ break;
+ countdown--;
+ udelay(mbx->usec_delay);
+ }
+out:
+ return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ * igb_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = igb_poll_for_msg(hw, mbx_id);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * igb_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (!mbx->ops.write)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = igb_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_init_mbx_ops_generic - Initialize mailbox function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the posted read/write function pointers to the generic handlers
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+ mbx->ops.read_posted = igb_read_posted_mbx;
+ mbx->ops.write_posted = igb_write_posted_mbx;
+}
+
+static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+ u32 mbvficr = rd32(E1000_MBVFICR);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ wr32(E1000_MBVFICR, mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Request bit or else ERR_MBX
+ **/
+static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF's VFLRE (reset) bit is set or else ERR_MBX
+ **/
+static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+ u32 vflre = rd32(E1000_VFLRE);
+ s32 ret_val = -E1000_ERR_MBX;
+
+ if (vflre & (1 << vf_number)) {
+ ret_val = 0;
+ wr32(E1000_VFLRE, (1 << vf_number));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * igb_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ u32 p2v_mailbox;
+ s32 ret_val = 0;
+ u16 i;
+
+ /* Take ownership of the buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+ /* Make sure we have ownership now... */
+ p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+ if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
+ /* failed to grab ownership */
+ ret_val = -E1000_ERR_MBX;
+ goto out_no_write;
+ }
+
+ /*
+ * flush any ack or msg which may already be in the queue
+ * as they are likely the result of an error
+ */
+ igb_check_for_ack_pf(hw, vf_number);
+ igb_check_for_msg_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * igb_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ u32 p2v_mailbox;
+ s32 ret_val = 0;
+ u16 i;
+
+ /* Take ownership of the buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+ /* Make sure we have ownership now... */
+ p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+ if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
+ /* failed to grab ownership */
+ ret_val = -E1000_ERR_MBX;
+ goto out_no_read;
+ }
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ ret_val = 0;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * igb_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ **/
+s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
+{
+ struct e1000_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type == e1000_82576) {
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = igb_read_mbx_pf;
+ mbx->ops.write = igb_write_mbx_pf;
+ mbx->ops.read_posted = igb_read_posted_mbx;
+ mbx->ops.write_posted = igb_write_posted_mbx;
+ mbx->ops.check_for_msg = igb_check_for_msg_pf;
+ mbx->ops.check_for_ack = igb_check_for_ack_pf;
+ mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+ }
+
+ return 0;
+}
+
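A minimal sketch of PF-side use of the API above; note igb_init_mbx_params_pf() leaves timeout at zero, so the plain (non-posted) read/write paths are the usual PF choice. igb_example_ping_vf() is illustrative only:

/* Sketch only: PF notifies a VF with a control message and drains
 * the VF's reply once the request bit shows up. */
static s32 igb_example_ping_vf(struct e1000_hw *hw, u16 vf)
{
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	s32 ret_val;

	msgbuf[0] = E1000_PF_CONTROL_MSG;
	ret_val = igb_write_mbx(hw, msgbuf, 1, vf);
	if (ret_val)
		return ret_val;

	/* only read once check_for_msg reports the VF's request */
	if (!igb_check_for_msg(hw, vf))
		ret_val = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	return ret_val;
}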
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
new file mode 100644
index 00000000000..6ec9890a8f7
--- /dev/null
+++ b/drivers/net/igb/e1000_mbx.h
@@ -0,0 +1,77 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_hw.h"
+
+#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_check_for_msg(struct e1000_hw *, u16);
+s32 igb_check_for_ack(struct e1000_hw *, u16);
+s32 igb_check_for_rst(struct e1000_hw *, u16);
+s32 igb_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
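As a sketch of how these type bits compose (hypothetical helpers, not part
of the header): a request carries its opcode in the low bits and any extra
data in the MSGINFO field, and a reply is the opcode or'd with ACK or NACK:

    static inline u32 vf_set_multicast_msg(u16 hash_count)
    {
            /* opcode in the low word, hash count in bits 23:16 */
            return E1000_VF_SET_MULTICAST |
                   ((u32)hash_count << E1000_VT_MSGINFO_SHIFT);
    }

    static inline bool msg_was_acked(u32 reply, u32 request)
    {
            return (reply & E1000_VT_MSGTYPE_ACK) &&
                   (reply & 0xFFFF) == (request & 0xFFFF);
    }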
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a84e4e429fa..a88bfe2f1e8 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -419,7 +419,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
goto out;
}
- ret_val = hw->nvm.ops.acquire_nvm(hw);
+ ret_val = hw->nvm.ops.acquire(hw);
if (ret_val)
goto out;
@@ -468,7 +468,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
msleep(10);
release:
- hw->nvm.ops.release_nvm(hw);
+ hw->nvm.ops.release(hw);
out:
return ret_val;
@@ -487,14 +487,14 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
s32 ret_val;
u16 nvm_data;
- ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
}
*part_num = (u32)(nvm_data << 16);
- ret_val = hw->nvm.ops.read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
@@ -515,29 +515,23 @@ out:
**/
s32 igb_read_mac_addr(struct e1000_hw *hw)
{
- s32 ret_val = 0;
- u16 offset, nvm_data, i;
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
- for (i = 0; i < ETH_ALEN; i += 2) {
- offset = i >> 1;
- ret_val = hw->nvm.ops.read_nvm(hw, offset, 1, &nvm_data);
- if (ret_val) {
- hw_dbg("NVM Read Error\n");
- goto out;
- }
- hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
- hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
- }
+ rar_high = rd32(E1000_RAH(0));
+ rar_low = rd32(E1000_RAL(0));
+
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
- /* Flip last bit of mac address if we're on second port */
- if (hw->bus.func == E1000_FUNC_1)
- hw->mac.perm_addr[5] ^= 1;
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
-out:
- return ret_val;
+ return 0;
}
/**
@@ -554,7 +548,7 @@ s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
u16 i, nvm_data;
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error\n");
goto out;
@@ -587,7 +581,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
u16 i, nvm_data;
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
- ret_val = hw->nvm.ops.read_nvm(hw, i, 1, &nvm_data);
+ ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
if (ret_val) {
hw_dbg("NVM Read Error while updating checksum.\n");
goto out;
@@ -595,7 +589,7 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
checksum += nvm_data;
}
checksum = (u16) NVM_SUM - checksum;
- ret_val = hw->nvm.ops.write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+ ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
if (ret_val)
hw_dbg("NVM Write Error while updating checksum.\n");
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 17fddb91c9f..de2d4862468 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,6 @@
#include "e1000_mac.h"
#include "e1000_phy.h"
-static s32 igb_get_phy_cfg_done(struct e1000_hw *hw);
-static void igb_release_phy(struct e1000_hw *hw);
-static s32 igb_acquire_phy(struct e1000_hw *hw);
-static s32 igb_phy_reset_dsp(struct e1000_hw *hw);
static s32 igb_phy_setup_autoneg(struct e1000_hw *hw);
static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
u16 *phy_ctrl);
@@ -43,9 +39,6 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] =
{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
- (sizeof(e1000_m88_cable_length_table) / \
- sizeof(e1000_m88_cable_length_table[0]))
static const u16 e1000_igp_2_cable_length_table[] =
{ 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -91,13 +84,13 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
s32 ret_val = 0;
u16 phy_id;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID1, &phy_id);
+ ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_ID2, &phy_id);
+ ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
if (ret_val)
goto out;
@@ -118,11 +111,11 @@ static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
{
s32 ret_val;
- ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
out:
return ret_val;
@@ -257,9 +250,12 @@ out:
**/
s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
{
- s32 ret_val;
+ s32 ret_val = 0;
+
+ if (!(hw->phy.ops.acquire))
+ goto out;
- ret_val = igb_acquire_phy(hw);
+ ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
@@ -268,16 +264,15 @@ s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
if (ret_val) {
- igb_release_phy(hw);
+ hw->phy.ops.release(hw);
goto out;
}
}
- ret_val = igb_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+ data);
- igb_release_phy(hw);
+ hw->phy.ops.release(hw);
out:
return ret_val;
@@ -294,9 +289,12 @@ out:
**/
s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
{
- s32 ret_val;
+ s32 ret_val = 0;
- ret_val = igb_acquire_phy(hw);
+ if (!(hw->phy.ops.acquire))
+ goto out;
+
+ ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
@@ -305,16 +303,15 @@ s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
IGP01E1000_PHY_PAGE_SELECT,
(u16)offset);
if (ret_val) {
- igb_release_phy(hw);
+ hw->phy.ops.release(hw);
goto out;
}
}
- ret_val = igb_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
+ ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
data);
- igb_release_phy(hw);
+ hw->phy.ops.release(hw);
out:
return ret_val;
@@ -339,8 +336,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
}
/* Enable CRS on TX. This must be set for half-duplex operation. */
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
@@ -383,8 +379,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
if (phy->disable_polarity_correction == 1)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
- ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
goto out;
@@ -393,8 +388,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw,
- M88E1000_EXT_PHY_SPEC_CTRL,
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
&phy_data);
if (ret_val)
goto out;
@@ -413,8 +407,7 @@ s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
}
- ret_val = hw->phy.ops.write_phy_reg(hw,
- M88E1000_EXT_PHY_SPEC_CTRL,
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
phy_data);
if (ret_val)
goto out;
@@ -449,7 +442,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
goto out;
}
- ret_val = hw->phy.ops.reset_phy(hw);
+ ret_val = phy->ops.reset(hw);
if (ret_val) {
hw_dbg("Error resetting the PHY.\n");
goto out;
@@ -464,8 +457,8 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
*/
if (phy->type == e1000_phy_igp) {
/* disable lplu d3 during driver init */
- if (hw->phy.ops.set_d3_lplu_state)
- ret_val = hw->phy.ops.set_d3_lplu_state(hw, false);
+ if (phy->ops.set_d3_lplu_state)
+ ret_val = phy->ops.set_d3_lplu_state(hw, false);
if (ret_val) {
hw_dbg("Error Disabling LPLU D3\n");
goto out;
@@ -473,13 +466,13 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
}
/* disable lplu d0 during driver init */
- ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ ret_val = phy->ops.set_d0_lplu_state(hw, false);
if (ret_val) {
hw_dbg("Error Disabling LPLU D0\n");
goto out;
}
/* Configure mdi-mdix settings */
- ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
if (ret_val)
goto out;
@@ -497,7 +490,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
data |= IGP01E1000_PSCR_AUTO_MDIX;
break;
}
- ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
if (ret_val)
goto out;
@@ -510,33 +503,31 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
*/
if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
- ret_val = hw->phy.ops.read_phy_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
- &data);
+ ret_val = phy->ops.read_reg(hw,
+ IGP01E1000_PHY_PORT_CONFIG,
+ &data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = hw->phy.ops.write_phy_reg(hw,
+ ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
goto out;
/* Set auto Master/Slave resolution process */
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL,
- &data);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
if (ret_val)
goto out;
data &= ~CR_1000T_MS_ENABLE;
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL,
- data);
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
if (ret_val)
goto out;
}
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_CTRL, &data);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
if (ret_val)
goto out;
@@ -560,7 +551,7 @@ s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
default:
break;
}
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_1000T_CTRL, data);
+ ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
if (ret_val)
goto out;
}
@@ -609,12 +600,12 @@ s32 igb_copper_link_autoneg(struct e1000_hw *hw)
* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
if (ret_val)
goto out;
phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
if (ret_val)
goto out;
@@ -656,15 +647,13 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_AUTONEG_ADV,
- &mii_autoneg_adv_reg);
+ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
if (ret_val)
goto out;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = hw->phy.ops.read_phy_reg(hw,
- PHY_1000T_CTRL,
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
&mii_1000t_ctrl_reg);
if (ret_val)
goto out;
@@ -785,17 +774,16 @@ static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
goto out;
}
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_AUTONEG_ADV,
- mii_autoneg_adv_reg);
+ ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
if (ret_val)
goto out;
hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
- ret_val = hw->phy.ops.write_phy_reg(hw,
- PHY_1000T_CTRL,
- mii_1000t_ctrl_reg);
+ ret_val = phy->ops.write_reg(hw,
+ PHY_1000T_CTRL,
+ mii_1000t_ctrl_reg);
if (ret_val)
goto out;
}
@@ -819,13 +807,13 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
goto out;
igb_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
if (ret_val)
goto out;
@@ -833,16 +821,14 @@ s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
if (ret_val)
goto out;
phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
- ret_val = hw->phy.ops.write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
- phy_data);
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
if (ret_val)
goto out;
@@ -897,20 +883,18 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
goto out;
hw_dbg("M88E1000 PSCR: %X\n", phy_data);
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_data);
+ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
goto out;
@@ -919,7 +903,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
/* Reset the phy to commit changes. */
phy_data |= MII_CR_RESET;
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_data);
+ ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
if (ret_val)
goto out;
@@ -940,7 +924,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
- ret_val = hw->phy.ops.write_phy_reg(hw,
+ ret_val = phy->ops.write_reg(hw,
M88E1000_PHY_PAGE_SELECT,
0x001d);
if (ret_val)
@@ -957,8 +941,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
goto out;
}
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
@@ -968,8 +951,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* the reset value of 2.5MHz.
*/
phy_data |= M88E1000_EPSCR_TX_CLK_25;
- ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
if (ret_val)
goto out;
@@ -977,14 +959,12 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- ret_val = hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- phy_data);
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
out:
return ret_val;
@@ -1071,15 +1051,13 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
s32 ret_val;
u16 data;
- ret_val = hw->phy.ops.read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
- &data);
+ ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
if (ret_val)
goto out;
if (!active) {
data &= ~IGP02E1000_PM_D3_LPLU;
- ret_val = hw->phy.ops.write_phy_reg(hw,
- IGP02E1000_PHY_POWER_MGMT,
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
data);
if (ret_val)
goto out;
@@ -1090,27 +1068,27 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
- ret_val = hw->phy.ops.read_phy_reg(hw,
+ ret_val = phy->ops.read_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
goto out;
data |= IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = hw->phy.ops.write_phy_reg(hw,
+ ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
goto out;
} else if (phy->smart_speed == e1000_smart_speed_off) {
- ret_val = hw->phy.ops.read_phy_reg(hw,
+ ret_val = phy->ops.read_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = hw->phy.ops.write_phy_reg(hw,
+ ret_val = phy->ops.write_reg(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
@@ -1120,22 +1098,19 @@ s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
(phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
(phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
data |= IGP02E1000_PM_D3_LPLU;
- ret_val = hw->phy.ops.write_phy_reg(hw,
- IGP02E1000_PHY_POWER_MGMT,
+ ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
data);
if (ret_val)
goto out;
/* When LPLU is enabled, we should disable SmartSpeed */
- ret_val = hw->phy.ops.read_phy_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
goto out;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
- ret_val = hw->phy.ops.write_phy_reg(hw,
- IGP01E1000_PHY_PORT_CONFIG,
+ ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
}
@@ -1176,7 +1151,7 @@ s32 igb_check_downshift(struct e1000_hw *hw)
goto out;
}
- ret_val = hw->phy.ops.read_phy_reg(hw, offset, &phy_data);
+ ret_val = phy->ops.read_reg(hw, offset, &phy_data);
if (!ret_val)
phy->speed_downgraded = (phy_data & mask) ? true : false;
@@ -1199,7 +1174,7 @@ static s32 igb_check_polarity_m88(struct e1000_hw *hw)
s32 ret_val;
u16 data;
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
@@ -1228,8 +1203,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
* Polarity is determined based on the speed of
* our connection.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
- &data);
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
if (ret_val)
goto out;
@@ -1246,7 +1220,7 @@ static s32 igb_check_polarity_igp(struct e1000_hw *hw)
mask = IGP01E1000_PSSR_POLARITY_REVERSED;
}
- ret_val = hw->phy.ops.read_phy_reg(hw, offset, &data);
+ ret_val = phy->ops.read_reg(hw, offset, &data);
if (!ret_val)
phy->cable_polarity = (data & mask)
@@ -1271,10 +1245,10 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw)
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_AUTONEG_COMPLETE)
@@ -1310,10 +1284,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_STATUS, &phy_status);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
break;
if (phy_status & MII_SR_LINK_STATUS)
@@ -1350,8 +1324,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, index;
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
if (ret_val)
goto out;
@@ -1372,8 +1345,8 @@ out:
*
* The automatic gain control (agc) normalizes the amplitude of the
* received signal, adjusting for the attenuation produced by the
- * cable. By reading the AGC registers, which reperesent the
- * cobination of course and fine gain value, the value can be put
+ * cable. By reading the AGC registers, which represent the
+ * combination of coarse and fine gain value, the value can be put
* into a lookup table to obtain the approximate cable length
* for each channel.
**/
@@ -1392,14 +1365,13 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
/* Read the AGC registers for all channels */
for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
- ret_val = hw->phy.ops.read_phy_reg(hw, agc_reg_array[i],
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
if (ret_val)
goto out;
/*
* Getting bits 15:9, which represent the combination of
- * course and fine gain values. The result is a number
+ * coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
@@ -1456,7 +1428,7 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- if (hw->phy.media_type != e1000_media_type_copper) {
+ if (phy->media_type != e1000_media_type_copper) {
hw_dbg("Phy info is only valid for copper media\n");
ret_val = -E1000_ERR_CONFIG;
goto out;
@@ -1472,33 +1444,29 @@ s32 igb_get_phy_info_m88(struct e1000_hw *hw)
goto out;
}
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
- ? true
- : false;
+ ? true : false;
ret_val = igb_check_polarity_m88(hw);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
if (ret_val)
goto out;
phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- ret_val = hw->phy.ops.get_cable_length(hw);
+ ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
- &phy_data);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
goto out;
@@ -1552,8 +1520,7 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
- &data);
+ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
if (ret_val)
goto out;
@@ -1561,12 +1528,11 @@ s32 igb_get_phy_info_igp(struct e1000_hw *hw)
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- ret_val = hw->phy.ops.get_cable_length(hw);
+ ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
goto out;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
- &data);
+ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
if (ret_val)
goto out;
@@ -1599,12 +1565,12 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- ret_val = hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
if (ret_val)
goto out;
phy_ctrl |= MII_CR_RESET;
- ret_val = hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+ ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
if (ret_val)
goto out;
@@ -1635,7 +1601,7 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw)
goto out;
}
- ret_val = igb_acquire_phy(hw);
+ ret_val = phy->ops.acquire(hw);
if (ret_val)
goto out;
@@ -1650,74 +1616,14 @@ s32 igb_phy_hw_reset(struct e1000_hw *hw)
udelay(150);
- igb_release_phy(hw);
+ phy->ops.release(hw);
- ret_val = igb_get_phy_cfg_done(hw);
+ ret_val = phy->ops.get_cfg_done(hw);
out:
return ret_val;
}
-/* Internal function pointers */
-
-/**
- * igb_get_phy_cfg_done - Generic PHY configuration done
- * @hw: pointer to the HW structure
- *
- * Return success if silicon family did not implement a family specific
- * get_cfg_done function.
- **/
-static s32 igb_get_phy_cfg_done(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_cfg_done)
- return hw->phy.ops.get_cfg_done(hw);
-
- return 0;
-}
-
-/**
- * igb_release_phy - Generic release PHY
- * @hw: pointer to the HW structure
- *
- * Return if silicon family does not require a semaphore when accessing the
- * PHY.
- **/
-static void igb_release_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.release_phy)
- hw->phy.ops.release_phy(hw);
-}
-
-/**
- * igb_acquire_phy - Generic acquire PHY
- * @hw: pointer to the HW structure
- *
- * Return success if silicon family does not require a semaphore when
- * accessing the PHY.
- **/
-static s32 igb_acquire_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.acquire_phy)
- return hw->phy.ops.acquire_phy(hw);
-
- return 0;
-}
-
-/**
- * igb_phy_force_speed_duplex - Generic force PHY speed/duplex
- * @hw: pointer to the HW structure
- *
- * When the silicon family has not implemented a forced speed/duplex
- * function for the PHY, simply return 0.
- **/
-s32 igb_phy_force_speed_duplex(struct e1000_hw *hw)
-{
- if (hw->phy.ops.force_speed_duplex)
- return hw->phy.ops.force_speed_duplex(hw);
-
- return 0;
-}
-
/**
* igb_phy_init_script_igp3 - Inits the IGP3 PHY
* @hw: pointer to the HW structure
@@ -1730,75 +1636,75 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
/* PHY init IGP 3 */
/* Enable rise/fall, 10-mode work in class-A */
- hw->phy.ops.write_phy_reg(hw, 0x2F5B, 0x9018);
+ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
/* Remove all caps from Replica path filter */
- hw->phy.ops.write_phy_reg(hw, 0x2F52, 0x0000);
+ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
/* Bias trimming for ADC, AFE and Driver (Default) */
- hw->phy.ops.write_phy_reg(hw, 0x2FB1, 0x8B24);
+ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
/* Increase Hybrid poly bias */
- hw->phy.ops.write_phy_reg(hw, 0x2FB2, 0xF8F0);
+ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
/* Add 4% to TX amplitude in Giga mode */
- hw->phy.ops.write_phy_reg(hw, 0x2010, 0x10B0);
+ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
/* Disable trimming (TTT) */
- hw->phy.ops.write_phy_reg(hw, 0x2011, 0x0000);
+ hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
/* Poly DC correction to 94.6% + 2% for all channels */
- hw->phy.ops.write_phy_reg(hw, 0x20DD, 0x249A);
+ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
/* ABS DC correction to 95.9% */
- hw->phy.ops.write_phy_reg(hw, 0x20DE, 0x00D3);
+ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
/* BG temp curve trim */
- hw->phy.ops.write_phy_reg(hw, 0x28B4, 0x04CE);
+ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
/* Increasing ADC OPAMP stage 1 currents to max */
- hw->phy.ops.write_phy_reg(hw, 0x2F70, 0x29E4);
+ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
/* Force 1000 (required for enabling PHY regs configuration) */
- hw->phy.ops.write_phy_reg(hw, 0x0000, 0x0140);
+ hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
/* Set upd_freq to 6 */
- hw->phy.ops.write_phy_reg(hw, 0x1F30, 0x1606);
+ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
/* Disable NPDFE */
- hw->phy.ops.write_phy_reg(hw, 0x1F31, 0xB814);
+ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
/* Disable adaptive fixed FFE (Default) */
- hw->phy.ops.write_phy_reg(hw, 0x1F35, 0x002A);
+ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
/* Enable FFE hysteresis */
- hw->phy.ops.write_phy_reg(hw, 0x1F3E, 0x0067);
+ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
/* Fixed FFE for short cable lengths */
- hw->phy.ops.write_phy_reg(hw, 0x1F54, 0x0065);
+ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
/* Fixed FFE for medium cable lengths */
- hw->phy.ops.write_phy_reg(hw, 0x1F55, 0x002A);
+ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
/* Fixed FFE for long cable lengths */
- hw->phy.ops.write_phy_reg(hw, 0x1F56, 0x002A);
+ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
/* Enable Adaptive Clip Threshold */
- hw->phy.ops.write_phy_reg(hw, 0x1F72, 0x3FB0);
+ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
/* AHT reset limit to 1 */
- hw->phy.ops.write_phy_reg(hw, 0x1F76, 0xC0FF);
+ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
/* Set AHT master delay to 127 msec */
- hw->phy.ops.write_phy_reg(hw, 0x1F77, 0x1DEC);
+ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
/* Set scan bits for AHT */
- hw->phy.ops.write_phy_reg(hw, 0x1F78, 0xF9EF);
+ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
/* Set AHT Preset bits */
- hw->phy.ops.write_phy_reg(hw, 0x1F79, 0x0210);
+ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
/* Change integ_factor of channel A to 3 */
- hw->phy.ops.write_phy_reg(hw, 0x1895, 0x0003);
+ hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
/* Change prop_factor of channels BCD to 8 */
- hw->phy.ops.write_phy_reg(hw, 0x1796, 0x0008);
+ hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
/* Change cg_icount + enable integbp for channels BCD */
- hw->phy.ops.write_phy_reg(hw, 0x1798, 0xD008);
+ hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
/*
* Change cg_icount + enable integbp + change prop_factor_master
* to 8 for channel A
*/
- hw->phy.ops.write_phy_reg(hw, 0x1898, 0xD918);
+ hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
/* Disable AHT in Slave mode on channel A */
- hw->phy.ops.write_phy_reg(hw, 0x187A, 0x0800);
+ hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
/*
* Enable LPLU and disable AN to 1000 in non-D0a states,
* Enable SPD+B2B
*/
- hw->phy.ops.write_phy_reg(hw, 0x0019, 0x008D);
+ hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
/* Enable restart AN on an1000_dis change */
- hw->phy.ops.write_phy_reg(hw, 0x001B, 0x2080);
+ hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
/* Enable wh_fifo read clock in 10/100 modes */
- hw->phy.ops.write_phy_reg(hw, 0x0014, 0x0045);
+ hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
/* Restart AN, Speed selection is 1000 */
- hw->phy.ops.write_phy_reg(hw, 0x0000, 0x1340);
+ hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
return 0;
}
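The recurring pattern in this file is worth stating once: the acquire and
release hooks are optional per silicon family, and each accessor guards
against a missing hook instead of calling the removed igb_acquire_phy()
wrappers. A condensed sketch (not a new function in the patch), using the
raw MDIC accessor the converted functions call between acquire and release:

    s32 example_guarded_read(struct e1000_hw *hw, u32 offset, u16 *data)
    {
            s32 ret_val = 0;

            if (!hw->phy.ops.acquire)       /* family needs no semaphore */
                    return 0;

            ret_val = hw->phy.ops.acquire(hw);
            if (ret_val)
                    return ret_val;

            ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
                                            data);
            hw->phy.ops.release(hw);
            return ret_val;
    }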
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 8f8fe0a780d..3228a862031 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,7 +44,6 @@ enum e1000_smart_speed {
s32 igb_check_downshift(struct e1000_hw *hw);
s32 igb_check_reset_block(struct e1000_hw *hw);
s32 igb_copper_link_autoneg(struct e1000_hw *hw);
-s32 igb_phy_force_speed_duplex(struct e1000_hw *hw);
s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index bdf5d839c4b..0bd7728fe46 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -73,8 +73,75 @@
#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
-#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n)))
#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+
+/* IEEE 1588 TIMESYNCH */
+#define E1000_TSYNCTXCTL 0x0B614
+#define E1000_TSYNCTXCTL_VALID (1<<0)
+#define E1000_TSYNCTXCTL_ENABLED (1<<4)
+#define E1000_TSYNCRXCTL 0x0B620
+#define E1000_TSYNCRXCTL_VALID (1<<0)
+#define E1000_TSYNCRXCTL_ENABLED (1<<4)
+enum {
+ E1000_TSYNCRXCTL_TYPE_L2_V2 = 0,
+ E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1),
+ E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2),
+ E1000_TSYNCRXCTL_TYPE_ALL = (1<<3),
+ E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1),
+};
+#define E1000_TSYNCRXCFG 0x05F50
+enum {
+ E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
+ E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
+ E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
+ E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
+ E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
+
+ E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
+ E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
+ E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
+ E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
+ E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
+ E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
+ E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
+ E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
+ E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
+ E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
+};
+#define E1000_SYSTIML 0x0B600
+#define E1000_SYSTIMH 0x0B604
+#define E1000_TIMINCA 0x0B608
+
+#define E1000_RXMTRL 0x0B634
+#define E1000_RXSTMPL 0x0B624
+#define E1000_RXSTMPH 0x0B628
+#define E1000_RXSATRL 0x0B62C
+#define E1000_RXSATRH 0x0B630
+
+#define E1000_TXSTMPL 0x0B618
+#define E1000_TXSTMPH 0x0B61C
+
+#define E1000_ETQF0 0x05CB0
+#define E1000_ETQF1 0x05CB4
+#define E1000_ETQF2 0x05CB8
+#define E1000_ETQF3 0x05CBC
+#define E1000_ETQF4 0x05CC0
+#define E1000_ETQF5 0x05CC4
+#define E1000_ETQF6 0x05CC8
+#define E1000_ETQF7 0x05CCC
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
+#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
+#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
+#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
+#define E1000_SAQF0 E1000_SAQF(0)
+#define E1000_DAQF0 E1000_DAQF(0)
+#define E1000_SPQF0 E1000_SPQF(0)
+#define E1000_FTQF0 E1000_FTQF(0)
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
/* Split and Replication RX Control - RW */
/*
* Convenience macros
@@ -110,7 +177,6 @@
: (0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
: (0x0E028 + ((_n) * 0x40)))
-#define E1000_TARC(_n) (0x03840 + (_n << 8))
#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
@@ -226,16 +292,14 @@
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
(0x054E4 + ((_i - 16) * 8)))
#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
-#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
+#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
#define E1000_WUC 0x05800 /* Wakeup Control - RW */
#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
#define E1000_WUS 0x05810 /* Wakeup Status - RO */
#define E1000_MANC 0x05820 /* Management Control - RW */
#define E1000_IPAV 0x05838 /* IP Address Valid - RW */
#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */
-#define E1000_HOST_IF 0x08800 /* Host Interface */
-#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
#define E1000_CCMCTL 0x05B48 /* CCM Control Register */
#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
@@ -243,9 +307,7 @@
#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
#define E1000_SWSM 0x05B50 /* SW Semaphore */
#define E1000_FWSM 0x05B54 /* FW Semaphore */
-#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */
#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */
-#define E1000_HICR 0x08F00 /* Host Inteface Control */
/* RSS registers */
#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
@@ -254,18 +316,27 @@
#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
/* MSI-X Allocation Register (_i) - RW */
#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4))
-/* MSI-X Table entry addr low reg 0 - RW */
-#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10))
-/* MSI-X Table entry addr upper reg 0 - RW */
-#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10))
-/* MSI-X Table entry message reg 0 - RW */
-#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10))
-/* MSI-X Table entry vector ctrl reg 0 - RW */
-#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10))
/* Redirection Table - RW Array */
#define E1000_RETA(_i) (0x05C00 + ((_i) * 4))
#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+/* VT Registers */
+#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE 0x00C8C /* VF Receive Enables */
+#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
+#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
+#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
+/* These act per VF so an array-friendly macro is used */
+#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
+#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
+ * Filter - RW */
+
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
#define wrfl() ((void)rd32(E1000_STATUS))
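A short sketch of the new VT registers in use; assumptions: the MBVFICR
"VF1" bit corresponds to VF index 0, and hw is in scope as the rd32()/
wr32() macros require:

    void example_poll_vf_requests(struct e1000_hw *hw)
    {
            u32 icr = rd32(E1000_MBVFICR);  /* pending VF req/ack bits */

            if (icr & E1000_MBVFICR_VFREQ_VF1) {
                    /* first VF has a request: claim the buffer for the PF */
                    wr32(E1000_P2VMAILBOX(0), E1000_P2VMAILBOX_PFU);
                    /* then clear the cause bit (the register is RWC) */
                    wr32(E1000_MBVFICR, E1000_MBVFICR_VFREQ_VF1);
            }
    }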
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index aebef8e48e7..4e8464b9df2 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -34,25 +34,15 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-struct igb_adapter;
-
-#ifdef CONFIG_IGB_LRO
-#include <linux/inet_lro.h>
-#define MAX_LRO_AGGR 32
-#define MAX_LRO_DESCRIPTORS 8
-#endif
+#include <linux/clocksource.h>
+#include <linux/timecompare.h>
+#include <linux/net_tstamp.h>
-/* Interrupt defines */
-#define IGB_MIN_DYN_ITR 3000
-#define IGB_MAX_DYN_ITR 96000
+struct igb_adapter;
/* (1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
#define IGB_START_ITR 648
-#define IGB_DYN_ITR_PACKET_THRESHOLD 2
-#define IGB_DYN_ITR_LENGTH_LOW 200
-#define IGB_DYN_ITR_LENGTH_HIGH 1000
-
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_MIN_TXD 80
@@ -67,8 +57,21 @@ struct igb_adapter;
#define IGB_MIN_ITR_USECS 10
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES 4
-#define IGB_MAX_TX_QUEUES 4
+#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \
+ (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4)
+#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
+#define IGB_ABS_MAX_TX_QUEUES 4
+
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_MAX_VFTA_ENTRIES 128
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ bool clear_to_send;
+};
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -94,12 +97,9 @@ struct igb_adapter;
#define IGB_RXBUFFER_512 512
#define IGB_RXBUFFER_1024 1024
#define IGB_RXBUFFER_2048 2048
-#define IGB_RXBUFFER_4096 4096
-#define IGB_RXBUFFER_8192 8192
#define IGB_RXBUFFER_16384 16384
-/* Packet Buffer allocations */
-
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -176,29 +176,18 @@ struct igb_ring {
struct napi_struct napi;
int set_itr;
struct igb_ring *buddy;
-#ifdef CONFIG_IGB_LRO
- struct net_lro_mgr lro_mgr;
- bool lro_used;
-#endif
};
};
char name[IFNAMSIZ + 5];
};
-#define IGB_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
-
#define E1000_RX_DESC_ADV(R, i) \
(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
#define E1000_TX_DESC_ADV(R, i) \
(&(((union e1000_adv_tx_desc *)((R).desc))[i]))
#define E1000_TX_CTXTDESC_ADV(R, i) \
(&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
-#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
-#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
-#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
/* board specific private data structure */
@@ -248,7 +237,6 @@ struct igb_adapter {
u64 hw_csum_err;
u64 hw_csum_good;
- u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
bool rx_csum;
u32 gorc;
@@ -262,6 +250,10 @@ struct igb_adapter {
struct napi_struct napi;
struct pci_dev *pdev;
struct net_device_stats net_stats;
+ struct cyclecounter cycles;
+ struct timecounter clock;
+ struct timecompare compare;
+ struct hwtstamp_config hwtstamp_config;
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
@@ -283,27 +275,17 @@ struct igb_adapter {
unsigned int flags;
u32 eeprom_wol;
- /* for ioport free */
- int bars;
- int need_ioport;
-
- struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
-#ifdef CONFIG_IGB_LRO
- unsigned int lro_max_aggr;
- unsigned int lro_aggregated;
- unsigned int lro_flushed;
- unsigned int lro_no_desc;
-#endif
+ struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
unsigned int tx_ring_count;
unsigned int rx_ring_count;
+ unsigned int vfs_allocated_count;
+ struct vf_data_storage *vf_data;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_MSI_ENABLE (1 << 1)
-#define IGB_FLAG_DCA_ENABLED (1 << 2)
-#define IGB_FLAG_IN_NETPOLL (1 << 3)
-#define IGB_FLAG_QUAD_PORT_A (1 << 4)
-#define IGB_FLAG_NEED_CTX_IDX (1 << 5)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_NEED_CTX_IDX (1 << 3)
enum e1000_state_t {
__IGB_TESTING,
@@ -333,24 +315,24 @@ extern void igb_set_ethtool_ops(struct net_device *);
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
- if (hw->phy.ops.reset_phy)
- return hw->phy.ops.reset_phy(hw);
+ if (hw->phy.ops.reset)
+ return hw->phy.ops.reset(hw);
return 0;
}
static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
- if (hw->phy.ops.read_phy_reg)
- return hw->phy.ops.read_phy_reg(hw, offset, data);
+ if (hw->phy.ops.read_reg)
+ return hw->phy.ops.read_reg(hw, offset, data);
return 0;
}
static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
- if (hw->phy.ops.write_phy_reg)
- return hw->phy.ops.write_phy_reg(hw, offset, data);
+ if (hw->phy.ops.write_reg)
+ return hw->phy.ops.write_reg(hw, offset, data);
return 0;
}
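A worked restatement of the queue macros above (hypothetical helper; the
real macro reads adapter->vfs_allocated_count at its expansion site): with
SR-IOV off the PF keeps all four queue pairs, with one to six VFs it keeps
two, and with seven or eight VFs it keeps one:

    int example_pf_rx_queues(unsigned int vfs_allocated_count)
    {
            if (!vfs_allocated_count)
                    return 4;       /* no VFs: PF keeps every queue */
            return vfs_allocated_count > 6 ? 1 : 2;
    }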
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 3c831f1472a..fb09c8ad9f0 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -88,16 +88,11 @@ static const struct igb_stats igb_gstrings_stats[] = {
{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
- { "rx_header_split", IGB_STAT(rx_hdr_split) },
+ { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
{ "tx_smbus", IGB_STAT(stats.mgptc) },
{ "rx_smbus", IGB_STAT(stats.mgprc) },
{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
-#ifdef CONFIG_IGB_LRO
- { "lro_aggregated", IGB_STAT(lro_aggregated) },
- { "lro_flushed", IGB_STAT(lro_flushed) },
- { "lro_no_desc", IGB_STAT(lro_no_desc) },
-#endif
};
#define IGB_QUEUE_STATS_LEN \
@@ -293,15 +288,15 @@ static int igb_set_rx_csum(struct net_device *netdev, u32 data)
static u32 igb_get_tx_csum(struct net_device *netdev)
{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
+ return (netdev->features & NETIF_F_IP_CSUM) != 0;
}
static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
if (data)
- netdev->features |= NETIF_F_HW_CSUM;
+ netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
else
- netdev->features &= ~NETIF_F_HW_CSUM;
+ netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
return 0;
}
@@ -310,15 +305,13 @@ static int igb_set_tso(struct net_device *netdev, u32 data)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (data)
+ if (data) {
netdev->features |= NETIF_F_TSO;
- else
- netdev->features &= ~NETIF_F_TSO;
-
- if (data)
netdev->features |= NETIF_F_TSO6;
- else
+ } else {
+ netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
+ }
dev_info(&adapter->pdev->dev, "TSO is %s\n",
data ? "Enabled" : "Disabled");
@@ -405,7 +398,7 @@ static void igb_get_regs(struct net_device *netdev,
regs_buff[34] = rd32(E1000_RLPML);
regs_buff[35] = rd32(E1000_RFCTL);
regs_buff[36] = rd32(E1000_MRQC);
- regs_buff[37] = rd32(E1000_VMD_CTL);
+ regs_buff[37] = rd32(E1000_VT_CTL);
/* Transmit */
regs_buff[38] = rd32(E1000_TCTL);
@@ -598,12 +591,12 @@ static int igb_get_eeprom(struct net_device *netdev,
return -ENOMEM;
if (hw->nvm.type == e1000_nvm_eeprom_spi)
- ret_val = hw->nvm.ops.read_nvm(hw, first_word,
+ ret_val = hw->nvm.ops.read(hw, first_word,
last_word - first_word + 1,
eeprom_buff);
else {
for (i = 0; i < last_word - first_word + 1; i++) {
- ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1,
+ ret_val = hw->nvm.ops.read(hw, first_word + i, 1,
&eeprom_buff[i]);
if (ret_val)
break;
@@ -650,14 +643,14 @@ static int igb_set_eeprom(struct net_device *netdev,
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
- ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1,
+ ret_val = hw->nvm.ops.read(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
- ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1,
+ ret_val = hw->nvm.ops.read(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
@@ -670,7 +663,7 @@ static int igb_set_eeprom(struct net_device *netdev,
for (i = 0; i < last_word - first_word + 1; i++)
eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
- ret_val = hw->nvm.ops.write_nvm(hw, first_word,
+ ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
/* Update the checksum over the first part of the EEPROM if needed
@@ -694,7 +687,7 @@ static void igb_get_drvinfo(struct net_device *netdev,
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
- adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data);
+ adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
sprintf(firmware_version, "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
@@ -863,23 +856,26 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_RDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_RDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_RDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- /* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 1, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ /* Enable all RX queues before testing. */
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
- { E1000_TDBAL(4), 0x40, 8, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
- { E1000_TDBAH(4), 0x40, 8, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
- { E1000_TDLEN(4), 0x40, 8, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+ { E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
@@ -926,12 +922,13 @@ static struct igb_reg_test reg_test_82575[] = {
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
u32 _test[] =
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
- writel((_test[pat] & write), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ wr32(reg, (_test[pat] & write));
+ val = rd32(reg);
if (val != (_test[pat] & write & mask)) {
dev_err(&adapter->pdev->dev, "pattern test reg %04X "
"failed: got 0x%08X expected 0x%08X\n",
@@ -946,9 +943,10 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
+ struct e1000_hw *hw = &adapter->hw;
u32 val;
- writel((write & mask), (adapter->hw.hw_addr + reg));
- val = readl(adapter->hw.hw_addr + reg);
+ wr32(reg, write & mask);
+ val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
" got 0x%08X expected 0x%08X\n", reg,
@@ -1014,12 +1012,14 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
for (i = 0; i < test->array_len; i++) {
switch (test->test_type) {
case PATTERN_TEST:
- REG_PATTERN_TEST(test->reg + (i * test->reg_offset),
+ REG_PATTERN_TEST(test->reg +
+ (i * test->reg_offset),
test->mask,
test->write);
break;
case SET_READ_TEST:
- REG_SET_AND_CHECK(test->reg + (i * test->reg_offset),
+ REG_SET_AND_CHECK(test->reg +
+ (i * test->reg_offset),
test->mask,
test->write);
break;
@@ -1061,7 +1061,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
/* Read and add up the contents of the EEPROM */
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
- if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp))
+ if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
< 0) {
*data = 1;
break;
@@ -1091,16 +1091,17 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 mask, i = 0, shared_int = true;
+ u32 mask, ics_mask, i = 0, shared_int = true;
u32 irq = adapter->pdev->irq;
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (adapter->msix_entries) {
+ if (adapter->msix_entries)
/* NOTE: we don't test MSI-X interrupts here, yet */
return 0;
- } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
+
+ if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
*data = 1;
@@ -1116,16 +1117,31 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
}
dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
(shared_int ? "shared" : "unshared"));
-
/* Disable all the interrupts */
wr32(E1000_IMC, 0xFFFFFFFF);
msleep(10);
+ /* Define all writable bits for ICS */
+ switch (hw->mac.type) {
+ case e1000_82575:
+ ics_mask = 0x37F47EDD;
+ break;
+ case e1000_82576:
+ ics_mask = 0x77D4FBFD;
+ break;
+ default:
+ ics_mask = 0x7FFFFFFF;
+ break;
+ }
+
/* Test each interrupt */
- for (; i < 10; i++) {
+ for (; i < 31; i++) {
/* Interrupt to test */
mask = 1 << i;
+ if (!(mask & ics_mask))
+ continue;
+
if (!shared_int) {
/* Disable the interrupt to be reported in
* the cause register and then force the same
@@ -1134,8 +1150,12 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
* test failed.
*/
adapter->test_icr = 0;
- wr32(E1000_IMC, ~mask & 0x00007FFF);
- wr32(E1000_ICS, ~mask & 0x00007FFF);
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMC, mask);
+ wr32(E1000_ICS, mask);
msleep(10);
if (adapter->test_icr & mask) {
@@ -1151,6 +1171,10 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
* test failed.
*/
adapter->test_icr = 0;
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
msleep(10);
@@ -1168,11 +1192,15 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
* test failed.
*/
adapter->test_icr = 0;
- wr32(E1000_IMC, ~mask & 0x00007FFF);
- wr32(E1000_ICS, ~mask & 0x00007FFF);
+
+ /* Flush any pending interrupts */
+ wr32(E1000_ICR, ~0);
+
+ wr32(E1000_IMC, ~mask);
+ wr32(E1000_ICS, ~mask);
msleep(10);
- if (adapter->test_icr) {
+ if (adapter->test_icr & mask) {
*data = 5;
break;
}
@@ -1180,7 +1208,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
}
/* Disable all the interrupts */
- wr32(E1000_IMC, 0xFFFFFFFF);
+ wr32(E1000_IMC, ~0);
msleep(10);
/* Unhook test interrupt handler */
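The reworked loop above tests all 31 possible causes rather than the first
ten, restricted to the bits each MAC actually implements, and flushes any
stale cause before each sub-test so a leftover ICR bit cannot skew the
result. The masked-bit case, restated as a sketch (masks as in the hunk):

    for (i = 0; i < 31; i++) {
            u32 mask = 1 << i;

            if (!(mask & ics_mask))
                    continue;               /* bit unwritable on this MAC */

            adapter->test_icr = 0;
            wr32(E1000_ICR, ~0);            /* flush anything pending */
            wr32(E1000_IMC, mask);          /* mask this cause ... */
            wr32(E1000_ICS, mask);          /* ... then try to raise it */
            msleep(10);
            if (adapter->test_icr & mask)   /* fired despite mask: fail */
                    *data = 3;
    }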
@@ -1244,6 +1272,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
struct igb_ring *tx_ring = &adapter->test_tx_ring;
struct igb_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
+ struct igb_buffer *buffer_info;
u32 rctl;
int i, ret_val;
@@ -1260,7 +1289,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
goto err_nomem;
}
- tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
&tx_ring->dma);
@@ -1274,7 +1303,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
((u64) tx_ring->dma & 0x00000000FFFFFFFF));
wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
wr32(E1000_TDLEN(0),
- tx_ring->count * sizeof(struct e1000_tx_desc));
+ tx_ring->count * sizeof(union e1000_adv_tx_desc));
wr32(E1000_TDH(0), 0);
wr32(E1000_TDT(0), 0);
wr32(E1000_TCTL,
@@ -1283,27 +1312,31 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
for (i = 0; i < tx_ring->count; i++) {
- struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+ union e1000_adv_tx_desc *tx_desc;
struct sk_buff *skb;
unsigned int size = 1024;
+ tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
skb = alloc_skb(size, GFP_KERNEL);
if (!skb) {
ret_val = 3;
goto err_nomem;
}
skb_put(skb, size);
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[i].length = skb->len;
- tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
- tx_desc->lower.data = cpu_to_le32(skb->len);
- tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
- E1000_TXD_CMD_IFCS |
- E1000_TXD_CMD_RS);
- tx_desc->upper.data = 0;
+ buffer_info = &tx_ring->buffer_info[i];
+ buffer_info->skb = skb;
+ buffer_info->length = skb->len;
+ buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+ tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
+ E1000_ADVTXD_PAYLEN_SHIFT;
+ tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
+ tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
+ E1000_TXD_CMD_IFCS |
+ E1000_TXD_CMD_RS |
+ E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT);
}
/* Setup Rx descriptor ring and Rx buffers */
@@ -1319,7 +1352,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
goto err_nomem;
}
- rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
&rx_ring->dma);
if (!rx_ring->desc) {
@@ -1338,16 +1371,17 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
wr32(E1000_RDH(0), 0);
wr32(E1000_RDT(0), 0);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
- rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_RDMTS_HALF |
+ rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
wr32(E1000_RCTL, rctl);
- wr32(E1000_SRRCTL(0), 0);
+ wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
for (i = 0; i < rx_ring->count; i++) {
- struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+ union e1000_adv_rx_desc *rx_desc;
struct sk_buff *skb;
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
GFP_KERNEL);
if (!skb) {
@@ -1355,11 +1389,11 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
- rx_ring->buffer_info[i].skb = skb;
- rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048,
- PCI_DMA_FROMDEVICE);
- rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
+ buffer_info->skb = skb;
+ buffer_info->dma = pci_map_single(pdev, skb->data,
+ IGB_RXBUFFER_2048,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
memset(skb->data, 0x00, skb->len);
}
@@ -1458,7 +1492,7 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
E1000_CTRL_TFCE |
E1000_CTRL_LRST);
reg |= E1000_CTRL_SLU |
- E1000_CTRL_FD;
+ E1000_CTRL_FD;
wr32(E1000_CTRL, reg);
/* Unset switch control to serdes energy detect */
@@ -1745,6 +1779,15 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
/* return success for non excluded adapter ports */
retval = 0;
break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ /* quad port adapters only support WoL on port A */
+ if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
+ wol->supported = 0;
+ break;
+ }
+ /* return success for non excluded adapter ports */
+ retval = 0;
+ break;
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
@@ -1827,9 +1870,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-/* toggle LED 4 times per second = 2 "blinks" per second */
-#define IGB_ID_INTERVAL (HZ/4)
-
/* bit defines for adapter->led_status */
#define IGB_LED_ON 0
@@ -1921,18 +1961,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
int j;
int i;
-#ifdef CONFIG_IGB_LRO
- int aggregated = 0, flushed = 0, no_desc = 0;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
- flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
- no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
- }
- adapter->lro_aggregated = aggregated;
- adapter->lro_flushed = flushed;
- adapter->lro_no_desc = no_desc;
-#endif
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 9dd13ad12ce..ca842163dce 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007 Intel Corporation.
+ Copyright(c) 2007-2009 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -34,6 +34,7 @@
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
@@ -48,12 +49,12 @@
#endif
#include "igb.h"
-#define DRV_VERSION "1.2.45-k2"
+#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -61,8 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
static struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
@@ -105,7 +108,6 @@ static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
-static int igb_clean_rx_ring_msix(struct napi_struct *, int);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
@@ -115,9 +117,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-#ifdef CONFIG_IGB_LRO
-static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
-#endif
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
@@ -125,6 +124,16 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
+static void igb_ping_all_vfs(struct igb_adapter *);
+static void igb_msg_task(struct igb_adapter *);
+static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
+static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
+static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
+static void igb_vmm_control(struct igb_adapter *);
+static inline void igb_set_vmolr(struct e1000_hw *, int);
+static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
+static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_suspend(struct pci_dev *, pm_message_t);
#ifdef CONFIG_PM
@@ -139,12 +148,18 @@ static struct notifier_block dca_notifier = {
.priority = 0
};
#endif
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
+#ifdef CONFIG_PCI_IOV
+static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *,
+ const char *, size_t);
+static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *,
+ char *);
+DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs);
+#endif
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
@@ -178,6 +193,54 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+/**
+ * Scale the NIC clock cycle by a large factor so that
+ * relatively small clock corrections can be added or
+ * subtracted at each clock tick. The drawbacks of a
+ * large factor are a) that the clock register overflows
+ * more quickly (not such a big deal) and b) that the
+ * increment per tick has to fit into 24 bits.
+ *
+ * Note that
+ * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
+ * IGB_TSYNC_SCALE
+ *  TIMINCA += TIMINCA * adjustment [ppb] / 1e9
+ *
+ * The base scale factor is intentionally a power of two
+ * so that the division in struct timecounter can be done with
+ * a shift.
+ */
+#define IGB_TSYNC_SHIFT (19)
+#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
+
+/**
+ * The duration of one clock cycle of the NIC.
+ *
+ * @todo This hard-coded value is part of the specification and might change
+ * in future hardware revisions. Add revision check.
+ */
+#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
+
+#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
+# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
+#endif
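
A minimal sketch of the adjustment arithmetic the note above describes, using the defines just introduced. The helper name and ppb parameter are illustrative only (not part of this patch), and div_s64() comes from linux/math64.h:

	/* Sketch only: helper name and ppb parameter are illustrative. */
	static u32 igb_timinca_for_adjustment(long ppb)
	{
		/* base increment per 16 ns tick: 16 << 19 = 0x800000, fits in 24 bits */
		u64 incvalue = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE;

		/* apply a parts-per-billion correction, cf. the formula above */
		incvalue += div_s64((s64)incvalue * ppb, 1000000000);

		/* the (1<<24) term mirrors the TIMINCA write done in igb_probe() */
		return (1 << 24) | (u32)incvalue;
	}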
+
+/**
+ * igb_read_clock - read raw cycle counter (to be used by time counter)
+ */
+static cycle_t igb_read_clock(const struct cyclecounter *tc)
+{
+ struct igb_adapter *adapter =
+ container_of(tc, struct igb_adapter, cycles);
+ struct e1000_hw *hw = &adapter->hw;
+ u64 stamp;
+
+ stamp = rd32(E1000_SYSTIML);
+ stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
+
+ return stamp;
+}
+
#ifdef DEBUG
/**
* igb_get_hw_dev_name - return device name string
@@ -188,9 +251,44 @@ char *igb_get_hw_dev_name(struct e1000_hw *hw)
struct igb_adapter *adapter = hw->back;
return adapter->netdev->name;
}
+
+/**
+ * igb_get_time_str - format current NIC and system time as string
+ */
+static char *igb_get_time_str(struct igb_adapter *adapter,
+ char buffer[160])
+{
+ cycle_t hw = adapter->cycles.read(&adapter->cycles);
+ struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
+ struct timespec sys;
+ struct timespec delta;
+ getnstimeofday(&sys);
+
+ delta = timespec_sub(nic, sys);
+
+ sprintf(buffer,
+ "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
+ hw,
+ (long)nic.tv_sec, nic.tv_nsec,
+ (long)sys.tv_sec, sys.tv_nsec,
+ (long)delta.tv_sec, delta.tv_nsec);
+
+ return buffer;
+}
#endif
/**
+ * igb_desc_unused - calculate if we have unused descriptors
+ **/
+static int igb_desc_unused(struct igb_ring *ring)
+{
+ if (ring->next_to_clean > ring->next_to_use)
+ return ring->next_to_clean - ring->next_to_use - 1;
+
+ return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
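
A worked instance of the arithmetic above, with hypothetical ring values:

	/*
	 * Hypothetical values: count = 256, next_to_use = 250, next_to_clean = 10.
	 * next_to_clean <= next_to_use, so the second return fires:
	 *   256 + 10 - 250 - 1 = 15 descriptors unused.
	 * The trailing "- 1" keeps next_to_use from ever catching next_to_clean.
	 */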
+
+/**
* igb_init_module - Driver Registration Routine
*
* igb_init_module is the first routine called when the driver is
@@ -243,6 +341,7 @@ module_exit(igb_exit_module);
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
int i;
+ unsigned int rbase_offset = adapter->vfs_allocated_count;
switch (adapter->hw.mac.type) {
case e1000_82576:
@@ -252,9 +351,11 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
* and continue consuming queues in the same sequence
*/
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
+ adapter->rx_ring[i].reg_idx = rbase_offset +
+ Q_IDX_82576(i);
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
+ adapter->tx_ring[i].reg_idx = rbase_offset +
+ Q_IDX_82576(i);
break;
case e1000_82575:
default:
@@ -354,7 +455,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
a vector number along with a "valid" bit. Sadly, the layout
of the table is somewhat counterintuitive. */
if (rx_queue > IGB_N0_QUEUE) {
- index = (rx_queue >> 1);
+ index = (rx_queue >> 1) + adapter->vfs_allocated_count;
ivar = array_rd32(E1000_IVAR0, index);
if (rx_queue & 0x1) {
/* vector goes into third byte of register */
@@ -369,7 +470,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
array_wr32(E1000_IVAR0, index, ivar);
}
if (tx_queue > IGB_N0_QUEUE) {
- index = (tx_queue >> 1);
+ index = (tx_queue >> 1) + adapter->vfs_allocated_count;
ivar = array_rd32(E1000_IVAR0, index);
if (tx_queue & 0x1) {
/* vector goes into high byte of register */
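
Piecing the two branches together (including the vfs_allocated_count offset this patch adds) gives the following implied IVAR0 layout; a reading aid only, not authoritative documentation:

	/*
	 * Implied layout of IVAR0[index], index = (queue >> 1) + vfs_allocated_count:
	 *   bits  7:0   vector for RX queue 2n   (| E1000_IVAR_VALID)
	 *   bits 15:8   vector for TX queue 2n   (| E1000_IVAR_VALID)
	 *   bits 23:16  vector for RX queue 2n+1 (| E1000_IVAR_VALID)
	 *   bits 31:24  vector for TX queue 2n+1 (| E1000_IVAR_VALID)
	 */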
@@ -407,7 +508,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
/* Turn on MSI-X capability first, or our settings
* won't stick. And it will take days to debug. */
wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
- E1000_GPIE_PBA | E1000_GPIE_EIAME |
+ E1000_GPIE_PBA | E1000_GPIE_EIAME |
E1000_GPIE_NSICR);
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -506,9 +607,6 @@ static int igb_request_msix(struct igb_adapter *adapter)
goto out;
ring->itr_register = E1000_EITR(0) + (vector << 2);
ring->itr_val = adapter->itr;
- /* overwrite the poll routine for MSIX, we've already done
- * netif_napi_add */
- ring->napi.poll = &igb_clean_rx_ring_msix;
vector++;
}
@@ -546,6 +644,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
int err;
int numvecs, i;
+	/* Number of supported queues; having more queues than
+	 * CPUs doesn't make sense. */
+ adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
+ adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
+
numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
GFP_KERNEL);
@@ -687,7 +790,10 @@ static void igb_irq_enable(struct igb_adapter *adapter)
wr32(E1000_EIAC, adapter->eims_enable_mask);
wr32(E1000_EIAM, adapter->eims_enable_mask);
wr32(E1000_EIMS, adapter->eims_enable_mask);
- wr32(E1000_IMS, E1000_IMS_LSC);
+ if (adapter->vfs_allocated_count)
+ wr32(E1000_MBVFIMR, 0xFF);
+ wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
+ E1000_IMS_DOUTSYNC));
} else {
wr32(E1000_IMS, IMS_ENABLE_MASK);
wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -778,12 +884,12 @@ static void igb_configure(struct igb_adapter *adapter)
igb_rx_fifo_flush_82575(&adapter->hw);
- /* call IGB_DESC_UNUSED which always leaves
+ /* call igb_desc_unused which always leaves
* at least 1 descriptor unused to make sure
* next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *ring = &adapter->rx_ring[i];
- igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
+ igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
}
@@ -811,6 +917,10 @@ int igb_up(struct igb_adapter *adapter)
if (adapter->msix_entries)
igb_configure_msix(adapter);
+ igb_vmm_control(adapter);
+ igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+ igb_set_vmolr(hw, adapter->vfs_allocated_count);
+
/* Clear any pending interrupts. */
rd32(E1000_ICR);
igb_irq_enable(adapter);
@@ -856,6 +966,10 @@ void igb_down(struct igb_adapter *adapter)
netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
+
+	/* record the stats before reset */
+ igb_update_stats(adapter);
+
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -886,11 +1000,14 @@ void igb_reset(struct igb_adapter *adapter)
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
*/
- if (mac->type != e1000_82576) {
- pba = E1000_PBA_34K;
- }
- else {
+ switch (mac->type) {
+ case e1000_82576:
pba = E1000_PBA_64K;
+ break;
+ case e1000_82575:
+ default:
+ pba = E1000_PBA_34K;
+ break;
}
if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
@@ -912,7 +1029,7 @@ void igb_reset(struct igb_adapter *adapter)
/* the tx fifo also stores 16 bytes of information about the tx
* but don't include ethernet FCS because hardware appends it */
min_tx_space = (adapter->max_frame_size +
- sizeof(struct e1000_tx_desc) -
+ sizeof(union e1000_adv_tx_desc) -
ETH_FCS_LEN) * 2;
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10;
@@ -956,6 +1073,20 @@ void igb_reset(struct igb_adapter *adapter)
fc->send_xon = 1;
fc->type = fc->original_type;
+ /* disable receive for all VFs and wait one second */
+ if (adapter->vfs_allocated_count) {
+ int i;
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++)
+ adapter->vf_data[i].clear_to_send = false;
+
+ /* ping all the active vfs to let them know we are going down */
+ igb_ping_all_vfs(adapter);
+
+ /* disable transmits and receives */
+ wr32(E1000_VFRE, 0);
+ wr32(E1000_VFTE, 0);
+ }
+
/* Allow time for pending master requests to run */
adapter->hw.mac.ops.reset_hw(&adapter->hw);
wr32(E1000_WUC, 0);
@@ -972,21 +1103,6 @@ void igb_reset(struct igb_adapter *adapter)
igb_get_phy_info(&adapter->hw);
}
-/**
- * igb_is_need_ioport - determine if an adapter needs ioport resources or not
- * @pdev: PCI device information struct
- *
- * Returns true if an adapter needs ioport resources
- **/
-static int igb_is_need_ioport(struct pci_dev *pdev)
-{
- switch (pdev->device) {
- /* Currently there are no adapters that need ioport resources */
- default:
- return false;
- }
-}
-
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -1025,21 +1141,12 @@ static int __devinit igb_probe(struct pci_dev *pdev,
struct e1000_hw *hw;
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
- int i, err, pci_using_dac;
+ int err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = IGB_EEPROM_APME;
u32 part_num;
- int bars, need_ioport;
- /* do not allocate ioport bars when not needed */
- need_ioport = igb_is_need_ioport(pdev);
- if (need_ioport) {
- bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
- err = pci_enable_device(pdev);
- } else {
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_enable_device_mem(pdev);
- }
+ err = pci_enable_device_mem(pdev);
if (err)
return err;
@@ -1061,7 +1168,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
}
}
- err = pci_request_selected_regions(pdev, bars, igb_driver_name);
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM),
+ igb_driver_name);
if (err)
goto err_pci_reg;
@@ -1076,7 +1185,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
pci_save_state(pdev);
err = -ENOMEM;
- netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
+ netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
+ IGB_ABS_MAX_TX_QUEUES);
if (!netdev)
goto err_alloc_etherdev;
@@ -1089,15 +1199,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
hw = &adapter->hw;
hw->back = adapter;
adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
- adapter->bars = bars;
- adapter->need_ioport = need_ioport;
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
err = -EIO;
- adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
- if (!adapter->hw.hw_addr)
+ hw->hw_addr = ioremap(mmio_start, mmio_len);
+ if (!hw->hw_addr)
goto err_ioremap;
netdev->netdev_ops = &igb_netdev_ops;
@@ -1125,8 +1233,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* Initialize skew-specific constants */
err = ei->get_invariants(hw);
if (err)
- goto err_hw_init;
+ goto err_sw_init;
+ /* setup the private structure */
err = igb_sw_init(adapter);
if (err)
goto err_sw_init;
@@ -1158,27 +1267,25 @@ static int __devinit igb_probe(struct pci_dev *pdev,
"PHY reset is blocked due to SOL/IDER session.\n");
netdev->features = NETIF_F_SG |
- NETIF_F_HW_CSUM |
+ NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_IPV6_CSUM;
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
-#ifdef CONFIG_IGB_LRO
- netdev->features |= NETIF_F_LRO;
-#endif
+ netdev->features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
netdev->vlan_features |= NETIF_F_SG;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_LLTX;
adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
/* before reading the NVM, reset the controller to put the device in a
@@ -1205,25 +1312,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &igb_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
-
- init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = &igb_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long) adapter;
+ setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+ (unsigned long) adapter);
+ setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+ (unsigned long) adapter);
INIT_WORK(&adapter->reset_task, igb_reset_task);
INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
- /* Initialize link & ring properties that are user-changeable */
- adapter->tx_ring->count = 256;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = adapter->tx_ring->count;
- adapter->rx_ring->count = 256;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = adapter->rx_ring->count;
-
+ /* Initialize link properties that are user-changeable */
adapter->fc_autoneg = true;
hw->mac.autoneg = true;
hw->phy.autoneg_advertised = 0x2f;
@@ -1231,7 +1328,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
hw->fc.original_type = e1000_fc_default;
hw->fc.type = e1000_fc_default;
- adapter->itr_setting = 3;
+ adapter->itr_setting = IGB_DEFAULT_ITR;
adapter->itr = IGB_START_ITR;
igb_validate_mdi_setting(hw);
@@ -1242,10 +1339,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
* enable the ACPI Magic Packet filter
*/
- if (hw->bus.func == 0 ||
- hw->device_id == E1000_DEV_ID_82575EB_COPPER)
- hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
- &eeprom_data);
+ if (hw->bus.func == 0)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+ else if (hw->bus.func == 1)
+ hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -1265,6 +1362,16 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ /* if quad port adapter, disable WoL on all but port A */
+ if (global_quad_port_a != 0)
+ adapter->eeprom_wol = 0;
+ else
+ adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+ /* Reset for multiple quad port adapters */
+ if (++global_quad_port_a == 4)
+ global_quad_port_a = 0;
+ break;
}
/* initialize the wol settings based on the eeprom settings */
@@ -1287,17 +1394,82 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (err)
goto err_register;
+#ifdef CONFIG_PCI_IOV
+ /* since iov functionality isn't critical to base device function we
+ * can accept failure. If it fails we don't allow iov to be enabled */
+ if (hw->mac.type == e1000_82576) {
+ err = pci_enable_sriov(pdev, 0);
+ if (!err)
+ err = device_create_file(&netdev->dev,
+ &dev_attr_num_vfs);
+ if (err)
+ dev_err(&pdev->dev, "Failed to initialize IOV\n");
+ }
+
+#endif
#ifdef CONFIG_IGB_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&pdev->dev, "DCA enabled\n");
/* Always use CB2 mode, difference is masked
* in the CB driver. */
- wr32(E1000_DCA_CTRL, 2);
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
igb_setup_dca(adapter);
}
#endif
+ /*
+	 * Initialize hardware timer: we keep it running in case
+	 * some program needs it later on.
+ */
+ memset(&adapter->cycles, 0, sizeof(adapter->cycles));
+ adapter->cycles.read = igb_read_clock;
+ adapter->cycles.mask = CLOCKSOURCE_MASK(64);
+ adapter->cycles.mult = 1;
+ adapter->cycles.shift = IGB_TSYNC_SHIFT;
+ wr32(E1000_TIMINCA,
+ (1<<24) |
+ IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
+#if 0
+ /*
+ * Avoid rollover while we initialize by resetting the time counter.
+ */
+ wr32(E1000_SYSTIML, 0x00000000);
+ wr32(E1000_SYSTIMH, 0x00000000);
+#else
+ /*
+ * Set registers so that rollover occurs soon to test this.
+ */
+ wr32(E1000_SYSTIML, 0x00000000);
+ wr32(E1000_SYSTIMH, 0xFF800000);
+#endif
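
Rough arithmetic for how soon that forced rollover arrives, assuming the 16 ns cycle time above holds:

	/*
	 * The counter effectively holds nanoseconds << IGB_TSYNC_SHIFT, so with
	 * SYSTIMH = 0xFF800000 there are 2^55 counts left before the 64-bit wrap:
	 *   2^55 >> 19 = 2^36 ns ~= 68.7 seconds until rollover.
	 */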
+ wrfl();
+ timecounter_init(&adapter->clock,
+ &adapter->cycles,
+ ktime_to_ns(ktime_get_real()));
+
+ /*
+ * Synchronize our NIC clock against system wall clock. NIC
+	 * time stamp reading requires ~3us per sample and samples
+	 * were stable even under load, so 10 samples per offset
+	 * comparison suffice.
+ */
+ memset(&adapter->compare, 0, sizeof(adapter->compare));
+ adapter->compare.source = &adapter->clock;
+ adapter->compare.target = ktime_get_real;
+ adapter->compare.num_samples = 10;
+ timecompare_update(&adapter->compare, 0);
+
+#ifdef DEBUG
+ {
+ char buffer[160];
+ printk(KERN_DEBUG
+ "igb: %s: hw %p initialized timer\n",
+ igb_get_time_str(adapter, buffer),
+ &adapter->hw);
+ }
+#endif
+
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -1330,15 +1502,14 @@ err_eeprom:
if (hw->flash_address)
iounmap(hw->flash_address);
- igb_remove_device(hw);
igb_free_queues(adapter);
err_sw_init:
-err_hw_init:
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev, bars);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -1358,9 +1529,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_IGB_DCA
struct e1000_hw *hw = &adapter->hw;
-#endif
int err;
/* flush_scheduled work may reschedule our watchdog task, so
@@ -1376,7 +1545,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
dev_info(&pdev->dev, "DCA disabled\n");
dca_remove_requester(&pdev->dev);
adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
- wr32(E1000_DCA_CTRL, 1);
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
}
#endif
@@ -1389,15 +1558,29 @@ static void __devexit igb_remove(struct pci_dev *pdev)
if (!igb_check_reset_block(&adapter->hw))
igb_reset_phy(&adapter->hw);
- igb_remove_device(&adapter->hw);
igb_reset_interrupt_capability(adapter);
igb_free_queues(adapter);
- iounmap(adapter->hw.hw_addr);
- if (adapter->hw.flash_address)
- iounmap(adapter->hw.flash_address);
- pci_release_selected_regions(pdev, adapter->bars);
+#ifdef CONFIG_PCI_IOV
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(pdev);
+ msleep(500);
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ msleep(100);
+ dev_info(&pdev->dev, "IOV Disabled\n");
+ }
+#endif
+ iounmap(hw->hw_addr);
+ if (hw->flash_address)
+ iounmap(hw->flash_address);
+ pci_release_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM));
free_netdev(netdev);
@@ -1432,11 +1615,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
- /* Number of supported queues. */
- /* Having more queues than CPUs doesn't make sense. */
- adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
- adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());
-
/* This call may decrease the number of queues depending on
* interrupt mode. */
igb_set_interrupt_capability(adapter);
@@ -1499,6 +1677,10 @@ static int igb_open(struct net_device *netdev)
* clean_rx handler before we do so. */
igb_configure(adapter);
+ igb_vmm_control(adapter);
+ igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
+ igb_set_vmolr(hw, adapter->vfs_allocated_count);
+
err = igb_request_irq(adapter);
if (err)
goto err_req_irq;
@@ -1574,7 +1756,6 @@ static int igb_close(struct net_device *netdev)
*
* Return 0 on success, negative on failure
**/
-
int igb_setup_tx_resources(struct igb_adapter *adapter,
struct igb_ring *tx_ring)
{
@@ -1588,7 +1769,7 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
memset(tx_ring->buffer_info, 0, size);
/* round up to nearest 4K */
- tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
@@ -1635,7 +1816,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
r_idx = i % adapter->num_tx_queues;
adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
- }
+ }
return err;
}
@@ -1654,13 +1835,13 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int i, j;
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *ring = &(adapter->tx_ring[i]);
+ struct igb_ring *ring = &adapter->tx_ring[i];
j = ring->reg_idx;
wr32(E1000_TDLEN(j),
- ring->count * sizeof(struct e1000_tx_desc));
+ ring->count * sizeof(union e1000_adv_tx_desc));
tdba = ring->dma;
wr32(E1000_TDBAL(j),
- tdba & 0x00000000ffffffffULL);
+ tdba & 0x00000000ffffffffULL);
wr32(E1000_TDBAH(j), tdba >> 32);
ring->head = E1000_TDH(j);
@@ -1680,12 +1861,11 @@ static void igb_configure_tx(struct igb_adapter *adapter)
wr32(E1000_DCA_TXCTRL(j), txctrl);
}
-
-
- /* Use the default values for the Tx Inter Packet Gap (IPG) timer */
+ /* disable queue 0 to prevent tail bump w/o re-configuration */
+ if (adapter->vfs_allocated_count)
+ wr32(E1000_TXDCTL(0), 0);
/* Program the Transmit Control Register */
-
tctl = rd32(E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
@@ -1709,21 +1889,12 @@ static void igb_configure_tx(struct igb_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
int igb_setup_rx_resources(struct igb_adapter *adapter,
struct igb_ring *rx_ring)
{
struct pci_dev *pdev = adapter->pdev;
int size, desc_len;
-#ifdef CONFIG_IGB_LRO
- size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
- rx_ring->lro_mgr.lro_arr = vmalloc(size);
- if (!rx_ring->lro_mgr.lro_arr)
- goto err;
- memset(rx_ring->lro_mgr.lro_arr, 0, size);
-#endif
-
size = sizeof(struct igb_buffer) * rx_ring->count;
rx_ring->buffer_info = vmalloc(size);
if (!rx_ring->buffer_info)
@@ -1750,10 +1921,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
return 0;
err:
-#ifdef CONFIG_IGB_LRO
- vfree(rx_ring->lro_mgr.lro_arr);
- rx_ring->lro_mgr.lro_arr = NULL;
-#endif
vfree(rx_ring->buffer_info);
dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
"the receive descriptor ring\n");
@@ -1802,13 +1969,13 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/*
* enable stripping of CRC. It's unlikely this will break BMC
* redirection as it did with e1000. Newer features require
* that the HW strips the CRC.
- */
+ */
rctl |= E1000_RCTL_SECRC;
/*
@@ -1852,6 +2019,30 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ /* Attention!!! For SR-IOV PF driver operations you must enable
+	 * queue drop for all VF and PF queues to prevent head-of-line
+	 * blocking if an untrusted VF does not provide descriptors to
+	 * hardware.
+ */
+ if (adapter->vfs_allocated_count) {
+ u32 vmolr;
+
+ j = adapter->rx_ring[0].reg_idx;
+
+ /* set all queue drop enable bits */
+ wr32(E1000_QDE, ALL_QUEUES);
+ srrctl |= E1000_SRRCTL_DROP_EN;
+
+ /* disable queue 0 to prevent tail write w/o re-config */
+ wr32(E1000_RXDCTL(0), 0);
+
+ vmolr = rd32(E1000_VMOLR(j));
+ if (rctl & E1000_RCTL_LPE)
+ vmolr |= E1000_VMOLR_LPE;
+ if (adapter->num_rx_queues > 0)
+ vmolr |= E1000_VMOLR_RSSE;
+ wr32(E1000_VMOLR(j), vmolr);
+ }
+
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i].reg_idx;
wr32(E1000_SRRCTL(j), srrctl);
@@ -1861,6 +2052,54 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
}
/**
+ * igb_rlpml_set - set maximum receive packet size
+ * @adapter: board private structure
+ *
+ * Configure maximum receivable packet size.
+ **/
+static void igb_rlpml_set(struct igb_adapter *adapter)
+{
+ u32 max_frame_size = adapter->max_frame_size;
+ struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
+
+ if (adapter->vlgrp)
+ max_frame_size += VLAN_TAG_SIZE;
+
+ /* if vfs are enabled we set RLPML to the largest possible request
+ * size and set the VMOLR RLPML to the size we need */
+ if (pf_id) {
+ igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
+ max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
+ }
+
+ wr32(E1000_RLPML, max_frame_size);
+}
+
+/**
+ * igb_configure_vt_default_pool - Configure VT default pool
+ * @adapter: board private structure
+ *
+ * Configure the default pool
+ **/
+static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 pf_id = adapter->vfs_allocated_count;
+ u32 vtctl;
+
+ /* not in sr-iov mode - do nothing */
+ if (!pf_id)
+ return;
+
+ vtctl = rd32(E1000_VT_CTL);
+ vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
+ E1000_VT_CTL_DISABLE_DEF_POOL);
+ vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
+ wr32(E1000_VT_CTL, vtctl);
+}
+
+/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
*
@@ -1872,7 +2111,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rxcsum;
u32 rxdctl;
- int i, j;
+ int i;
/* disable receives while setting up the descriptors */
rctl = rd32(E1000_RCTL);
@@ -1886,14 +2125,14 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *ring = &(adapter->rx_ring[i]);
- j = ring->reg_idx;
+ struct igb_ring *ring = &adapter->rx_ring[i];
+ int j = ring->reg_idx;
rdba = ring->dma;
wr32(E1000_RDBAL(j),
- rdba & 0x00000000ffffffffULL);
+ rdba & 0x00000000ffffffffULL);
wr32(E1000_RDBAH(j), rdba >> 32);
wr32(E1000_RDLEN(j),
- ring->count * sizeof(union e1000_adv_rx_desc));
+ ring->count * sizeof(union e1000_adv_rx_desc));
ring->head = E1000_RDH(j);
ring->tail = E1000_RDT(j);
@@ -1907,16 +2146,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
rxdctl |= IGB_RX_HTHRESH << 8;
rxdctl |= IGB_RX_WTHRESH << 16;
wr32(E1000_RXDCTL(j), rxdctl);
-#ifdef CONFIG_IGB_LRO
- /* Intitial LRO Settings */
- ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
- ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
- ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
- ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
- ring->lro_mgr.dev = adapter->netdev;
- ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
- ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-#endif
}
if (adapter->num_rx_queues > 1) {
@@ -1941,7 +2170,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
writel(reta.dword,
hw->hw_addr + E1000_RETA(0) + (j & ~3));
}
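
For context on the (j & ~3) offset in the write above: the 128-entry redirection table packs four one-byte queue indices per 32-bit RETA register. A sketch of the implied fill loop follows; the queue-selection helper is hypothetical:

	union {			/* mirrors the driver's local reta union */
		u32 dword;
		u8  bytes[4];
	} reta;

	for (j = 0; j < (32 * 4); j++) {	/* 32 registers x 4 entries */
		reta.bytes[j & 3] = pick_rx_queue(adapter, j);	/* hypothetical */
		if ((j & 3) == 3)		/* flush every fourth entry */
			writel(reta.dword,
			       hw->hw_addr + E1000_RETA(0) + (j & ~3));
	}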
- mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+ if (adapter->vfs_allocated_count)
+ mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
+ else
+ mrqc = E1000_MRQC_ENABLE_RSS_4Q;
/* Fill out hash function seeds */
for (j = 0; j < 10; j++)
@@ -1966,27 +2198,23 @@ static void igb_configure_rx(struct igb_adapter *adapter)
rxcsum |= E1000_RXCSUM_PCSD;
wr32(E1000_RXCSUM, rxcsum);
} else {
+ /* Enable multi-queue for sr-iov */
+ if (adapter->vfs_allocated_count)
+ wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = rd32(E1000_RXCSUM);
- if (adapter->rx_csum) {
- rxcsum |= E1000_RXCSUM_TUOFL;
+ if (adapter->rx_csum)
+ rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
+ else
+ rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
- /* Enable IPv4 payload checksum for UDP fragments
- * Must be used in conjunction with packet-split. */
- if (adapter->rx_ps_hdr_size)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
- rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* don't need to clear IPPCSE as it defaults to 0 */
- }
wr32(E1000_RXCSUM, rxcsum);
}
- if (adapter->vlgrp)
- wr32(E1000_RLPML,
- adapter->max_frame_size + VLAN_TAG_SIZE);
- else
- wr32(E1000_RLPML, adapter->max_frame_size);
+ /* Set the default pool for the PF's first queue */
+ igb_configure_vt_default_pool(adapter);
+
+ igb_rlpml_set(adapter);
/* Enable Receives */
wr32(E1000_RCTL, rctl);
@@ -2029,14 +2257,10 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
struct igb_buffer *buffer_info)
{
- if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev,
- buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
- buffer_info->dma = 0;
- }
+ buffer_info->dma = 0;
if (buffer_info->skb) {
+ skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
@@ -2105,11 +2329,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
-#ifdef CONFIG_IGB_LRO
- vfree(rx_ring->lro_mgr.lro_arr);
- rx_ring->lro_mgr.lro_arr = NULL;
-#endif
-
pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
@@ -2209,15 +2428,18 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
static int igb_set_mac(struct net_device *netdev, void *p)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
- adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
return 0;
}
@@ -2237,7 +2459,7 @@ static void igb_set_multi(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct dev_mc_list *mc_ptr;
- u8 *mta_list;
+ u8 *mta_list = NULL;
u32 rctl;
int i;
@@ -2258,17 +2480,15 @@ static void igb_set_multi(struct net_device *netdev)
}
wr32(E1000_RCTL, rctl);
- if (!netdev->mc_count) {
- /* nothing to program, so clear mc list */
- igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
- mac->rar_entry_count);
- return;
+ if (netdev->mc_count) {
+ mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!mta_list) {
+ dev_err(&adapter->pdev->dev,
+ "failed to allocate multicast filter list\n");
+ return;
+ }
}
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
- if (!mta_list)
- return;
-
/* The shared function expects a packed array of only addresses. */
mc_ptr = netdev->mc_list;
@@ -2278,8 +2498,13 @@ static void igb_set_multi(struct net_device *netdev)
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
mc_ptr = mc_ptr->next;
}
- igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
- mac->rar_entry_count);
+ igb_update_mc_addr_list(hw, mta_list, i,
+ adapter->vfs_allocated_count + 1,
+ mac->rar_entry_count);
+
+ igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
+ igb_restore_vf_multicasts(adapter);
+
kfree(mta_list);
}
@@ -2292,6 +2517,46 @@ static void igb_update_phy_info(unsigned long data)
}
/**
+ * igb_has_link - check shared code for link and determine up/down
+ * @adapter: pointer to driver private info
+ **/
+static bool igb_has_link(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ bool link_active = false;
+ s32 ret_val = 0;
+
+ /* get_link_status is set on LSC (link status) interrupt or
+ * rx sequence error interrupt. get_link_status will stay
+	 * set until check_for_link establishes link
+ * for copper adapters ONLY
+ */
+ switch (hw->phy.media_type) {
+ case e1000_media_type_copper:
+ if (hw->mac.get_link_status) {
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
+ } else {
+ link_active = true;
+ }
+ break;
+ case e1000_media_type_fiber:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
+ break;
+ case e1000_media_type_internal_serdes:
+ ret_val = hw->mac.ops.check_for_link(hw);
+ link_active = hw->mac.serdes_has_link;
+ break;
+ default:
+ case e1000_media_type_unknown:
+ break;
+ }
+
+ return link_active;
+}
+
+/**
* igb_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
@@ -2307,34 +2572,16 @@ static void igb_watchdog_task(struct work_struct *work)
struct igb_adapter *adapter = container_of(work,
struct igb_adapter, watchdog_task);
struct e1000_hw *hw = &adapter->hw;
-
struct net_device *netdev = adapter->netdev;
struct igb_ring *tx_ring = adapter->tx_ring;
- struct e1000_mac_info *mac = &adapter->hw.mac;
u32 link;
u32 eics = 0;
- s32 ret_val;
int i;
- if ((netif_carrier_ok(netdev)) &&
- (rd32(E1000_STATUS) & E1000_STATUS_LU))
+ link = igb_has_link(adapter);
+ if ((netif_carrier_ok(netdev)) && link)
goto link_up;
- ret_val = hw->mac.ops.check_for_link(&adapter->hw);
- if ((ret_val == E1000_ERR_PHY) &&
- (hw->phy.type == e1000_phy_igp_3) &&
- (rd32(E1000_CTRL) &
- E1000_PHY_CTRL_GBE_DISABLE))
- dev_info(&adapter->pdev->dev,
- "Gigabit has been disabled, downgrading speed\n");
-
- if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
- !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
- link = mac->serdes_has_link;
- else
- link = rd32(E1000_STATUS) &
- E1000_STATUS_LU;
-
if (link) {
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
@@ -2373,6 +2620,9 @@ static void igb_watchdog_task(struct work_struct *work)
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
+ igb_ping_all_vfs(adapter);
+
+ /* link state has changed, schedule phy info update */
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
@@ -2386,6 +2636,10 @@ static void igb_watchdog_task(struct work_struct *work)
netdev->name);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
+
+ igb_ping_all_vfs(adapter);
+
+ /* link state has changed, schedule phy info update */
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
@@ -2395,9 +2649,9 @@ static void igb_watchdog_task(struct work_struct *work)
link_up:
igb_update_stats(adapter);
- mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+ hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
- mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+ hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
adapter->colc_old = adapter->stats.colc;
adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
@@ -2408,7 +2662,7 @@ link_up:
igb_update_adaptive(&adapter->hw);
if (!netif_carrier_ok(netdev)) {
- if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
+ if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
@@ -2554,7 +2808,7 @@ static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
if (bytes > 25000) {
if (packets > 35)
retval = low_latency;
- } else if (bytes < 6000) {
+ } else if (bytes < 1500) {
retval = low_latency;
}
break;
@@ -2586,15 +2840,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
adapter->tx_itr,
adapter->tx_ring->total_packets,
adapter->tx_ring->total_bytes);
-
current_itr = max(adapter->rx_itr, adapter->tx_itr);
} else {
current_itr = adapter->rx_itr;
}
/* conservative mode (itr 3) eliminates the lowest_latency setting */
- if (adapter->itr_setting == 3 &&
- current_itr == lowest_latency)
+ if (adapter->itr_setting == 3 && current_itr == lowest_latency)
current_itr = low_latency;
switch (current_itr) {
@@ -2646,6 +2898,7 @@ set_itr_now:
#define IGB_TX_FLAGS_VLAN 0x00000002
#define IGB_TX_FLAGS_TSO 0x00000004
#define IGB_TX_FLAGS_IPV4 0x00000008
+#define IGB_TX_FLAGS_TSTAMP 0x00000010
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
@@ -2711,7 +2964,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
- /* Context index must be unique per ring. */
+ /* For 82575, context index must be unique per ring. */
if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
mss_l4len_idx |= tx_ring->queue_index << 4;
@@ -2756,13 +3009,24 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ __be16 protocol;
+
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+ const struct vlan_ethhdr *vhdr =
+			(const struct vlan_ethhdr *)skb->data;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ } else {
+ protocol = skb->protocol;
+ }
+
+ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
break;
- case __constant_htons(ETH_P_IPV6):
+ case cpu_to_be16(ETH_P_IPV6):
/* XXX what about other V6 headers?? */
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
@@ -2781,6 +3045,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
context_desc->mss_l4len_idx =
cpu_to_le32(tx_ring->queue_index << 4);
+ else
+ context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
@@ -2793,8 +3059,6 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
return true;
}
-
-
return false;
}
@@ -2809,25 +3073,33 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
unsigned int len = skb_headlen(skb);
unsigned int count = 0, i;
unsigned int f;
+ dma_addr_t *map;
i = tx_ring->next_to_use;
+ if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+ return 0;
+ }
+
+ map = skb_shinfo(skb)->dma_maps;
+
buffer_info = &tx_ring->buffer_info[i];
BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = map[count];
count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
struct skb_frag_struct *frag;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
@@ -2836,19 +3108,10 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(adapter->pdev,
- frag->page,
- frag->page_offset,
- len,
- PCI_DMA_TODEVICE);
-
+ buffer_info->dma = map[count];
count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
}
- i = ((i == 0) ? tx_ring->count - 1 : i - 1);
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[first].next_to_watch = i;
@@ -2871,6 +3134,9 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
if (tx_flags & IGB_TX_FLAGS_VLAN)
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+ if (tx_flags & IGB_TX_FLAGS_TSTAMP)
+ cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+
if (tx_flags & IGB_TX_FLAGS_TSO) {
cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
@@ -2933,7 +3199,7 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
/* We need to check again in a case another CPU has just
* made room available. */
- if (IGB_DESC_UNUSED(tx_ring) < size)
+ if (igb_desc_unused(tx_ring) < size)
return -EBUSY;
/* A reprieve! */
@@ -2945,13 +3211,11 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
static int igb_maybe_stop_tx(struct net_device *netdev,
struct igb_ring *tx_ring, int size)
{
- if (IGB_DESC_UNUSED(tx_ring) >= size)
+ if (igb_desc_unused(tx_ring) >= size)
return 0;
return __igb_maybe_stop_tx(netdev, tx_ring, size);
}
-#define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
-
static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct net_device *netdev,
struct igb_ring *tx_ring)
@@ -2959,11 +3223,10 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_adapter *adapter = netdev_priv(netdev);
unsigned int first;
unsigned int tx_flags = 0;
- unsigned int len;
u8 hdr_len = 0;
+ int count = 0;
int tso = 0;
-
- len = skb_headlen(skb);
+ union skb_shared_tx *shtx;
if (test_bit(__IGB_DOWN, &adapter->state)) {
dev_kfree_skb_any(skb);
@@ -2984,7 +3247,21 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
/* this is a hard error */
return NETDEV_TX_BUSY;
}
- skb_orphan(skb);
+
+ /*
+ * TODO: check that there currently is no other packet with
+ * time stamping in the queue
+ *
+ * When doing time stamping, keep the connection to the socket
+ * a while longer: it is still needed by skb_hwtstamp_tx(),
+ * called either in igb_tx_hwtstamp() or by our caller when
+ * doing software time stamping.
+ */
+ shtx = skb_tx(skb);
+ if (unlikely(shtx->hardware)) {
+ shtx->in_progress = 1;
+ tx_flags |= IGB_TX_FLAGS_TSTAMP;
+ }
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -2995,7 +3272,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
tx_flags |= IGB_TX_FLAGS_IPV4;
first = tx_ring->next_to_use;
-
tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
&hdr_len) : 0;
@@ -3006,18 +3282,27 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
if (tso)
tx_flags |= IGB_TX_FLAGS_TSO;
- else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- tx_flags |= IGB_TX_FLAGS_CSUM;
+ else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IGB_TX_FLAGS_CSUM;
- igb_tx_queue_adv(adapter, tx_ring, tx_flags,
- igb_tx_map_adv(adapter, tx_ring, skb, first),
- skb->len, hdr_len);
-
- netdev->trans_start = jiffies;
-
- /* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+ /*
+ * count reflects descriptors mapped, if 0 then mapping error
+ * has occured and we need to rewind the descriptor queue
+ */
+ count = igb_tx_map_adv(adapter, tx_ring, skb, first);
+
+ if (count) {
+ igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
+ skb->len, hdr_len);
+ netdev->trans_start = jiffies;
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
return NETDEV_TX_OK;
}
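
A user-space sketch of how the shtx->hardware flag checked earlier in this function gets set: the device is switched into hardware time stamping via SIOCSHWTSTAMP (the linux/net_tstamp.h interface this patch starts including), while the socket requests per-packet stamps with SO_TIMESTAMPING. Device name and error handling are illustrative only:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hw_tstamp(int sock)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;		/* stamp transmitted packets */
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* stamp all received packets */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* hypothetical device */
		ifr.ifr_data = (void *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}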
@@ -3028,7 +3313,7 @@ static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
struct igb_ring *tx_ring;
int r_idx = 0;
- r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
+ r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
tx_ring = adapter->multi_tx_table[r_idx];
/* This goes back to the question of how to logically map a tx queue
@@ -3050,8 +3335,8 @@ static void igb_tx_timeout(struct net_device *netdev)
/* Do the reset outside of interrupt context */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
- wr32(E1000_EICS, adapter->eims_enable_mask &
- ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
+ wr32(E1000_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
}
static void igb_reset_task(struct work_struct *work)
@@ -3069,8 +3354,7 @@ static void igb_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the timer callback.
**/
-static struct net_device_stats *
-igb_get_stats(struct net_device *netdev)
+static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3096,7 +3380,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
-#define MAX_STD_JUMBO_FRAME_SIZE 9234
if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
return -EINVAL;
@@ -3104,6 +3387,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
+
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
if (netif_running(netdev))
@@ -3129,6 +3413,12 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
#else
adapter->rx_buffer_len = PAGE_SIZE / 2;
#endif
+
+ /* if sr-iov is enabled we need to force buffer size to 1K or larger */
+ if (adapter->vfs_allocated_count &&
+ (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
+ adapter->rx_buffer_len = IGB_RXBUFFER_1024;
+
/* adjust allocation if LPE protects us, and we aren't using SBP */
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
@@ -3273,8 +3563,7 @@ void igb_update_stats(struct igb_adapter *adapter)
/* Phy Stats */
if (hw->phy.media_type == e1000_media_type_copper) {
if ((adapter->link_speed == SPEED_1000) &&
- (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
- &phy_tmp))) {
+ (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
adapter->phy_stats.idle_errors += phy_tmp;
}
@@ -3286,7 +3575,6 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}
-
static irqreturn_t igb_msix_other(int irq, void *data)
{
struct net_device *netdev = data;
@@ -3295,15 +3583,24 @@ static irqreturn_t igb_msix_other(int irq, void *data)
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
- if (!(icr & E1000_ICR_LSC))
- goto no_link_interrupt;
- hw->mac.get_link_status = 1;
- /* guard against interrupt when we're going down */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
-
-no_link_interrupt:
- wr32(E1000_IMS, E1000_IMS_LSC);
+
+	if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
+ /* Check for a mailbox event */
+ if (icr & E1000_ICR_VMMB)
+ igb_msg_task(adapter);
+
+ if (icr & E1000_ICR_LSC) {
+ hw->mac.get_link_status = 1;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__IGB_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
+
+ wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
wr32(E1000_EIMS, adapter->eims_other);
return IRQ_HANDLED;
@@ -3319,6 +3616,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_tx_dca(tx_ring);
#endif
+
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
@@ -3339,13 +3637,11 @@ static void igb_write_itr(struct igb_ring *ring)
if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
switch (hw->mac.type) {
case e1000_82576:
- wr32(ring->itr_register,
- ring->itr_val |
+ wr32(ring->itr_register, ring->itr_val |
0x80000000);
break;
default:
- wr32(ring->itr_register,
- ring->itr_val |
+ wr32(ring->itr_register, ring->itr_val |
(ring->itr_val << 16));
break;
}
@@ -3363,8 +3659,8 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
igb_write_itr(rx_ring);
- if (netif_rx_schedule_prep(&rx_ring->napi))
- __netif_rx_schedule(&rx_ring->napi);
+ if (napi_schedule_prep(&rx_ring->napi))
+ __napi_schedule(&rx_ring->napi);
#ifdef CONFIG_IGB_DCA
if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -3386,11 +3682,11 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
if (hw->mac.type == e1000_82576) {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
- dca_rxctrl |= dca_get_tag(cpu) <<
+ dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_RXCTRL_CPUID_SHIFT;
} else {
dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
- dca_rxctrl |= dca_get_tag(cpu);
+ dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
}
dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
@@ -3413,11 +3709,11 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
if (hw->mac.type == e1000_82576) {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
- dca_txctrl |= dca_get_tag(cpu) <<
+ dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
E1000_DCA_TXCTRL_CPUID_SHIFT;
} else {
dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
- dca_txctrl |= dca_get_tag(cpu);
+ dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
}
dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
@@ -3457,7 +3753,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
break;
/* Always use CB2 mode, difference is masked
* in the CB driver. */
- wr32(E1000_DCA_CTRL, 2);
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
if (dca_add_requester(dev) == 0) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
dev_info(&adapter->pdev->dev, "DCA enabled\n");
@@ -3472,7 +3768,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
dca_remove_requester(dev);
dev_info(&adapter->pdev->dev, "DCA disabled\n");
adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
- wr32(E1000_DCA_CTRL, 1);
+ wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
}
break;
}
@@ -3492,6 +3788,322 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
}
#endif /* CONFIG_IGB_DCA */
+static void igb_ping_all_vfs(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
+ ping = E1000_PF_CONTROL_MSG;
+ if (adapter->vf_data[i].clear_to_send)
+ ping |= E1000_VT_MSGTYPE_CTS;
+ igb_write_mbx(hw, &ping, 1, i);
+ }
+}
+
+static int igb_set_vf_multicasts(struct igb_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
+ struct vf_data_storage *vf_data = &adapter->vf_data[vf];
+ int i;
+
+ /* only up to 30 hash values supported */
+ if (n > 30)
+ n = 30;
+
+ /* salt away the number of multicast addresses assigned
+ * to this VF so they can be restored when the PF multicast
+ * list changes
+ */
+ vf_data->num_vf_mc_hashes = n;
+
+ /* VFs are limited to using the MTA hash table for their multicast
+ * addresses */
+ for (i = 0; i < n; i++)
+ vf_data->vf_mc_hashes[i] = hash_list[i];
+
+ /* Flush and reset the mta with the new values */
+ igb_set_multi(adapter->netdev);
+
+ return 0;
+}
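
The mailbox words decoded above pack the opcode into the low 16 bits of word 0, the hash count into the MSGINFO field, and the MTA hash values as u16s from word 1 onward. A minimal sketch of the matching VF-side encoder, using only the constants already referenced above (vf_pack_mc_msg is an illustrative name, not a driver function):

	static void vf_pack_mc_msg(u32 *msgbuf, const u16 *hashes, int n)
	{
		int i;

		if (n > 30)	/* same limit the PF enforces above */
			n = 30;
		msgbuf[0] = E1000_VF_SET_MULTICAST |
			    (n << E1000_VT_MSGINFO_SHIFT);
		for (i = 0; i < n; i++)
			((u16 *)&msgbuf[1])[i] = hashes[i];
	}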
+
+static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct vf_data_storage *vf_data;
+ int i, j;
+
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ vf_data = &adapter->vf_data[i];
+ for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
+ igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
+ }
+}
+
+static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 pool_mask, reg, vid;
+ int i;
+
+ pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+
+ /* remove the vf from the pool */
+ reg &= ~pool_mask;
+
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
+ (reg & E1000_VLVF_VLANID_ENABLE)) {
+ vid = reg & E1000_VLVF_VLANID_MASK;
+ reg = 0;
+ igb_vfta_set(hw, vid, false);
+ }
+
+ wr32(E1000_VLVF(i), reg);
+ }
+}
+
+static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg, i;
+
+ /* It is an error to call this function when VFs are not enabled */
+ if (!adapter->vfs_allocated_count)
+ return -1;
+
+ /* Find the vlan filter for this id */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+ if ((reg & E1000_VLVF_VLANID_ENABLE) &&
+ vid == (reg & E1000_VLVF_VLANID_MASK))
+ break;
+ }
+
+ if (add) {
+ if (i == E1000_VLVF_ARRAY_SIZE) {
+ /* Did not find a matching VLAN ID entry that was
+ * enabled. Search for a free filter entry, i.e.
+ * one without the enable bit set
+ */
+ for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
+ reg = rd32(E1000_VLVF(i));
+ if (!(reg & E1000_VLVF_VLANID_ENABLE))
+ break;
+ }
+ }
+ if (i < E1000_VLVF_ARRAY_SIZE) {
+ /* Found an enabled/available entry */
+ reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+
+ /* if !enabled we need to set this up in vfta */
+ if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
+ /* add VID to filter table, if bit already set
+ * PF must have added it outside of table */
+ if (igb_vfta_set(hw, vid, true))
+ reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
+ reg |= E1000_VLVF_VLANID_ENABLE;
+ }
+ reg &= ~E1000_VLVF_VLANID_MASK;
+ reg |= vid;
+
+ wr32(E1000_VLVF(i), reg);
+ return 0;
+ }
+ } else {
+ if (i < E1000_VLVF_ARRAY_SIZE) {
+ /* remove vf from the pool */
+ reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
+ /* if pool is empty then remove entry from vfta */
+ if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
+ reg = 0;
+ igb_vfta_set(hw, vid, false);
+ }
+ wr32(E1000_VLVF(i), reg);
+ return 0;
+ }
+ }
+ return -1;
+}
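
The loops above treat each VLVF entry as a VLAN ID in the low bits, one pool-select bit per PF/VF pool above it, and a valid bit. A sketch of composing such an entry from scratch under that layout, using only the masks already referenced (build_vlvf_entry is illustrative, not a driver function):

	static u32 build_vlvf_entry(u32 vid, u32 vf)
	{
		u32 reg = vid & E1000_VLVF_VLANID_MASK;

		/* allow the given VF's pool to receive this VLAN */
		reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
		reg |= E1000_VLVF_VLANID_ENABLE;
		return reg;
	}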
+
+static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+ int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
+
+ return igb_vlvf_set(adapter, vid, add, vf);
+}
+
+static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* disable mailbox functionality for vf */
+ adapter->vf_data[vf].clear_to_send = false;
+
+ /* reset offloads to defaults */
+ igb_set_vmolr(hw, vf);
+
+ /* reset vlans for device */
+ igb_clear_vf_vfta(adapter, vf);
+
+ /* reset multicast table array for vf */
+ adapter->vf_data[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ igb_set_multi(adapter->netdev);
+}
+
+static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
+ u32 reg, msgbuf[3];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* process all the same items cleared in a function level reset */
+ igb_vf_reset_event(adapter, vf);
+
+ /* set vf mac address */
+ igb_rar_set(hw, vf_mac, vf + 1);
+ igb_set_rah_pool(hw, vf, vf + 1);
+
+ /* enable transmit and receive for vf */
+ reg = rd32(E1000_VFTE);
+ wr32(E1000_VFTE, reg | (1 << vf));
+ reg = rd32(E1000_VFRE);
+ wr32(E1000_VFRE, reg | (1 << vf));
+
+ /* enable mailbox functionality for vf */
+ adapter->vf_data[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, 6);
+ igb_write_mbx(hw, msgbuf, 3, vf);
+}
+
+static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
+{
+ unsigned char *addr = (unsigned char *)&msg[1];
+ int err = -1;
+
+ if (is_valid_ether_addr(addr))
+ err = igb_set_vf_mac(adapter, vf, addr);
+
+ return err;
+}
+
+static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 msg = E1000_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vf_data[vf].clear_to_send)
+ igb_write_mbx(hw, &msg, 1, vf);
+}
+
+static void igb_msg_task(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
+ /* process any reset requests */
+ if (!igb_check_for_rst(hw, vf)) {
+ adapter->vf_data[vf].clear_to_send = false;
+ igb_vf_reset_event(adapter, vf);
+ }
+
+ /* process any messages pending */
+ if (!igb_check_for_msg(hw, vf))
+ igb_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!igb_check_for_ack(hw, vf))
+ igb_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = E1000_VFMAILBOX_SIZE;
+ u32 msgbuf[mbx_size];
+ struct e1000_hw *hw = &adapter->hw;
+ s32 retval;
+
+ retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ dev_err(&adapter->pdev->dev,
+ "Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == E1000_VF_RESET) {
+ igb_vf_reset_msg(adapter, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vf_data[vf].clear_to_send) {
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ igb_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case E1000_VF_SET_MAC_ADDR:
+ retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
+ break;
+ case E1000_VF_SET_MULTICAST:
+ retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
+ break;
+ case E1000_VF_SET_LPE:
+ retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
+ break;
+ case E1000_VF_SET_VLAN:
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
+ retval = -1;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
+
+ igb_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
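
The dispatch above implies a simple wire format: opcode in the low 16 bits of word 0, payload from word 1 on, with the ACK/NACK and CTS flags OR'd into word 0 on the reply. A sketch of the VF side of the E1000_VF_SET_MAC_ADDR exchange, under that assumption (vf_pack_set_mac is an illustrative name, not a driver function):

	static void vf_pack_set_mac(u32 *msgbuf, const u8 *mac)
	{
		msgbuf[0] = E1000_VF_SET_MAC_ADDR;
		/* the PF reads these six bytes in igb_set_vf_mac_addr() */
		memcpy(&msgbuf[1], mac, 6);
	}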
+
/**
* igb_intr_msi - Interrupt Handler
* @irq: interrupt number
@@ -3507,19 +4119,24 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
igb_write_itr(adapter->rx_ring);
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
+
if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
hw->mac.get_link_status = 1;
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- netif_rx_schedule(&adapter->rx_ring[0].napi);
+ napi_schedule(&adapter->rx_ring[0].napi);
return IRQ_HANDLED;
}
/**
- * igb_intr - Interrupt Handler
+ * igb_intr - Legacy Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
@@ -3531,7 +4148,6 @@ static irqreturn_t igb_intr(int irq, void *data)
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write */
u32 icr = rd32(E1000_ICR);
- u32 eicr = 0;
if (!icr)
return IRQ_NONE; /* Not our interrupt */
@@ -3542,7 +4158,10 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
- eicr = rd32(E1000_EICR);
+ if (icr & E1000_ICR_DOUTSYNC) {
+ /* HW is reporting DMA is out of sync */
+ adapter->stats.doosync++;
+ }
if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
hw->mac.get_link_status = 1;
@@ -3551,11 +4170,31 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
- netif_rx_schedule(&adapter->rx_ring[0].napi);
+ napi_schedule(&adapter->rx_ring[0].napi);
return IRQ_HANDLED;
}
+static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
+{
+ struct igb_adapter *adapter = rx_ring->adapter;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (adapter->itr_setting & 3) {
+ if (adapter->num_rx_queues == 1)
+ igb_set_itr(adapter);
+ else
+ igb_update_ring_itr(rx_ring);
+ }
+
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->msix_entries)
+ wr32(E1000_EIMS, rx_ring->eims_value);
+ else
+ igb_irq_enable(adapter);
+ }
+}
+
/**
* igb_poll - NAPI Rx polling callback
* @napi: napi polling structure
@@ -3564,70 +4203,64 @@ static irqreturn_t igb_intr(int irq, void *data)
static int igb_poll(struct napi_struct *napi, int budget)
{
struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
- struct igb_adapter *adapter = rx_ring->adapter;
- struct net_device *netdev = adapter->netdev;
- int tx_clean_complete, work_done = 0;
+ int work_done = 0;
- /* this poll routine only supports one tx and one rx queue */
#ifdef CONFIG_IGB_DCA
- if (adapter->flags & IGB_FLAG_DCA_ENABLED)
- igb_update_tx_dca(&adapter->tx_ring[0]);
+ if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
+ igb_update_rx_dca(rx_ring);
#endif
- tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
+ igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
+ if (rx_ring->buddy) {
#ifdef CONFIG_IGB_DCA
- if (adapter->flags & IGB_FLAG_DCA_ENABLED)
- igb_update_rx_dca(&adapter->rx_ring[0]);
+ if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
+ igb_update_tx_dca(rx_ring->buddy);
#endif
- igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
+ if (!igb_clean_tx_irq(rx_ring->buddy))
+ work_done = budget;
+ }
- /* If no Tx and not enough Rx work done, exit the polling mode */
- if ((tx_clean_complete && (work_done < budget)) ||
- !netif_running(netdev)) {
- if (adapter->itr_setting & 3)
- igb_set_itr(adapter);
- netif_rx_complete(napi);
- if (!test_bit(__IGB_DOWN, &adapter->state))
- igb_irq_enable(adapter);
- return 0;
+ /* If not enough Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ igb_rx_irq_enable(rx_ring);
}
- return 1;
+ return work_done;
}
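
The rework above brings igb_poll() in line with the standard NAPI contract: consume at most budget receive descriptors, and only when fewer than budget were processed call napi_complete() and re-enable interrupts. A generic skeleton of that contract with the driver-specific pieces stubbed out (example_poll, clean_rx and rearm_irqs are illustrative names, not kernel APIs):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = clean_rx(napi, budget);	/* driver-specific RX clean */

		if (work_done < budget) {
			napi_complete(napi);	/* leave polled mode... */
			rearm_irqs(napi);	/* ...and take interrupts again */
		}
		return work_done;		/* must never exceed budget */
	}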
-static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
+/**
+ * igb_tx_hwtstamp - utility function which checks for TX time stamp
+ * @adapter: board private structure
+ * @skb: packet that was just sent
+ *
+ * If we were asked to do hardware stamping and such a time stamp is
+ * available, then it must have been for this skb here because we only
+ * allow one such packet into the queue.
+ */
+static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
{
- struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
- struct igb_adapter *adapter = rx_ring->adapter;
+ union skb_shared_tx *shtx = skb_tx(skb);
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int work_done = 0;
-
-#ifdef CONFIG_IGB_DCA
- if (adapter->flags & IGB_FLAG_DCA_ENABLED)
- igb_update_rx_dca(rx_ring);
-#endif
- igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
-
-
- /* If not enough Rx work done, exit the polling mode */
- if ((work_done == 0) || !netif_running(netdev)) {
- netif_rx_complete(napi);
- if (adapter->itr_setting & 3) {
- if (adapter->num_rx_queues == 1)
- igb_set_itr(adapter);
- else
- igb_update_ring_itr(rx_ring);
+ if (unlikely(shtx->hardware)) {
+ u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
+ if (valid) {
+ u64 regval = rd32(E1000_TXSTMPL);
+ u64 ns;
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ regval |= (u64)rd32(E1000_TXSTMPH) << 32;
+ ns = timecounter_cyc2time(&adapter->clock,
+ regval);
+ timecompare_update(&adapter->compare, ns);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ shhwtstamps.syststamp =
+ timecompare_transform(&adapter->compare, ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
-
- if (!test_bit(__IGB_DOWN, &adapter->state))
- wr32(E1000_EIMS, rx_ring->eims_value);
-
- return 0;
}
-
- return 1;
}
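
The TXSTMPL/TXSTMPH pair read above stays latched until it is read back, which is why the VALID bit is checked first and the low half read before the high half; the order matters if, as assumed here, reading the high half is what frees the registers for the next timestamp. The RX path later in this patch assembles its 64-bit value the same way:

	/* sketch of the low-then-high 64-bit read-out used in both places */
	u64 stamp = rd32(E1000_TXSTMPL);
	stamp |= (u64)rd32(E1000_TXSTMPH) << 32;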
/**
@@ -3668,6 +4301,8 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
skb->len;
total_packets += segs;
total_bytes += bytecount;
+
+ igb_tx_hwtstamp(adapter, skb);
}
igb_unmap_and_free_tx_resource(adapter, buffer_info);
@@ -3677,7 +4312,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
if (i == tx_ring->count)
i = 0;
}
-
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
}
@@ -3686,7 +4320,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
if (unlikely(count &&
netif_carrier_ok(netdev) &&
- IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -3742,44 +4376,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
return (count < tx_ring->count);
}
-#ifdef CONFIG_IGB_LRO
- /**
- * igb_get_skb_hdr - helper function for LRO header processing
- * @skb: pointer to sk_buff to be added to LRO packet
- * @iphdr: pointer to ip header structure
- * @tcph: pointer to tcp header structure
- * @hdr_flags: pointer to header flags
- * @priv: pointer to the receive descriptor for the current sk_buff
- **/
-static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
- u64 *hdr_flags, void *priv)
-{
- union e1000_adv_rx_desc *rx_desc = priv;
- u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
- (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
-
- /* Verify that this is a valid IPv4 TCP packet */
- if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
- E1000_RXDADV_PKTTYPE_TCP))
- return -1;
-
- /* Set network headers */
- skb_reset_network_header(skb);
- skb_set_transport_header(skb, ip_hdrlen(skb));
- *iphdr = ip_hdr(skb);
- *tcph = tcp_hdr(skb);
- *hdr_flags = LRO_IPV4 | LRO_TCP;
-
- return 0;
-
-}
-#endif /* CONFIG_IGB_LRO */
-
/**
* igb_receive_skb - helper function to handle rx indications
- * @ring: pointer to receive ring receving this packet
+ * @ring: pointer to receive ring receiving this packet
* @status: descriptor status field as written by hardware
- * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @rx_desc: receive descriptor containing vlan and type information.
* @skb: pointer to sk_buff to be indicated to stack
**/
static void igb_receive_skb(struct igb_ring *ring, u8 status,
@@ -3789,31 +4390,23 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
struct igb_adapter * adapter = ring->adapter;
bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
-#ifdef CONFIG_IGB_LRO
- if (adapter->netdev->features & NETIF_F_LRO &&
- skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb_record_rx_queue(skb, ring->queue_index);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
if (vlan_extracted)
- lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
- adapter->vlgrp,
- le16_to_cpu(rx_desc->wb.upper.vlan),
- rx_desc);
+ vlan_gro_receive(&ring->napi, adapter->vlgrp,
+ le16_to_cpu(rx_desc->wb.upper.vlan),
+ skb);
else
- lro_receive_skb(&ring->lro_mgr,skb, rx_desc);
- ring->lro_used = 1;
+ napi_gro_receive(&ring->napi, skb);
} else {
-#endif
if (vlan_extracted)
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->wb.upper.vlan));
else
-
netif_receive_skb(skb);
-#ifdef CONFIG_IGB_LRO
}
-#endif
}
-
static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
u32 status_err, struct sk_buff *skb)
{
@@ -3841,17 +4434,19 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
{
struct igb_adapter *adapter = rx_ring->adapter;
struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
union e1000_adv_rx_desc *rx_desc , *next_rxd;
struct igb_buffer *buffer_info , *next_buffer;
struct sk_buff *skb;
- unsigned int i;
- u32 length, hlen, staterr;
bool cleaned = false;
int cleaned_count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i;
+ u32 length, hlen, staterr;
i = rx_ring->next_to_clean;
+ buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
@@ -3859,25 +4454,22 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
if (*work_done >= budget)
break;
(*work_done)++;
- buffer_info = &rx_ring->buffer_info[i];
- /* HW will not DMA in data larger than the given buffer, even
- * if it parses the (NFS, of course) header to be larger. In
- * that case, it fills the header buffer and spills the rest
- * into the page.
- */
- hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > adapter->rx_ps_hdr_size)
- hlen = adapter->rx_ps_hdr_size;
+ skb = buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ buffer_info->skb = NULL;
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ next_buffer = &rx_ring->buffer_info[i];
length = le16_to_cpu(rx_desc->wb.upper.length);
cleaned = true;
cleaned_count++;
- skb = buffer_info->skb;
- prefetch(skb->data - NET_IP_ALIGN);
- buffer_info->skb = NULL;
if (!adapter->rx_ps_hdr_size) {
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len +
@@ -3887,10 +4479,19 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
goto send_up;
}
+ /* HW will not DMA in data larger than the given buffer, even
+ * if it parses the (NFS, of course) header to be larger. In
+ * that case, it fills the header buffer and spills the rest
+ * into the page.
+ */
+ hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
+ E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > adapter->rx_ps_hdr_size)
+ hlen = adapter->rx_ps_hdr_size;
+
if (!skb_shinfo(skb)->nr_frags) {
pci_unmap_single(pdev, buffer_info->dma,
- adapter->rx_ps_hdr_size +
- NET_IP_ALIGN,
+ adapter->rx_ps_hdr_size + NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
skb_put(skb, hlen);
}
@@ -3916,13 +4517,6 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
skb->truesize += length;
}
-send_up:
- i++;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
- prefetch(next_rxd);
- next_buffer = &rx_ring->buffer_info[i];
if (!(staterr & E1000_RXD_STAT_EOP)) {
buffer_info->skb = next_buffer->skb;
@@ -3931,6 +4525,47 @@ send_up:
next_buffer->dma = 0;
goto next_desc;
}
+send_up:
+ /*
+ * If this bit is set, then the RX registers contain
+ * the time stamp. No other packet will be time
+ * stamped until we read these registers, so read the
+ * registers to make them available again. Because
+ * only one packet can be time stamped at a time, we
+ * know that the register values must belong to this
+ * one here and therefore we don't need to compare
+ * any of the additional attributes stored for it.
+ *
+ * If nothing went wrong, then it should have a
+ * skb_shared_tx that we can turn into a
+ * skb_shared_hwtstamps.
+ *
+ * TODO: can time stamping be triggered (thus locking
+ * the registers) without the packet reaching this point
+ * here? In that case RX time stamping would get stuck.
+ *
+ * TODO: in "time stamp all packets" mode this bit is
+ * not set. Need a global flag for this mode and then
+ * always read the registers. Cannot be done without
+ * a race condition.
+ */
+ if (unlikely(staterr & E1000_RXD_STAT_TS)) {
+ u64 regval;
+ u64 ns;
+ struct skb_shared_hwtstamps *shhwtstamps =
+ skb_hwtstamps(skb);
+
+ WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
+ "igb: no RX time stamp available for time stamped packet");
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
+ ns = timecounter_cyc2time(&adapter->clock, regval);
+ timecompare_update(&adapter->compare, ns);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ shhwtstamps->syststamp =
+ timecompare_transform(&adapter->compare, ns);
+ }
if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
dev_kfree_skb_irq(skb);
@@ -3958,19 +4593,11 @@ next_desc:
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
-
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
rx_ring->next_to_clean = i;
- cleaned_count = IGB_DESC_UNUSED(rx_ring);
-
-#ifdef CONFIG_IGB_LRO
- if (rx_ring->lro_used) {
- lro_flush_all(&rx_ring->lro_mgr);
- rx_ring->lro_used = 0;
- }
-#endif
+ cleaned_count = igb_desc_unused(rx_ring);
if (cleaned_count)
igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
@@ -3984,7 +4611,6 @@ next_desc:
return cleaned;
}
-
/**
* igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -3999,10 +4625,17 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
struct igb_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
+ int bufsz;
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
+ if (adapter->rx_ps_hdr_size)
+ bufsz = adapter->rx_ps_hdr_size;
+ else
+ bufsz = adapter->rx_buffer_len;
+ bufsz += NET_IP_ALIGN;
+
while (cleaned_count--) {
rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
@@ -4018,23 +4651,14 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
buffer_info->page_offset ^= PAGE_SIZE / 2;
}
buffer_info->page_dma =
- pci_map_page(pdev,
- buffer_info->page,
+ pci_map_page(pdev, buffer_info->page,
buffer_info->page_offset,
PAGE_SIZE / 2,
PCI_DMA_FROMDEVICE);
}
if (!buffer_info->skb) {
- int bufsz;
-
- if (adapter->rx_ps_hdr_size)
- bufsz = adapter->rx_ps_hdr_size;
- else
- bufsz = adapter->rx_buffer_len;
- bufsz += NET_IP_ALIGN;
skb = netdev_alloc_skb(netdev, bufsz);
-
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
@@ -4050,7 +4674,6 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
buffer_info->dma = pci_map_single(pdev, skb->data,
bufsz,
PCI_DMA_FROMDEVICE);
-
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -4120,6 +4743,163 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
/**
+ * igb_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifr: interface request carrying a struct hwtstamp_config
+ * @cmd: ioctl command, SIOCSHWTSTAMP
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware
+ * filters. Not all combinations are supported, in particular event
+ * type has to be specified. Matching the kind of event packet is
+ * not supported, with the exception of "all V2 events regardless of
+ * level 2 or 4".
+ *
+ **/
+static int igb_hwtstamp_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config config;
+ u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
+ u32 tsync_rx_ctl_type = 0;
+ u32 tsync_rx_cfg = 0;
+ int is_l4 = 0;
+ int is_l2 = 0;
+ short port = 319; /* PTP */
+ u32 regval;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl_bit = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl_bit = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /*
+ * register TSYNCRXCFG must be set, therefore it is not
+ * possible to time stamp both Sync and Delay_Req messages
+ * => fall back to time stamping all packets
+ */
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
+ is_l4 = 1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = 1;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
+ is_l2 = 1;
+ is_l4 = 1;
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = 1;
+ is_l4 = 1;
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* enable/disable TX */
+ regval = rd32(E1000_TSYNCTXCTL);
+ regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
+ wr32(E1000_TSYNCTXCTL, regval);
+
+ /* enable/disable RX, define which PTP packets are time stamped */
+ regval = rd32(E1000_TSYNCRXCTL);
+ regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
+ regval = (regval & ~0xE) | tsync_rx_ctl_type;
+ wr32(E1000_TSYNCRXCTL, regval);
+ wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
+
+ /*
+ * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
+ * (Ethertype to filter on)
+ * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
+ * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
+ */
+ wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
+
+ /* L4 Queue Filter[0]: only filter by source and destination port */
+ wr32(E1000_SPQF0, htons(port));
+ wr32(E1000_IMIREXT(0), is_l4 ?
+ ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
+ wr32(E1000_IMIR(0), is_l4 ?
+ (htons(port)
+ | (0<<16) /* immediate interrupt disabled */
+ | 0 /* (1<<17) bit cleared: do not bypass
+ destination port check */)
+ : 0);
+ wr32(E1000_FTQF0, is_l4 ?
+ (0x11 /* UDP */
+ | (1<<15) /* VF not compared */
+ | (1<<27) /* Enable Timestamping */
+ | (7<<28) /* only source port filter enabled,
+ source/target address and protocol
+ masked */)
+ : ((1<<15) | (15<<28) /* all mask bits set = filter not
+ enabled */));
+
+ wrfl();
+
+ adapter->hwtstamp_config = config;
+
+ /* clear TX/RX time stamp registers, just to be sure */
+ regval = rd32(E1000_TXSTMPH);
+ regval = rd32(E1000_RXSTMPH);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
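
From userspace this handler is reached through the SIOCSHWTSTAMP ioctl with a struct hwtstamp_config in ifr_data; since the driver may rewrite rx_filter to the mode it actually enabled (e.g. HWTSTAMP_FILTER_ALL above), the structure is copied back to the caller. A minimal caller sketch, assuming the 2.6.30-era UAPI headers that define this interface:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	static int enable_hwtstamp(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
			return -1;
		/* cfg.rx_filter now holds what the driver really enabled */
		return 0;
	}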
+
+/**
* igb_ioctl -
* @netdev:
* @ifreq:
@@ -4132,6 +4912,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
+ case SIOCSHWTSTAMP:
+ return igb_hwtstamp_ioctl(netdev, ifr, cmd);
default:
return -EOPNOTSUPP;
}
@@ -4158,8 +4940,6 @@ static void igb_vlan_rx_register(struct net_device *netdev,
rctl &= ~E1000_RCTL_CFIEN;
wr32(E1000_RCTL, rctl);
igb_update_mng_vlan(adapter);
- wr32(E1000_RLPML,
- adapter->max_frame_size + VLAN_TAG_SIZE);
} else {
/* disable VLAN tag insert/strip */
ctrl = rd32(E1000_CTRL);
@@ -4170,10 +4950,10 @@ static void igb_vlan_rx_register(struct net_device *netdev,
igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
}
- wr32(E1000_RLPML,
- adapter->max_frame_size);
}
+ igb_rlpml_set(adapter);
+
if (!test_bit(__IGB_DOWN, &adapter->state))
igb_irq_enable(adapter);
}
@@ -4182,24 +4962,25 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 vfta, index;
+ int pf_id = adapter->vfs_allocated_count;
- if ((adapter->hw.mng_cookie.status &
+ if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id))
return;
- /* add VID to filter table */
- index = (vid >> 5) & 0x7F;
- vfta = array_rd32(E1000_VFTA, index);
- vfta |= (1 << (vid & 0x1F));
- igb_write_vfta(&adapter->hw, index, vfta);
+
+ /* add vid to vlvf if sr-iov is enabled,
+ * if that fails add directly to filter table */
+ if (igb_vlvf_set(adapter, vid, true, pf_id))
+ igb_vfta_set(hw, vid, true);
}
static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 vfta, index;
+ int pf_id = adapter->vfs_allocated_count;
igb_irq_disable(adapter);
vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -4215,11 +4996,10 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
return;
}
- /* remove VID from filter table */
- index = (vid >> 5) & 0x7F;
- vfta = array_rd32(E1000_VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
- igb_write_vfta(&adapter->hw, index, vfta);
+ /* remove vid from vlvf if sr-iov is enabled,
+ * if not in vlvf remove from vfta */
+ if (igb_vlvf_set(adapter, vid, false, pf_id))
+ igb_vfta_set(hw, vid, false);
}
static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -4276,7 +5056,6 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
return 0;
}
-
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4368,10 +5147,7 @@ static int igb_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (adapter->need_ioport)
- err = pci_enable_device(pdev);
- else
- err = pci_enable_device_mem(pdev);
+ err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"igb: Cannot enable PCI device from suspend\n");
@@ -4392,6 +5168,11 @@ static int igb_resume(struct pci_dev *pdev)
/* e1000_power_up_phy(adapter); */
igb_reset(adapter);
+
+ /* let the f/w know that the h/w is now under the control of the
+ * driver. */
+ igb_get_hw_control(adapter);
+
wr32(E1000_WUS, ~0);
if (netif_running(netdev)) {
@@ -4402,10 +5183,6 @@ static int igb_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- /* let the f/w know that the h/w is now under the control of the
- * driver. */
- igb_get_hw_control(adapter);
-
return 0;
}
#endif
@@ -4424,22 +5201,27 @@ static void igb_shutdown(struct pci_dev *pdev)
static void igb_netpoll(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
int i;
- int work_done = 0;
- igb_irq_disable(adapter);
- adapter->flags |= IGB_FLAG_IN_NETPOLL;
-
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_clean_tx_irq(&adapter->tx_ring[i]);
+ if (!adapter->msix_entries) {
+ igb_irq_disable(adapter);
+ napi_schedule(&adapter->rx_ring[0].napi);
+ return;
+ }
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_clean_rx_irq_adv(&adapter->rx_ring[i],
- &work_done,
- adapter->rx_ring[i].napi.weight);
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igb_ring *tx_ring = &adapter->tx_ring[i];
+ wr32(E1000_EIMC, tx_ring->eims_value);
+ igb_clean_tx_irq(tx_ring);
+ wr32(E1000_EIMS, tx_ring->eims_value);
+ }
- adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
- igb_irq_enable(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = &adapter->rx_ring[i];
+ wr32(E1000_EIMC, rx_ring->eims_value);
+ napi_schedule(&rx_ring->napi);
+ }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -4482,12 +5264,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result;
int err;
- if (adapter->need_ioport)
- err = pci_enable_device(pdev);
- else
- err = pci_enable_device_mem(pdev);
-
- if (err) {
+ if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
@@ -4540,4 +5317,172 @@ static void igb_io_resume(struct pci_dev *pdev)
igb_get_hw_control(adapter);
}
+static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
+{
+ u32 reg_data;
+
+ reg_data = rd32(E1000_VMOLR(vfn));
+ reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
+ E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
+ E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
+ E1000_VMOLR_AUPE | /* Accept untagged packets */
+ E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ wr32(E1000_VMOLR(vfn), reg_data);
+}
+
+static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
+ int vfn)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 vmolr;
+
+ vmolr = rd32(E1000_VMOLR(vfn));
+ vmolr &= ~E1000_VMOLR_RLPML_MASK;
+ vmolr |= size | E1000_VMOLR_LPE;
+ wr32(E1000_VMOLR(vfn), vmolr);
+
+ return 0;
+}
+
+static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
+{
+ u32 reg_data;
+
+ reg_data = rd32(E1000_RAH(entry));
+ reg_data &= ~E1000_RAH_POOL_MASK;
+ reg_data |= E1000_RAH_POOL_1 << pool;
+ wr32(E1000_RAH(entry), reg_data);
+}
+
+static void igb_set_mc_list_pools(struct igb_adapter *adapter,
+ int entry_count, u16 total_rar_filters)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int i = adapter->vfs_allocated_count + 1;
+
+ if ((i + entry_count) < total_rar_filters)
+ total_rar_filters = i + entry_count;
+
+ for (; i < total_rar_filters; i++)
+ igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
+}
+
+static int igb_set_vf_mac(struct igb_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */
+
+ igb_rar_set(hw, mac_addr, rar_entry);
+
+ memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
+
+ igb_set_rah_pool(hw, vf, rar_entry);
+
+ return 0;
+}
+
+static void igb_vmm_control(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg_data;
+
+ if (!adapter->vfs_allocated_count)
+ return;
+
+ /* VF's need PF reset indication before they
+ * can send/receive mail */
+ reg_data = rd32(E1000_CTRL_EXT);
+ reg_data |= E1000_CTRL_EXT_PFRSTD;
+ wr32(E1000_CTRL_EXT, reg_data);
+
+ igb_vmdq_set_loopback_pf(hw, true);
+ igb_vmdq_set_replication_pf(hw, true);
+}
+
+#ifdef CONFIG_PCI_IOV
+static ssize_t igb_show_num_vfs(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct igb_adapter *adapter = netdev_priv(to_net_dev(dev));
+
+ return sprintf(buf, "%d\n", adapter->vfs_allocated_count);
+}
+
+static ssize_t igb_set_num_vfs(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int num_vfs, i;
+ unsigned char mac_addr[ETH_ALEN];
+ int err;
+
+ sscanf(buf, "%u", &num_vfs);
+
+ if (num_vfs > 7)
+ num_vfs = 7;
+
+ /* value unchanged do nothing */
+ if (num_vfs == adapter->vfs_allocated_count)
+ return count;
+
+ if (netdev->flags & IFF_UP)
+ igb_close(netdev);
+
+ igb_reset_interrupt_capability(adapter);
+ igb_free_queues(adapter);
+ adapter->tx_ring = NULL;
+ adapter->rx_ring = NULL;
+ adapter->vfs_allocated_count = 0;
+
+ /* reclaim resources allocated to VFs since we are changing count */
+ if (adapter->vf_data) {
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(pdev);
+ msleep(500);
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ msleep(100);
+ dev_info(&pdev->dev, "IOV Disabled\n");
+ }
+
+ if (num_vfs) {
+ adapter->vf_data = kcalloc(num_vfs,
+ sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!adapter->vf_data) {
+ dev_err(&pdev->dev, "Could not allocate VF private "
+ "data - IOV enable failed\n");
+ } else {
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (!err) {
+ adapter->vfs_allocated_count = num_vfs;
+ dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs);
+ for (i = 0; i < adapter->vfs_allocated_count; i++) {
+ random_ether_addr(mac_addr);
+ igb_set_vf_mac(adapter, i, mac_addr);
+ }
+ } else {
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ }
+ }
+ }
+
+ igb_set_interrupt_capability(adapter);
+ igb_alloc_queues(adapter);
+ igb_reset(adapter);
+
+ if (netdev->flags & IFF_UP)
+ igb_open(netdev);
+
+ return count;
+}
+#endif /* CONFIG_PCI_IOV */
/* igb_main.c */