author     Len Brown <len.brown@intel.com>   2010-06-01 22:53:36 -0400
committer  Len Brown <len.brown@intel.com>   2010-06-01 22:53:36 -0400
commit     b42f5b0f0fd8c1c442c1b29a3fbcb338e8bd7732 (patch)
tree       194e13dfa85d2d2af8bd125acd80a445ee0def62 /drivers/net/e1000e
parent     fe955682d2153b35dffcf1673dff0491096a3f0a (diff)
parent     0a76a34ff0804f1f413807b2e2d12117c2b602ca (diff)
Merge branches 'bugzilla-14668' and 'misc-2.6.35' into release
Diffstat (limited to 'drivers/net/e1000e')
-rw-r--r--   drivers/net/e1000e/82571.c     29
-rw-r--r--   drivers/net/e1000e/defines.h    9
-rw-r--r--   drivers/net/e1000e/e1000.h     26
-rw-r--r--   drivers/net/e1000e/es2lan.c    11
-rw-r--r--   drivers/net/e1000e/ethtool.c   48
-rw-r--r--   drivers/net/e1000e/hw.h         5
-rw-r--r--   drivers/net/e1000e/ich8lan.c  391
-rw-r--r--   drivers/net/e1000e/lib.c       60
-rw-r--r--   drivers/net/e1000e/netdev.c   866
-rw-r--r--   drivers/net/e1000e/param.c     25
-rw-r--r--   drivers/net/e1000e/phy.c       21
11 files changed, 1029 insertions(+), 462 deletions(-)
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 90155552ea0..f654db9121d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -234,9 +234,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -271,6 +268,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /*
+ * ARC supported; valid only if manageability features are
+ * enabled.
+ */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
break;
case e1000_82574:
case e1000_82583:
@@ -281,6 +288,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
default:
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+
+ /* FWSM register */
+ mac->has_fwsm = true;
break;
}
@@ -323,7 +333,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
/*
- * Initialze device specific counter of SMBI acquisition
+ * Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
@@ -993,9 +1003,10 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
+ e1000e_enable_tx_pkt_filtering(hw);
+ /* fall through */
case e1000_82574:
case e1000_82583:
- e1000e_enable_tx_pkt_filtering(hw);
reg_data = er32(GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
ew32(GCR, reg_data);
@@ -1137,8 +1148,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -1642,8 +1651,6 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
@@ -1845,7 +1852,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
@@ -1862,7 +1869,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .pba = 20,
+ .pba = 36,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
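
For readers reconstructing the post-patch control flow in e1000_init_hw_82571(), here is a condensed sketch (illustrative only, not the driver source) of the switch after the Tx-packet-filtering hunk above: only 82573 enables filtering, and the explicit fall-through keeps the GCR L1 setup shared by all three parts.

    switch (mac->type) {
    case e1000_82573:
            /* only 82573 needs Tx packet filtering enabled here */
            e1000e_enable_tx_pkt_filtering(hw);
            /* fall through */
    case e1000_82574:
    case e1000_82583:
            /* shared by 82573/82574/82583 */
            reg_data = er32(GCR);
            reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
            ew32(GCR, reg_data);
            break;
    default:
            break;
    }
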
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26d689..4dc02c71ffd 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -138,6 +138,11 @@
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
+#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
+
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
@@ -214,6 +219,8 @@
#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -622,6 +629,8 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ee32b9b27a9..c0b3db40bd7 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -43,25 +43,16 @@
struct e1000_info;
-#define e_printk(level, adapter, format, arg...) \
- printk(level "%s: %s: " format, pci_name(adapter->pdev), \
- adapter->netdev->name, ## arg)
-
-#ifdef DEBUG
#define e_dbg(format, arg...) \
- e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
-#else
-#define e_dbg(format, arg...) do { (void)(hw); } while (0)
-#endif
-
+ netdev_dbg(hw->adapter->netdev, format, ## arg)
#define e_err(format, arg...) \
- e_printk(KERN_ERR, adapter, format, ## arg)
+ netdev_err(adapter->netdev, format, ## arg)
#define e_info(format, arg...) \
- e_printk(KERN_INFO, adapter, format, ## arg)
+ netdev_info(adapter->netdev, format, ## arg)
#define e_warn(format, arg...) \
- e_printk(KERN_WARNING, adapter, format, ## arg)
+ netdev_warn(adapter->netdev, format, ## arg)
#define e_notice(format, arg...) \
- e_printk(KERN_NOTICE, adapter, format, ## arg)
+ netdev_notice(adapter->netdev, format, ## arg)
/* Interrupt modes, as used by the IntMode parameter */
@@ -159,6 +150,9 @@ struct e1000_info;
#define HV_M_STATUS_SPEED_1000 0x0200
#define HV_M_STATUS_LINK_UP 0x0040
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
enum e1000_boards {
board_82571,
board_82572,
@@ -195,6 +189,8 @@ struct e1000_buffer {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
+ unsigned int segs;
+ unsigned int bytecount;
u16 mapped_as_page;
};
/* Rx */
@@ -370,6 +366,8 @@ struct e1000_adapter {
struct work_struct update_phy_task;
struct work_struct led_blink_task;
struct work_struct print_hang_task;
+
+ bool idle_check;
};
struct e1000_info {
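
As a usage sketch of the converted logging macros above (illustrative only; it assumes a local named adapter in scope, which is what the macros expect), a call such as the one below now routes through netdev_err() and is automatically prefixed with the driver and interface name:

    static void example_report(struct e1000_adapter *adapter)
    {
            /* expands to netdev_err(adapter->netdev, "Hardware Error\n") */
            e_err("Hardware Error\n");
    }
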
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 27d21589a69..38d79a66905 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -221,9 +221,12 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC supported; valid only if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (er32(FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
/* Adaptive IFS not supported */
mac->adaptive_ifs = false;
@@ -1380,8 +1383,6 @@ static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330..2c521218102 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
netdev->features &= ~NETIF_F_TSO6;
}
- e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->flags |= FLAG_TSO_FORCE;
return 0;
}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
if (tx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
tx_ring->buffer_info[i].dma,
tx_ring->buffer_info[i].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (tx_ring->buffer_info[i].skb)
dev_kfree_skb(tx_ring->buffer_info[i].skb);
}
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
if (rx_ring->buffer_info[i].dma)
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
rx_ring->buffer_info[i].dma,
- 2048, PCI_DMA_FROMDEVICE);
+ 2048, DMA_FROM_DEVICE);
if (rx_ring->buffer_info[i].skb)
dev_kfree_skb(rx_ring->buffer_info[i].skb);
}
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ tx_ring->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- pci_map_single(pdev, skb->data, 2048,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ rx_ring->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
- pci_dma_sync_single_for_device(pdev,
+ dma_sync_single_for_device(&pdev->dev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
- pci_dma_sync_single_for_cpu(pdev,
+ dma_sync_single_for_cpu(&pdev->dev,
rx_ring->buffer_info[l].dma, 2048,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ret_val = e1000_check_lbtest_frame(
rx_ring->buffer_info[l].skb, 1024);
@@ -1736,6 +1737,12 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
dev_open(netdev);
} else {
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
+ clear_bit(__E1000_TESTING, &adapter->state);
+ dev_open(netdev);
+ set_bit(__E1000_TESTING, &adapter->state);
+ }
+
e_info("online testing starting\n");
/* Online tests */
if (e1000_link_test(adapter, &data[4]))
@@ -1747,6 +1754,9 @@ static void e1000_diag_test(struct net_device *netdev,
data[2] = 0;
data[3] = 0;
+ if (!if_running && (adapter->flags & FLAG_HAS_AMT))
+ dev_close(netdev);
+
clear_bit(__E1000_TESTING, &adapter->state);
}
msleep_interruptible(4 * 1000);
@@ -1889,7 +1899,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (adapter->itr_setting <= 3)
+ if (adapter->itr_setting <= 4)
ec->rx_coalesce_usecs = adapter->itr_setting;
else
ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1914,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
+ ((ec->rx_coalesce_usecs > 4) &&
(ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
- if (ec->rx_coalesce_usecs <= 3) {
+ if (ec->rx_coalesce_usecs == 4) {
+ adapter->itr = adapter->itr_setting = 4;
+ } else if (ec->rx_coalesce_usecs <= 3) {
adapter->itr = 20000;
adapter->itr_setting = ec->rx_coalesce_usecs;
} else {
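
A self-contained sketch of the rx_coalesce_usecs handling the ethtool.c hunks above introduce (only the cases visible in the patch are modeled; the E1000_MIN/MAX_ITR_USECS bounds checks are elided and the function name is hypothetical):

    /* value 2 is rejected, 4 is the new special setting,
     * 0/1/3 keep their existing meaning with itr fixed at 20000,
     * and e1000_get_coalesce() reports settings <= 4 verbatim */
    static int usecs_to_itr(unsigned int usecs,
                            unsigned int *itr_setting, unsigned int *itr)
    {
            if (usecs == 2)
                    return -1;              /* -EINVAL in the driver */
            if (usecs == 4) {
                    *itr_setting = 4;
                    *itr = 4;
            } else if (usecs <= 3) {
                    *itr_setting = usecs;
                    *itr = 20000;
            }
            /* larger values keep the original rate-based conversion */
            return 0;
    }
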
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 8bdcd5f24ef..5d1220d188d 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -208,6 +208,8 @@ enum e1e_registers {
E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
+ E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
E1000_GCR = 0x05B00, /* PCI-Ex Control */
E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
@@ -380,6 +382,7 @@ enum e1e_registers {
#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
@@ -828,6 +831,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
bool adaptive_ifs;
+ bool has_fwsm;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
@@ -898,6 +902,7 @@ struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
+ u16 refresh_time; /* Flow control refresh timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_mode current_mode; /* FC mode in effect */
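
The new E1000_MDEF(_n) helper above indexes the eight Management Decision Filter registers; a minimal usage sketch (the loop mirrors the 82574/82583 manageability code added in netdev.c later in this patch):

    u32 mdef;
    int i;

    for (i = 0; i < 8; i++) {
            mdef = er32(MDEF(i));   /* reads E1000_MDEF_BASE + i * 4 */
            /* inspect or program decision filter i here */
    }
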
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157e9c8..b2507d93de9 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
#define E1000_ICH_MNG_IAMT_MODE 0x2
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
+ u32 ctrl;
s32 ret_val = 0;
phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+ /*
+ * The MAC-PHY interconnect may still be in SMBus mode
+ * after Sx->S0. Toggle the LANPHYPC Value bit to force
+ * the interconnect to PCIe mode, but only if there is no
+ * firmware present otherwise firmware will have done it.
+ */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+ ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+ ew32(CTRL, ctrl);
+ udelay(10);
+ ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+ ew32(CTRL, ctrl);
+ msleep(50);
+ }
+
+ /*
+	 * Reset the PHY before any access to it. Doing so ensures that
+ * the PHY is in a known good state before we read/write PHY registers.
+ * The generic reset is sufficient here, because we haven't determined
+ * the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
phy->id = e1000_phy_unknown;
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
@@ -300,6 +330,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_82577;
phy->ops.get_info = e1000_get_phy_info_82577;
phy->ops.commit = e1000e_phy_sw_reset;
+ break;
case e1000_phy_82578:
phy->ops.check_polarity = e1000_check_polarity_m88;
phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
@@ -472,8 +503,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
if (mac->type == e1000_ich8lan)
mac->rar_entry_count--;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid = true;
+ /* FWSM register */
+ mac->has_fwsm = true;
+ /* ARC subsystem not supported */
+ mac->arc_subsystem_valid = false;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
@@ -657,8 +690,6 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
mutex_unlock(&nvm_mutex);
-
- return;
}
static DEFINE_MUTEX(swflag_mutex);
@@ -737,8 +768,6 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
ew32(EXTCNF_CTRL, extcnf_ctrl);
mutex_unlock(&swflag_mutex);
-
- return;
}
/**
@@ -785,11 +814,16 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
+ struct e1000_adapter *adapter = hw->adapter;
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
- s32 ret_val;
+ s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
+ if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
+ !(hw->mac.type == e1000_pchlan))
+ return ret_val;
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
@@ -801,97 +835,87 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
- if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
- (hw->mac.type == e1000_pchlan)) {
- struct e1000_adapter *adapter = hw->adapter;
-
- /* Check if SW needs to configure the PHY */
- if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
- (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
- (hw->mac.type == e1000_pchlan))
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
- else
- sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+ if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+ (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
+ (hw->mac.type == e1000_pchlan))
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+ else
+ sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
- data = er32(FEXTNVM);
- if (!(data & sw_cfg_mask))
- goto out;
+ data = er32(FEXTNVM);
+ if (!(data & sw_cfg_mask))
+ goto out;
- /* Wait for basic configuration completes before proceeding */
- e1000_lan_init_done_ich8lan(hw);
+ /*
+ * Make sure HW does not configure LCD from PHY
+ * extended configuration before SW configuration
+ */
+ data = er32(EXTCNF_CTRL);
+ if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ goto out;
+
+ cnf_size = er32(EXTCNF_SIZE);
+ cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+ cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+ if (!cnf_size)
+ goto out;
+
+ cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+ cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+ if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
+ (hw->mac.type == e1000_pchlan)) {
/*
- * Make sure HW does not configure LCD from PHY
- * extended configuration before SW configuration
+ * HW configures the SMBus address and LEDs when the
+ * OEM and LCD Write Enable bits are set in the NVM.
+ * When both NVM bits are cleared, SW will configure
+ * them instead.
*/
- data = er32(EXTCNF_CTRL);
- if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+ data = er32(STRAP);
+ data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+ reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
+ reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
+ reg_data);
+ if (ret_val)
goto out;
- cnf_size = er32(EXTCNF_SIZE);
- cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
- cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
- if (!cnf_size)
+ data = er32(LEDCTL);
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+ (u16)data);
+ if (ret_val)
goto out;
+ }
- cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
- cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
-
- if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
- (hw->mac.type == e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
- * OEM and LCD Write Enable bits are set in the NVM.
- * When both NVM bits are cleared, SW will configure
- * them instead.
- */
- data = er32(STRAP);
- data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
- reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
- reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
- ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
- reg_data);
- if (ret_val)
- goto out;
-
- data = er32(LEDCTL);
- ret_val = e1000_write_phy_reg_hv_locked(hw,
- HV_LED_CONFIG,
- (u16)data);
- if (ret_val)
- goto out;
- }
- /* Configure LCD from extended configuration region. */
+ /* Configure LCD from extended configuration region. */
- /* cnf_base_addr is in DWORD */
- word_addr = (u16)(cnf_base_addr << 1);
+ /* cnf_base_addr is in DWORD */
+ word_addr = (u16)(cnf_base_addr << 1);
- for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
- if (ret_val)
- goto out;
+ for (i = 0; i < cnf_size; i++) {
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+ &reg_data);
+ if (ret_val)
+ goto out;
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
- 1, &reg_addr);
- if (ret_val)
- goto out;
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+ 1, &reg_addr);
+ if (ret_val)
+ goto out;
- /* Save off the PHY page for future writes. */
- if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
- phy_page = reg_data;
- continue;
- }
+ /* Save off the PHY page for future writes. */
+ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+ phy_page = reg_data;
+ continue;
+ }
- reg_addr &= PHY_REG_MASK;
- reg_addr |= phy_page;
+ reg_addr &= PHY_REG_MASK;
+ reg_addr |= phy_page;
- ret_val = phy->ops.write_reg_locked(hw,
- (u32)reg_addr,
- reg_data);
- if (ret_val)
- goto out;
- }
+ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+ reg_data);
+ if (ret_val)
+ goto out;
}
out:
@@ -1229,30 +1253,26 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
}
/**
- * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
* @hw: pointer to the HW structure
- *
- * Resets the PHY
- * This is a function pointer entry point called by drivers
- * or other shared routines.
**/
-static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 reg;
- ret_val = e1000e_phy_hw_reset_generic(hw);
- if (ret_val)
- return ret_val;
-
- /* Allow time for h/w to get to a quiescent state after reset */
- mdelay(10);
+ if (e1000_check_reset_block(hw))
+ goto out;
/* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan) {
+ switch (hw->mac.type) {
+ case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
- return ret_val;
+ goto out;
+ break;
+ default:
+ break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -1265,11 +1285,32 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Configure the LCD with the OEM bits in NVM */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_oem_bits_config_ich8lan(hw, true);
out:
- return 0;
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
+
+out:
+ return ret_val;
}
/**
@@ -1622,7 +1663,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Check if the flash descriptor is valid */
if (hsfsts.hsf_status.fldesvalid == 0) {
e_dbg("Flash descriptor invalid. "
- "SW Sequencing must be used.");
+ "SW Sequencing must be used.\n");
return -E1000_ERR_NVM;
}
@@ -1671,7 +1712,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
- e_dbg("Flash controller busy, cannot get access");
+ e_dbg("Flash controller busy, cannot get access\n");
}
}
@@ -1822,7 +1863,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
continue;
} else if (hsfsts.hsf_status.flcdone == 0) {
e_dbg("Timeout error - flash cycle "
- "did not complete.");
+ "did not complete.\n");
break;
}
}
@@ -1908,18 +1949,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -1975,8 +2012,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val) {
/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
e_dbg("Flash commit failed.\n");
- nvm->ops.release(hw);
- goto out;
+ goto release;
}
/*
@@ -1987,18 +2023,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
+
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
(u8)(data >> 8));
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/*
* And invalidate the previously valid segment by setting
@@ -2008,10 +2041,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
- if (ret_val) {
- nvm->ops.release(hw);
- goto out;
- }
+ if (ret_val)
+ goto release;
/* Great! Everything worked, we can now clear the cached entries. */
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -2019,14 +2050,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
+release:
nvm->ops.release(hw);
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
- e1000e_reload_nvm(hw);
- msleep(10);
+ if (!ret_val) {
+ e1000e_reload_nvm(hw);
+ msleep(10);
+ }
out:
if (ret_val)
@@ -2487,9 +2521,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
- if (ret_val) {
+ if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
- }
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
@@ -2528,14 +2561,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
- /* Clear PHY Reset Asserted bit */
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- }
-
/*
- * PHY HW reset requires MAC CORE reset at the same
+ * Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
@@ -2549,39 +2576,16 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
- /* Perform any necessary post-reset workarounds */
- if (hw->mac.type == e1000_pchlan)
- ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
-
- if (ctrl & E1000_CTRL_PHY_RST)
+ if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
+ if (ret_val)
+ goto out;
- if (hw->mac.type >= e1000_ich10lan) {
- e1000_lan_init_done_ich8lan(hw);
- } else {
- ret_val = e1000e_get_auto_rd_done(hw);
- if (ret_val) {
- /*
- * When auto config read does not complete, do not
- * return with an error. This can happen in situations
- * where there is no eeprom and prevents getting link.
- */
- e_dbg("Auto Read Done did not complete\n");
- }
- }
- /* Dummy read to clear the phy wakeup bit after lcd reset */
- if (hw->mac.type == e1000_pchlan)
- e1e_rphy(hw, BM_WUC, &reg);
-
- ret_val = e1000_sw_lcd_config_ich8lan(hw);
- if (ret_val)
- goto out;
-
- if (hw->mac.type == e1000_pchlan) {
- ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+ ret_val = e1000_post_phy_reset_ich8lan(hw);
if (ret_val)
goto out;
}
+
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@@ -2748,8 +2752,6 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
reg = er32(RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
ew32(RFCTL, reg);
-
- return;
}
/**
@@ -2799,6 +2801,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
ew32(FCTTV, hw->fc.pause_time);
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
+ ew32(FCRTV_PCH, hw->fc.refresh_time);
+
ret_val = hw->phy.ops.write_reg(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 27),
hw->fc.pause_time);
@@ -3127,8 +3131,6 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
default:
break;
}
-
- return;
}
/**
@@ -3265,33 +3267,50 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
}
/**
- * e1000_get_cfg_done_ich8lan - Read config done bit
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
* @hw: pointer to the HW structure
*
- * Read the management control register for the config done bit for
- * completion status. NOTE: silicon which is EEPROM-less will fail trying
- * to read the config done bit, so an error is *ONLY* logged and returns
- * 0. If we were to return with error, EEPROM-less silicon
- * would not be able to be reset or change link.
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so only an error is logged and continues. If we were
+ * to return with error, EEPROM-less silicon would not be able to be reset
+ * or change link.
**/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
+ s32 ret_val = 0;
u32 bank = 0;
+ u32 status;
- if (hw->mac.type >= e1000_pchlan) {
- u32 status = er32(STATUS);
+ e1000e_get_cfg_done(hw);
- if (status & E1000_STATUS_PHYRA)
- ew32(STATUS, status & ~E1000_STATUS_PHYRA);
- else
- e_dbg("PHY Reset Asserted not set - needs delay\n");
+ /* Wait for indication from h/w that it has completed basic config */
+ if (hw->mac.type >= e1000_ich10lan) {
+ e1000_lan_init_done_ich8lan(hw);
+ } else {
+ ret_val = e1000e_get_auto_rd_done(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ e_dbg("Auto Read Done did not complete\n");
+ ret_val = 0;
+ }
}
- e1000e_get_cfg_done(hw);
+ /* Clear PHY Reset Asserted bit */
+ status = er32(STATUS);
+ if (status & E1000_STATUS_PHYRA)
+ ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+ else
+ e_dbg("PHY Reset Asserted not set - needs delay\n");
/* If EEPROM is not marked present, init the IGP 3 PHY manually */
- if ((hw->mac.type != e1000_ich10lan) &&
- (hw->mac.type != e1000_pchlan)) {
+ if (hw->mac.type <= e1000_ich9lan) {
if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
(hw->phy.type == e1000_phy_igp_3)) {
e1000e_phy_init_script_igp3(hw);
@@ -3300,11 +3319,11 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
/* Maybe we should do a basic PHY config */
e_dbg("EEPROM not present\n");
- return -E1000_ERR_CONFIG;
+ ret_val = -E1000_ERR_CONFIG;
}
}
- return 0;
+ return ret_val;
}
/**
@@ -3320,8 +3339,6 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw) ||
hw->phy.ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
-
- return;
}
/**
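
The LANPHYPC toggle added to e1000_init_phy_params_pchlan() above is the core of the Sx->S0 recovery: when no firmware owns the PHY, software briefly drives the LANPHYPC pin to force the MAC-PHY interconnect back to PCIe mode. A condensed sketch, using the driver's er32/ew32 accessors and the delays exactly as in the patch:

    if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
            u32 ctrl = er32(CTRL);

            ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;   /* software owns LANPHYPC */
            ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;     /* drive the value low */
            ew32(CTRL, ctrl);
            udelay(10);
            ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;  /* release the override */
            ew32(CTRL, ctrl);
            msleep(50);                             /* let the interconnect settle */
    }
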
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0de27c..a968e3a416a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
u32 status;
status = er32(STATUS);
- if (status & E1000_STATUS_SPEED_1000) {
+ if (status & E1000_STATUS_SPEED_1000)
*speed = SPEED_1000;
- e_dbg("1000 Mbs, ");
- } else if (status & E1000_STATUS_SPEED_100) {
+ else if (status & E1000_STATUS_SPEED_100)
*speed = SPEED_100;
- e_dbg("100 Mbs, ");
- } else {
+ else
*speed = SPEED_10;
- e_dbg("10 Mbs, ");
- }
- if (status & E1000_STATUS_FD) {
+ if (status & E1000_STATUS_FD)
*duplex = FULL_DUPLEX;
- e_dbg("Full Duplex\n");
- } else {
+ else
*duplex = HALF_DUPLEX;
- e_dbg("Half Duplex\n");
- }
+
+ e_dbg("%u Mbps, %s Duplex\n",
+ *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+ *duplex == FULL_DUPLEX ? "Full" : "Half");
return 0;
}
@@ -2275,6 +2272,11 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
u32 hicr;
u8 i;
+ if (!(hw->mac.arc_subsystem_valid)) {
+ e_dbg("ARC subsystem not valid.\n");
+ return -E1000_ERR_HOST_INTERFACE_COMMAND;
+ }
+
/* Check that the host interface is enabled. */
hicr = er32(HICR);
if ((hicr & E1000_HICR_EN) == 0) {
@@ -2518,10 +2520,11 @@ s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
}
/**
- * e1000e_enable_mng_pass_thru - Enable processing of ARP's
+ * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies the hardware needs to leave interface enabled so that frames can
+ * be directed to and from the management interface.
**/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -2531,11 +2534,10 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
manc = er32(MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
- return ret_val;
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
+ goto out;
- if (hw->mac.arc_subsystem_valid) {
+ if (hw->mac.has_fwsm) {
fwsm = er32(FWSM);
factps = er32(FACTPS);
@@ -2543,16 +2545,28 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
((fwsm & E1000_FWSM_MODE_MASK) ==
(e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
ret_val = true;
- return ret_val;
+ goto out;
}
- } else {
- if ((manc & E1000_MANC_SMBUS_EN) &&
- !(manc & E1000_MANC_ASF_EN)) {
+ } else if ((hw->mac.type == e1000_82574) ||
+ (hw->mac.type == e1000_82583)) {
+ u16 data;
+
+ factps = er32(FACTPS);
+ e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+ if (!(factps & E1000_FACTPS_MNGCG) &&
+ ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+ (e1000_mng_mode_pt << 13))) {
ret_val = true;
- return ret_val;
+ goto out;
}
+ } else if ((manc & E1000_MANC_SMBUS_EN) &&
+ !(manc & E1000_MANC_ASF_EN)) {
+ ret_val = true;
+ goto out;
}
+out:
return ret_val;
}
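
The lib.c hunk above splits the old arc_subsystem_valid test into three cases; a condensed skeleton of the resulting decision order (the two helper names are hypothetical, introduced only to keep the sketch short):

    /* sketch of e1000e_enable_mng_pass_thru() after the patch */
    bool mng_pass_thru_needed(struct e1000_hw *hw)
    {
            u32 manc = er32(MANC);

            if (!(manc & E1000_MANC_RCV_TCO_EN))
                    return false;
            if (hw->mac.has_fwsm)
                    return fwsm_reports_pt_mode(hw);  /* FACTPS + FWSM check, hypothetical helper */
            if (hw->mac.type == e1000_82574 || hw->mac.type == e1000_82583)
                    return nvm_reports_pt_mode(hw);   /* NVM_INIT_CONTROL2_REG check, hypothetical helper */
            return (manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN);
    }
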
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index dbf81788bb4..24507f3b8b1 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -45,11 +47,12 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos_params.h>
+#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include "e1000.h"
-#define DRV_VERSION "1.0.2-k2"
+#define DRV_VERSION "1.0.2-k4"
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pchlan] = &e1000_pch_info,
};
+struct e1000_reg_info {
+ u32 ofs;
+ char *name;
+};
+
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+ /* General Registers */
+ {E1000_CTRL, "CTRL"},
+ {E1000_STATUS, "STATUS"},
+ {E1000_CTRL_EXT, "CTRL_EXT"},
+
+ /* Interrupt Registers */
+ {E1000_ICR, "ICR"},
+
+ /* RX Registers */
+ {E1000_RCTL, "RCTL"},
+ {E1000_RDLEN, "RDLEN"},
+ {E1000_RDH, "RDH"},
+ {E1000_RDT, "RDT"},
+ {E1000_RDTR, "RDTR"},
+ {E1000_RXDCTL(0), "RXDCTL"},
+ {E1000_ERT, "ERT"},
+ {E1000_RDBAL, "RDBAL"},
+ {E1000_RDBAH, "RDBAH"},
+ {E1000_RDFH, "RDFH"},
+ {E1000_RDFT, "RDFT"},
+ {E1000_RDFHS, "RDFHS"},
+ {E1000_RDFTS, "RDFTS"},
+ {E1000_RDFPC, "RDFPC"},
+
+ /* TX Registers */
+ {E1000_TCTL, "TCTL"},
+ {E1000_TDBAL, "TDBAL"},
+ {E1000_TDBAH, "TDBAH"},
+ {E1000_TDLEN, "TDLEN"},
+ {E1000_TDH, "TDH"},
+ {E1000_TDT, "TDT"},
+ {E1000_TIDV, "TIDV"},
+ {E1000_TXDCTL(0), "TXDCTL"},
+ {E1000_TADV, "TADV"},
+ {E1000_TARC(0), "TARC"},
+ {E1000_TDFH, "TDFH"},
+ {E1000_TDFT, "TDFT"},
+ {E1000_TDFHS, "TDFHS"},
+ {E1000_TDFTS, "TDFTS"},
+ {E1000_TDFPC, "TDFPC"},
+
+ /* List Terminator */
+ {}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+ int n = 0;
+ char rname[16];
+ u32 regs[8];
+
+ switch (reginfo->ofs) {
+ case E1000_RXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_RXDCTL(n));
+ break;
+ case E1000_TXDCTL(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TXDCTL(n));
+ break;
+ case E1000_TARC(0):
+ for (n = 0; n < 2; n++)
+ regs[n] = __er32(hw, E1000_TARC(n));
+ break;
+ default:
+ printk(KERN_INFO "%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
+ return;
+ }
+
+ snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+ printk(KERN_INFO "%-15s ", rname);
+ for (n = 0; n < 2; n++)
+ printk(KERN_CONT "%08x ", regs[n]);
+ printk(KERN_CONT "\n");
+}
+
+
+/*
+ * e1000e_dump - Print registers, tx-ring and rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_reg_info *reginfo;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc;
+ struct my_u0 { u64 a; u64 b; } *u0;
+ struct e1000_buffer *buffer_info;
+ struct e1000_ring *rx_ring = adapter->rx_ring;
+ union e1000_rx_desc_packet_split *rx_desc_ps;
+ struct e1000_rx_desc *rx_desc;
+ struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
+ u32 staterr;
+ int i = 0;
+
+ if (!netif_msg_hw(adapter))
+ return;
+
+ /* Print netdevice Info */
+ if (netdev) {
+ dev_info(&adapter->pdev->dev, "Net device Info\n");
+ printk(KERN_INFO "Device Name state "
+ "trans_start last_rx\n");
+ printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
+ netdev->name,
+ netdev->state,
+ netdev->trans_start,
+ netdev->last_rx);
+ }
+
+ /* Print Registers */
+ dev_info(&adapter->pdev->dev, "Register Dump\n");
+ printk(KERN_INFO " Register Name Value\n");
+ for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+ reginfo->name; reginfo++) {
+ e1000_regdump(hw, reginfo);
+ }
+
+ /* Print TX Ring Summary */
+ if (!netdev || !netif_running(netdev))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
+ " leng ntw timestamp\n");
+ buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+ printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
+
+ /* Print TX Rings */
+ if (!netif_msg_tx_done(adapter))
+ goto rx_ring_summary;
+
+ dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+ /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+ *
+ * Legacy Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] (Reserved on Write Back) |
+ * +--------------------------------------------------------------+
+ * 8 | Special | CSS | Status | CMD | CSO | Length |
+ * +--------------------------------------------------------------+
+ * 63 48 47 36 35 32 31 24 23 16 15 0
+ *
+ * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+ * 63 48 47 40 39 32 31 16 15 8 7 0
+ * +----------------------------------------------------------------+
+ * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
+ * +----------------------------------------------------------------+
+ * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * Extended Data Descriptor (DTYP=0x1)
+ * +----------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +----------------------------------------------------------------+
+ * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
+ * +----------------------------------------------------------------+
+ * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
+ */
+ printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Legacy format\n");
+ printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Context format\n");
+ printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
+ " [bi->dma ] leng ntw timestamp bi->skb "
+ "<-- Ext Data format\n");
+ for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ tx_desc = E1000_TX_DESC(*tx_ring, i);
+ buffer_info = &tx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)tx_desc;
+ printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
+ "%04X %3X %016llX %p",
+ (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
+ le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->length,
+ buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
+ buffer_info->skb);
+ if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ buffer_info->length, true);
+ }
+
+ /* Print RX Rings Summary */
+rx_ring_summary:
+ dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+ printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ printk(KERN_INFO " %5d %5X %5X\n", 0,
+ rx_ring->next_to_use, rx_ring->next_to_clean);
+
+ /* Print RX Rings */
+ if (!netif_msg_rx_status(adapter))
+ goto exit;
+
+ dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+ switch (adapter->rx_ps_pages) {
+ case 1:
+ case 2:
+ case 3:
+ /* [Extended] Packet Split Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * 0 | Buffer Address 0 [63:0] |
+ * +-----------------------------------------------------+
+ * 8 | Buffer Address 1 [63:0] |
+ * +-----------------------------------------------------+
+ * 16 | Buffer Address 2 [63:0] |
+ * +-----------------------------------------------------+
+ * 24 | Buffer Address 3 [63:0] |
+ * +-----------------------------------------------------+
+ */
+ printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
+ "[buffer 1 63:0 ] "
+ "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
+ "[bi->skb] <-- Ext Pkt Split format\n");
+ /* [Extended] Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 13 12 8 7 4 3 0
+ * +------------------------------------------------------+
+ * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
+ * | Checksum | Ident | | Queue | | Type |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
+ */
+ printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
+ "[vl l0 ee es] "
+ "[ l3 l2 l1 hs] [reserved ] ---------------- "
+ "[bi->skb] <-- Ext Rx Write-Back format\n");
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+ u1 = (struct my_u1 *)rx_desc_ps;
+ staterr =
+ le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+ if (staterr & E1000_RXD_STAT_DD) {
+ /* Descriptor Done */
+ printk(KERN_INFO "RWB[0x%03X] %016llX "
+ "%016llX %016llX %016llX "
+ "---------------- %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ buffer_info->skb);
+ } else {
+ printk(KERN_INFO "R [0x%03X] %016llX "
+ "%016llX %016llX %016llX %016llX %p", i,
+ le64_to_cpu(u1->a),
+ le64_to_cpu(u1->b),
+ le64_to_cpu(u1->c),
+ le64_to_cpu(u1->d),
+ (u64)buffer_info->dma,
+ buffer_info->skb);
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ phys_to_virt(buffer_info->dma),
+ adapter->rx_ps_bsize0, true);
+ }
+
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+ }
+ break;
+ default:
+ case 0:
+ /* Legacy Receive Descriptor Format
+ *
+ * +-----------------------------------------------------+
+ * | Buffer Address [63:0] |
+ * +-----------------------------------------------------+
+ * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
+ * +-----------------------------------------------------+
+ * 63 48 47 40 39 32 31 16 15 0
+ */
+ printk(KERN_INFO "Rl[desc] [address 63:0 ] "
+ "[vl er S cks ln] [bi->dma ] [bi->skb] "
+ "<-- Legacy format\n");
+ for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
+ rx_desc = E1000_RX_DESC(*rx_ring, i);
+ buffer_info = &rx_ring->buffer_info[i];
+ u0 = (struct my_u0 *)rx_desc;
+ printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
+ "%016llX %p",
+ i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
+ (u64)buffer_info->dma, buffer_info->skb);
+ if (i == rx_ring->next_to_use)
+ printk(KERN_CONT " NTU\n");
+ else if (i == rx_ring->next_to_clean)
+ printk(KERN_CONT " NTC\n");
+ else
+ printk(KERN_CONT "\n");
+
+ if (netif_msg_pktdata(adapter))
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, phys_to_virt(buffer_info->dma),
+ adapter->rx_buffer_len, true);
+ }
+ }
+
+exit:
+ return;
+}
+
/**
* e1000_desc_unused - calculate if we have unused descriptors
**/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb;
map_skb:
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
break;
@@ -190,26 +548,23 @@ map_skb:
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, adapter->hw.hw_addr + rx_ring->tail);
+ }
i++;
if (i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, ps_page->dma)) {
+ ps_page->dma = dma_map_page(&pdev->dev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ ps_page->dma)) {
dev_err(&adapter->pdev->dev,
"RX DMA page map failed\n");
adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
buffer_info->skb = skb;
- buffer_info->dma = pci_map_single(pdev, skb->data,
+ buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
dev_err(&pdev->dev, "RX DMA map failed\n");
adapter->rx_dma_failed++;
/* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
+ if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+ }
+
i++;
if (i == rx_ring->count)
i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
no_buffers:
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
-
- if (!(i--))
- i = (rx_ring->count - 1);
-
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- /*
- * Hardware increments by 16 bytes, but packet split
- * descriptors are 32 bytes...so we increment tail
- * twice as much.
- */
- writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
- }
+ rx_ring->next_to_use = i;
}
/**
@@ -366,10 +714,10 @@ check_page:
}
if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
+ buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev,
+ dma_unmap_single(&pdev->dev,
buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
else
- pci_unmap_single(adapter->pdev, buffer_info->dma,
- buffer_info->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
cleaned = (i == eop);
if (cleaned) {
- struct sk_buff *skb = buffer_info->skb;
- unsigned int segs, bytecount;
- segs = skb_shinfo(skb)->gso_segs ?: 1;
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * skb_headlen(skb)) +
- skb->len;
- total_tx_packets += segs;
- total_tx_bytes += bytecount;
+ total_tx_packets += buffer_info->segs;
+ total_tx_bytes += buffer_info->bytecount;
}
e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
cleaned = 1;
cleaned_count++;
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
/* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
* kmap_atomic, so we can't hold the mapping
* very long
*/
- pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
memcpy(skb_tail_pointer(skb), vaddr, l1);
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
- pci_dma_sync_single_for_device(pdev, ps_page->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, ps_page->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
/* remove the CRC */
if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
break;
ps_page = &buffer_info->ps_pages[j];
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
skb_fill_page_desc(skb, j, ps_page->page, 0, length);
ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
cleaned = true;
cleaned_count++;
- pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->dma) {
if (adapter->clean_rx == e1000_clean_rx_irq)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
- pci_unmap_page(pdev, buffer_info->dma,
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
- pci_unmap_single(pdev, buffer_info->dma,
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
- pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
ps_page->dma = 0;
put_page(ps_page->page);
ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
pci_disable_msi(adapter->pdev);
adapter->flags &= ~FLAG_MSI_ENABLED;
}
-
- return;
}
/**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/* Don't do anything; this is the system default */
break;
}
-
- return;
}
/**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
}
}
-static void e1000_init_manageability(struct e1000_adapter *adapter)
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 manc, manc2h;
+ u32 manc, manc2h, mdef, i, j;
if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
*/
manc |= E1000_MANC_EN_MNG2HOST;
manc2h = er32(MANC2H);
-#define E1000_MNG2HOST_PORT_623 (1 << 5)
-#define E1000_MNG2HOST_PORT_664 (1 << 6)
- manc2h |= E1000_MNG2HOST_PORT_623;
- manc2h |= E1000_MNG2HOST_PORT_664;
+
+ switch (hw->mac.type) {
+ default:
+ manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+ break;
+ case e1000_82574:
+ case e1000_82583:
+ /*
+ * Check if IPMI pass-through decision filter already exists;
+ * if so, enable it.
+ */
+ for (i = 0, j = 0; i < 8; i++) {
+ mdef = er32(MDEF(i));
+
+ /* Ignore filters with anything other than IPMI ports */
+ if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ continue;
+
+ /* Enable this decision filter in MANC2H */
+ if (mdef)
+ manc2h |= (1 << i);
+
+ j |= mdef;
+ }
+
+ if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+ break;
+
+ /* Create new decision filter in an empty filter */
+ for (i = 0, j = 0; i < 8; i++)
+ if (er32(MDEF(i)) == 0) {
+ ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+ E1000_MDEF_PORT_664));
+ manc2h |= (1 << 1);
+ j++;
+ break;
+ }
+
+ if (!j)
+ e_warn("Unable to create IPMI pass-through filter\n");
+ break;
+ }
+
ew32(MANC2H, manc2h);
ew32(MANC, manc);
}
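The 82574/82583 branch added above scans the eight Manageability Decision Filter (MDEF) registers and only programs a new filter when UDP ports 623/664 are not already covered; any filter that matches non-IPMI traffic is skipped. A compressed sketch of that scan, with a hypothetical read_mdef() accessor standing in for er32(MDEF(i)) and assumed bit positions for the two port bits:

    #include <linux/types.h>

    #define MDEF_PORT_623	(1u << 5)	/* bit positions assumed for the sketch */
    #define MDEF_PORT_664	(1u << 6)
    #define IPMI_PORTS	(MDEF_PORT_623 | MDEF_PORT_664)

    /* Returns the MANC2H bits to enable for existing IPMI-only filters,
     * or 0 if both ports are not yet covered and a new filter is needed. */
    static u32 scan_ipmi_filters(u32 (*read_mdef)(int idx))
    {
    	u32 enable = 0, seen = 0;
    	int i;

    	for (i = 0; i < 8; i++) {
    		u32 mdef = read_mdef(i);

    		if (mdef & ~IPMI_PORTS)	/* matches non-IPMI traffic */
    			continue;
    		if (mdef)
    			enable |= 1u << i;
    		seen |= mdef;
    	}

    	return (seen == IPMI_PORTS) ? enable : 0;
    }

Keeping the MANC2H enable bitmap separate from the "ports already covered" union mirrors how the loop above keeps manc2h and j distinct.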
@@ -2524,12 +2900,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* excessive C-state transition latencies result in
* dropped transactions.
*/
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name, 55);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req, 55);
} else {
- pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_update_request(
+ adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
}
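The receive-configuration hunk above (together with the e1000e_up()/e1000e_down() hunks later in this patch) moves from the old name-keyed pm_qos_*_requirement() calls to the handle-based request API. A minimal sketch of that lifecycle, assuming the pm_qos_params.h interface of this kernel series (the handle type name and a hypothetical foo_ctx holder are assumptions):

    #include <linux/pm_qos_params.h>

    struct foo_ctx {
    	struct pm_qos_request_list *qos_req;
    };

    static void foo_start(struct foo_ctx *ctx)
    {
    	/* start with no constraint on CPU DMA latency */
    	ctx->qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
    					  PM_QOS_DEFAULT_VALUE);
    }

    static void foo_tighten(struct foo_ctx *ctx)
    {
    	/* cap CPU DMA latency at 55 usec while jumbo/ERT receive is active */
    	pm_qos_update_request(ctx->qos_req, 55);
    }

    static void foo_stop(struct foo_ctx *ctx)
    {
    	pm_qos_remove_request(ctx->qos_req);
    	ctx->qos_req = NULL;
    }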
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr;
+ struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
i = 0;
- netdev_for_each_mc_addr(mc_ptr, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN),
- mc_ptr->dmi_addr, ETH_ALEN);
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
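The multicast hunk above reflects the removal of struct dev_mc_list: entries are now generic struct netdev_hw_addr objects walked with netdev_for_each_mc_addr(). A small sketch of packing the list into a flat array, using a hypothetical pack_mc_list() helper (error handling trimmed to the allocation check):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>
    #include <linux/slab.h>

    /* Copy every multicast address on 'netdev' into a newly allocated
     * ETH_ALEN-stride array; the caller owns (and kfree()s) the result. */
    static u8 *pack_mc_list(struct net_device *netdev, int *count)
    {
    	struct netdev_hw_addr *ha;
    	u8 *mta;
    	int i = 0;

    	mta = kmalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
    	if (!mta)
    		return NULL;

    	netdev_for_each_mc_addr(ha, netdev)
    		memcpy(mta + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

    	*count = i;
    	return mta;
    }

The flow matches e1000_set_multi() above: pack the addresses, hand the array and count to the hardware-programming routine, then free the array.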
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_set_multi(adapter->netdev);
e1000_restore_vlan(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->high_water = 0x5000;
fc->low_water = 0x3000;
}
+ fc->refresh_time = 0x1000;
} else {
if ((adapter->flags & FLAG_HAS_ERT) &&
(adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
if (mac->ops.init_hw(hw))
e_err("Hardware Error\n");
- /* additional part of the flow-control workaround above */
- if (hw->mac.type == e1000_pchlan)
- ew32(FCRTV_PCH, 0x1000);
-
e1000_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2824,8 +3196,8 @@ int e1000e_up(struct e1000_adapter *adapter)
/* DMA latency requirement to workaround early-receive/jumbo issue */
if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name,
+ adapter->netdev->pm_qos_req =
+ pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* hardware has been reset, we need to reload some things */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
netif_wake_queue(adapter->netdev);
/* fire a link change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
+
return 0;
}
@@ -2887,9 +3263,11 @@ void e1000e_down(struct e1000_adapter *adapter)
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
- if (adapter->flags & FLAG_HAS_ERT)
- pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
- adapter->netdev->name);
+ if (adapter->flags & FLAG_HAS_ERT) {
+ pm_qos_remove_request(
+ adapter->netdev->pm_qos_req);
+ adapter->netdev->pm_qos_req = NULL;
+ }
/*
* TODO: for power management, we could drop the link and
@@ -3083,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
/* disallow open during test */
if (test_bit(__E1000_TESTING, &adapter->state))
return -EBUSY;
+ pm_runtime_get_sync(&pdev->dev);
+
netif_carrier_off(netdev);
/* allocate transmit descriptors */
@@ -3101,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
+ /*
+ * If AMT is enabled, let the firmware know that the network
+ * interface is now open and reset the part to a known state.
+ */
+ if (adapter->flags & FLAG_HAS_AMT) {
+ e1000_get_hw_control(adapter);
+ e1000e_reset(adapter);
+ }
+
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3109,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/*
- * If AMT is enabled, let the firmware know that the network
- * interface is now open
- */
- if (adapter->flags & FLAG_HAS_AMT)
- e1000_get_hw_control(adapter);
-
- /*
* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
@@ -3149,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
netif_start_queue(netdev);
+ adapter->idle_check = true;
+ pm_runtime_put(&pdev->dev);
+
/* fire a link status change interrupt to start the watchdog */
- ew32(ICS, E1000_ICS_LSC);
+ if (adapter->msix_entries)
+ ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+ else
+ ew32(ICS, E1000_ICS_LSC);
return 0;
@@ -3162,6 +3551,7 @@ err_setup_rx:
e1000e_free_tx_resources(adapter);
err_setup_tx:
e1000e_reset(adapter);
+ pm_runtime_put_sync(&pdev->dev);
return err;
}
@@ -3180,11 +3570,17 @@ err_setup_tx:
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
- e1000e_down(adapter);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state)) {
+ e1000e_down(adapter);
+ e1000_free_irq(adapter);
+ }
e1000_power_down_phy(adapter);
- e1000_free_irq(adapter);
e1000e_free_tx_resources(adapter);
e1000e_free_rx_resources(adapter);
@@ -3206,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
if (adapter->flags & FLAG_HAS_AMT)
e1000_release_hw_control(adapter);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
/**
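e1000_open() and e1000_close() above now wrap the whole transition in pm_runtime_get_sync()/pm_runtime_put*() so the device cannot be runtime-suspended halfway through bringing the interface up or tearing it down. The general shape of that bracketing, with a hypothetical foo_open() (the real open path obviously does far more than this):

    #include <linux/pci.h>
    #include <linux/pm_runtime.h>

    static int foo_open(struct pci_dev *pdev)
    {
    	int err;

    	/* resume the device (if suspended) and hold a usage reference */
    	pm_runtime_get_sync(&pdev->dev);

    	err = 0; /* ... allocate rings, request IRQs, start the queue ... */

    	/* drop the reference; the idle callback may now schedule a suspend */
    	if (err)
    		pm_runtime_put_sync(&pdev->dev);
    	else
    		pm_runtime_put(&pdev->dev);

    	return err;
    }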
@@ -3550,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
e1000e_enable_receives(adapter);
goto link_up;
}
@@ -3561,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
bool txb2b = 1;
+
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
if (adapter->flags & FLAG_RX_NEEDS_RESTART)
schedule_work(&adapter->reset_task);
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+ LINK_TIMEOUT);
}
}
@@ -3705,6 +4113,22 @@ link_up:
}
}
+ /* Simple mode for Interrupt Throttle Rate (ITR) */
+ if (adapter->itr_setting == 4) {
+ /*
+ * Symmetric Tx/Rx gets a reduced ITR=2000;
+ * Totally asymmetric Tx or Rx gets ITR=8000;
+ * everyone else is between 2000-8000.
+ */
+ u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+ u32 dif = (adapter->gotc > adapter->gorc ?
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
+ u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+ ew32(ITR, 1000000000 / (itr * 256));
+ }
+
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->msix_entries)
ew32(ICS, adapter->rx_ring->ims_val);
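The "simple mode" block above interpolates the throttle rate between 2000 and 8000 interrupts per second according to how lopsided the Tx/Rx byte counts are, then converts that rate into the 256 ns units the ITR register expects. The same arithmetic as standalone helpers (names hypothetical):

    #include <linux/types.h>

    /* Interpolate an interrupt rate from per-interval Tx/Rx byte counts:
     * perfectly symmetric traffic -> 2000 ints/s, fully one-sided -> 8000. */
    static u32 simple_itr_ints_per_sec(u32 tx_bytes, u32 rx_bytes)
    {
    	u32 goc = (tx_bytes + rx_bytes) / 10000;
    	u32 dif = (tx_bytes > rx_bytes ? tx_bytes - rx_bytes
    				       : rx_bytes - tx_bytes) / 10000;

    	return goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
    }

    /* The ITR register counts the inter-interrupt interval in 256 ns units. */
    static u32 itr_reg_from_rate(u32 ints_per_sec)
    {
    	return 1000000000 / (ints_per_sec * 256);
    }

For example, 80 KB transmitted against 20 KB received in an interval gives goc=10 and dif=6, hence 6*6000/10 + 2000 = 5600 ints/s, which itr_reg_from_rate() turns into roughly 697 (256 ns units).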
@@ -3879,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
- unsigned int f;
+ unsigned int f, bytecount, segs;
i = tx_ring->next_to_use;
@@ -3890,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_single(pdev, skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_single(&pdev->dev,
+ skb->data + offset,
+ size, DMA_TO_DEVICE);
buffer_info->mapped_as_page = false;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3925,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->length = size;
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
- buffer_info->dma = pci_map_page(pdev, frag->page,
+ buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
offset, size,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
- if (pci_dma_mapping_error(pdev, buffer_info->dma))
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
len -= size;
@@ -3938,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
}
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].segs = segs;
+ tx_ring->buffer_info[i].bytecount = bytecount;
tx_ring->buffer_info[first].next_to_watch = i;
return count;
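The segs/bytecount bookkeeping added above feeds the Tx statistics on completion: for a TSO skb, each of the (segs - 1) extra segments the hardware emits repeats the linear header, so the wire byte count is ((segs - 1) * skb_headlen(skb)) + skb->len. As an illustrative example (numbers invented), a 7066-byte skb with a 66-byte header that GSO splits into 5 segments is accounted as 4 * 66 + 7066 = 7330 bytes.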
@@ -4105,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
unsigned int max_per_txd = E1000_MAX_PER_TXD;
unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
- unsigned int len = skb->len - skb->data_len;
+ unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
unsigned int mss;
int count = 0;
@@ -4155,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
}
}
@@ -4241,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
e1000e_reinit_locked(adapter);
}
@@ -4475,13 +4908,15 @@ out:
return retval;
}
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status;
- u32 wufc = adapter->wol;
+ /* Runtime suspend should only enable wakeup for link changes */
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
int retval = 0;
netif_device_detach(netdev);
@@ -4651,20 +5086,13 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_OPS
+static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
- int retval;
- bool wake;
-
- retval = __e1000_shutdown(pdev, &wake);
- if (!retval)
- e1000_complete_shutdown(pdev, true, wake);
-
- return retval;
+ return !!adapter->tx_ring->buffer_info;
}
-static int e1000_resume(struct pci_dev *pdev)
+static int __e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4677,18 +5105,6 @@ static int e1000_resume(struct pci_dev *pdev)
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
- err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "Cannot enable PCI device from suspend\n");
- return err;
- }
-
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
err = e1000_request_irq(adapter);
@@ -4729,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
e1000e_reset(adapter);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev))
e1000e_up(adapter);
@@ -4746,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
return 0;
}
-#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int e1000_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __e1000_shutdown(pdev, &wake, false);
+ if (!retval)
+ e1000_complete_shutdown(pdev, true, wake);
+
+ return retval;
+}
+
+static int e1000_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter))
+ adapter->idle_check = true;
+
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM_RUNTIME
+static int e1000_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (e1000e_pm_ready(adapter)) {
+ bool wake;
+
+ __e1000_shutdown(pdev, &wake, true);
+ }
+
+ return 0;
+}
+
+static int e1000_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ if (adapter->idle_check) {
+ adapter->idle_check = false;
+ if (!e1000e_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC);
+ }
+
+ return -EBUSY;
+}
+
+static int e1000_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ if (!e1000e_pm_ready(adapter))
+ return 0;
+
+ adapter->idle_check = !dev->power.runtime_auto;
+ return __e1000_resume(pdev);
+}
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM_OPS */
static void e1000_shutdown(struct pci_dev *pdev)
{
bool wake = false;
- __e1000_shutdown(pdev, &wake);
+ __e1000_shutdown(pdev, &wake, false);
if (system_state == SYSTEM_POWER_OFF)
e1000_complete_shutdown(pdev, false, wake);
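The runtime-PM callbacks added above follow the standard pattern: the idle callback schedules a delayed suspend and returns -EBUSY so the core does not suspend synchronously, runtime_suspend reuses __e1000_shutdown() with only link-change wakeup armed, and runtime_resume funnels back into __e1000_resume(). A skeleton of wiring such callbacks into a dev_pm_ops table, with hypothetical foo_* callbacks and an arbitrary 1000 ms idle delay:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int foo_suspend(struct device *dev)         { return 0; }
    static int foo_resume(struct device *dev)          { return 0; }
    static int foo_runtime_suspend(struct device *dev) { return 0; }
    static int foo_runtime_resume(struct device *dev)  { return 0; }

    /* Return 0 to let the core suspend right away, or schedule a delayed
     * suspend and return -EBUSY to stay up for now, as e1000_idle() does. */
    static int foo_runtime_idle(struct device *dev)
    {
    	pm_schedule_suspend(dev, 1000 /* ms */);
    	return -EBUSY;
    }

    static const struct dev_pm_ops foo_pm_ops = {
    	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
    			   foo_runtime_idle)
    };

The two SET_*_PM_OPS macros compile to nothing when the corresponding CONFIG_PM option is disabled, which is why the driver can carry a single dev_pm_ops table (see the e1000_pm_ops hunk further down) instead of the old suspend/resume pointers.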
@@ -4826,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
+ pdev->state_saved = true;
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4855,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- e1000_init_manageability(adapter);
+ e1000_init_manageability_pt(adapter);
if (netif_running(netdev)) {
if (e1000e_up(adapter)) {
@@ -4968,16 +5459,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA "
"configuration, aborting\n");
@@ -5008,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
+ netdev->irq = pdev->irq;
+
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
hw = &adapter->hw;
@@ -5228,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_print_device_info(adapter);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ }
+ pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
+
return 0;
err_register:
@@ -5270,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+ pm_runtime_get_sync(&pdev->dev);
/*
* flush_scheduled work may reschedule our watchdog task, so
* explicitly disable watchdog tasks from being rescheduled
*/
- set_bit(__E1000_DOWN, &adapter->state);
+ if (!down)
+ set_bit(__E1000_DOWN, &adapter->state);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -5289,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
+ /* Don't lie to e1000_close() down the road. */
+ if (!down)
+ clear_bit(__E1000_DOWN, &adapter->state);
unregister_netdev(netdev);
+ if (pci_dev_run_wake(pdev)) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ }
+ pm_runtime_put_noidle(&pdev->dev);
+
/*
* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
@@ -5380,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5390,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+#ifdef CONFIG_PM_OPS
+static const struct dev_pm_ops e1000_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
+ e1000_runtime_resume, e1000_idle)
+};
+#endif
+
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = e1000_suspend,
- .resume = e1000_resume,
+#ifdef CONFIG_PM_OPS
+ .driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
@@ -5414,10 +5933,9 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
- printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
- e1000e_driver_name, e1000e_driver_version);
- printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
- e1000e_driver_name);
+ pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+ e1000e_driver_version);
+ pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e399778cae..a150e48a117 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
{ /* Transmit Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Transmit Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
.err = "using default of "
@@ -286,7 +286,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Interrupt Delay */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Receive Absolute Interrupt Delay */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Throttling Rate */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
.err = "using default of "
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->itr_setting = adapter->itr;
adapter->itr = 20000;
break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) "
+ "mode\n", opt.name);
+ adapter->itr_setting = 4;
+ break;
default:
/*
* Save the setting, because the dynamic bits
@@ -381,7 +386,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Interrupt Mode */
- struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
.err = "defaulting to 2 (MSI-X)",
@@ -399,7 +404,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Smart Power Down */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
.err = "defaulting to Disabled",
@@ -415,7 +420,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* CRC Stripping */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
.err = "defaulting to enabled",
@@ -432,7 +437,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Kumeran Lock Loss Workaround */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
@@ -452,7 +457,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
}
}
{ /* Write-protect NVM */
- const struct e1000_option opt = {
+ static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
.err = "defaulting to Enabled",
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 7f3ceb9dad6..b4ac82d51b2 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -3116,9 +3116,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
* e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
* @hw: pointer to the HW structure
*
- * Calls the PHY setup function to force speed and duplex. Clears the
- * auto-crossover to force MDI manually. Waits for link and returns
- * successful if link up is successful, else -E1000_ERR_PHY (-2).
+ * Calls the PHY setup function to force speed and duplex.
**/
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
@@ -3137,23 +3135,6 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
if (ret_val)
goto out;
- /*
- * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
- if (ret_val)
- goto out;
-
- phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
- phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
-
- ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
- if (ret_val)
- goto out;
-
- e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
-
udelay(1);
if (phy->autoneg_wait_to_complete) {