Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--  drivers/net/e1000/e1000_main.c | 385
1 file changed, 229 insertions(+), 156 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 97e71a4fe8e..a373ccb308d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
- Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -22,51 +22,13 @@
Contact Information:
Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "e1000.h"
-/* Change Log
- * 7.0.33 3-Feb-2006
- * o Added another fix for the pass false carrier bit
- * 7.0.32 24-Jan-2006
- * o Need to rebuild with noew version number for the pass false carrier
- * fix in e1000_hw.c
- * 7.0.30 18-Jan-2006
- * o fixup for tso workaround to disable it for pci-x
- * o fix mem leak on 82542
- * o fixes for 10 Mb/s connections and incorrect stats
- * 7.0.28 01/06/2006
- * o hardware workaround to only set "speed mode" bit for 1G link.
- * 7.0.26 12/23/2005
- * o wake on lan support modified for device ID 10B5
- * o fix dhcp + vlan issue not making it to the iAMT firmware
- * 7.0.24 12/9/2005
- * o New hardware support for the Gigabit NIC embedded in the south bridge
- * o Fixes to the recycling logic (skb->tail) from IBM LTC
- * 6.3.9 12/16/2005
- * o incorporate fix for recycled skbs from IBM LTC
- * 6.3.7 11/18/2005
- * o Honor eeprom setting for enabling/disabling Wake On Lan
- * 6.3.5 11/17/2005
- * o Fix memory leak in rx ring handling for PCI Express adapters
- * 6.3.4 11/8/05
- * o Patch from Jesper Juhl to remove redundant NULL checks for kfree
- * 6.3.2 9/20/05
- * o Render logic that sets/resets DRV_LOAD as inline functions to
- * avoid code replication. If f/w is AMT then set DRV_LOAD only when
- * network interface is open.
- * o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
- * o Adjust PBA partioning for Jumbo frames using MTU size and not
- * rx_buffer_len
- * 6.3.1 9/19/05
- * o Use adapter->tx_timeout_factor in Tx Hung Detect logic
- * (e1000_clean_tx_irq)
- * o Support for 8086:10B5 device (Quad Port)
- */
-
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#ifndef CONFIG_E1000_NAPI
@@ -74,9 +36,9 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
+#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
-static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
*
@@ -208,8 +170,8 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
-static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
- struct sk_buff *skb);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+ struct sk_buff *skb);
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -227,6 +189,16 @@ static void e1000_shutdown(struct pci_dev *pdev);
static void e1000_netpoll (struct net_device *netdev);
#endif
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+ .error_detected = e1000_io_error_detected,
+ .slot_reset = e1000_io_slot_reset,
+ .resume = e1000_io_resume,
+};
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
@@ -238,7 +210,8 @@ static struct pci_driver e1000_driver = {
.suspend = e1000_suspend,
.resume = e1000_resume,
#endif
- .shutdown = e1000_shutdown
+ .shutdown = e1000_shutdown,
+ .err_handler = &e1000_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -293,7 +266,7 @@ module_exit(e1000_exit_module);
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
@@ -307,7 +280,7 @@ e1000_irq_disable(struct e1000_adapter *adapter)
* @adapter: board private structure
**/
-static inline void
+static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
@@ -348,10 +321,10 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
* of the f/w this means that the netowrk i/f is closed.
- *
+ *
**/
-static inline void
+static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
@@ -361,6 +334,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
+ case e1000_80003es2lan:
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
@@ -379,13 +353,13 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
* @adapter: address of board private structure
*
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that
- * the driver is loaded. For AMT version (only with 82573)
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
* of the f/w this means that the netowrk i/f is open.
- *
+ *
**/
-static inline void
+static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
@@ -394,6 +368,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
switch (adapter->hw.mac_type) {
case e1000_82571:
case e1000_82572:
+ case e1000_80003es2lan:
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
@@ -421,7 +396,7 @@ e1000_up(struct e1000_adapter *adapter)
uint16_t mii_reg;
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
if (mii_reg & MII_CR_POWER_DOWN)
- e1000_phy_reset(&adapter->hw);
+ e1000_phy_hw_reset(&adapter->hw);
}
e1000_set_multi(netdev);
@@ -711,8 +686,8 @@ e1000_probe(struct pci_dev *pdev,
DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
/* if ksp3, indicate if it's port a being setup */
- if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
- e1000_ksp3_port_a == 0)
+ if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+ e1000_ksp3_port_a == 0)
adapter->ksp3_port_a = 1;
e1000_ksp3_port_a++;
/* Reset for multiple KP3 adapters */
@@ -740,9 +715,9 @@ e1000_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- /* hard_start_xmit is safe against parallel locking */
- netdev->features |= NETIF_F_LLTX;
-
+ /* hard_start_xmit is safe against parallel locking */
+ netdev->features |= NETIF_F_LLTX;
+
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
/* before reading the EEPROM, reset the controller to
@@ -972,8 +947,8 @@ e1000_sw_init(struct e1000_adapter *adapter)
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+ adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
hw->max_frame_size = netdev->mtu +
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1181,7 +1156,7 @@ e1000_close(struct net_device *netdev)
* @start: address of beginning of memory
* @len: length of memory
**/
-static inline boolean_t
+static boolean_t
e1000_check_64k_bound(struct e1000_adapter *adapter,
void *start, unsigned long len)
{
@@ -1599,14 +1574,21 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
rctl |= E1000_RCTL_LPE;
/* Setup buffer sizes */
- if (adapter->hw.mac_type >= e1000_82571) {
- /* We can now specify buffers in 1K increments.
- * BSIZE and BSEX are ignored in this case. */
- rctl |= adapter->rx_buffer_len << 0x11;
- } else {
- rctl &= ~E1000_RCTL_SZ_4096;
- rctl |= E1000_RCTL_BSEX;
- switch (adapter->rx_buffer_len) {
+ rctl &= ~E1000_RCTL_SZ_4096;
+ rctl |= E1000_RCTL_BSEX;
+ switch (adapter->rx_buffer_len) {
+ case E1000_RXBUFFER_256:
+ rctl |= E1000_RCTL_SZ_256;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_512:
+ rctl |= E1000_RCTL_SZ_512;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
+ case E1000_RXBUFFER_1024:
+ rctl |= E1000_RCTL_SZ_1024;
+ rctl &= ~E1000_RCTL_BSEX;
+ break;
case E1000_RXBUFFER_2048:
default:
rctl |= E1000_RCTL_SZ_2048;
@@ -1621,7 +1603,6 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
case E1000_RXBUFFER_16384:
rctl |= E1000_RCTL_SZ_16384;
break;
- }
}
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -1715,7 +1696,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
if (hw->mac_type >= e1000_82571) {
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
/* Reset delay timers after every interrupt */
- ctrl_ext |= E1000_CTRL_EXT_CANC;
+ ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
/* Auto-Mask interrupts upon ICR read. */
ctrl_ext |= E1000_CTRL_EXT_IAME;
@@ -1807,7 +1788,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
-static inline void
+static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
@@ -2247,6 +2228,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
if (link) {
if (!netif_carrier_ok(netdev)) {
+ boolean_t txb2b = 1;
e1000_get_speed_and_duplex(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@@ -2260,23 +2242,22 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
* and adjust the timeout factor */
netdev->tx_queue_len = adapter->tx_queue_len;
adapter->tx_timeout_factor = 1;
- adapter->txb2b = 1;
switch (adapter->link_speed) {
case SPEED_10:
- adapter->txb2b = 0;
+ txb2b = 0;
netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 8;
break;
case SPEED_100:
- adapter->txb2b = 0;
+ txb2b = 0;
netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
- if ((adapter->hw.mac_type == e1000_82571 ||
+ if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
- adapter->txb2b == 0) {
+ txb2b == 0) {
#define SPEED_MODE_BIT (1 << 21)
uint32_t tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
@@ -2400,7 +2381,7 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
-static inline int
+static int
e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2422,7 +2403,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
mss = skb_shinfo(skb)->tso_size;
- if (skb->protocol == ntohs(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP)) {
skb->nh.iph->tot_len = 0;
skb->nh.iph->check = 0;
skb->h.th->check =
@@ -2480,7 +2461,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return FALSE;
}
-static inline boolean_t
+static boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
@@ -2516,7 +2497,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_MAX_TXD_PWR 12
#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
-static inline int
+static int
e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
unsigned int nr_frags, unsigned int mss)
@@ -2625,7 +2606,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
return count;
}
-static inline void
+static void
e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
int tx_flags, int count)
{
@@ -2689,7 +2670,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
-static inline int
+static int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
@@ -2716,7 +2697,7 @@ no_fifo_stall_required:
}
#define MINIMUM_DHCP_PACKET_SIZE 282
-static inline int
+static int
e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
@@ -2764,7 +2745,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int nr_frags = 0;
unsigned int mss = 0;
int count = 0;
- int tso;
+ int tso;
unsigned int f;
len -= skb->data_len;
@@ -2777,7 +2758,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#ifdef NETIF_F_TSO
mss = skb_shinfo(skb)->tso_size;
- /* The controller does a simple calculation to
+ /* The controller does a simple calculation to
* make sure there is enough room in the FIFO before
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
@@ -2800,7 +2781,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
case e1000_82573:
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- printk(KERN_ERR
+ printk(KERN_ERR
"__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2901,7 +2882,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
* no longer assume, we must. */
- if (likely(skb->protocol == ntohs(ETH_P_IP)))
+ if (likely(skb->protocol == htons(ETH_P_IP)))
tx_flags |= E1000_TX_FLAGS_IPV4;
e1000_tx_queue(adapter, tx_ring, tx_flags,
@@ -2982,8 +2963,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* Adapter-specific max frame size limits. */
switch (adapter->hw.mac_type) {
- case e1000_82542_rev2_0:
- case e1000_82542_rev2_1:
+ case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
return -EINVAL;
@@ -3017,27 +2997,32 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
break;
}
-
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
- adapter->rx_buffer_len = max_frame;
- E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
- } else {
- if(unlikely((adapter->hw.mac_type < e1000_82543) &&
- (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
- DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
- "on 82542\n");
- return -EINVAL;
- } else {
- if(max_frame <= E1000_RXBUFFER_2048)
- adapter->rx_buffer_len = E1000_RXBUFFER_2048;
- else if(max_frame <= E1000_RXBUFFER_4096)
- adapter->rx_buffer_len = E1000_RXBUFFER_4096;
- else if(max_frame <= E1000_RXBUFFER_8192)
- adapter->rx_buffer_len = E1000_RXBUFFER_8192;
- else if(max_frame <= E1000_RXBUFFER_16384)
- adapter->rx_buffer_len = E1000_RXBUFFER_16384;
- }
- }
+ /* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ * means we reserve 2 more; this pushes us to allocate from the next
+ * larger slab size,
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
+
+ if (max_frame <= E1000_RXBUFFER_256)
+ adapter->rx_buffer_len = E1000_RXBUFFER_256;
+ else if (max_frame <= E1000_RXBUFFER_512)
+ adapter->rx_buffer_len = E1000_RXBUFFER_512;
+ else if (max_frame <= E1000_RXBUFFER_1024)
+ adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+ else if (max_frame <= E1000_RXBUFFER_2048)
+ adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+ else if (max_frame <= E1000_RXBUFFER_4096)
+ adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+ else if (max_frame <= E1000_RXBUFFER_8192)
+ adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+ else if (max_frame <= E1000_RXBUFFER_16384)
+ adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+ /* adjust allocation if LPE protects us, and we aren't using SBP */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+ if (!adapter->hw.tbi_compatibility_on &&
+ ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+ adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
netdev->mtu = new_mtu;
@@ -3060,11 +3045,21 @@ void
e1000_update_stats(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
unsigned long flags;
uint16_t phy_tmp;
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
+ /*
+ * Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+ return;
+
spin_lock_irqsave(&adapter->stats_lock, flags);
/* these counters are modified from e1000_adjust_tbi_stats,
@@ -3165,7 +3160,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
adapter->stats.cexterr;
- adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.ruc +
adapter->stats.roc;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -3391,13 +3385,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
tx_ring->next_to_clean = i;
- spin_lock(&tx_ring->tx_lock);
-
+#define TX_WAKE_THRESHOLD 32
if (unlikely(cleaned && netif_queue_stopped(netdev) &&
- netif_carrier_ok(netdev)))
- netif_wake_queue(netdev);
-
- spin_unlock(&tx_ring->tx_lock);
+ netif_carrier_ok(netdev))) {
+ spin_lock(&tx_ring->tx_lock);
+ if (netif_queue_stopped(netdev) &&
+ (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+ netif_wake_queue(netdev);
+ spin_unlock(&tx_ring->tx_lock);
+ }
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
@@ -3445,7 +3441,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
-static inline void
+static void
e1000_rx_checksum(struct e1000_adapter *adapter,
uint32_t status_err, uint32_t csum,
struct sk_buff *skb)
@@ -3567,7 +3563,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
flags);
length--;
} else {
- dev_kfree_skb_irq(skb);
+ /* recycle */
+ buffer_info->skb = skb;
goto next_desc;
}
}
@@ -3675,6 +3672,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+ buffer_info = &rx_ring->buffer_info[i];
while (staterr & E1000_RXD_STAT_DD) {
buffer_info = &rx_ring->buffer_info[i];
@@ -3733,9 +3731,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
/* page alloc/put takes too long and effects small packet
* throughput, so unsplit small packets and save the alloc/put*/
- if (l1 && ((length + l1) < E1000_CB_LENGTH)) {
+ if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
u8 *vaddr;
- /* there is no documentation about how to call
+ /* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
* very long */
pci_dma_sync_single_for_cpu(pdev,
@@ -4155,7 +4153,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
- if (adapter->hw.phy_type == e1000_media_type_copper) {
+ if (adapter->hw.media_type == e1000_media_type_copper) {
switch (data->reg_num) {
case PHY_CTRL:
if (mii_reg & MII_CR_POWER_DOWN)
@@ -4514,21 +4512,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
- retval = pci_enable_wake(pdev, PCI_D3hot, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
} else {
E1000_WRITE_REG(&adapter->hw, WUC, 0);
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
- retval = pci_enable_wake(pdev, PCI_D3hot, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
}
if (adapter->hw.mac_type >= e1000_82540 &&
@@ -4537,13 +4527,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
if (manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
- retval = pci_enable_wake(pdev, PCI_D3hot, 1);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 1);
- if (retval)
- DPRINTK(PROBE, ERR,
- "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1);
}
}
@@ -4553,9 +4538,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
pci_disable_device(pdev);
- retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
- if (retval)
- DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
@@ -4566,22 +4549,15 @@ e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- int retval;
uint32_t manc, ret_val;
- retval = pci_set_power_state(pdev, PCI_D0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error in setting power state\n");
+ pci_set_power_state(pdev, PCI_D0);
e1000_pci_restore_state(adapter);
ret_val = pci_enable_device(pdev);
pci_set_master(pdev);
- retval = pci_enable_wake(pdev, PCI_D3hot, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
- retval = pci_enable_wake(pdev, PCI_D3cold, 0);
- if (retval)
- DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
@@ -4635,4 +4611,101 @@ e1000_netpoll(struct net_device *netdev)
}
#endif
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ e1000_down(adapter);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+
+ if (pci_enable_device(pdev)) {
+ printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ pci_set_master(pdev);
+
+ pci_enable_wake(pdev, 3, 0);
+ pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
+
+ /* Perform card reset only on one instance of the card */
+ if (PCI_FUNC (pdev->devfn) != 0)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ e1000_reset(adapter);
+ E1000_WRITE_REG(&adapter->hw, WUS, ~0);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct e1000_adapter *adapter = netdev->priv;
+ uint32_t manc, swsm;
+
+ if (netif_running(netdev)) {
+ if (e1000_up(adapter)) {
+ printk("e1000: can't bring device back up after reset\n");
+ return;
+ }
+ }
+
+ netif_device_attach(netdev);
+
+ if (adapter->hw.mac_type >= e1000_82540 &&
+ adapter->hw.media_type == e1000_media_type_copper) {
+ manc = E1000_READ_REG(&adapter->hw, MANC);
+ manc &= ~(E1000_MANC_ARP_EN);
+ E1000_WRITE_REG(&adapter->hw, MANC, manc);
+ }
+
+ switch (adapter->hw.mac_type) {
+ case e1000_82573:
+ swsm = E1000_READ_REG(&adapter->hw, SWSM);
+ E1000_WRITE_REG(&adapter->hw, SWSM,
+ swsm | E1000_SWSM_DRV_LOAD);
+ break;
+ default:
+ break;
+ }
+
+ if (netif_running(netdev))
+ mod_timer(&adapter->watchdog_timer, jiffies);
+}
+
/* e1000_main.c */