author     David Howells <dhowells@redhat.com>                    2006-12-05 14:37:56 +0000
committer  David Howells <dhowells@warthog.cambridge.redhat.com>  2006-12-05 14:37:56 +0000
commit     4c1ac1b49122b805adfa4efc620592f68dccf5db (patch)
tree       87557f4bc2fd4fe65b7570489c2f610c45c0adcd /drivers/net/e1000
parent     c4028958b6ecad064b1a6303a6a5906d4fe48d73 (diff)
parent     d916faace3efc0bf19fe9a615a1ab8fa1a24cd93 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

    drivers/infiniband/core/iwcm.c
    drivers/net/chelsio/cxgb2.c
    drivers/net/wireless/bcm43xx/bcm43xx_main.c
    drivers/net/wireless/prism54/islpci_eth.c
    drivers/usb/core/hub.h
    drivers/usb/input/hid-core.c
    net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r--  drivers/net/e1000/e1000.h          |  17
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c  |  36
-rw-r--r--  drivers/net/e1000/e1000_hw.c       | 139
-rw-r--r--  drivers/net/e1000/e1000_hw.h       |  90
-rw-r--r--  drivers/net/e1000/e1000_main.c     | 490
-rw-r--r--  drivers/net/e1000/e1000_osdep.h    |   9
-rw-r--r--  drivers/net/e1000/e1000_param.c    |  98

7 files changed, 583 insertions(+), 296 deletions(-)
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 7ecce438d25..f091042b146 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -59,6 +59,9 @@
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/ip.h>
+#ifdef NETIF_F_TSO6
+#include <linux/ipv6.h>
+#endif
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/pkt_sched.h>
@@ -254,6 +257,17 @@ struct e1000_adapter {
spinlock_t tx_queue_lock;
#endif
atomic_t irq_sem;
+ unsigned int detect_link;
+ unsigned int total_tx_bytes;
+ unsigned int total_tx_packets;
+ unsigned int total_rx_bytes;
+ unsigned int total_rx_packets;
+ /* Interrupt Throttle Rate */
+ uint32_t itr;
+ uint32_t itr_setting;
+ uint16_t tx_itr;
+ uint16_t rx_itr;
+
struct work_struct reset_task;
uint8_t fc_autoneg;
@@ -262,6 +276,7 @@ struct e1000_adapter {
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
+ unsigned int restart_queue;
unsigned long tx_queue_len;
uint32_t txd_cmd;
uint32_t tx_int_delay;
@@ -310,8 +325,6 @@ struct e1000_adapter {
uint64_t gorcl_old;
uint16_t rx_ps_bsize0;
- /* Interrupt Throttle Rate */
- uint32_t itr;
/* OS defined structs */
struct net_device *netdev;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c564adbd669..da459f7177c 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -85,6 +85,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+ { "tx_restart_queue", E1000_STAT(restart_queue) },
{ "rx_long_length_errors", E1000_STAT(stats.roc) },
{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -133,9 +134,7 @@ e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (hw->autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
-
/* the e1000 autoneg seems to match ethtool nicely */
-
ecmd->advertising |= hw->autoneg_advertised;
}
@@ -285,7 +284,7 @@ e1000_set_pauseparam(struct net_device *netdev,
e1000_reset(adapter);
} else
retval = ((hw->media_type == e1000_media_type_fiber) ?
- e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw));
clear_bit(__E1000_RESETTING, &adapter->flags);
return retval;
@@ -350,6 +349,13 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
else
netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ if (data)
+ netdev->features |= NETIF_F_TSO6;
+ else
+ netdev->features &= ~NETIF_F_TSO6;
+#endif
+
DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = TRUE;
return 0;
@@ -774,7 +780,7 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
/* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
- switch (adapter->hw.mac_type) {
+ switch (adapter->hw.mac_type) {
/* there are several bits on newer hardware that are r/w */
case e1000_82571:
case e1000_82572:
@@ -802,12 +808,14 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
/* restore previous status */
E1000_WRITE_REG(&adapter->hw, STATUS, before);
+
if (adapter->hw.mac_type != e1000_ich8lan) {
REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
}
+
REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
@@ -820,8 +828,9 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
+
before = (adapter->hw.mac_type == e1000_ich8lan ?
- 0x06C3B33E : 0x06DFB3FE);
+ 0x06C3B33E : 0x06DFB3FE);
REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
@@ -834,10 +843,10 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
value = (adapter->hw.mac_type == e1000_ich8lan ?
- E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
+ E1000_RAR_ENTRIES_ICH8LAN : E1000_RAR_ENTRIES);
for (i = 0; i < value; i++) {
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
- 0xFFFFFFFF);
+ 0xFFFFFFFF);
}
} else {
@@ -883,8 +892,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
}
static irqreturn_t
-e1000_test_intr(int irq,
- void *data)
+e1000_test_intr(int irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -905,11 +913,11 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* NOTE: we don't test MSI interrupts here, yet */
/* Hook up test interrupt handler just for this test */
- if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev))
+ if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+ netdev))
shared_int = FALSE;
else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -925,6 +933,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
if (adapter->hw.mac_type == e1000_ich8lan && i == 8)
continue;
+
/* Interrupt to test */
mask = 1 << i;
@@ -1674,7 +1683,7 @@ e1000_diag_test(struct net_device *netdev,
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Offline tests aren't run; pass by default */
+ /* Online tests aren't run; pass by default */
data[0] = 0;
data[1] = 0;
data[2] = 0;
@@ -1717,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
retval = 0;
break;
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
/* quad port adapters only support WoL on port A */
if (!adapter->quad_port_a) {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 65077f39da6..3655d902b0b 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -385,6 +385,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
hw->mac_type = e1000_82571;
break;
case E1000_DEV_ID_82572EI_COPPER:
@@ -408,6 +409,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_ICH8_IGP_AMT:
case E1000_DEV_ID_ICH8_IGP_C:
case E1000_DEV_ID_ICH8_IFE:
+ case E1000_DEV_ID_ICH8_IFE_GT:
+ case E1000_DEV_ID_ICH8_IFE_G:
case E1000_DEV_ID_ICH8_IGP_M:
hw->mac_type = e1000_ich8lan;
break;
@@ -2367,6 +2370,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Need to reset the PHY or these changes will be ignored */
mii_ctrl_reg |= MII_CR_RESET;
+
/* Disable MDI-X support for 10/100 */
} else if (hw->phy_type == e1000_phy_ife) {
ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
@@ -2379,6 +2383,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
if (ret_val)
return ret_val;
+
} else {
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed or duplex are forced.
@@ -3868,7 +3873,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*
-* Sets bit 15 of the MII Control regiser
+* Sets bit 15 of the MII Control register
******************************************************************************/
int32_t
e1000_phy_reset(struct e1000_hw *hw)
@@ -3940,14 +3945,15 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
- /* Write VR power-down enable */
+ /* Write VR power-down enable - bits 9:8 should be 10b */
e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
- e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
- IGP3_VR_CTRL_MODE_SHUT);
+ phy_data |= (1 << 9);
+ phy_data &= ~(1 << 8);
+ e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data);
/* Read it back and test */
e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
- if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+ if (((phy_data & IGP3_VR_CTRL_MODE_MASK) == IGP3_VR_CTRL_MODE_SHUT) || retry)
break;
/* Issue PHY reset and repeat at most one more time */
@@ -4549,7 +4555,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
case e1000_ich8lan:
{
int32_t i = 0;
- uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+ uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
eeprom->type = e1000_eeprom_ich8;
eeprom->use_eerd = FALSE;
@@ -4565,12 +4571,14 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
}
}
- hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
- ICH8_FLASH_SECTOR_SIZE;
+ hw->flash_base_addr = (flash_size & ICH_GFPREG_BASE_MASK) *
+ ICH_FLASH_SECTOR_SIZE;
+
+ hw->flash_bank_size = ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
+ hw->flash_bank_size -= (flash_size & ICH_GFPREG_BASE_MASK);
+
+ hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
- hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
- hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
- hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
hw->flash_bank_size /= 2 * sizeof(uint16_t);
break;
@@ -5620,8 +5628,8 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
* signature is valid. We want to do this after the write
* has completed so that we don't mark the segment valid
* while the write is still in progress */
- if (i == E1000_ICH8_NVM_SIG_WORD)
- high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+ if (i == E1000_ICH_NVM_SIG_WORD)
+ high_byte = E1000_ICH_NVM_SIG_MASK | high_byte;
error = e1000_verify_write_ich8_byte(hw,
(i << 1) + new_bank_offset + 1, high_byte);
@@ -5643,18 +5651,18 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
* erase as well since these bits are 11 to start with
* and we need to change bit 14 to 0b */
e1000_read_ich8_byte(hw,
- E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
&high_byte);
high_byte &= 0xBF;
error = e1000_verify_write_ich8_byte(hw,
- E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + new_bank_offset, high_byte);
/* And invalidate the previously valid segment by setting
* its signature word (0x13) high_byte to 0b. This can be
* done without an erase because flash erase sets all bits
* to 1's. We can write 1's to 0's without an erase */
if (error == E1000_SUCCESS) {
error = e1000_verify_write_ich8_byte(hw,
- E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
+ E1000_ICH_NVM_SIG_WORD * 2 + 1 + old_bank_offset, 0);
}
/* Clear the now not used entry in the cache */
@@ -5841,6 +5849,7 @@ e1000_mta_set(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & 0x7F;
if (hw->mac_type == e1000_ich8lan)
hash_reg &= 0x1F;
+
hash_bit = hash_value & 0x1F;
mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
@@ -6026,6 +6035,7 @@ e1000_id_led_init(struct e1000_hw * hw)
else
eeprom_data = ID_LED_DEFAULT;
}
+
for (i = 0; i < 4; i++) {
temp = (eeprom_data >> (i << 2)) & led_mask;
switch (temp) {
@@ -8486,7 +8496,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
DEBUGFUNC("e1000_ich8_cycle_init");
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
/* May be check the Flash Des Valid bit in Hw status */
if (hsfsts.hsf_status.fldesvalid == 0) {
@@ -8499,7 +8509,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress bit to check
* against, in order to start a new cycle or FDONE bit should be changed
@@ -8514,13 +8524,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
/* There is no cycle running at present, so we can start a cycle */
/* Begin by setting Flash Cycle Done. */
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
error = E1000_SUCCESS;
} else {
/* otherwise poll for sometime so the current cycle has a chance
* to end before giving up. */
- for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcinprog == 0) {
error = E1000_SUCCESS;
break;
@@ -8531,7 +8541,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
/* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done. */
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
DEBUGOUT("Flash controller busy, cannot get access");
}
@@ -8553,13 +8563,13 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
uint32_t i = 0;
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
do {
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcdone == 1)
break;
udelay(1);
@@ -8593,10 +8603,10 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
DEBUGFUNC("e1000_read_ich8_data");
if (size < 1 || size > 2 || data == 0x0 ||
- index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
return error;
- flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
hw->flash_base_addr;
do {
@@ -8606,25 +8616,25 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
if (error != E1000_SUCCESS)
break;
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
- hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of index into Flash Linear address field in
* Flash Address */
/* TODO: TBD maybe check the index against the size of flash */
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
- error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1, if set to 1, clear it and try the whole
* sequence a few more times, else read in (shift in) the Flash Data0,
* the order is least significant byte first msb to lsb */
if (error == E1000_SUCCESS) {
- flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+ flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
if (size == 1) {
*data = (uint8_t)(flash_data & 0x000000FF);
} else if (size == 2) {
@@ -8634,9 +8644,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
} else {
/* If we've gotten here, then things are probably completely hosed,
* but if the error condition is detected, it won't hurt to give
- * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1) {
/* Repeat for some time before giving up. */
continue;
@@ -8645,7 +8655,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
break;
}
}
- } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
return error;
}
@@ -8672,10 +8682,10 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
DEBUGFUNC("e1000_write_ich8_data");
if (size < 1 || size > 2 || data > size * 0xff ||
- index > ICH8_FLASH_LINEAR_ADDR_MASK)
+ index > ICH_FLASH_LINEAR_ADDR_MASK)
return error;
- flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+ flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
hw->flash_base_addr;
do {
@@ -8685,34 +8695,34 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
if (error != E1000_SUCCESS)
break;
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size -1;
- hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of index into Flash Linear address field in
* Flash Address */
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
if (size == 1)
flash_data = (uint32_t)data & 0x00FF;
else
flash_data = (uint32_t)data;
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
/* check if FCERR is set to 1 , if set to 1, clear it and try the whole
* sequence a few more times else done */
- error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_COMMAND_TIMEOUT);
if (error == E1000_SUCCESS) {
break;
} else {
/* If we're here, then things are most likely completely hosed,
* but if the error condition is detected, it won't hurt to give
- * it another try...ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+ * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
*/
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1) {
/* Repeat for some time before giving up. */
continue;
@@ -8721,7 +8731,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
break;
}
}
- } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
return error;
}
@@ -8840,7 +8850,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
int32_t j = 0;
int32_t error_flag = 0;
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
/* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
/* 00: The Hw sector is 256 bytes, hence we need to erase 16
@@ -8853,19 +8863,14 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
* 11: The Hw sector size is 64K bytes */
if (hsfsts.hsf_status.berasesz == 0x0) {
/* Hw sector size 256 */
- sub_sector_size = ICH8_FLASH_SEG_SIZE_256;
- bank_size = ICH8_FLASH_SECTOR_SIZE;
- iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+ sub_sector_size = ICH_FLASH_SEG_SIZE_256;
+ bank_size = ICH_FLASH_SECTOR_SIZE;
+ iteration = ICH_FLASH_SECTOR_SIZE / ICH_FLASH_SEG_SIZE_256;
} else if (hsfsts.hsf_status.berasesz == 0x1) {
- bank_size = ICH8_FLASH_SEG_SIZE_4K;
- iteration = 1;
- } else if (hw->mac_type != e1000_ich8lan &&
- hsfsts.hsf_status.berasesz == 0x2) {
- /* 8K erase size invalid for ICH8 - added in for ICH9 */
- bank_size = ICH9_FLASH_SEG_SIZE_8K;
+ bank_size = ICH_FLASH_SEG_SIZE_4K;
iteration = 1;
} else if (hsfsts.hsf_status.berasesz == 0x3) {
- bank_size = ICH8_FLASH_SEG_SIZE_64K;
+ bank_size = ICH_FLASH_SEG_SIZE_64K;
iteration = 1;
} else {
return error;
@@ -8883,9 +8888,9 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
/* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
* Control */
- hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
- hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
- E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+ hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+ E1000_WRITE_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of an index within the block into Flash
* Linear address field in Flash Address. This probably needs to
@@ -8893,17 +8898,17 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
* the software bank size (4, 8 or 64 KBytes) */
flash_linear_address = bank * bank_size + j * sub_sector_size;
flash_linear_address += hw->flash_base_addr;
- flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+ flash_linear_address &= ICH_FLASH_LINEAR_ADDR_MASK;
- E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+ E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
- error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_ERASE_TIMEOUT);
+ error = e1000_ich8_flash_cycle(hw, ICH_FLASH_ERASE_TIMEOUT);
/* Check if FCERR is set to 1. If 1, clear it and try the whole
* sequence a few more times else Done */
if (error == E1000_SUCCESS) {
break;
} else {
- hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+ hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1) {
/* repeat for some time before giving up */
continue;
@@ -8912,7 +8917,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
break;
}
}
- } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+ } while ((count < ICH_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
if (error_flag == 1)
break;
}
@@ -9013,5 +9018,3 @@ e1000_init_lcd_from_nvm(struct e1000_hw *hw)
return E1000_SUCCESS;
}
-
-
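The reworked flash-geometry block in e1000_init_eeprom_params() above is easier to follow outside the diff. The sketch below is a standalone illustration, not driver code: it repeats the arithmetic the patch performs on the GFPREG value using the renamed ICH_ constants. The sample GFPREG value is hypothetical, and the reading of its low and high fields as first/last usable 4 KB sector is inferred from how the code uses them.

#include <stdint.h>
#include <stdio.h>

#define ICH_GFPREG_BASE_MASK   0x1FFF
#define ICH_FLASH_SECTOR_SIZE  4096

int main(void)
{
	/* Hypothetical GFPREG value: low 13 bits = first usable 4 KB
	 * sector, bits 28:16 = last usable 4 KB sector (as the driver
	 * reads them). */
	uint32_t gfpreg = (0x0003u << 16) | 0x0001u;

	uint32_t flash_base_addr = (gfpreg & ICH_GFPREG_BASE_MASK) *
				   ICH_FLASH_SECTOR_SIZE;

	/* Sector span between the two fields, converted to bytes, then
	 * split into two banks and expressed in 16-bit words -- the
	 * same steps the patch performs. */
	uint32_t flash_bank_size = ((gfpreg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
	flash_bank_size -= (gfpreg & ICH_GFPREG_BASE_MASK);
	flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
	flash_bank_size /= 2 * sizeof(uint16_t);

	printf("base addr = 0x%x, bank size = %u words\n",
	       (unsigned)flash_base_addr, (unsigned)flash_bank_size);
	return 0;
}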
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 449a60303e0..3321fb13bfa 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -128,11 +128,13 @@ typedef enum {
/* PCI bus widths */
typedef enum {
e1000_bus_width_unknown = 0,
+ /* These PCIe values should literally match the possible return values
+ * from config space */
+ e1000_bus_width_pciex_1 = 1,
+ e1000_bus_width_pciex_2 = 2,
+ e1000_bus_width_pciex_4 = 4,
e1000_bus_width_32,
e1000_bus_width_64,
- e1000_bus_width_pciex_1,
- e1000_bus_width_pciex_2,
- e1000_bus_width_pciex_4,
e1000_bus_width_reserved
} e1000_bus_width;
@@ -326,6 +328,7 @@ int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+
void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
/* EEPROM Functions */
@@ -390,7 +393,6 @@ int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
-
int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
@@ -473,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82571EB_FIBER 0x105F
#define E1000_DEV_ID_82571EB_SERDES 0x1060
#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC
#define E1000_DEV_ID_82572EI_COPPER 0x107D
#define E1000_DEV_ID_82572EI_FIBER 0x107E
#define E1000_DEV_ID_82572EI_SERDES 0x107F
@@ -490,6 +493,8 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
#define E1000_DEV_ID_ICH8_IGP_C 0x104B
#define E1000_DEV_ID_ICH8_IFE 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
#define E1000_DEV_ID_ICH8_IGP_M 0x104D
@@ -576,6 +581,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
* E1000_RAR_ENTRIES - 1 multicast addresses.
*/
#define E1000_RAR_ENTRIES 15
+
#define E1000_RAR_ENTRIES_ICH8LAN 6
#define MIN_NUMBER_OF_DESCRIPTORS 8
@@ -1335,9 +1341,9 @@ struct e1000_hw_stats {
uint64_t gotch;
uint64_t rnbc;
uint64_t ruc;
+ uint64_t rfc;
uint64_t roc;
uint64_t rlerrc;
- uint64_t rfc;
uint64_t rjc;
uint64_t mgprc;
uint64_t mgpdc;
@@ -1577,8 +1583,8 @@ struct e1000_hw {
#define E1000_HICR_FW_RESET 0xC0
#define E1000_SHADOW_RAM_WORDS 2048
-#define E1000_ICH8_NVM_SIG_WORD 0x13
-#define E1000_ICH8_NVM_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC0
/* EEPROM Read */
#define E1000_EERD_START 0x00000001 /* Start Read */
@@ -3172,6 +3178,7 @@ struct e1000_host_command_info {
#define IGP3_VR_CTRL \
PHY_REG(776, 18) /* Voltage regulator control register */
#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */
+#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */
#define IGP3_CAPABILITY \
PHY_REG(776, 19) /* IGP3 Capability Register */
@@ -3256,41 +3263,40 @@ struct e1000_host_command_info {
#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-#define ICH8_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
-#define ICH8_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
-#define ICH8_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
-#define ICH8_FLASH_SEG_SIZE_256 256
-#define ICH8_FLASH_SEG_SIZE_4K 4096
-#define ICH9_FLASH_SEG_SIZE_8K 8192
-#define ICH8_FLASH_SEG_SIZE_64K 65536
-
-#define ICH8_CYCLE_READ 0x0
-#define ICH8_CYCLE_RESERVED 0x1
-#define ICH8_CYCLE_WRITE 0x2
-#define ICH8_CYCLE_ERASE 0x3
-
-#define ICH8_FLASH_GFPREG 0x0000
-#define ICH8_FLASH_HSFSTS 0x0004
-#define ICH8_FLASH_HSFCTL 0x0006
-#define ICH8_FLASH_FADDR 0x0008
-#define ICH8_FLASH_FDATA0 0x0010
-#define ICH8_FLASH_FRACC 0x0050
-#define ICH8_FLASH_FREG0 0x0054
-#define ICH8_FLASH_FREG1 0x0058
-#define ICH8_FLASH_FREG2 0x005C
-#define ICH8_FLASH_FREG3 0x0060
-#define ICH8_FLASH_FPR0 0x0074
-#define ICH8_FLASH_FPR1 0x0078
-#define ICH8_FLASH_SSFSTS 0x0090
-#define ICH8_FLASH_SSFCTL 0x0092
-#define ICH8_FLASH_PREOP 0x0094
-#define ICH8_FLASH_OPTYPE 0x0096
-#define ICH8_FLASH_OPMENU 0x0098
-
-#define ICH8_FLASH_REG_MAPSIZE 0x00A0
-#define ICH8_FLASH_SECTOR_SIZE 4096
-#define ICH8_GFPREG_BASE_MASK 0x1FFF
-#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */
+#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define ICH_CYCLE_READ 0x0
+#define ICH_CYCLE_RESERVED 0x1
+#define ICH_CYCLE_WRITE 0x2
+#define ICH_CYCLE_ERASE 0x3
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_FRACC 0x0050
+#define ICH_FLASH_FREG0 0x0054
+#define ICH_FLASH_FREG1 0x0058
+#define ICH_FLASH_FREG2 0x005C
+#define ICH_FLASH_FREG3 0x0060
+#define ICH_FLASH_FPR0 0x0074
+#define ICH_FLASH_FPR1 0x0078
+#define ICH_FLASH_SSFSTS 0x0090
+#define ICH_FLASH_SSFCTL 0x0092
+#define ICH_FLASH_PREOP 0x0094
+#define ICH_FLASH_OPTYPE 0x0096
+#define ICH_FLASH_OPMENU 0x0098
+
+#define ICH_FLASH_REG_MAPSIZE 0x00A0
+#define ICH_FLASH_SECTOR_SIZE 4096
+#define ICH_GFPREG_BASE_MASK 0x1FFF
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 03294400bc9..73f3a85fd23 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,6 +27,7 @@
*******************************************************************************/
#include "e1000.h"
+#include <net/ip6_checksum.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -35,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -103,6 +104,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
INTEL_E1000_ETHERNET_DEVICE(0x10B9),
INTEL_E1000_ETHERNET_DEVICE(0x10BA),
INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+ INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+ INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+ INTEL_E1000_ETHERNET_DEVICE(0x10C5),
/* required last entry */
{0,}
};
@@ -154,6 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
@@ -285,7 +292,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
flags = IRQF_SHARED;
#ifdef CONFIG_PCI_MSI
- if (adapter->hw.mac_type > e1000_82547_rev_2) {
+ if (adapter->hw.mac_type >= e1000_82571) {
adapter->have_msi = TRUE;
if ((err = pci_enable_msi(adapter->pdev))) {
DPRINTK(PROBE, ERR,
@@ -293,8 +300,14 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
adapter->have_msi = FALSE;
}
}
- if (adapter->have_msi)
+ if (adapter->have_msi) {
flags &= ~IRQF_SHARED;
+ err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+ netdev->name, netdev);
+ if (err)
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate interrupt Error: %d\n", err);
+ } else
#endif
if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
netdev->name, netdev)))
@@ -375,7 +388,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
* e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that the
* driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the netowrk i/f is closed.
+ * of the f/w this means that the network i/f is closed.
*
**/
@@ -416,7 +429,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
* e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
* For ASF and Pass Through versions of f/w this means that
* the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the netowrk i/f is open.
+ * of the f/w this means that the network i/f is open.
*
**/
@@ -426,6 +439,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
uint32_t ctrl_ext;
uint32_t swsm;
uint32_t extcnf;
+
/* Let firmware know the driver has taken over */
switch (adapter->hw.mac_type) {
case e1000_82571:
@@ -601,9 +615,6 @@ void
e1000_reset(struct e1000_adapter *adapter)
{
uint32_t pba, manc;
-#ifdef DISABLE_MULR
- uint32_t tctl;
-#endif
uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
/* Repartition Pba for greater than 9k mtu
@@ -670,12 +681,7 @@ e1000_reset(struct e1000_adapter *adapter)
e1000_reset_hw(&adapter->hw);
if (adapter->hw.mac_type >= e1000_82544)
E1000_WRITE_REG(&adapter->hw, WUC, 0);
-#ifdef DISABLE_MULR
- /* disable Multiple Reads in Transmit Control Register for debugging */
- tctl = E1000_READ_REG(hw, TCTL);
- E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
-#endif
if (e1000_init_hw(&adapter->hw))
DPRINTK(PROBE, ERR, "Hardware Error\n");
e1000_update_mng_vlan(adapter);
@@ -851,9 +857,9 @@ e1000_probe(struct pci_dev *pdev,
(adapter->hw.mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
if (adapter->hw.mac_type > e1000_82547_rev_2)
- netdev->features |= NETIF_F_TSO_IPV6;
+ netdev->features |= NETIF_F_TSO6;
#endif
#endif
if (pci_using_dac)
@@ -967,6 +973,7 @@ e1000_probe(struct pci_dev *pdev,
break;
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
case E1000_DEV_ID_82571EB_QUAD_COPPER:
+ case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
@@ -1278,12 +1285,10 @@ e1000_open(struct net_device *netdev)
return -EBUSY;
/* allocate transmit descriptors */
-
if ((err = e1000_setup_all_tx_resources(adapter)))
goto err_setup_tx;
/* allocate receive descriptors */
-
if ((err = e1000_setup_all_rx_resources(adapter)))
goto err_setup_rx;
@@ -1568,6 +1573,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)
if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
tarc = E1000_READ_REG(hw, TARC0);
+ /* set the speed mode bit, we'll clear it if we're not at
+ * gigabit link later */
tarc |= (1 << 21);
E1000_WRITE_REG(hw, TARC0, tarc);
} else if (hw->mac_type == e1000_80003es2lan) {
@@ -1582,8 +1589,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
e1000_config_collision_dist(hw);
/* Setup Transmit Descriptor Settings for eop descriptor */
- adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
- E1000_TXD_CMD_IFCS;
+ adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+ /* only set IDE if we are delaying interrupts using the timers */
+ if (adapter->tx_int_delay)
+ adapter->txd_cmd |= E1000_TXD_CMD_IDE;
if (hw->mac_type < e1000_82543)
adapter->txd_cmd |= E1000_TXD_CMD_RPS;
@@ -1820,8 +1830,11 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
/* Configure extra packet-split registers */
rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
- /* disable IPv6 packet split support */
- rfctl |= E1000_RFCTL_IPV6_DIS;
+ /* disable packet split support for IPv6 extension headers,
+ * because some malformed IPv6 headers can hang the RX */
+ rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+ E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
rctl |= E1000_RCTL_DTYP_PS;
@@ -1884,7 +1897,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
if (hw->mac_type >= e1000_82540) {
E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
- if (adapter->itr > 1)
+ if (adapter->itr_setting != 0)
E1000_WRITE_REG(hw, ITR,
1000000000 / (adapter->itr * 256));
}
@@ -1894,11 +1907,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
/* Reset delay timers after every interrupt */
ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
#ifdef CONFIG_E1000_NAPI
- /* Auto-Mask interrupts upon ICR read. */
+ /* Auto-Mask interrupts upon ICR access */
ctrl_ext |= E1000_CTRL_EXT_IAME;
+ E1000_WRITE_REG(hw, IAM, 0xffffffff);
#endif
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
- E1000_WRITE_REG(hw, IAM, ~0);
E1000_WRITE_FLUSH(hw);
}
@@ -1937,6 +1950,12 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RXCSUM, rxcsum);
}
+ /* enable early receives on 82573, only takes effect if using > 2048
+ * byte total frame size. for example only for jumbo frames */
+#define E1000_ERT_2048 0x100
+ if (hw->mac_type == e1000_82573)
+ E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+
/* Enable Receives */
E1000_WRITE_REG(hw, RCTL, rctl);
}
@@ -1990,10 +2009,13 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
buffer_info->dma,
buffer_info->length,
PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
}
- if (buffer_info->skb)
+ if (buffer_info->skb) {
dev_kfree_skb_any(buffer_info->skb);
- memset(buffer_info, 0, sizeof(struct e1000_buffer));
+ buffer_info->skb = NULL;
+ }
+ /* buffer_info must be completely set up in the transmit path */
}
/**
@@ -2417,6 +2439,7 @@ e1000_watchdog(unsigned long data)
DPRINTK(LINK, INFO,
"Gigabit has been disabled, downgrading speed\n");
}
+
if (adapter->hw.mac_type == e1000_82573) {
e1000_enable_tx_pkt_filtering(&adapter->hw);
if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2461,13 +2484,12 @@ e1000_watchdog(unsigned long data)
if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
txb2b == 0) {
-#define SPEED_MODE_BIT (1 << 21)
uint32_t tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
- tarc0 &= ~SPEED_MODE_BIT;
+ tarc0 &= ~(1 << 21);
E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
}
-
+
#ifdef NETIF_F_TSO
/* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues */
@@ -2479,9 +2501,15 @@ e1000_watchdog(unsigned long data)
DPRINTK(PROBE,INFO,
"10/100 speed: disabling TSO\n");
netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ netdev->features &= ~NETIF_F_TSO6;
+#endif
break;
case SPEED_1000:
netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+ netdev->features |= NETIF_F_TSO6;
+#endif
break;
default:
/* oops */
@@ -2548,19 +2576,6 @@ e1000_watchdog(unsigned long data)
}
}
- /* Dynamic mode for Interrupt Throttle Rate (ITR) */
- if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
- /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
- * asymmetrical Tx or Rx gets ITR=8000; everyone
- * else is between 2000-8000. */
- uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
- uint32_t dif = (adapter->gotcl > adapter->gorcl ?
- adapter->gotcl - adapter->gorcl :
- adapter->gorcl - adapter->gotcl) / 10000;
- uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
- }
-
/* Cause software interrupt to ensure rx ring is cleaned */
E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
@@ -2576,6 +2591,135 @@ e1000_watchdog(unsigned long data)
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ * this functionality is controlled by the InterruptThrottleRate module
+ * parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+ uint16_t itr_setting,
+ int packets,
+ int bytes)
+{
+ unsigned int retval = itr_setting;
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ goto update_itr_done;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if ((packets < 5) && (bytes > 512))
+ retval = low_latency;
+ break;
+ case low_latency: /* 50 usec aka 20000 ints/s */
+ if (bytes > 10000) {
+ if ((packets < 10) ||
+ ((bytes/packets) > 1200))
+ retval = bulk_latency;
+ else if ((packets > 35))
+ retval = lowest_latency;
+ } else if (packets <= 2 && bytes < 512)
+ retval = lowest_latency;
+ break;
+ case bulk_latency: /* 250 usec aka 4000 ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ retval = low_latency;
+ } else {
+ if (bytes < 6000)
+ retval = low_latency;
+ }
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ uint16_t current_itr;
+ uint32_t new_itr = adapter->itr;
+
+ if (unlikely(hw->mac_type < e1000_82540))
+ return;
+
+ /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+ if (unlikely(adapter->link_speed != SPEED_1000)) {
+ current_itr = 0;
+ new_itr = 4000;
+ goto set_itr_now;
+ }
+
+ adapter->tx_itr = e1000_update_itr(adapter,
+ adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter,
+ adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
+
+ current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+ /* conservative mode eliminates the lowest_latency setting */
+ if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+ current_itr = low_latency;
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 70000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ new_itr = 4000;
+ break;
+ default:
+ break;
+ }
+
+set_itr_now:
+ if (new_itr != adapter->itr) {
+ /* this attempts to bias the interrupt rate towards Bulk
+ * by adding intermediate steps when interrupt rate is
+ * increasing */
+ new_itr = new_itr > adapter->itr ?
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
+ adapter->itr = new_itr;
+ E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+ }
+
+ return;
+}
+
#define E1000_TX_FLAGS_CSUM 0x00000001
#define E1000_TX_FLAGS_VLAN 0x00000002
#define E1000_TX_FLAGS_TSO 0x00000004
@@ -2616,7 +2760,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
} else if (skb->protocol == htons(ETH_P_IPV6)) {
skb->nh.ipv6h->payload_len = 0;
skb->h.th->check =
@@ -2652,6 +2796,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->cmd_and_length = cpu_to_le32(cmd_length);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i;
@@ -2680,12 +2825,13 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
if (unlikely(++i == tx_ring->count)) i = 0;
tx_ring->next_to_use = i;
@@ -2754,6 +2900,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
len -= size;
offset += size;
@@ -2793,6 +2940,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
+ buffer_info->next_to_watch = i;
len -= size;
offset += size;
@@ -2858,6 +3006,9 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+ /* we need this if more than one processor can write to our tail
+ * at a time, it syncronizes IO on IA64/Altix systems */
+ mmiowb();
}
/**
@@ -2951,6 +3102,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
/* A reprieve! */
netif_start_queue(netdev);
+ ++adapter->restart_queue;
return 0;
}
@@ -3009,9 +3161,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
- * points to just header, pull a few bytes of payload from
- * frags into skb->data */
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ * points to just header, pull a few bytes of payload from
+ * frags into skb->data */
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
switch (adapter->hw.mac_type) {
@@ -3316,12 +3468,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.roc += E1000_READ_REG(hw, ROC);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
- adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
- adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
- adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
- adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
- adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+ adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+ adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+ adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+ adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+ adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+ adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
}
adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
@@ -3352,12 +3504,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.tpr += E1000_READ_REG(hw, TPR);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
- adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
- adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
- adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
- adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
- adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+ adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+ adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+ adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+ adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+ adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+ adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
}
adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
@@ -3383,18 +3535,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
if (adapter->hw.mac_type != e1000_ich8lan) {
- adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
- adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
- adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
- adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
- adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
- adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
- adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+ adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+ adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+ adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+ adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+ adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+ adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+ adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
}
}
/* Fill out the OS statistics structure */
-
adapter->net_stats.rx_packets = adapter->stats.gprc;
adapter->net_stats.tx_packets = adapter->stats.gptc;
adapter->net_stats.rx_bytes = adapter->stats.gorcl;
@@ -3426,7 +3577,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
/* Tx Dropped needs to be maintained elsewhere */
/* Phy Stats */
-
if (hw->media_type == e1000_media_type_copper) {
if ((adapter->link_speed == SPEED_1000) &&
(!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
@@ -3442,6 +3592,95 @@ e1000_update_stats(struct e1000_adapter *adapter)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
+#ifdef CONFIG_PCI_MSI
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static
+irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+ int i;
+#endif
+
+ /* this code avoids the read of ICR but has to get 1000 interrupts
+ * at every link change event before it will notice the change */
+ if (++adapter->detect_link >= 1000) {
+ uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifdef CONFIG_E1000_NAPI
+ /* read ICR disables interrupts using IAM, so keep up with our
+ * enable/disable accounting */
+ atomic_inc(&adapter->irq_sem);
+#endif
+ adapter->detect_link = 0;
+ if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+ (icr & E1000_ICR_INT_ASSERTED)) {
+ hw->get_link_status = 1;
+ /* 80003ES2LAN workaround--
+ * For packet buffer work-around on link down event;
+ * disable receives here in the ISR and
+ * reset adapter in watchdog
+ */
+ if (netif_carrier_ok(netdev) &&
+ (adapter->hw.mac_type == e1000_80003es2lan)) {
+ /* disable receives */
+ uint32_t rctl = E1000_READ_REG(hw, RCTL);
+ E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+ }
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->flags))
+ mod_timer(&adapter->watchdog_timer,
+ jiffies + 1);
+ }
+ } else {
+ E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+ E1000_ICR_LSC)));
+ /* bummer we have to flush here, but things break otherwise as
+ * some event appears to be lost or delayed and throughput
+ * drops. In almost all tests this flush is un-necessary */
+ E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_NAPI
+ /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+ * masked. No need for the IMC write, but it does mean we
+ * should account for it ASAP. */
+ atomic_inc(&adapter->irq_sem);
+#endif
+ }
+
+#ifdef CONFIG_E1000_NAPI
+ if (likely(netif_rx_schedule_prep(netdev))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __netif_rx_schedule(netdev);
+ } else
+ e1000_irq_enable(adapter);
+#else
+ adapter->total_tx_bytes = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_packets = 0;
+
+ for (i = 0; i < E1000_MAX_INTR; i++)
+ if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+ !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+ break;
+
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+#endif
+
+ return IRQ_HANDLED;
+}
+#endif
/**
* e1000_intr - Interrupt Handler
@@ -3458,7 +3697,17 @@ e1000_intr(int irq, void *data)
uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
int i;
-#else
+#endif
+ if (unlikely(!icr))
+ return IRQ_NONE; /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ * not set, then the adapter didn't send an interrupt */
+ if (unlikely(hw->mac_type >= e1000_82571 &&
+ !(icr & E1000_ICR_INT_ASSERTED)))
+ return IRQ_NONE;
+
/* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write, but it does mean we should
@@ -3467,14 +3716,6 @@ e1000_intr(int irq, void *data)
atomic_inc(&adapter->irq_sem);
#endif
- if (unlikely(!icr)) {
-#ifdef CONFIG_E1000_NAPI
- if (hw->mac_type >= e1000_82571)
- e1000_irq_enable(adapter);
-#endif
- return IRQ_NONE; /* Not our interrupt */
- }
-
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1;
/* 80003ES2LAN workaround--
@@ -3495,13 +3736,20 @@ e1000_intr(int irq, void *data)
#ifdef CONFIG_E1000_NAPI
if (unlikely(hw->mac_type < e1000_82571)) {
+ /* disable interrupts, without the synchronize_irq bit */
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
- if (likely(netif_rx_schedule_prep(netdev)))
+ if (likely(netif_rx_schedule_prep(netdev))) {
+ adapter->total_tx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
__netif_rx_schedule(netdev);
- else
+ } else
+ /* this really should not happen! if it does it is basically a
+ * bug, but not a hard error, so enable ints and continue */
e1000_irq_enable(adapter);
#else
/* Writing IMC and IMS is needed for 82547.
@@ -3519,16 +3767,23 @@ e1000_intr(int irq, void *data)
E1000_WRITE_REG(hw, IMC, ~0);
}
+ adapter->total_tx_bytes = 0;
+ adapter->total_rx_bytes = 0;
+ adapter->total_tx_packets = 0;
+ adapter->total_rx_packets = 0;
+
for (i = 0; i < E1000_MAX_INTR; i++)
if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
!e1000_clean_tx_irq(adapter, adapter->tx_ring)))
break;
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
+
if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
e1000_irq_enable(adapter);
#endif
-
return IRQ_HANDLED;
}
@@ -3572,6 +3827,8 @@ e1000_clean(struct net_device *poll_dev, int *budget)
if ((!tx_cleaned && (work_done == 0)) ||
!netif_running(poll_dev)) {
quit_polling:
+ if (likely(adapter->itr_setting & 3))
+ e1000_set_itr(adapter);
netif_rx_complete(poll_dev);
e1000_irq_enable(adapter);
return 0;
@@ -3598,6 +3855,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
unsigned int count = 0;
#endif
boolean_t cleaned = FALSE;
+ unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3609,13 +3867,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
+ if (cleaned) {
+ /* this packet count is wrong for TSO but has a
+ * tendency to make dynamic ITR change more
+ * towards bulk */
+ total_tx_packets++;
+ total_tx_bytes += buffer_info->skb->len;
+ }
e1000_unmap_and_free_tx_resource(adapter, buffer_info);
- memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+ tx_desc->upper.data = 0;
if (unlikely(++i == tx_ring->count)) i = 0;
}
-
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
#ifdef CONFIG_E1000_NAPI
@@ -3634,8 +3898,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
* sees the new next_to_clean.
*/
smp_mb();
- if (netif_queue_stopped(netdev))
+ if (netif_queue_stopped(netdev)) {
netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
}
if (adapter->detect_tx_hung) {
@@ -3673,6 +3939,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
netif_stop_queue(netdev);
}
}
+ adapter->total_tx_bytes += total_tx_bytes;
+ adapter->total_tx_packets += total_tx_packets;
return cleaned;
}
@@ -3752,6 +4020,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
unsigned int i;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3760,6 +4029,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
while (rx_desc->status & E1000_RXD_STAT_DD) {
struct sk_buff *skb;
u8 status;
+
#ifdef CONFIG_E1000_NAPI
if (*work_done >= work_to_do)
break;
@@ -3817,6 +4087,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
* done after the TBI_ACCEPT workaround above */
length -= 4;
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += length;
+ total_rx_packets++;
+
/* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack */
@@ -3832,12 +4106,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
- skb_put(skb, length);
}
- } else
- skb_put(skb, length);
-
+ /* else just continue with the old one */
+ }
/* end copybreak code */
+ skb_put(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
@@ -3886,6 +4159,8 @@ next_desc:
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
return cleaned;
}
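
The copybreak change above copies very small received frames into a freshly allocated small skb and keeps the original, full-sized buffer in the ring for reuse, so tiny packets no longer tie up large receive buffers. A minimal sketch of that decision, under assumed names and an assumed size cutoff (the real cutoff is not visible in this hunk):

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>
        #include <linux/string.h>

        #define COPYBREAK_CUTOFF 256    /* assumed cutoff, not the driver's */

        /* Illustrative copybreak sketch: for small frames, copy the payload
         * into a small skb and let the caller recycle the original buffer;
         * for larger frames, hand the original skb up unchanged.  Returns
         * the skb to pass to the stack and sets *recycle when the original
         * buffer can stay in the ring. */
        static struct sk_buff *example_copybreak(struct net_device *netdev,
                                                 struct sk_buff *orig,
                                                 unsigned int length,
                                                 struct sk_buff **recycle)
        {
                struct sk_buff *small;

                *recycle = NULL;
                if (length >= COPYBREAK_CUTOFF)
                        return orig;

                small = netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
                if (!small)
                        return orig;    /* allocation failed: keep the big skb */

                skb_reserve(small, NET_IP_ALIGN);
                memcpy(skb_put(small, length), orig->data, length);
                *recycle = orig;        /* ring keeps this buffer for reuse */
                return small;
        }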
@@ -3915,6 +4190,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
uint32_t length, staterr;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
+ unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
@@ -3999,7 +4275,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
goto copydone;
} /* if */
}
-
+
for (j = 0; j < adapter->rx_ps_pages; j++) {
if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
break;
@@ -4019,6 +4295,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
pskb_trim(skb, skb->len - 4);
copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
e1000_rx_checksum(adapter, staterr,
le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
skb->protocol = eth_type_trans(skb, netdev);
@@ -4067,6 +4346,8 @@ next_desc:
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+ adapter->total_rx_packets += total_rx_packets;
+ adapter->total_rx_bytes += total_rx_bytes;
return cleaned;
}
@@ -4234,7 +4515,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
}
skb = netdev_alloc_skb(netdev,
- adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+ adapter->rx_ps_bsize0 + NET_IP_ALIGN);
if (unlikely(!skb)) {
adapter->alloc_rx_buff_failed++;
@@ -4511,7 +4792,6 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
return E1000_SUCCESS;
}
-
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
{
@@ -4534,12 +4814,12 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
- /* enable VLAN receive filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- rctl |= E1000_RCTL_VFE;
- rctl &= ~E1000_RCTL_CFIEN;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- e1000_update_mng_vlan(adapter);
+ /* enable VLAN receive filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ e1000_update_mng_vlan(adapter);
}
} else {
/* disable VLAN tag insert/strip */
@@ -4548,14 +4828,16 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
if (adapter->hw.mac_type != e1000_ich8lan) {
- /* disable VLAN filtering */
- rctl = E1000_READ_REG(&adapter->hw, RCTL);
- rctl &= ~E1000_RCTL_VFE;
- E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
- if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
+ /* disable VLAN filtering */
+ rctl = E1000_READ_REG(&adapter->hw, RCTL);
+ rctl &= ~E1000_RCTL_VFE;
+ E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+ if (adapter->mng_vlan_id !=
+ (uint16_t)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev,
+ adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+ }
}
}
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index a464cb29062..18afc0c25da 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -107,17 +107,16 @@ typedef enum {
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
-#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \
writel((value), ((a)->flash_address + reg)))
-#define E1000_READ_ICH8_REG(a, reg) ( \
+#define E1000_READ_ICH_FLASH_REG(a, reg) ( \
readl((a)->flash_address + reg))
-#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \
writew((value), ((a)->flash_address + reg)))
-#define E1000_READ_ICH8_REG16(a, reg) ( \
+#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \
readw((a)->flash_address + reg))
-
#endif /* _E1000_OSDEP_H_ */
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 9c3c1acefcc..cbfcd7f2889 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -44,16 +44,6 @@
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
-/* Module Parameters are always initialized to -1, so that the driver
- * can tell the difference between no user specified value or the
- * user asking for the default value.
- * The true default values are loaded in when e1000_check_options is called.
- *
- * This is a GCC extension to ANSI C.
- * See the item "Labeled Elements in Initializers" in the section
- * "Extensions to the C Language Family" of the GCC documentation.
- */
-
#define E1000_PARAM(X, desc) \
static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static int num_##X = 0; \
@@ -67,7 +57,6 @@
*
* Default Value: 256
*/
-
E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
@@ -77,7 +66,6 @@ E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
*
* Default Value: 256
*/
-
E1000_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Speed Override
@@ -90,7 +78,6 @@ E1000_PARAM(RxDescriptors, "Number of receive descriptors");
*
* Default Value: 0
*/
-
E1000_PARAM(Speed, "Speed setting");
/* User Specified Duplex Override
@@ -102,7 +89,6 @@ E1000_PARAM(Speed, "Speed setting");
*
* Default Value: 0
*/
-
E1000_PARAM(Duplex, "Duplex setting");
/* Auto-negotiation Advertisement Override
@@ -119,8 +105,9 @@ E1000_PARAM(Duplex, "Duplex setting");
*
* Default Value: 0x2F (copper); 0x20 (fiber)
*/
-
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT 0x2F
+#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -132,8 +119,8 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
*
* Default Value: Read flow control settings from the EEPROM
*/
-
E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
@@ -144,53 +131,54 @@ E1000_PARAM(FlowControl, "Flow Control setting");
*
* Default Value: 1
*/
-
E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay typically needs to be set to a non-zero value
*
* Valid Range: 0-65535
- *
- * Default Value: 64
*/
-
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 8
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
- *
- * Default Value: 0
*/
-
E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 32
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
+ * The hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
- *
- * Default Value: 0
*/
-
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
- *
- * Default Value: 128
*/
-
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
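
All four delay options above are expressed in units of 1.024 microseconds, so a delay requested in plain microseconds has to be scaled before it is programmed. A small illustrative helper (the truncating rounding is an assumption):

        #include <linux/types.h>

        /* Convert a delay in microseconds to the 1.024 us (1024 ns) units
         * used by the *IntDelay options above. */
        static u16 usecs_to_1024ns_units(u32 usecs)
        {
                return (u16)((usecs * 1000) / 1024);
        }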
/* Interrupt Throttle Rate (interrupts/sec)
*
- * Valid Range: 100-100000 (0=off, 1=dynamic)
- *
- * Default Value: 8000
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
*/
-
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 3
+#define MAX_ITR 100000
+#define MIN_ITR 100
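
InterruptThrottleRate now defaults to 3 (dynamic conservative) and also accepts 0 (off), 1 (dynamic), or a fixed rate between MIN_ITR and MAX_ITR interrupts per second, set via the InterruptThrottleRate module option. A fixed rate eventually has to become an interval in the ITR register; my understanding is that the register counts 256 ns increments, so the conversion would look like the sketch below (verify the unit against the controller data sheet before relying on it):

        #include <linux/types.h>

        /* Illustrative conversion from a requested rate in interrupts/sec to
         * an ITR register interval, assuming 256 ns register units; 0 leaves
         * throttling disabled. */
        static u32 itr_rate_to_register(u32 ints_per_sec)
        {
                if (ints_per_sec == 0)
                        return 0;
                return 1000000000 / (ints_per_sec * 256);
        }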
/* Enable Smart Power Down of the PHY
*
@@ -198,7 +186,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
*
* Default Value: 0 (disabled)
*/
-
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
/* Enable Kumeran Lock Loss workaround
@@ -207,33 +194,8 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
*
* Default Value: 1 (enabled)
*/
-
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
-
-#define DEFAULT_RDTR 0
-#define MAX_RXDELAY 0xFFFF
-#define MIN_RXDELAY 0
-
-#define DEFAULT_RADV 128
-#define MAX_RXABSDELAY 0xFFFF
-#define MIN_RXABSDELAY 0
-
-#define DEFAULT_TIDV 64
-#define MAX_TXDELAY 0xFFFF
-#define MIN_TXDELAY 0
-
-#define DEFAULT_TADV 64
-#define MAX_TXABSDELAY 0xFFFF
-#define MIN_TXABSDELAY 0
-
-#define DEFAULT_ITR 8000
-#define MAX_ITR 100000
-#define MIN_ITR 100
-
struct e1000_option {
enum { enable_option, range_option, list_option } type;
char *name;
@@ -510,15 +472,27 @@ e1000_check_options(struct e1000_adapter *adapter)
break;
case 1:
DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
- opt.name);
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
+ break;
+ case 3:
+ DPRINTK(PROBE, INFO,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
+ adapter->itr_setting = adapter->itr;
+ adapter->itr = 20000;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
+ /* save the user's setting; the dynamic modes overwrite itr */
+ adapter->itr_setting = adapter->itr;
break;
}
} else {
- adapter->itr = opt.def;
+ adapter->itr_setting = opt.def;
+ adapter->itr = 20000;
}
}
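
The net effect of the switch above is that itr_setting remembers what the user asked for (a dynamic mode or a fixed rate) while itr holds the rate that is actually programmed: the dynamic modes start at 20000 interrupts/sec and are adjusted later, a fixed rate is used as-is after validation, and an absent option falls back to opt.def (presumably DEFAULT_ITR, i.e. 3) with itr likewise started at 20000. A compressed restatement of that mapping; the values come from the hunk itself, the helper and its signature are illustrative, and the 0 = off case handled earlier in the switch is omitted:

        #include <linux/types.h>

        static void example_map_itr_option(int user_set, u32 value,
                                           u32 *itr_setting, u32 *itr)
        {
                if (!user_set) {                /* option not given */
                        *itr_setting = 3;       /* assuming opt.def == DEFAULT_ITR */
                        *itr = 20000;
                } else if (value == 1 || value == 3) {
                        *itr_setting = value;   /* remember the dynamic mode */
                        *itr = 20000;           /* initial interrupts/sec */
                } else {
                        *itr_setting = value;   /* fixed, already-validated rate */
                        *itr = value;
                }
        }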
{ /* Smart Power Down */