Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--	drivers/net/e1000/e1000_main.c	| 490 ++++++++++++++++++++++++++++++++++---------
1 file changed, 386 insertions(+), 104 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 03294400bc9..73f3a85fd23 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,6 +27,7 @@
 *******************************************************************************/

 #include "e1000.h"
+#include <net/ip6_checksum.h>

 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -35,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -103,6 +104,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
 	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
 	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C5),
 	/* required last entry */
 	{0,}
 };
@@ -154,6 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
				     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -285,7 +292,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)

 	flags = IRQF_SHARED;
 #ifdef CONFIG_PCI_MSI
-	if (adapter->hw.mac_type > e1000_82547_rev_2) {
+	if (adapter->hw.mac_type >= e1000_82571) {
 		adapter->have_msi = TRUE;
 		if ((err = pci_enable_msi(adapter->pdev))) {
 			DPRINTK(PROBE, ERR,
@@ -293,8 +300,14 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 			adapter->have_msi = FALSE;
 		}
 	}
-	if (adapter->have_msi)
+	if (adapter->have_msi) {
 		flags &= ~IRQF_SHARED;
+		err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+				  netdev->name, netdev);
+		if (err)
+			DPRINTK(PROBE, ERR,
+			       "Unable to allocate interrupt Error: %d\n", err);
+	} else
 #endif
 	if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
			       netdev->name, netdev)))
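Note (illustration only, not part of the patch): the e1000_request_irq() hunk above follows a common probe-time pattern, i.e. enable MSI on hardware known to support it, register a dedicated non-shared handler, and otherwise fall back to a shared INTx handler. A hedged sketch of that pattern for a hypothetical "foo" driver is below; it is slightly more defensive than the hunk (it also unwinds MSI when request_irq() fails), and all foo_* names are invented.

    /* Kernel fragment for illustration; hypothetical driver, not buildable
     * as-is.  pci_enable_msi()/request_irq() are the real 2.6-era APIs. */
    static int foo_request_irq(struct foo_adapter *adapter)
    {
    	struct net_device *netdev = adapter->netdev;
    	unsigned long flags = IRQF_SHARED;	/* legacy INTx may be shared */
    	int err;

    	if (adapter->msi_capable && !pci_enable_msi(adapter->pdev)) {
    		adapter->have_msi = 1;
    		/* an MSI vector is exclusive to this device, so no
    		 * IRQF_SHARED and a leaner handler can be used */
    		err = request_irq(adapter->pdev->irq, foo_intr_msi, 0,
    				  netdev->name, netdev);
    		if (!err)
    			return 0;
    		/* undo MSI and retry with the shared legacy handler */
    		pci_disable_msi(adapter->pdev);
    		adapter->have_msi = 0;
    	}
    	return request_irq(adapter->pdev->irq, foo_intr, flags,
    			   netdev->name, netdev);
    }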
@@ -375,7 +388,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573) i
- * of the f/w this means that the netowrk i/f is closed.
+ * of the f/w this means that the network i/f is closed.
 *
 **/
@@ -416,7 +429,7 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
- * of the f/w this means that the netowrk i/f is open.
+ * of the f/w this means that the network i/f is open.
 *
 **/
@@ -426,6 +439,7 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
 	uint32_t ctrl_ext;
 	uint32_t swsm;
 	uint32_t extcnf;
+
 	/* Let firmware know the driver has taken over */
 	switch (adapter->hw.mac_type) {
 	case e1000_82571:
@@ -601,9 +615,6 @@
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
 	uint32_t pba, manc;
-#ifdef DISABLE_MULR
-	uint32_t tctl;
-#endif
 	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;

 	/* Repartition Pba for greater than 9k mtu
@@ -670,12 +681,7 @@ e1000_reset(struct e1000_adapter *adapter)

 	e1000_reset_hw(&adapter->hw);
 	if (adapter->hw.mac_type >= e1000_82544)
 		E1000_WRITE_REG(&adapter->hw, WUC, 0);
-#ifdef DISABLE_MULR
-	/* disable Multiple Reads in Transmit Control Register for debugging */
-	tctl = E1000_READ_REG(hw, TCTL);
-	E1000_WRITE_REG(hw, TCTL, tctl & ~E1000_TCTL_MULR);
-#endif
 	if (e1000_init_hw(&adapter->hw))
 		DPRINTK(PROBE, ERR, "Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
@@ -851,9 +857,9 @@ e1000_probe(struct pci_dev *pdev,
 		       (adapter->hw.mac_type != e1000_82547))
 			netdev->features |= NETIF_F_TSO;

-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
 		if (adapter->hw.mac_type > e1000_82547_rev_2)
-			netdev->features |= NETIF_F_TSO_IPV6;
+			netdev->features |= NETIF_F_TSO6;
 #endif
 #endif
 	if (pci_using_dac)
@@ -967,6 +973,7 @@ e1000_probe(struct pci_dev *pdev,
 		break;
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;
@@ -1278,12 +1285,10 @@ e1000_open(struct net_device *netdev)
 		return -EBUSY;

 	/* allocate transmit descriptors */
-
 	if ((err = e1000_setup_all_tx_resources(adapter)))
 		goto err_setup_tx;

 	/* allocate receive descriptors */
-
 	if ((err = e1000_setup_all_rx_resources(adapter)))
 		goto err_setup_rx;
@@ -1568,6 +1573,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)

 	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
 		tarc = E1000_READ_REG(hw, TARC0);
+		/* set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later */
 		tarc |= (1 << 21);
 		E1000_WRITE_REG(hw, TARC0, tarc);
 	} else if (hw->mac_type == e1000_80003es2lan) {
@@ -1582,8 +1589,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	e1000_config_collision_dist(hw);

 	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
-		E1000_TXD_CMD_IFCS;
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

 	if (hw->mac_type < e1000_82543)
 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
@@ -1820,8 +1830,11 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
 		/* Configure extra packet-split registers */
 		rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
 		rfctl |= E1000_RFCTL_EXTEN;
-		/* disable IPv6 packet split support */
-		rfctl |= E1000_RFCTL_IPV6_DIS;
+		/* disable packet split support for IPv6 extension headers,
+		 * because some malformed IPv6 headers can hang the RX */
+		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
 		E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);

 		rctl |= E1000_RCTL_DTYP_PS;
@@ -1884,7 +1897,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)

 	if (hw->mac_type >= e1000_82540) {
 		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-		if (adapter->itr > 1)
+		if (adapter->itr_setting != 0)
 			E1000_WRITE_REG(hw, ITR,
 				1000000000 / (adapter->itr * 256));
 	}
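The ITR write above deserves a worked example (illustration only, not part of the patch): the ITR register holds the minimum gap between interrupts in 256 ns units, so the driver converts an interrupts-per-second target with 1000000000 / (rate * 256). This standalone program prints the register values for the rates this patch uses:

    /* Worked example of the driver's ITR conversion formula. */
    #include <stdio.h>

    int main(void)
    {
    	/* interrupt rates (ints/sec) that appear in this patch */
    	unsigned int rates[] = { 4000, 20000, 70000 };
    	unsigned int i;

    	for (i = 0; i < 3; i++) {
    		/* 10^9 ns per second / (rate * 256 ns per register unit) */
    		unsigned int reg = 1000000000 / (rates[i] * 256);
    		printf("%5u ints/s -> ITR = %u (0x%x)\n", rates[i], reg, reg);
    	}
    	return 0;
    }

For example, the bulk rate of 4000 ints/s programs ITR to 976 units, i.e. roughly 250 us between interrupts, matching the "250 usec aka 4000 ints/s" comment further down.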
@@ -1894,11 +1907,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 		/* Reset delay timers after every interrupt */
 		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 #ifdef CONFIG_E1000_NAPI
-		/* Auto-Mask interrupts upon ICR read. */
+		/* Auto-Mask interrupts upon ICR access */
 		ctrl_ext |= E1000_CTRL_EXT_IAME;
+		E1000_WRITE_REG(hw, IAM, 0xffffffff);
 #endif
 		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
-		E1000_WRITE_REG(hw, IAM, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
@@ -1937,6 +1950,12 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
 	}

+	/* enable early receives on 82573, only takes effect if using > 2048
+	 * byte total frame size.  for example only for jumbo frames */
+#define E1000_ERT_2048 0x100
+	if (hw->mac_type == e1000_82573)
+		E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
+
 	/* Enable Receives */
 	E1000_WRITE_REG(hw, RCTL, rctl);
 }
@@ -1990,10 +2009,13 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 				buffer_info->dma,
 				buffer_info->length,
 				PCI_DMA_TODEVICE);
+		buffer_info->dma = 0;
 	}
-	if (buffer_info->skb)
+	if (buffer_info->skb) {
 		dev_kfree_skb_any(buffer_info->skb);
-	memset(buffer_info, 0, sizeof(struct e1000_buffer));
+		buffer_info->skb = NULL;
+	}
+	/* buffer_info must be completely set up in the transmit path */
 }
@@ -2417,6 +2439,7 @@ e1000_watchdog(unsigned long data)
 			DPRINTK(LINK, INFO,
 				"Gigabit has been disabled, downgrading speed\n");
 		}
+
 	if (adapter->hw.mac_type == e1000_82573) {
 		e1000_enable_tx_pkt_filtering(&adapter->hw);
 		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
@@ -2461,13 +2484,12 @@ e1000_watchdog(unsigned long data)
 			if ((adapter->hw.mac_type == e1000_82571 ||
 			     adapter->hw.mac_type == e1000_82572) &&
 			    txb2b == 0) {
-#define SPEED_MODE_BIT (1 << 21)
 				uint32_t tarc0;
 				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
-				tarc0 &= ~SPEED_MODE_BIT;
+				tarc0 &= ~(1 << 21);
 				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
 			}
-
+
 #ifdef NETIF_F_TSO
 			/* disable TSO for pcie and 10/100 speeds, to avoid
 			 * some hardware issues */
@@ -2479,9 +2501,15 @@ e1000_watchdog(unsigned long data)
 				DPRINTK(PROBE,INFO,
 				"10/100 speed: disabling TSO\n");
 				netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+				netdev->features &= ~NETIF_F_TSO6;
+#endif
 				break;
 			case SPEED_1000:
 				netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+				netdev->features |= NETIF_F_TSO6;
+#endif
 				break;
 			default:
 				/* oops */
@@ -2548,19 +2576,6 @@ e1000_watchdog(unsigned long data)
 		}
 	}

-	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
-	if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
-		/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
-		 * asymmetrical Tx or Rx gets ITR=8000; everyone
-		 * else is between 2000-8000. */
-		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
-			adapter->gotcl - adapter->gorcl :
-			adapter->gorcl - adapter->gotcl) / 10000;
-		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
-		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
-	}
-
 	/* Cause software interrupt to ensure rx ring is cleaned */
 	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
@@ -2576,6 +2591,135 @@ e1000_watchdog(unsigned long data)
 	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 }

+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      this functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+				     uint16_t itr_setting,
+				     int packets,
+				     int bytes)
+{
+	unsigned int retval = itr_setting;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		goto update_itr_done;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		if ((packets < 5) && (bytes > 512))
+			retval = low_latency;
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			if ((packets < 10) ||
+			     ((bytes/packets) > 1200))
+				retval = bulk_latency;
+			else if ((packets > 35))
+				retval = lowest_latency;
+		} else if (packets <= 2 && bytes < 512)
+			retval = lowest_latency;
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35)
+				retval = low_latency;
+		} else {
+			if (bytes < 6000)
+				retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	uint16_t current_itr;
+	uint32_t new_itr = adapter->itr;
+
+	if (unlikely(hw->mac_type < e1000_82540))
+		return;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (unlikely(adapter->link_speed != SPEED_1000)) {
+		current_itr = 0;
+		new_itr = 4000;
+		goto set_itr_now;
+	}
+
+	adapter->tx_itr = e1000_update_itr(adapter,
+				    adapter->tx_itr,
+				    adapter->total_tx_packets,
+				    adapter->total_tx_bytes);
+	adapter->rx_itr = e1000_update_itr(adapter,
+				    adapter->rx_itr,
+				    adapter->total_rx_packets,
+				    adapter->total_rx_bytes);
+
+	current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+	/* conservative mode eliminates the lowest_latency setting */
+	if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+		current_itr = low_latency;
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = 70000;
+		break;
+	case low_latency:
+		new_itr = 20000; /* aka hwitr = ~200 */
+		break;
+	case bulk_latency:
+		new_itr = 4000;
+		break;
+	default:
+		break;
+	}

set_itr_now:
+	if (new_itr != adapter->itr) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing */
+		new_itr = new_itr > adapter->itr ?
+			     min(adapter->itr + (new_itr >> 2), new_itr) :
+			     new_itr;
+		adapter->itr = new_itr;
+		E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+	}
+
+	return;
+}
+
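To make the new classifier easier to follow, here is a user-space re-statement of e1000_update_itr()'s thresholds (illustration only; the enum values mirror the patch, everything else is a toy):

    /* Illustration only: the classification thresholds from the hunk above,
     * restated so the latency-range transitions can be tried on sample data. */
    #include <stdio.h>

    enum latency_range { lowest_latency, low_latency, bulk_latency };

    static enum latency_range update_itr(enum latency_range cur,
    				     int packets, int bytes)
    {
    	if (packets == 0)
    		return cur;
    	switch (cur) {
    	case lowest_latency:
    		if (packets < 5 && bytes > 512)
    			return low_latency;	/* few but large packets */
    		break;
    	case low_latency:
    		if (bytes > 10000) {
    			if (packets < 10 || bytes / packets > 1200)
    				return bulk_latency;	/* large, streaming */
    			if (packets > 35)
    				return lowest_latency;	/* many tiny packets */
    		} else if (packets <= 2 && bytes < 512)
    			return lowest_latency;
    		break;
    	case bulk_latency:
    		if (bytes > 25000 ? packets > 35 : bytes < 6000)
    			return low_latency;
    		break;
    	}
    	return cur;
    }

    int main(void)
    {
    	/* a burst of few, full-size frames should settle in bulk_latency */
    	enum latency_range r = update_itr(low_latency, 8, 12000);
    	printf("after 8 pkts / 12000 bytes: range %d (2 = bulk)\n", r);
    	return 0;
    }

Running it shows a 12000-byte, 8-packet interval pushing low_latency traffic into bulk_latency, which e1000_set_itr() then serves at 4000 ints/s.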
 #define E1000_TX_FLAGS_CSUM		0x00000001
 #define E1000_TX_FLAGS_VLAN		0x00000002
 #define E1000_TX_FLAGS_TSO		0x00000004
@@ -2616,7 +2760,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
						   0);
 		cmd_length = E1000_TXD_CMD_IP;
 		ipcse = skb->h.raw - skb->data - 1;
-#ifdef NETIF_F_TSO_IPV6
+#ifdef NETIF_F_TSO6
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
 		skb->nh.ipv6h->payload_len = 0;
 		skb->h.th->check =
@@ -2652,6 +2796,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;

 		if (++i == tx_ring->count) i = 0;
 		tx_ring->next_to_use = i;
@@ -2680,12 +2825,13 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

 		context_desc->upper_setup.tcp_fields.tucss = css;
-		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
 		context_desc->upper_setup.tcp_fields.tucse = 0;
 		context_desc->tcp_seg_setup.data = 0;
 		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;

 		if (unlikely(++i == tx_ring->count)) i = 0;
 		tx_ring->next_to_use = i;
@@ -2754,6 +2900,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
				size,
				PCI_DMA_TODEVICE);
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;

 		len -= size;
 		offset += size;
@@ -2793,6 +2940,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
					size,
					PCI_DMA_TODEVICE);
 			buffer_info->time_stamp = jiffies;
+			buffer_info->next_to_watch = i;

 			len -= size;
 			offset += size;
@@ -2858,6 +3006,9 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,

 	tx_ring->next_to_use = i;
 	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it syncronizes IO on IA64/Altix systems */
+	mmiowb();
 }

 /**
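A note on the mmiowb() added above (illustration only, fragment, not part of the patch): it orders an MMIO doorbell write against a following spin_unlock so that, on platforms such as IA64/Altix where posted MMIO from different CPUs may reach the device out of lock order, a second CPU cannot have its tail write arrive first. The generic shape, with hypothetical names:

    /* Kernel fragment, not buildable as-is: the lock + doorbell + mmiowb()
     * ordering pattern; "ring" and "tail_reg" are invented names. */
    spin_lock(&ring->lock);
    /* ... post descriptors to the ring ... */
    writel(new_tail, ring->tail_reg);	/* doorbell MMIO write */
    mmiowb();	/* make the write visible before the lock is released */
    spin_unlock(&ring->lock);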
@@ -2951,6 +3102,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)

 	/* A reprieve! */
 	netif_start_queue(netdev);
+	++adapter->restart_queue;
 	return 0;
 }
@@ -3009,9 +3161,9 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		max_per_txd = min(mss << 2, max_per_txd);
 		max_txd_pwr = fls(max_per_txd) - 1;

-	/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
-	 * points to just header, pull a few bytes of payload from
-	 * frags into skb->data */
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		 * points to just header, pull a few bytes of payload from
+		 * frags into skb->data */
 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
 		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
 			switch (adapter->hw.mac_type) {
@@ -3316,12 +3468,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.roc += E1000_READ_REG(hw, ROC);

 	if (adapter->hw.mac_type != e1000_ich8lan) {
-	adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
-	adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
-	adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
-	adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
-	adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
-	adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
+		adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
+		adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
+		adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
+		adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
+		adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
+		adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
 	}

 	adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
@@ -3352,12 +3504,12 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->stats.tpr += E1000_READ_REG(hw, TPR);

 	if (adapter->hw.mac_type != e1000_ich8lan) {
-	adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
-	adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
-	adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
-	adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
-	adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
-	adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
+		adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
+		adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
+		adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
+		adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
+		adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
+		adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
 	}

 	adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
@@ -3383,18 +3535,17 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);

 		if (adapter->hw.mac_type != e1000_ich8lan) {
-		adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
-		adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
-		adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
-		adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
-		adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
-		adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
-		adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
+			adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
+			adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
+			adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
+			adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
+			adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
+			adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
+			adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
 		}
 	}

 	/* Fill out the OS statistics structure */
-	adapter->net_stats.rx_packets = adapter->stats.gprc;
 	adapter->net_stats.tx_packets = adapter->stats.gptc;
 	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
@@ -3426,7 +3577,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	/* Tx Dropped needs to be maintained elsewhere */

 	/* Phy Stats */
-
 	if (hw->media_type == e1000_media_type_copper) {
 		if ((adapter->link_speed == SPEED_1000) &&
 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
@@ -3442,6 +3592,95 @@ e1000_update_stats(struct e1000_adapter *adapter)

 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }

+#ifdef CONFIG_PCI_MSI
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static
+irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+	int i;
+#endif
+
+	/* this code avoids the read of ICR but has to get 1000 interrupts
+	 * at every link change event before it will notice the change */
+	if (++adapter->detect_link >= 1000) {
+		uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifdef CONFIG_E1000_NAPI
+		/* read ICR disables interrupts using IAM, so keep up with our
+		 * enable/disable accounting */
+		atomic_inc(&adapter->irq_sem);
+#endif
+		adapter->detect_link = 0;
+		if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+		    (icr & E1000_ICR_INT_ASSERTED)) {
+			hw->get_link_status = 1;
+			/* 80003ES2LAN workaround--
+			 * For packet buffer work-around on link down event;
+			 * disable receives here in the ISR and
+			 * reset adapter in watchdog
+			 */
+			if (netif_carrier_ok(netdev) &&
+			    (adapter->hw.mac_type == e1000_80003es2lan)) {
+				/* disable receives */
+				uint32_t rctl = E1000_READ_REG(hw, RCTL);
+				E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+			}
+			/* guard against interrupt when we're going down */
+			if (!test_bit(__E1000_DOWN, &adapter->flags))
+				mod_timer(&adapter->watchdog_timer,
+					  jiffies + 1);
+		}
+	} else {
+		E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+							 E1000_ICR_LSC)));
+		/* bummer we have to flush here, but things break otherwise as
+		 * some event appears to be lost or delayed and throughput
+		 * drops.  In almost all tests this flush is un-necessary */
+		E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_NAPI
+		/* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+		 * masked.  No need for the IMC write, but it does mean we
+		 * should account for it ASAP. */
+		atomic_inc(&adapter->irq_sem);
+#endif
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	if (likely(netif_rx_schedule_prep(netdev))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev);
+	} else
+		e1000_irq_enable(adapter);
+#else
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+
+	for (i = 0; i < E1000_MAX_INTR; i++)
+		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+			break;
+
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+#endif
+
+	return IRQ_HANDLED;
+}
+#endif

 /**
  * e1000_intr - Interrupt Handler
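Why e1000_intr_msi() above can return IRQ_HANDLED unconditionally, and can even avoid reading ICR just to claim the interrupt: an MSI vector is never shared, so every arrival is known to belong to this device, whereas a shared INTx handler must prove ownership and return IRQ_NONE otherwise. A hedged contrast for a hypothetical "foo" device (fragment for illustration only, not buildable as-is):

    static irqreturn_t foo_intr_intx(int irq, void *data)
    {
    	struct foo_adapter *adapter = data;
    	u32 icr = readl(adapter->hw_addr + FOO_ICR);	/* read-to-clear */

    	if (!icr)
    		return IRQ_NONE;	/* shared line: someone else's IRQ */
    	foo_handle_events(adapter, icr);
    	return IRQ_HANDLED;
    }

    static irqreturn_t foo_intr_msi(int irq, void *data)
    {
    	struct foo_adapter *adapter = data;

    	/* MSI is exclusive: the claim check disappears, and the cause
    	 * read can be batched or skipped, as the patch's detect_link
    	 * counter does for link-change detection */
    	foo_handle_events(adapter, foo_pending_causes(adapter));
    	return IRQ_HANDLED;
    }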
@@ -3458,7 +3697,17 @@ e1000_intr(int irq, void *data)
 	uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
 #ifndef CONFIG_E1000_NAPI
 	int i;
-#else
+#endif
+	if (unlikely(!icr))
+		return IRQ_NONE;  /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if (unlikely(hw->mac_type >= e1000_82571 &&
+		     !(icr & E1000_ICR_INT_ASSERTED)))
+		return IRQ_NONE;
+
 	/* Interrupt Auto-Mask...upon reading ICR,
 	 * interrupts are masked.  No need for the
 	 * IMC write, but it does mean we should
@@ -3467,14 +3716,6 @@ e1000_intr(int irq, void *data)
 	atomic_inc(&adapter->irq_sem);
 #endif

-	if (unlikely(!icr)) {
-#ifdef CONFIG_E1000_NAPI
-		if (hw->mac_type >= e1000_82571)
-			e1000_irq_enable(adapter);
-#endif
-		return IRQ_NONE;  /* Not our interrupt */
-	}
-
 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
 		hw->get_link_status = 1;
 		/* 80003ES2LAN workaround--
@@ -3495,13 +3736,20 @@ e1000_intr(int irq, void *data)

 #ifdef CONFIG_E1000_NAPI
 	if (unlikely(hw->mac_type < e1000_82571)) {
+		/* disable interrupts, without the synchronize_irq bit */
 		atomic_inc(&adapter->irq_sem);
 		E1000_WRITE_REG(hw, IMC, ~0);
 		E1000_WRITE_FLUSH(hw);
 	}
-	if (likely(netif_rx_schedule_prep(netdev)))
+	if (likely(netif_rx_schedule_prep(netdev))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
 		__netif_rx_schedule(netdev);
-	else
+	} else
+		/* this really should not happen! if it does it is basically a
+		 * bug, but not a hard error, so enable ints and continue */
 		e1000_irq_enable(adapter);
 #else
 	/* Writing IMC and IMS is needed for 82547.
@@ -3519,16 +3767,23 @@ e1000_intr(int irq, void *data)
 		E1000_WRITE_REG(hw, IMC, ~0);
 	}

+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+
 	for (i = 0; i < E1000_MAX_INTR; i++)
 		if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
 		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
 			break;

+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+
 	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
 		e1000_irq_enable(adapter);

 #endif
-
 	return IRQ_HANDLED;
 }
@@ -3572,6 +3827,8 @@ e1000_clean(struct net_device *poll_dev, int *budget)
 	if ((!tx_cleaned && (work_done == 0)) ||
 	   !netif_running(poll_dev)) {
quit_polling:
+		if (likely(adapter->itr_setting & 3))
+			e1000_set_itr(adapter);
 		netif_rx_complete(poll_dev);
 		e1000_irq_enable(adapter);
 		return 0;
@@ -3598,6 +3855,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	unsigned int count = 0;
 #endif
 	boolean_t cleaned = FALSE;
+	unsigned int total_tx_bytes=0, total_tx_packets=0;

 	i = tx_ring->next_to_clean;
 	eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3609,13 +3867,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			buffer_info = &tx_ring->buffer_info[i];
 			cleaned = (i == eop);

+			if (cleaned) {
+				/* this packet count is wrong for TSO but has a
+				 * tendency to make dynamic ITR change more
+				 * towards bulk */
+				total_tx_packets++;
+				total_tx_bytes += buffer_info->skb->len;
+			}
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+			tx_desc->upper.data = 0;

 			if (unlikely(++i == tx_ring->count)) i = 0;
 		}
-
 		eop = tx_ring->buffer_info[i].next_to_watch;
 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
 	}
 #ifdef CONFIG_E1000_NAPI
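The cleanup loop above leans on the next_to_watch field this patch now sets on every TX buffer: each buffer records the index of the descriptor that closes its packet, and cleaning advances only while that watched descriptor has been written back by hardware. A toy user-space model of the bookkeeping (illustration only; all structures here are invented):

    /* Illustration only: next_to_watch bookkeeping with a toy 8-slot ring;
     * "done" stands in for the hardware descriptor-done (DD) bit. */
    #include <stdio.h>
    #include <stdbool.h>

    #define RING 8

    struct toy_buffer { int next_to_watch; };
    static bool done[RING];
    static struct toy_buffer buf[RING];

    static int clean(int next_to_clean)
    {
    	int i = next_to_clean;
    	int eop = buf[i].next_to_watch;	/* last descriptor of the packet */

    	while (done[eop]) {		/* whole packet finished? */
    		bool cleaned = false;
    		while (!cleaned) {	/* free every fragment up to eop */
    			cleaned = (i == eop);
    			done[i] = false;
    			i = (i + 1) % RING;
    		}
    		eop = buf[i].next_to_watch;
    	}
    	return i;			/* new next_to_clean */
    }

    int main(void)
    {
    	/* one 3-fragment packet in slots 0..2, all watching descriptor 2 */
    	buf[0].next_to_watch = buf[1].next_to_watch = buf[2].next_to_watch = 2;
    	done[2] = true;			/* hardware wrote back the EOP slot */
    	printf("next_to_clean advances to %d\n", clean(0));
    	return 0;
    }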
@@ -3634,8 +3898,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 		 * sees the new next_to_clean.
 		 */
 		smp_mb();
-		if (netif_queue_stopped(netdev))
+		if (netif_queue_stopped(netdev)) {
 			netif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
 	}

 	if (adapter->detect_tx_hung) {
@@ -3673,6 +3939,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			netif_stop_queue(netdev);
 		}
 	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
 	return cleaned;
 }
@@ -3752,6 +4020,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	unsigned int i;
 	int cleaned_count = 0;
 	boolean_t cleaned = FALSE;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;

 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3760,6 +4029,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 	while (rx_desc->status & E1000_RXD_STAT_DD) {
 		struct sk_buff *skb;
 		u8 status;
+
 #ifdef CONFIG_E1000_NAPI
 		if (*work_done >= work_to_do)
 			break;
@@ -3817,6 +4087,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		 * done after the TBI_ACCEPT workaround above */
 		length -= 4;

+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
 		/* code added for copybreak, this should improve
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
@@ -3832,12 +4106,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				/* save the skb in buffer_info as good */
 				buffer_info->skb = skb;
 				skb = new_skb;
-				skb_put(skb, length);
 			}
-		} else
-			skb_put(skb, length);
-
+			/* else just continue with the old one */
+		}
 		/* end copybreak code */
+		skb_put(skb, length);

 		/* Receive Checksum Offload */
 		e1000_rx_checksum(adapter,
@@ -3886,6 +4159,8 @@ next_desc:
 	if (cleaned_count)
 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
 	return cleaned;
 }
@@ -3915,6 +4190,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 	uint32_t length, staterr;
 	int cleaned_count = 0;
 	boolean_t cleaned = FALSE;
+	unsigned int total_rx_bytes=0, total_rx_packets=0;

 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
@@ -3999,7 +4275,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 				goto copydone;
 			} /* if */
 		}
-
+
 		for (j = 0; j < adapter->rx_ps_pages; j++) {
 			if (!(length= le16_to_cpu(rx_desc->wb.upper.length[j])))
 				break;
@@ -4019,6 +4295,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			pskb_trim(skb, skb->len - 4);

copydone:
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
 		e1000_rx_checksum(adapter, staterr,
 				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
 		skb->protocol = eth_type_trans(skb, netdev);
@@ -4067,6 +4346,8 @@ next_desc:
 	if (cleaned_count)
 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
 	return cleaned;
 }
@@ -4234,7 +4515,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		}

 		skb = netdev_alloc_skb(netdev,
-		                       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);

 		if (unlikely(!skb)) {
 			adapter->alloc_rx_buff_failed++;
@@ -4511,7 +4792,6 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
     return E1000_SUCCESS;
 }

-
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
 {
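Among the hunks above is the reworked copybreak path, where the single skb_put() now sits after the copy decision. The idea, restated in user space (illustration only: malloc() stands in for netdev_alloc_skb(), and the 256-byte threshold is an assumption matching the driver's usual default): small frames are copied into a right-sized buffer so the large receive buffer can stay on the ring, while large frames hand off the original buffer.

    /* Illustration only: the copybreak decision as a standalone program. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define COPYBREAK 256	/* assumed threshold, in bytes */

    /* returns the buffer to hand to the stack; *recycled tells the caller
     * whether the original DMA buffer may be reposted to the RX ring */
    static unsigned char *rx_copybreak(unsigned char *dma_buf, size_t length,
    				   int *recycled)
    {
    	if (length < COPYBREAK) {
    		unsigned char *small = malloc(length);
    		if (small) {
    			memcpy(small, dma_buf, length);
    			*recycled = 1;	/* dma_buf stays on the ring */
    			return small;
    		}
    		/* allocation failed: fall through, consume dma_buf */
    	}
    	*recycled = 0;
    	return dma_buf;
    }

    int main(void)
    {
    	unsigned char frame[64] = "tiny frame";
    	int recycled;
    	unsigned char *skb = rx_copybreak(frame, sizeof(frame), &recycled);

    	printf("len %zu: recycled=%d\n", sizeof(frame), recycled);
    	if (skb != frame)
    		free(skb);
    	return 0;
    }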
@@ -4534,12 +4814,12 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

 		if (adapter->hw.mac_type != e1000_ich8lan) {
-		/* enable VLAN receive filtering */
-		rctl = E1000_READ_REG(&adapter->hw, RCTL);
-		rctl |= E1000_RCTL_VFE;
-		rctl &= ~E1000_RCTL_CFIEN;
-		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-		e1000_update_mng_vlan(adapter);
+			/* enable VLAN receive filtering */
+			rctl = E1000_READ_REG(&adapter->hw, RCTL);
+			rctl |= E1000_RCTL_VFE;
+			rctl &= ~E1000_RCTL_CFIEN;
+			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+			e1000_update_mng_vlan(adapter);
 		}
 	} else {
 		/* disable VLAN tag insert/strip */
@@ -4548,14 +4828,16 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

 		if (adapter->hw.mac_type != e1000_ich8lan) {
-		/* disable VLAN filtering */
-		rctl = E1000_READ_REG(&adapter->hw, RCTL);
-		rctl &= ~E1000_RCTL_VFE;
-		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
-		if (adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
-			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
-			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
-		}
+			/* disable VLAN filtering */
+			rctl = E1000_READ_REG(&adapter->hw, RCTL);
+			rctl &= ~E1000_RCTL_VFE;
+			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+			if (adapter->mng_vlan_id !=
+			    (uint16_t)E1000_MNG_VLAN_NONE) {
+				e1000_vlan_rx_kill_vid(netdev,
+						       adapter->mng_vlan_id);
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
 		}
 	}