Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r-- | drivers/net/e1000/e1000_main.c | 345
1 file changed, 247 insertions, 98 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 73f3a85fd23..4c1ff752048 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -213,6 +213,12 @@ static void e1000_netpoll (struct net_device *netdev);
 
 extern void e1000_check_options(struct e1000_adapter *adapter);
 
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                      pci_channel_state_t state);
 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
@@ -264,7 +270,13 @@ e1000_init_module(void)
 	printk(KERN_INFO "%s\n", e1000_copyright);
 
 	ret = pci_register_driver(&e1000_driver);
-
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
 	return ret;
 }
 
@@ -464,6 +476,52 @@ e1000_get_hw_control(struct e1000_adapter *adapter)
 	}
 }
 
+static void
+e1000_init_manageability(struct e1000_adapter *adapter)
+{
+	if (adapter->en_mng_pt) {
+		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
+
+		/* disable hardware interception of ARP */
+		manc &= ~(E1000_MANC_ARP_EN);
+
+		/* enable receiving management packets to the host */
+		/* this will probably generate destination unreachable messages
+		 * from the host OS, but the packets will be handled on SMBUS */
+		if (adapter->hw.has_manc2h) {
+			uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
+
+			manc |= E1000_MANC_EN_MNG2HOST;
+#define E1000_MNG2HOST_PORT_623 (1 << 5)
+#define E1000_MNG2HOST_PORT_664 (1 << 6)
+			manc2h |= E1000_MNG2HOST_PORT_623;
+			manc2h |= E1000_MNG2HOST_PORT_664;
+			E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
+		}
+
+		E1000_WRITE_REG(&adapter->hw, MANC, manc);
+	}
+}
+
+static void
+e1000_release_manageability(struct e1000_adapter *adapter)
+{
+	if (adapter->en_mng_pt) {
+		uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
+
+		/* re-enable hardware interception of ARP */
+		manc |= E1000_MANC_ARP_EN;
+
+		if (adapter->hw.has_manc2h)
+			manc &= ~E1000_MANC_EN_MNG2HOST;
+
+		/* don't explicitly have to mess with MANC2H since
+		 * MANC has an enable disable that gates MANC2H */
+
+		E1000_WRITE_REG(&adapter->hw, MANC, manc);
+	}
+}
+
 int
 e1000_up(struct e1000_adapter *adapter)
 {
@@ -475,6 +533,7 @@ e1000_up(struct e1000_adapter *adapter)
 	e1000_set_multi(netdev);
 
 	e1000_restore_vlan(adapter);
+	e1000_init_manageability(adapter);
 
 	e1000_configure_tx(adapter);
 	e1000_setup_rctl(adapter);
@@ -497,7 +556,8 @@ e1000_up(struct e1000_adapter *adapter)
 
 	clear_bit(__E1000_DOWN, &adapter->flags);
 
-	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+	/* fire a link change interrupt to start the watchdog */
+	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_LSC);
 	return 0;
 }
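A note on the e1000_up() hunk just above: instead of arming the watchdog with a fixed two-second timer, the driver now writes E1000_ICS_LSC into the Interrupt Cause Set register, which makes the MAC raise a link-status-change interrupt immediately. The fragment below paraphrases (it is not a verbatim quote of) the receiving end in e1000_intr(), where that self-raised interrupt schedules the watchdog on the next tick:

	/* paraphrased from e1000_intr(): a software-raised LSC arrives like
	 * any hardware link event and arms the watchdog right away */
	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->get_link_status = 1;
		/* guard against arming the watchdog while going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}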
@@ -614,16 +674,34 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-	uint32_t pba, manc;
+	uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
 	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
+	boolean_t legacy_pba_adjust = FALSE;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
 	 */
 
 	switch (adapter->hw.mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		legacy_pba_adjust = TRUE;
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		pba = E1000_PBA_48K;
+		break;
 	case e1000_82547:
 	case e1000_82547_rev_2:
+		legacy_pba_adjust = TRUE;
 		pba = E1000_PBA_30K;
 		break;
 	case e1000_82571:
@@ -632,27 +710,80 @@ e1000_reset(struct e1000_adapter *adapter)
 		pba = E1000_PBA_38K;
 		break;
 	case e1000_82573:
-		pba = E1000_PBA_12K;
+		pba = E1000_PBA_20K;
 		break;
 	case e1000_ich8lan:
 		pba = E1000_PBA_8K;
-		break;
-	default:
-		pba = E1000_PBA_48K;
+	case e1000_undefined:
+	case e1000_num_macs:
 		break;
 	}
 
-	if ((adapter->hw.mac_type != e1000_82573) &&
-	    (adapter->netdev->mtu > E1000_RXBUFFER_8192))
-		pba -= 8; /* allocate more FIFO for Tx */
+	if (legacy_pba_adjust == TRUE) {
+		if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
+			pba -= 8; /* allocate more FIFO for Tx */
 
+		if (adapter->hw.mac_type == e1000_82547) {
+			adapter->tx_fifo_head = 0;
+			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+			adapter->tx_fifo_size =
+				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+		}
+	} else if (adapter->hw.max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) {
+		/* adjust PBA for jumbo frames */
+		E1000_WRITE_REG(&adapter->hw, PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accomodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accomodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = E1000_READ_REG(&adapter->hw, PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* don't include ethernet FCS because hardware appends/strips */
+		min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE +
+			       VLAN_TAG_SIZE;
+		min_tx_space = min_rx_space;
+		min_tx_space *= 2;
+		E1000_ROUNDUP(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		E1000_ROUNDUP(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* PCI/PCIx hardware has PBA alignment constraints */
+			switch (adapter->hw.mac_type) {
+			case e1000_82545 ... e1000_82546_rev_3:
+				pba &= ~(E1000_PBA_8K - 1);
+				break;
+			default:
+				break;
+			}
 
-	if (adapter->hw.mac_type == e1000_82547) {
-		adapter->tx_fifo_head = 0;
-		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
-		adapter->tx_fifo_size =
-			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
-		atomic_set(&adapter->tx_fifo_stall, 0);
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment or use Early Receive if available */
+			if (pba < min_rx_space) {
+				switch (adapter->hw.mac_type) {
+				case e1000_82573:
+					/* ERT enabled in e1000_configure_rx */
+					break;
+				default:
+					pba = min_rx_space;
+					break;
+				}
+			}
+		}
 	}
 
 	E1000_WRITE_REG(&adapter->hw, PBA, pba);
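The jumbo-frame FIFO sizing above is plain integer arithmetic and easy to sanity-check outside the kernel. A minimal userspace sketch, assuming ENET_HEADER_SIZE is 14 and VLAN_TAG_SIZE is 4 as in the driver headers, and mirroring the in-place E1000_ROUNDUP macro the way it is used above:

	#include <stdio.h>
	#include <stdlib.h>

	#define ENET_HEADER_SIZE 14
	#define VLAN_TAG_SIZE     4
	/* round size up to the next multiple of unit, assigning in place */
	#define E1000_ROUNDUP(size, unit) \
		((size) = (((size) + (unit) - 1) / (unit)) * (unit))

	int main(int argc, char **argv)
	{
		unsigned int mtu = argc > 1 ? (unsigned int)atoi(argv[1]) : 9000;
		/* one full frame for Rx, two for wire-speed Tx, FCS excluded */
		unsigned int min_rx_space = mtu + ENET_HEADER_SIZE + VLAN_TAG_SIZE;
		unsigned int min_tx_space = min_rx_space * 2;

		E1000_ROUNDUP(min_tx_space, 1024);
		min_tx_space >>= 10;	/* bytes -> KB, the PBA's unit */
		E1000_ROUNDUP(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* for mtu 9000: min_rx_space = 9 KB, min_tx_space = 18 KB */
		printf("min_rx_space = %u KB, min_tx_space = %u KB\n",
		       min_rx_space, min_tx_space);
		return 0;
	}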
@@ -685,6 +816,20 @@ e1000_reset(struct e1000_adapter *adapter)
 	if (e1000_init_hw(&adapter->hw))
 		DPRINTK(PROBE, ERR, "Hardware Error\n");
 	e1000_update_mng_vlan(adapter);
+
+	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+	if (adapter->hw.mac_type >= e1000_82544 &&
+	    adapter->hw.mac_type <= e1000_82547_rev_2 &&
+	    adapter->hw.autoneg == 1 &&
+	    adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
+		uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
+		/* clear phy power management bit if we are in gig only mode,
+		 * which if enabled will attempt negotiation to 100Mb, which
+		 * can cause a loss of link at power off or driver unload */
+		ctrl &= ~E1000_CTRL_SWDPIN3;
+		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
+	}
+
 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
 
@@ -705,14 +850,7 @@ e1000_reset(struct e1000_adapter *adapter)
 				    phy_data);
 	}
 
-	if ((adapter->en_mng_pt) &&
-	    (adapter->hw.mac_type >= e1000_82540) &&
-	    (adapter->hw.mac_type < e1000_82571) &&
-	    (adapter->hw.media_type == e1000_media_type_copper)) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
-		E1000_WRITE_REG(&adapter->hw, MANC, manc);
-	}
+	e1000_release_manageability(adapter);
 }
 
 /**
@@ -857,6 +995,12 @@ e1000_probe(struct pci_dev *pdev,
 	    (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
 
+#ifdef CONFIG_DEBUG_SLAB
+	/* 82544's work arounds do not play nicely with DEBUG SLAB */
+	if (adapter->hw.mac_type == e1000_82544)
+		netdev->features &= ~NETIF_F_TSO;
+#endif
+
 #ifdef NETIF_F_TSO6
 	if (adapter->hw.mac_type > e1000_82547_rev_2)
 		netdev->features |= NETIF_F_TSO6;
@@ -1078,22 +1222,13 @@ e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t manc;
 #ifdef CONFIG_E1000_NAPI
 	int i;
 #endif
 
 	flush_scheduled_work();
 
-	if (adapter->hw.mac_type >= e1000_82540 &&
-	    adapter->hw.mac_type < e1000_82571 &&
-	    adapter->hw.media_type == e1000_media_type_copper) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		if (manc & E1000_MANC_SMBUS_EN) {
-			manc |= E1000_MANC_ARP_EN;
-			E1000_WRITE_REG(&adapter->hw, MANC, manc);
-		}
-	}
+	e1000_release_manageability(adapter);
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant. */
@@ -1531,9 +1666,9 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	}
 
 	/* Set the default values for the Tx Inter Packet Gap timer */
-
-	if (hw->media_type == e1000_media_type_fiber ||
-	    hw->media_type == e1000_media_type_internal_serdes)
+	if (adapter->hw.mac_type <= e1000_82547_rev_2 &&
+	    (hw->media_type == e1000_media_type_fiber ||
+	     hw->media_type == e1000_media_type_internal_serdes))
 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
 	else
 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
@@ -2528,6 +2663,13 @@ e1000_watchdog(unsigned long data)
 			netif_wake_queue(netdev);
 			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
 			adapter->smartspeed = 0;
+		} else {
+			/* make sure the receive unit is started */
+			if (adapter->hw.rx_needs_kicking) {
+				struct e1000_hw *hw = &adapter->hw;
+				uint32_t rctl = E1000_READ_REG(hw, RCTL);
+				E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
+			}
 		}
 	} else {
 		if (netif_carrier_ok(netdev)) {
@@ -2628,29 +2770,34 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
 	if (packets == 0)
 		goto update_itr_done;
 
-
 	switch (itr_setting) {
 	case lowest_latency:
-		if ((packets < 5) && (bytes > 512))
+		/* jumbo frames get bulk treatment*/
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
 			retval = low_latency;
 		break;
 	case low_latency:  /* 50 usec aka 20000 ints/s */
 		if (bytes > 10000) {
-			if ((packets < 10) ||
-			     ((bytes/packets) > 1200))
+			/* jumbo frames need bulk latency setting */
+			if (bytes/packets > 8000)
+				retval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
 				retval = bulk_latency;
 			else if ((packets > 35))
 				retval = lowest_latency;
-		} else if (packets <= 2 && bytes < 512)
+		} else if (bytes/packets > 2000)
+			retval = bulk_latency;
+		else if (packets <= 2 && bytes < 512)
 			retval = lowest_latency;
 		break;
 	case bulk_latency: /* 250 usec aka 4000 ints/s */
 		if (bytes > 25000) {
 			if (packets > 35)
 				retval = low_latency;
-		} else {
-			if (bytes < 6000)
-				retval = low_latency;
+		} else if (bytes < 6000) {
+			retval = low_latency;
 		}
 		break;
 	}
@@ -2679,17 +2826,20 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 					    adapter->tx_itr,
 					    adapter->total_tx_packets,
 					    adapter->total_tx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
+		adapter->tx_itr = low_latency;
+
 	adapter->rx_itr = e1000_update_itr(adapter,
 				    adapter->rx_itr,
 				    adapter->total_rx_packets,
 				    adapter->total_rx_bytes);
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
+		adapter->rx_itr = low_latency;
 
 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
 
-	/* conservative mode eliminates the lowest_latency setting */
-	if (current_itr == lowest_latency && (adapter->itr_setting == 3))
-		current_itr = low_latency;
-
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
 	case lowest_latency:
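The retuned interrupt-throttle classifier above is self-contained enough to exercise in userspace. The harness below mirrors the patched e1000_update_itr() thresholds; the enum names match the driver, everything else is scaffolding:

	#include <stdio.h>

	enum { lowest_latency, low_latency, bulk_latency };

	static int update_itr(int cur, unsigned int packets, unsigned int bytes)
	{
		int retval = cur;

		if (packets == 0)
			return retval;
		switch (cur) {
		case lowest_latency:
			if (bytes / packets > 8000)	/* jumbo -> bulk */
				retval = bulk_latency;
			else if (packets < 5 && bytes > 512)
				retval = low_latency;
			break;
		case low_latency: /* ~20000 ints/s */
			if (bytes > 10000) {
				if (bytes / packets > 8000)
					retval = bulk_latency;
				else if (packets < 10 || bytes / packets > 1200)
					retval = bulk_latency;
				else if (packets > 35)
					retval = lowest_latency;
			} else if (bytes / packets > 2000) {
				retval = bulk_latency;
			} else if (packets <= 2 && bytes < 512) {
				retval = lowest_latency;
			}
			break;
		case bulk_latency: /* ~4000 ints/s */
			if (bytes > 25000) {
				if (packets > 35)
					retval = low_latency;
			} else if (bytes < 6000) {
				retval = low_latency;
			}
			break;
		}
		return retval;
	}

	int main(void)
	{
		/* two 9000-byte jumbos: 9000 B/pkt > 8000, demote to bulk (2) */
		printf("%d\n", update_itr(low_latency, 2, 18000));
		/* thirty 1000-byte frames: mid-range traffic stays low (1) */
		printf("%d\n", update_itr(low_latency, 30, 30000));
		return 0;
	}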
@@ -3168,6 +3318,16 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
 		switch (adapter->hw.mac_type) {
 			unsigned int pull_size;
+		case e1000_82544:
+			/* Make sure we have room to chop off 4 bytes,
+			 * and that the end alignment will work out to
+			 * this hardware's requirements
+			 * NOTE: this is a TSO only workaround
+			 * if end byte alignment not correct move us
+			 * into the next dword */
+			if ((unsigned long)(skb->tail - 1) & 4)
+				break;
+			/* fall through */
 		case e1000_82571:
 		case e1000_82572:
 		case e1000_82573:
@@ -3419,12 +3579,11 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
 	netdev->mtu = new_mtu;
+	adapter->hw.max_frame_size = max_frame;
 
 	if (netif_running(netdev))
 		e1000_reinit_locked(adapter);
 
-	adapter->hw.max_frame_size = max_frame;
-
 	return 0;
 }
 
@@ -3573,6 +3732,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
 	adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
 	adapter->net_stats.tx_window_errors = adapter->stats.latecol;
 	adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
+	if (adapter->hw.bad_tx_carr_stats_fd &&
+	    adapter->link_duplex == FULL_DUPLEX) {
+		adapter->net_stats.tx_carrier_errors = 0;
+		adapter->stats.tncrs = 0;
+	}
 
 	/* Tx Dropped needs to be maintained elsewhere */
 
@@ -3590,6 +3754,13 @@ e1000_update_stats(struct e1000_adapter *adapter)
 		adapter->phy_stats.receive_errors += phy_tmp;
 	}
 
+	/* Management Stats */
+	if (adapter->hw.has_smbus) {
+		adapter->stats.mgptc += E1000_READ_REG(hw, MGTPTC);
+		adapter->stats.mgprc += E1000_READ_REG(hw, MGTPRC);
+		adapter->stats.mgpdc += E1000_READ_REG(hw, MGTPDC);
+	}
+
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 #ifdef CONFIG_PCI_MSI
@@ -3868,11 +4039,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
 			cleaned = (i == eop);
 
 			if (cleaned) {
-				/* this packet count is wrong for TSO but has a
-				 * tendency to make dynamic ITR change more
-				 * towards bulk */
+				struct sk_buff *skb = buffer_info->skb;
+				unsigned int segs = skb_shinfo(skb)->gso_segs;
+				total_tx_packets += segs;
 				total_tx_packets++;
-				total_tx_bytes += buffer_info->skb->len;
+				total_tx_bytes += skb->len;
 			}
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 			tx_desc->upper.data = 0;
@@ -4094,8 +4265,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		/* code added for copybreak, this should improve
 		 * performance for small packets with large amounts
 		 * of reassembly being done in the stack */
-#define E1000_CB_LENGTH 256
-		if (length < E1000_CB_LENGTH) {
+		if (length < copybreak) {
 			struct sk_buff *new_skb =
 				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
 			if (new_skb) {
@@ -4253,7 +4423,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 
 		/* page alloc/put takes too long and effects small packet
 		 * throughput, so unsplit small packets and save the alloc/put*/
-		if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
+		if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 			/* there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
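For reference, the consumer of the new copybreak parameter in the Rx hunks above works roughly as follows (paraphrased, not verbatim; buffer_info, skb and length are the surrounding driver locals, and the NET_IP_ALIGN offset bookkeeping is elided): a packet at or below the threshold is copied into a fresh right-sized skb so the original full-sized buffer can be recycled in place rather than bounced through the allocator.

	if (length < copybreak) {
		struct sk_buff *new_skb =
			netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
		if (new_skb) {
			skb_reserve(new_skb, NET_IP_ALIGN); /* align IP hdr */
			memcpy(new_skb->data, skb->data, length);
			buffer_info->skb = skb;	/* recycle the big buffer */
			skb = new_skb;		/* hand the copy up the stack */
		}
	}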
@@ -4998,7 +5168,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t ctrl, ctrl_ext, rctl, manc, status;
+	uint32_t ctrl, ctrl_ext, rctl, status;
 	uint32_t wufc = adapter->wol;
 #ifdef CONFIG_PM
 	int retval = 0;
@@ -5067,16 +5237,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
-	if (adapter->hw.mac_type >= e1000_82540 &&
-	    adapter->hw.mac_type < e1000_82571 &&
-	    adapter->hw.media_type == e1000_media_type_copper) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		if (manc & E1000_MANC_SMBUS_EN) {
-			manc |= E1000_MANC_ARP_EN;
-			E1000_WRITE_REG(&adapter->hw, MANC, manc);
-			pci_enable_wake(pdev, PCI_D3hot, 1);
-			pci_enable_wake(pdev, PCI_D3cold, 1);
-		}
+	e1000_release_manageability(adapter);
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if (adapter->en_mng_pt) {
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+		pci_enable_wake(pdev, PCI_D3cold, 1);
 	}
 
 	if (adapter->hw.phy_type == e1000_phy_igp_3)
@@ -5102,7 +5268,7 @@ e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t manc, err;
+	uint32_t err;
 
 	pci_set_power_state(pdev, PCI_D0);
 	e1000_pci_restore_state(adapter);
@@ -5122,19 +5288,13 @@ e1000_resume(struct pci_dev *pdev)
 	e1000_reset(adapter);
 	E1000_WRITE_REG(&adapter->hw, WUS, ~0);
 
+	e1000_init_manageability(adapter);
+
 	if (netif_running(netdev))
 		e1000_up(adapter);
 
 	netif_device_attach(netdev);
 
-	if (adapter->hw.mac_type >= e1000_82540 &&
-	    adapter->hw.mac_type < e1000_82571 &&
-	    adapter->hw.media_type == e1000_media_type_copper) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		manc &= ~(E1000_MANC_ARP_EN);
-		E1000_WRITE_REG(&adapter->hw, MANC, manc);
-	}
-
 	/* If the controller is 82573 and f/w is AMT, do not set
 	 * DRV_LOAD until the interface is up.  For all other cases,
 	 * let the f/w know that the h/w is now under the control
@@ -5235,7 +5395,8 @@ static void e1000_io_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev->priv;
-	uint32_t manc, swsm;
+
+	e1000_init_manageability(adapter);
 
 	if (netif_running(netdev)) {
 		if (e1000_up(adapter)) {
@@ -5246,26 +5407,14 @@ static void e1000_io_resume(struct pci_dev *pdev)
 
 	netif_device_attach(netdev);
 
-	if (adapter->hw.mac_type >= e1000_82540 &&
-	    adapter->hw.mac_type < e1000_82571 &&
-	    adapter->hw.media_type == e1000_media_type_copper) {
-		manc = E1000_READ_REG(&adapter->hw, MANC);
-		manc &= ~(E1000_MANC_ARP_EN);
-		E1000_WRITE_REG(&adapter->hw, MANC, manc);
-	}
-
-	switch (adapter->hw.mac_type) {
-	case e1000_82573:
-		swsm = E1000_READ_REG(&adapter->hw, SWSM);
-		E1000_WRITE_REG(&adapter->hw, SWSM,
-				swsm | E1000_SWSM_DRV_LOAD);
-		break;
-	default:
-		break;
-	}
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (adapter->hw.mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
 
-	if (netif_running(netdev))
-		mod_timer(&adapter->watchdog_timer, jiffies);
 }

/* e1000_main.c */
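The open-coded SWSM write that e1000_io_resume() drops above is covered by the existing e1000_get_hw_control() helper it now calls. Sketched from that helper (paraphrased, and the exact set of MACs using each register is an assumption here), the DRV_LOAD bit tells AMT/ASF firmware that the driver owns the hardware, via SWSM on 82573 and CTRL_EXT on newer parts:

	switch (adapter->hw.mac_type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, SWSM);
		E1000_WRITE_REG(&adapter->hw, SWSM,
				swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_ich8lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}

The trailing mod_timer() kick can go for the same reason as in e1000_up(): the ICS write there raises a link-change interrupt, which restarts the watchdog on its own.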