Diffstat (limited to 'drivers/net/igb/igb_main.c')
-rw-r--r--  drivers/net/igb/igb_main.c | 148
1 files changed, 92 insertions, 56 deletions
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index e25343588fc..ea17319624a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -942,6 +942,8 @@ int igb_up(struct igb_adapter *adapter)
 	rd32(E1000_ICR);
 	igb_irq_enable(adapter);
 
+	netif_tx_start_all_queues(adapter->netdev);
+
 	/* Fire a link change interrupt to start the watchdog. */
 	wr32(E1000_ICS, E1000_ICS_LSC);
 	return 0;
@@ -994,6 +996,11 @@ void igb_down(struct igb_adapter *adapter)
 	igb_reset(adapter);
 	igb_clean_all_tx_rings(adapter);
 	igb_clean_all_rx_rings(adapter);
+#ifdef CONFIG_IGB_DCA
+
+	/* since we reset the hardware DCA settings were cleared */
+	igb_setup_dca(adapter);
+#endif
 }
 
 void igb_reinit_locked(struct igb_adapter *adapter)
@@ -1343,6 +1350,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
+	if (adapter->hw.mac.type == e1000_82576)
+		netdev->features |= NETIF_F_SCTP_CSUM;
+
 	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
 
 	/* before reading the NVM, reset the controller to put the device in a
@@ -1390,8 +1400,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	igb_validate_mdi_setting(hw);
 
-	adapter->rx_csum = 1;
-
 	/* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
 	 * enable the ACPI Magic Packet filter
 	 */
@@ -1442,22 +1450,18 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	 * driver. */
 	igb_get_hw_control(adapter);
 
-	/* tell the stack to leave us alone until igb_open() is called */
-	netif_carrier_off(netdev);
-	netif_tx_stop_all_queues(netdev);
-
 	strcpy(netdev->name, "eth%d");
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
 
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
 #ifdef CONFIG_IGB_DCA
 	if (dca_add_requester(&pdev->dev) == 0) {
 		adapter->flags |= IGB_FLAG_DCA_ENABLED;
 		dev_info(&pdev->dev, "DCA enabled\n");
-		/* Always use CB2 mode, difference is masked
-		 * in the CB driver. */
-		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
 		igb_setup_dca(adapter);
 	}
 #endif
@@ -1699,6 +1703,8 @@ static int igb_open(struct net_device *netdev)
 	if (test_bit(__IGB_TESTING, &adapter->state))
 		return -EBUSY;
 
+	netif_carrier_off(netdev);
+
 	/* allocate transmit descriptors */
 	err = igb_setup_all_tx_resources(adapter);
 	if (err)
@@ -2231,29 +2237,24 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
 			 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
 
 		wr32(E1000_MRQC, mrqc);
-
-		/* Multiqueue and raw packet checksumming are mutually
-		 * exclusive.  Note that this not the same as TCP/IP
-		 * checksumming, which works fine.
-		 */
-		rxcsum = rd32(E1000_RXCSUM);
-		rxcsum |= E1000_RXCSUM_PCSD;
-		wr32(E1000_RXCSUM, rxcsum);
-	} else {
+	} else if (adapter->vfs_allocated_count) {
 		/* Enable multi-queue for sr-iov */
-		if (adapter->vfs_allocated_count)
-			wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
-		/* Enable Receive Checksum Offload for TCP and UDP */
-		rxcsum = rd32(E1000_RXCSUM);
-		if (adapter->rx_csum)
-			rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
-		else
-			rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
-
-		wr32(E1000_RXCSUM, rxcsum);
+		wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
 	}
 
+	/* Enable Receive Checksum Offload for TCP and UDP */
+	rxcsum = rd32(E1000_RXCSUM);
+	/* Disable raw packet checksumming */
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+	if (adapter->hw.mac.type == e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
 	/* Set the default pool for the PF's first queue */
 	igb_configure_vt_default_pool(adapter);
 
@@ -2661,7 +2662,6 @@ static void igb_watchdog_task(struct work_struct *work)
 			}
 
 			netif_carrier_on(netdev);
-			netif_tx_wake_all_queues(netdev);
 
 			igb_ping_all_vfs(adapter);
 
@@ -2678,7 +2678,6 @@ static void igb_watchdog_task(struct work_struct *work)
 			printk(KERN_INFO "igb: %s NIC Link is Down\n",
 			       netdev->name);
 			netif_carrier_off(netdev);
-			netif_tx_stop_all_queues(netdev);
 
 			igb_ping_all_vfs(adapter);
 
@@ -2712,6 +2711,8 @@ link_up:
 			 * (Do the reset outside of interrupt context). */
 			adapter->tx_timeout_count++;
 			schedule_work(&adapter->reset_task);
+			/* return immediately since reset is imminent */
+			return;
 		}
 	}
 
@@ -2895,13 +2896,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
 	case lowest_latency:
-		new_itr = 70000;
+		new_itr = 56;  /* aka 70,000 ints/sec */
 		break;
 	case low_latency:
-		new_itr = 20000; /* aka hwitr = ~200 */
+		new_itr = 196; /* aka 20,000 ints/sec */
 		break;
 	case bulk_latency:
-		new_itr = 4000;
+		new_itr = 980; /* aka 4,000 ints/sec */
 		break;
 	default:
 		break;
 	}
@@ -2920,7 +2921,8 @@ set_itr_now:
 		 * by adding intermediate steps when interrupt rate is
 		 * increasing */
 		new_itr = new_itr > adapter->itr ?
-			     min(adapter->itr + (new_itr >> 2), new_itr) :
+			     max((new_itr * adapter->itr) /
+			         (new_itr + (adapter->itr >> 2)), new_itr) :
 			     new_itr;
 		/* Don't write the value here; it resets the adapter's
 		 * internal timer, and causes us to delay far longer than
@@ -2929,7 +2931,7 @@ set_itr_now:
 		 * ends up being correct. */
 		adapter->itr = new_itr;
-		adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
+		adapter->rx_ring->itr_val = new_itr;
 		adapter->rx_ring->set_itr = 1;
 	}
 
@@ -3068,11 +3070,15 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 			tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
 			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 				tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+			else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
+				tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
 			break;
 		case cpu_to_be16(ETH_P_IPV6):
 			/* XXX what about other V6 headers?? */
 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 				tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+			else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
+				tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
 			break;
 		default:
 			if (unlikely(net_ratelimit()))
@@ -3133,8 +3139,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 	/* set time_stamp *before* dma to help avoid a possible race */
 	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
-	buffer_info->dma = map[count];
-	count++;
+	buffer_info->dma = skb_shinfo(skb)->dma_head;
 
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
@@ -3158,7 +3163,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
-	return count;
+	return count + 1;
 }
 
 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
@@ -3338,7 +3343,6 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	if (count) {
 		igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
 				 skb->len, hdr_len);
-		netdev->trans_start = jiffies;
 		/* Make sure there is space in the ring for the next send. */
 		igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
 	} else {
@@ -3582,8 +3586,35 @@ void igb_update_stats(struct igb_adapter *adapter)
 
 	/* Rx Errors */
 
+	if (hw->mac.type != e1000_82575) {
+		u32 rqdpc_tmp;
+		u64 rqdpc_total = 0;
+		int i;
+		/* Read out drops stats per RX queue.  Notice RQDPC (Receive
+		 * Queue Drop Packet Count) stats only gets incremented, if
+		 * the DROP_EN but it set (in the SRRCTL register for that
+		 * queue).  If DROP_EN bit is NOT set, then the some what
+		 * equivalent count is stored in RNBC (not per queue basis).
+		 * Also note the drop count is due to lack of available
+		 * descriptors.
+		 */
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
+			adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+			rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
+		}
+		adapter->net_stats.rx_fifo_errors = rqdpc_total;
+	}
+
+	/* Note RNBC (Receive No Buffers Count) is an not an exact
+	 * drop count as the hardware FIFO might save the day.  Thats
+	 * one of the reason for saving it in rx_fifo_errors, as its
+	 * potentially not a true drop.
+	 */
+	adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
+
 	/* RLEC on some newer hardware can be incorrect so build
-	 * our own version based on RUC and ROC */
+	 * our own version based on RUC and ROC */
 	adapter->net_stats.rx_errors = adapter->stats.rxerrc +
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
@@ -3767,11 +3798,15 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
 static void igb_setup_dca(struct igb_adapter *adapter)
 {
+	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
 	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
 		return;
 
+	/* Always use CB2 mode, difference is masked in the CB driver. */
+	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
+
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		adapter->tx_ring[i].cpu = -1;
 		igb_update_tx_dca(&adapter->tx_ring[i]);
 	}
@@ -4434,20 +4469,12 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
 	bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
 
 	skb_record_rx_queue(skb, ring->queue_index);
-	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-		if (vlan_extracted)
-			vlan_gro_receive(&ring->napi, adapter->vlgrp,
-					 le16_to_cpu(rx_desc->wb.upper.vlan),
-					 skb);
-		else
-			napi_gro_receive(&ring->napi, skb);
-	} else {
-		if (vlan_extracted)
-			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
-						 le16_to_cpu(rx_desc->wb.upper.vlan));
-		else
-			netif_receive_skb(skb);
-	}
+	if (vlan_extracted)
+		vlan_gro_receive(&ring->napi, adapter->vlgrp,
+				 le16_to_cpu(rx_desc->wb.upper.vlan),
+				 skb);
+	else
+		napi_gro_receive(&ring->napi, skb);
 }
 
 static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
@@ -4456,19 +4483,28 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
+	if ((status_err & E1000_RXD_STAT_IXSM) ||
+	    (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
 		return;
 
	/* TCP/UDP checksum error bit is set */
 	if (status_err &
 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+		/*
+		 * work around errata with sctp packets where the TCPE aka
+		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+		 * packets, (aka let the stack check the crc32c)
+		 */
+		if (!((adapter->hw.mac.type == e1000_82576) &&
+		      (skb->len == 60)))
+			adapter->hw_csum_err++;
 		/* let the stack verify checksum errors */
-		adapter->hw_csum_err++;
 		return;
 	}
 	/* It must be a TCP or UDP packet with a valid checksum */
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
+	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
 	adapter->hw_csum_good++;
 }
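A note on the igb_set_itr() hunks above: new_itr changes from an interrupts-per-second figure to the raw ITR register value, whose 256 ns granularity is visible in the removed conversion 1000000000 / (new_itr * 256). The standalone sketch below is not part of the patch (the helper name is invented for illustration); it simply replays that removed arithmetic to show why 56, 196 and 980 correspond to roughly 70,000, 20,000 and 4,000 ints/sec.

    /* Illustrative only -- not from the driver.  Replays the conversion the
     * removed line performed, assuming the 256 ns ITR granularity it implies. */
    #include <stdio.h>

    static unsigned int itr_reg_to_ints_per_sec(unsigned int itr_val)
    {
    	/* each ITR count is 256 ns of minimum inter-interrupt interval */
    	return 1000000000u / (itr_val * 256u);
    }

    int main(void)
    {
    	/* the three values the new code writes */
    	printf("56  -> ~%u ints/sec\n", itr_reg_to_ints_per_sec(56));  /* ~69754 */
    	printf("196 -> ~%u ints/sec\n", itr_reg_to_ints_per_sec(196)); /* ~19929 */
    	printf("980 -> ~%u ints/sec\n", itr_reg_to_ints_per_sec(980)); /* ~3985  */
    	return 0;
    }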