Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r-- | drivers/net/ixgbe/ixgbe_main.c | 1972
1 file changed, 1057 insertions(+), 915 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e32af434cc9..f85631263af 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -50,7 +50,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = - "Intel(R) 10 Gigabit PCI Express Network Driver"; + "Intel(R) 10 Gigabit PCI Express Network Driver"; #define DRV_VERSION "2.0.84-k2" const char ixgbe_driver_version[] = DRV_VERSION; @@ -120,7 +120,7 @@ MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); #ifdef CONFIG_IXGBE_DCA static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, - void *p); + void *p); static struct notifier_block dca_notifier = { .notifier_call = ixgbe_notify_dca, .next = NULL, @@ -131,8 +131,8 @@ static struct notifier_block dca_notifier = { #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; module_param(max_vfs, uint, 0); -MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " - "per physical function"); +MODULE_PARM_DESC(max_vfs, + "Maximum number of virtual functions to allocate per physical function"); #endif /* CONFIG_PCI_IOV */ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); @@ -169,8 +169,8 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* take a breather then clean up driver data */ msleep(100); - if (adapter->vfinfo) - kfree(adapter->vfinfo); + + kfree(adapter->vfinfo); adapter->vfinfo = NULL; adapter->num_vfs = 0; @@ -282,17 +282,17 @@ static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); break; default: - printk(KERN_INFO "%-15s %08x\n", reginfo->name, + pr_info("%-15s %08x\n", reginfo->name, IXGBE_READ_REG(hw, reginfo->ofs)); return; } for (i = 0; i < 8; i++) { snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); - printk(KERN_ERR "%-15s ", rname); + pr_err("%-15s", rname); for (j = 0; j < 8; j++) - printk(KERN_CONT "%08x ", regs[i*8+j]); - printk(KERN_CONT "\n"); + pr_cont(" %08x", regs[i*8+j]); + pr_cont("\n"); } } @@ -322,18 +322,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) /* Print netdevice Info */ if (netdev) { dev_info(&adapter->pdev->dev, "Net device Info\n"); - printk(KERN_INFO "Device Name state " + pr_info("Device Name state " "trans_start last_rx\n"); - printk(KERN_INFO "%-15s %016lX %016lX %016lX\n", - netdev->name, - netdev->state, - netdev->trans_start, - netdev->last_rx); + pr_info("%-15s %016lX %016lX %016lX\n", + netdev->name, + netdev->state, + netdev->trans_start, + netdev->last_rx); } /* Print Registers */ dev_info(&adapter->pdev->dev, "Register Dump\n"); - printk(KERN_INFO " Register Name Value\n"); + pr_info(" Register Name Value\n"); for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; reginfo->name; reginfo++) { ixgbe_regdump(hw, reginfo); @@ -344,13 +344,12 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] " - "leng ntw timestamp\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n", + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, (u64)tx_buffer_info->dma, tx_buffer_info->length, @@ -377,18 +376,18 @@ static void ixgbe_dump(struct 
ixgbe_adapter *adapter) for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index); - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "T [desc] [address 63:0 ] " + pr_info("------------------------------------\n"); + pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("T [desc] [address 63:0 ] " "[PlPOIdStDDt Ln] [bi->dma ] " "leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); tx_buffer_info = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; - printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX" + pr_info("T [0x%03X] %016llX %016llX %016llX" " %04X %3X %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), @@ -399,13 +398,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) tx_buffer_info->skb); if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC/U\n"); + pr_cont(" NTC/U\n"); else if (i == tx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); + pr_cont(" NTU\n"); else if (i == tx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); + pr_cont(" NTC\n"); else - printk(KERN_CONT "\n"); + pr_cont("\n"); if (netif_msg_pktdata(adapter) && tx_buffer_info->dma != 0) @@ -419,11 +418,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) /* Print RX Rings Summary */ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); - printk(KERN_INFO "Queue [NTU] [NTC]\n"); + pr_info("Queue [NTU] [NTC]\n"); for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - printk(KERN_INFO "%5d %5X %5X\n", n, - rx_ring->next_to_use, rx_ring->next_to_clean); + pr_info("%5d %5X %5X\n", + n, rx_ring->next_to_use, rx_ring->next_to_clean); } /* Print RX Rings */ @@ -454,30 +453,30 @@ rx_ring_summary: */ for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index); - printk(KERN_INFO "------------------------------------\n"); - printk(KERN_INFO "R [desc] [ PktBuf A0] " + pr_info("------------------------------------\n"); + pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); + pr_info("------------------------------------\n"); + pr_info("R [desc] [ PktBuf A0] " "[ HeadBuf DD] [bi->dma ] [bi->skb] " "<-- Adv Rx Read format\n"); - printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] " + pr_info("RWB[desc] [PcsmIpSHl PtRs] " "[vl er S cks ln] ---------------- [bi->skb] " "<-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { rx_buffer_info = &rx_ring->rx_buffer_info[i]; - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); if (staterr & IXGBE_RXD_STAT_DD) { /* Descriptor Done */ - printk(KERN_INFO "RWB[0x%03X] %016llX " + pr_info("RWB[0x%03X] %016llX " "%016llX ---------------- %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), rx_buffer_info->skb); } else { - printk(KERN_INFO "R [0x%03X] %016llX " + pr_info("R [0x%03X] %016llX " "%016llX %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), @@ -503,11 +502,11 @@ rx_ring_summary: } if (i == rx_ring->next_to_use) - printk(KERN_CONT " NTU\n"); + pr_cont(" 
NTU\n"); else if (i == rx_ring->next_to_clean) - printk(KERN_CONT " NTC\n"); + pr_cont(" NTC\n"); else - printk(KERN_CONT "\n"); + pr_cont("\n"); } } @@ -523,7 +522,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) /* Let firmware take over control of h/w */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); } static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) @@ -533,7 +532,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) /* Let firmware know the driver has taken over */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, - ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } /* @@ -545,7 +544,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) * */ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, - u8 queue, u8 msix_vector) + u8 queue, u8 msix_vector) { u32 ivar, index; struct ixgbe_hw *hw = &adapter->hw; @@ -586,7 +585,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, } static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, - u64 qmask) + u64 qmask) { u32 mask; @@ -601,9 +600,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, - struct ixgbe_tx_buffer - *tx_buffer_info) +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, + struct ixgbe_tx_buffer + *tx_buffer_info) { if (tx_buffer_info->dma) { if (tx_buffer_info->mapped_as_page) @@ -637,7 +636,7 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, * Returns : true if in xon state (currently not paused) */ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { u32 txoff = IXGBE_TFCS_TXOFF; @@ -682,8 +681,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, } static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - unsigned int eop) + struct ixgbe_ring *tx_ring, + unsigned int eop) { struct ixgbe_hw *hw = &adapter->hw; @@ -695,7 +694,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, ixgbe_tx_xon_state(adapter, tx_ring)) { /* detected Tx unit hang */ union ixgbe_adv_tx_desc *tx_desc; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); e_err(drv, "Detected Tx Unit Hang\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" @@ -732,7 +731,7 @@ static void ixgbe_tx_timeout(struct net_device *netdev); * @tx_ring: tx ring to clean **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct ixgbe_adapter *adapter = q_vector->adapter; struct net_device *netdev = adapter->netdev; @@ -743,7 +742,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && (count < tx_ring->work_limit)) { @@ -751,7 +750,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { struct 
sk_buff *skb; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); tx_buffer_info = &tx_ring->tx_buffer_info[i]; cleaned = (i == eop); skb = tx_buffer_info->skb; @@ -781,7 +780,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, } ixgbe_unmap_and_free_tx_resource(adapter, - tx_buffer_info); + tx_buffer_info); tx_desc->wb.status = 0; @@ -791,14 +790,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, } eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); } tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(count && netif_carrier_ok(netdev) && - (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -825,14 +824,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; + u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.packets += total_packets; tx_ring->stats.bytes += total_bytes; - return (count < tx_ring->work_limit); + u64_stats_update_end(&tx_ring->syncp); + return count < tx_ring->work_limit; } #ifdef CONFIG_IXGBE_DCA static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { u32 rxctrl; int cpu = get_cpu(); @@ -846,13 +847,13 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << - IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); + IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); } rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); rx_ring->cpu = cpu; } @@ -860,7 +861,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, } static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { u32 txctrl; int cpu = get_cpu(); @@ -878,7 +879,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << - IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); + IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); } @@ -946,27 +947,22 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) * @rx_desc: rx descriptor **/ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb, u8 status, - struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc) + struct sk_buff *skb, u8 status, + struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc) { struct ixgbe_adapter *adapter = q_vector->adapter; struct napi_struct *napi = &q_vector->napi; bool is_vlan = (status & IXGBE_RXD_STAT_VP); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - skb_record_rx_queue(skb, ring->queue_index); - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { - if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) - 
vlan_gro_receive(napi, adapter->vlgrp, tag, skb); - else - napi_gro_receive(napi, skb); - } else { - if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK)) - vlan_hwaccel_rx(skb, adapter->vlgrp, tag); - else - netif_rx(skb); - } + if (is_vlan && (tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, tag); + + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + napi_gro_receive(napi, skb); + else + netif_rx(skb); } /** @@ -981,7 +977,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, { u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error); - skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); /* Rx csum disabled */ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) @@ -1017,7 +1013,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, } static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, - struct ixgbe_ring *rx_ring, u32 val) + struct ixgbe_ring *rx_ring, u32 val) { /* * Force memory writes to complete before letting h/w @@ -1033,25 +1029,27 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split * @adapter: address of board private structure **/ -static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - int cleaned_count) +void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, + int cleaned_count) { + struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; unsigned int i; + unsigned int bufsz = rx_ring->rx_buf_len; i = rx_ring->next_to_use; bi = &rx_ring->rx_buffer_info[i]; while (cleaned_count--) { - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); if (!bi->page_dma && (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); + bi->page = netdev_alloc_page(netdev); if (!bi->page) { adapter->alloc_rx_page_failed++; goto no_buffers; @@ -1063,29 +1061,28 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, } bi->page_dma = dma_map_page(&pdev->dev, bi->page, - bi->page_offset, - (PAGE_SIZE / 2), + bi->page_offset, + (PAGE_SIZE / 2), DMA_FROM_DEVICE); } if (!bi->skb) { - struct sk_buff *skb; - /* netdev_alloc_skb reserves 32 bytes up front!! 
*/ - uint bufsz = rx_ring->rx_buf_len + SMP_CACHE_BYTES; - skb = netdev_alloc_skb(adapter->netdev, bufsz); + struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, + bufsz); + bi->skb = skb; if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } + /* initialize queue mapping */ + skb_record_rx_queue(skb, rx_ring->queue_index); + } - /* advance the data pointer to the next cache line */ - skb_reserve(skb, (PTR_ALIGN(skb->data, SMP_CACHE_BYTES) - - skb->data)); - - bi->skb = skb; - bi->dma = dma_map_single(&pdev->dev, skb->data, - rx_ring->rx_buf_len, + if (!bi->dma) { + bi->dma = dma_map_single(&pdev->dev, + bi->skb->data, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); } /* Refresh the desc even if buffer_addrs didn't change because @@ -1095,6 +1092,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); } else { rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + rx_desc->read.hdr_addr = 0; } i++; @@ -1126,8 +1124,8 @@ static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) { return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & - IXGBE_RXDADV_RSCCNT_MASK) >> - IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_RXDADV_RSCCNT_MASK) >> + IXGBE_RXDADV_RSCCNT_SHIFT; } /** @@ -1140,7 +1138,7 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) * turns it into the frag list owner. **/ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, - u64 *count) + u64 *count) { unsigned int frag_list_size = 0; @@ -1168,11 +1166,10 @@ struct ixgbe_rsc_cb { #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *rx_ring, - int *work_done, int work_to_do) + struct ixgbe_ring *rx_ring, + int *work_done, int work_to_do) { struct ixgbe_adapter *adapter = q_vector->adapter; - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; @@ -1188,7 +1185,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ i = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); rx_buffer_info = &rx_ring->rx_buffer_info[i]; @@ -1231,9 +1228,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; } else { dma_unmap_single(&pdev->dev, - rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); + rx_buffer_info->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); } rx_buffer_info->dma = 0; skb_put(skb, len); @@ -1244,9 +1241,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, PAGE_SIZE / 2, DMA_FROM_DEVICE); rx_buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer_info->page, - rx_buffer_info->page_offset, - upper_len); + rx_buffer_info->page, + rx_buffer_info->page_offset, + upper_len); if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || (page_count(rx_buffer_info->page) != 1)) @@ -1263,7 +1260,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (i == rx_ring->count) i = 0; - next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); + next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); prefetch(next_rxd); cleaned_count++; @@ -1280,24 +1277,28 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector 
*q_vector, if (staterr & IXGBE_RXD_STAT_EOP) { if (skb->prev) - skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); + skb = ixgbe_transform_rsc_queue(skb, + &(rx_ring->rsc_count)); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { if (IXGBE_RSC_CB(skb)->delay_unmap) { dma_unmap_single(&pdev->dev, IXGBE_RSC_CB(skb)->dma, - rx_ring->rx_buf_len, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); IXGBE_RSC_CB(skb)->dma = 0; IXGBE_RSC_CB(skb)->delay_unmap = false; } if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) - rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; + rx_ring->rsc_count += + skb_shinfo(skb)->nr_frags; else rx_ring->rsc_count++; rx_ring->rsc_flush++; } + u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets++; rx_ring->stats.bytes += skb->len; + u64_stats_update_end(&rx_ring->syncp); } else { if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { rx_buffer_info->skb = next_buffer->skb; @@ -1373,8 +1374,6 @@ next_desc: rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; - netdev->stats.rx_bytes += total_rx_bytes; - netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -1403,24 +1402,24 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) q_vector = adapter->q_vector[v_idx]; /* XXX for_each_set_bit(...) */ r_idx = find_first_bit(q_vector->rxr_idx, - adapter->num_rx_queues); + adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { j = adapter->rx_ring[r_idx]->reg_idx; ixgbe_set_ivar(adapter, 0, j, v_idx); r_idx = find_next_bit(q_vector->rxr_idx, - adapter->num_rx_queues, - r_idx + 1); + adapter->num_rx_queues, + r_idx + 1); } r_idx = find_first_bit(q_vector->txr_idx, - adapter->num_tx_queues); + adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { j = adapter->tx_ring[r_idx]->reg_idx; ixgbe_set_ivar(adapter, 1, j, v_idx); r_idx = find_next_bit(q_vector->txr_idx, - adapter->num_tx_queues, - r_idx + 1); + adapter->num_tx_queues, + r_idx + 1); } if (q_vector->txr_count && !q_vector->rxr_count) @@ -1431,11 +1430,26 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) q_vector->eitr = adapter->rx_eitr_param; ixgbe_write_eitr(q_vector); + /* If Flow Director is enabled, set interrupt affinity */ + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + /* + * Allocate the affinity_hint cpumask, assign the mask + * for this vector, and set our affinity_hint for + * this irq. 
+ */ + if (!alloc_cpumask_var(&q_vector->affinity_mask, + GFP_KERNEL)) + return; + cpumask_set_cpu(v_idx, q_vector->affinity_mask); + irq_set_affinity_hint(adapter->msix_entries[v_idx].vector, + q_vector->affinity_mask); + } } if (adapter->hw.mac.type == ixgbe_mac_82598EB) ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - v_idx); + v_idx); else if (adapter->hw.mac.type == ixgbe_mac_82599EB) ixgbe_set_ivar(adapter, -1, 1, v_idx); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); @@ -1477,8 +1491,8 @@ enum latency_range { * parameter (see ixgbe_param.c) **/ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, - u32 eitr, u8 itr_setting, - int packets, int bytes) + u32 eitr, u8 itr_setting, + int packets, int bytes) { unsigned int retval = itr_setting; u32 timepassed_us; @@ -1567,30 +1581,30 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) for (i = 0; i < q_vector->txr_count; i++) { tx_ring = adapter->tx_ring[r_idx]; ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, - q_vector->tx_itr, - tx_ring->total_packets, - tx_ring->total_bytes); + q_vector->tx_itr, + tx_ring->total_packets, + tx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? - q_vector->tx_itr - 1 : ret_itr); + q_vector->tx_itr - 1 : ret_itr); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + r_idx + 1); } r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { rx_ring = adapter->rx_ring[r_idx]; ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, - q_vector->rx_itr, - rx_ring->total_packets, - rx_ring->total_bytes); + q_vector->rx_itr, + rx_ring->total_packets, + rx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 
- q_vector->rx_itr - 1 : ret_itr); + q_vector->rx_itr - 1 : ret_itr); r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); + r_idx + 1); } current_itr = max(q_vector->rx_itr, q_vector->tx_itr); @@ -1627,39 +1641,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) static void ixgbe_check_overtemp_task(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - check_overtemp_task); + struct ixgbe_adapter, + check_overtemp_task); struct ixgbe_hw *hw = &adapter->hw; u32 eicr = adapter->interrupt_event; - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { - switch (hw->device_id) { - case IXGBE_DEV_ID_82599_T3_LOM: { - u32 autoneg; - bool link_up = false; + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) + return; - if (hw->mac.ops.check_link) - hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_T3_LOM: { + u32 autoneg; + bool link_up = false; - if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || - (eicr & IXGBE_EICR_LSC)) - /* Check if this is due to overtemp */ - if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) - break; - } + if (hw->mac.ops.check_link) + hw->mac.ops.check_link(hw, &autoneg, &link_up, false); + + if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) || + (eicr & IXGBE_EICR_LSC)) + /* Check if this is due to overtemp */ + if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) + break; + return; + } + default: + if (!(eicr & IXGBE_EICR_GPI_SDP0)) return; - default: - if (!(eicr & IXGBE_EICR_GPI_SDP0)) - return; - break; - } - e_crit(drv, "Network adapter has been stopped because it has " - "over heated. Restart the computer. If the problem " - "persists, power off the system and replace the " - "adapter\n"); - /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); + break; } + e_crit(drv, + "Network adapter has been stopped because it has over heated. " + "Restart the computer. 
If the problem persists, " + "power off the system and replace the adapter\n"); + /* write to clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); } static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) @@ -1746,9 +1761,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) netif_tx_stop_all_queues(netdev); for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = - adapter->tx_ring[i]; + adapter->tx_ring[i]; if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, - &tx_ring->reinit_state)) + &tx_ring->reinit_state)) schedule_work(&adapter->fdir_reinit_task); } } @@ -1777,7 +1792,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, } static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, - u64 qmask) + u64 qmask) { u32 mask; @@ -1809,7 +1824,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) tx_ring->total_bytes = 0; tx_ring->total_packets = 0; r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + r_idx + 1); } /* EIAM disabled interrupts (on this vector) for us */ @@ -1837,7 +1852,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) rx_ring->total_bytes = 0; rx_ring->total_packets = 0; r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); + r_idx + 1); } if (!q_vector->rxr_count) @@ -1867,7 +1882,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) ring->total_bytes = 0; ring->total_packets = 0; r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + r_idx + 1); } r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); @@ -1876,7 +1891,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) ring->total_bytes = 0; ring->total_packets = 0; r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); + r_idx + 1); } /* EIAM disabled interrupts (on this vector) for us */ @@ -1896,7 +1911,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data) static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *rx_ring = NULL; int work_done = 0; @@ -1918,7 +1933,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable_queues(adapter, - ((u64)1 << q_vector->v_idx)); + ((u64)1 << q_vector->v_idx)); } return work_done; @@ -1935,7 +1950,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *ring = NULL; int work_done = 0, i; @@ -1951,7 +1966,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) #endif tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + r_idx + 1); } /* attempt to distribute budget to each queue fairly, but don't allow @@ -1967,7 +1982,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) #endif ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); r_idx = 
find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); + r_idx + 1); } r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); @@ -1979,7 +1994,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable_queues(adapter, - ((u64)1 << q_vector->v_idx)); + ((u64)1 << q_vector->v_idx)); return 0; } @@ -1997,7 +2012,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_ring *tx_ring = NULL; int work_done = 0; @@ -2019,14 +2034,15 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) if (adapter->tx_itr_setting & 1) ixgbe_set_itr_msix(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + ixgbe_irq_enable_queues(adapter, + ((u64)1 << q_vector->v_idx)); } return work_done; } static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, - int r_idx) + int r_idx) { struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; @@ -2035,7 +2051,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, } static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, - int t_idx) + int t_idx) { struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; @@ -2055,7 +2071,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, * mapping configurations in here. **/ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, - int vectors) + int vectors) { int v_start = 0; int rxr_idx = 0, txr_idx = 0; @@ -2122,7 +2138,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) struct net_device *netdev = adapter->netdev; irqreturn_t (*handler)(int, void *); int i, vector, q_vectors, err; - int ri=0, ti=0; + int ri = 0, ti = 0; /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; @@ -2133,26 +2149,24 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) goto out; #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ - (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ - &ixgbe_msix_clean_many) + (!(_v)->txr_count) ? 
&ixgbe_msix_clean_rx : \ + &ixgbe_msix_clean_many) for (vector = 0; vector < q_vectors; vector++) { handler = SET_HANDLER(adapter->q_vector[vector]); - if(handler == &ixgbe_msix_clean_rx) { + if (handler == &ixgbe_msix_clean_rx) { sprintf(adapter->name[vector], "%s-%s-%d", netdev->name, "rx", ri++); - } - else if(handler == &ixgbe_msix_clean_tx) { + } else if (handler == &ixgbe_msix_clean_tx) { sprintf(adapter->name[vector], "%s-%s-%d", netdev->name, "tx", ti++); - } - else + } else sprintf(adapter->name[vector], "%s-%s-%d", netdev->name, "TxRx", vector); err = request_irq(adapter->msix_entries[vector].vector, - handler, 0, adapter->name[vector], - adapter->q_vector[vector]); + handler, 0, adapter->name[vector], + adapter->q_vector[vector]); if (err) { e_err(probe, "request_irq failed for MSIX interrupt " "Error: %d\n", err); @@ -2162,7 +2176,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) sprintf(adapter->name[vector], "%s:lsc", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, - ixgbe_msix_lsc, 0, adapter->name[vector], netdev); + ixgbe_msix_lsc, 0, adapter->name[vector], netdev); if (err) { e_err(probe, "request_irq for msix_lsc failed: %d\n", err); goto free_queue_irqs; @@ -2173,7 +2187,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) free_queue_irqs: for (i = vector - 1; i >= 0; i--) free_irq(adapter->msix_entries[--vector].vector, - adapter->q_vector[i]); + adapter->q_vector[i]); adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); @@ -2191,13 +2205,13 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, - q_vector->tx_itr, - tx_ring->total_packets, - tx_ring->total_bytes); + q_vector->tx_itr, + tx_ring->total_packets, + tx_ring->total_bytes); q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, - q_vector->rx_itr, - rx_ring->total_packets, - rx_ring->total_bytes); + q_vector->rx_itr, + rx_ring->total_packets, + rx_ring->total_bytes); current_itr = max(q_vector->rx_itr, q_vector->tx_itr); @@ -2231,7 +2245,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) * ixgbe_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ -static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) +static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + bool flush) { u32 mask; @@ -2252,8 +2267,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) mask |= IXGBE_EIMS_FLOW_DIR; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - ixgbe_irq_enable_queues(adapter, ~0); - IXGBE_WRITE_FLUSH(&adapter->hw); + if (queues) + ixgbe_irq_enable_queues(adapter, ~0); + if (flush) + IXGBE_WRITE_FLUSH(&adapter->hw); if (adapter->num_vfs > 32) { u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; @@ -2275,7 +2292,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) u32 eicr; /* - * Workaround for silicon errata. Mask the interrupts + * Workaround for silicon errata on 82598. Mask the interrupts * before the read of EICR. */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -2284,10 +2301,15 @@ static irqreturn_t ixgbe_intr(int irq, void *data) * therefore no explict interrupt disable is necessary */ eicr = IXGBE_READ_REG(hw, IXGBE_EICR); if (!eicr) { - /* shared interrupt alert! + /* + * shared interrupt alert! 
* make sure interrupts are enabled because the read will - * have disabled interrupts due to EIAM */ - ixgbe_irq_enable(adapter); + * have disabled interrupts due to EIAM + * finish the workaround of silicon errata on 82598. Unmask + * the interrupt that we masked before the EICR read. + */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, true, true); return IRQ_NONE; /* Not our interrupt */ } @@ -2311,6 +2333,14 @@ static irqreturn_t ixgbe_intr(int irq, void *data) __napi_schedule(&(q_vector->napi)); } + /* + * re-enable link(maybe) and non-queue interrupts, no flush. + * ixgbe_poll will re-enable the queue interrupts + */ + + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable(adapter, false, false); + return IRQ_HANDLED; } @@ -2343,10 +2373,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) err = ixgbe_request_msix_irqs(adapter); } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, - netdev->name, netdev); + netdev->name, netdev); } else { err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, - netdev->name, netdev); + netdev->name, netdev); } if (err) @@ -2370,7 +2400,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) i--; for (; i >= 0; i--) { free_irq(adapter->msix_entries[i].vector, - adapter->q_vector[i]); + adapter->q_vector[i]); } ixgbe_reset_q_vectors(adapter); @@ -2413,7 +2443,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; IXGBE_WRITE_REG(hw, IXGBE_EITR(0), - EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); + EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param)); ixgbe_set_ivar(adapter, 0, 0, 0); ixgbe_set_ivar(adapter, 1, 0, 0); @@ -2425,95 +2455,140 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) } /** - * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset * @adapter: board private structure + * @ring: structure containing ring specific data * - * Configure the Tx unit of the MAC after a reset. + * Configure the Tx descriptor ring after a reset. **/ -static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) { - u64 tdba; struct ixgbe_hw *hw = &adapter->hw; - u32 i, j, tdlen, txctrl; + u64 tdba = ring->dma; + int wait_loop = 10; + u32 txdctl; + u16 reg_idx = ring->reg_idx; - /* Setup the HW Tx Head and Tail descriptor pointers */ - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = adapter->tx_ring[i]; - j = ring->reg_idx; - tdba = ring->dma; - tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); - IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), - (tdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); - IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); - IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); - adapter->tx_ring[i]->head = IXGBE_TDH(j); - adapter->tx_ring[i]->tail = IXGBE_TDT(j); - /* - * Disable Tx Head Writeback RO bit, since this hoses - * bookkeeping if things aren't delivered in order. 
- */ - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); - break; - case ixgbe_mac_82599EB: - default: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); - break; - } - txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); - break; - case ixgbe_mac_82599EB: - default: - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); - break; - } + /* disable queue to avoid issues while updating state */ + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), + txdctl & ~IXGBE_TXDCTL_ENABLE); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), + (tdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); + ring->head = IXGBE_TDH(reg_idx); + ring->tail = IXGBE_TDT(reg_idx); + + /* configure fetching thresholds */ + if (adapter->rx_itr_setting == 0) { + /* cannot set wthresh when itr==0 */ + txdctl &= ~0x007F0000; + } else { + /* enable WTHRESH=8 descriptors, to encourage burst writeback */ + txdctl |= (8 << 16); + } + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + /* PThresh workaround for Tx hang with DFP enabled. */ + txdctl |= 32; } - if (hw->mac.type == ixgbe_mac_82599EB) { - u32 rttdcs; - u32 mask; + /* reinitialize flowdirector state */ + set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); - /* disable the arbiter while setting MTQC */ - rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); - rttdcs |= IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + /* enable queue */ + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); - /* set transmit pool layout */ - mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); - switch (adapter->flags & mask) { + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; - case (IXGBE_FLAG_SRIOV_ENABLED): - IXGBE_WRITE_REG(hw, IXGBE_MTQC, - (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); - break; + /* poll to verify queue is enabled */ + do { + msleep(1); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) + e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); +} - case (IXGBE_FLAG_DCB_ENABLED): - /* We enable 8 traffic classes, DCB only */ - IXGBE_WRITE_REG(hw, IXGBE_MTQC, - (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); - break; +static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rttdcs; + u32 mask; - default: - IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); - break; - } + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + /* disable the arbiter while setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + /* set transmit pool layout */ + mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED); + switch (adapter->flags & mask) { + + case (IXGBE_FLAG_SRIOV_ENABLED): + IXGBE_WRITE_REG(hw, IXGBE_MTQC, + (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); + break; + + case (IXGBE_FLAG_DCB_ENABLED): + /* We enable 8 traffic classes, DCB only */ + IXGBE_WRITE_REG(hw, 
IXGBE_MTQC, + (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ)); + break; + + default: + IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); + break; + } + + /* re-enable the arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +} - /* re-eable the arbiter */ - rttdcs &= ~IXGBE_RTTDCS_ARBDIS; - IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); +/** + * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 dmatxctl; + u32 i; + + ixgbe_setup_mtqc(adapter); + + if (hw->mac.type != ixgbe_mac_82598EB) { + /* DMATXCTL.EN must be before Tx queues are enabled */ + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); } + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); } #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { u32 srrctl; int index; @@ -2529,6 +2604,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + if (adapter->num_vfs) + srrctl |= IXGBE_SRRCTL_DROP_EN; srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & IXGBE_SRRCTL_BSIZEHDR_MASK; @@ -2549,20 +2626,46 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); } -static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) +static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { - u32 mrqc = 0; + struct ixgbe_hw *hw = &adapter->hw; + static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, + 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, + 0x6A3E67EA, 0x14364D17, 0x3BED200D}; + u32 mrqc = 0, reta = 0; + u32 rxcsum; + int i, j; int mask; - if (!(adapter->hw.mac.type == ixgbe_mac_82599EB)) - return mrqc; + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); - mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED + /* Fill out redirection table */ + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == adapter->ring_feature[RING_F_RSS].indices) + j = 0; + /* reta = 4-byte sliding window of + * 0x00..(indices-1)(indices-1)00..etc. 
*/ + reta = (reta << 8) | (j * 0x11); + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + } + + /* Disable indicating checksum in descriptor, enables RSS hash */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED; + else + mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED #ifdef CONFIG_IXGBE_DCB - | IXGBE_FLAG_DCB_ENABLED + | IXGBE_FLAG_DCB_ENABLED #endif - | IXGBE_FLAG_SRIOV_ENABLED - ); + | IXGBE_FLAG_SRIOV_ENABLED + ); switch (mask) { case (IXGBE_FLAG_RSS_ENABLED): @@ -2580,7 +2683,13 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) break; } - return mrqc; + /* Perform hash on these packet types */ + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } /** @@ -2588,25 +2697,26 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) * @adapter: address of board private structure * @index: index of ring to set **/ -static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) +static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) { - struct ixgbe_ring *rx_ring; struct ixgbe_hw *hw = &adapter->hw; - int j; u32 rscctrl; int rx_buf_len; + u16 reg_idx = ring->reg_idx; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + return; - rx_ring = adapter->rx_ring[index]; - j = rx_ring->reg_idx; - rx_buf_len = rx_ring->rx_buf_len; - rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); + rx_buf_len = ring->rx_buf_len; + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); rscctrl |= IXGBE_RSCCTL_RSCEN; /* * we must limit the number of descriptors so that the * total size of max desc * buf_len is not greater * than 65535 */ - if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { #if (MAX_SKB_FRAGS > 16) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; #elif (MAX_SKB_FRAGS > 8) @@ -2624,31 +2734,181 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index) else rscctrl |= IXGBE_RSCCTL_MAXDESC_4; } - IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl); + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } /** - * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset - * @adapter: board private structure + * ixgbe_set_uta - Set unicast filter table address + * @adapter: board private structure * - * Configure the Rx unit of the MAC after a reset. + * The unicast table address is a register array of 32-bit registers. 
+ * The table is meant to be used in a way similar to how the MTA is used + * however due to certain limitations in the hardware it is necessary to + * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous + * enable bit to allow vlan tag stripping when promiscuous mode is enabled **/ -static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +static void ixgbe_set_uta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + + /* The UTA table only exists on 82599 hardware and newer */ + if (hw->mac.type < ixgbe_mac_82599EB) + return; + + /* we only need to do this if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0); +} + +#define IXGBE_MAX_RX_DESC_POLL 10 +static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + int reg_idx = ring->reg_idx; + int wait_loop = IXGBE_MAX_RX_DESC_POLL; + u32 rxdctl; + + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && + !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) + return; + + do { + msleep(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { + e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " + "the polling period\n", reg_idx); + } +} + +void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 rdba = ring->dma; + u32 rxdctl; + u16 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), + rxdctl & ~IXGBE_RXDCTL_ENABLE); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); + ring->head = IXGBE_RDH(reg_idx); + ring->tail = IXGBE_RDT(reg_idx); + + ixgbe_configure_srrctl(adapter, ring); + ixgbe_configure_rscctl(adapter, ring); + + if (hw->mac.type == ixgbe_mac_82598EB) { + /* + * enable cache line friendly hardware writes: + * PTHRESH=32 descriptors (half the internal cache), + * this also removes ugly rx_no_buffer_count increment + * HTHRESH=4 descriptors (to minimize latency on fetch) + * WTHRESH=8 burst writeback up to two cache lines + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; + } + + /* enable receive descriptor ring */ + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); + + ixgbe_rx_desc_queue_enable(adapter, ring); + ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); +} + +static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int p; + + /* PSRTYPE must be initialized in non 82598 adapters */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_L2HDR | + IXGBE_PSRTYPE_IPV6HDR; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) + psrtype |= (adapter->num_rx_queues_per_pool << 29); + + for (p = 0; p < adapter->num_rx_pools; p++) + IXGBE_WRITE_REG(hw, 
IXGBE_PSRTYPE(adapter->num_vfs + p), + psrtype); +} + +static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 gcr_ext; + u32 vt_reg_bits; + u32 reg_offset, vf_shift; + u32 vmdctl; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + + vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; + vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); + + vf_shift = adapter->num_vfs % 32; + reg_offset = (adapter->num_vfs > 32) ? 1 : 0; + + /* Enable only the PF's pool for Tx/Rx */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + + /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ + hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); + + /* + * Set up VF register offsets for selected VT Mode, + * i.e. 32 or 64 VFs for SR-IOV + */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; + gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + + /* enable Tx loopback for VF/PF communication */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); +} + +static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) { - u64 rdba; struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring *rx_ring; struct net_device *netdev = adapter->netdev; int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i, j; - u32 rdlen, rxctrl, rxcsum; - static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, - 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, - 0x6A3E67EA, 0x14364D17, 0x3BED200D}; - u32 fctrl, hlreg0; - u32 reta = 0, mrqc = 0; - u32 rdrxctl; int rx_buf_len; + struct ixgbe_ring *rx_ring; + int i; + u32 mhadd, hlreg0; /* Decide whether to use packet split mode or not */ /* Do not use packet split if we're in SR-IOV Mode */ @@ -2658,62 +2918,40 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { rx_buf_len = IXGBE_RX_HDR_SIZE; - if (hw->mac.type == ixgbe_mac_82599EB) { - /* PSRTYPE must be initialized in 82599 */ - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR | - IXGBE_PSRTYPE_L2HDR; - IXGBE_WRITE_REG(hw, - IXGBE_PSRTYPE(adapter->num_vfs), - psrtype); - } } else { if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && (netdev->mtu <= ETH_DATA_LEN)) rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; else - rx_buf_len = ALIGN(max_frame, 1024); + rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024); } - fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_BAM; - fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ - fctrl |= IXGBE_FCTRL_PMCF; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); +#ifdef IXGBE_FCOE + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; + +#endif /* IXGBE_FCOE */ + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= 
max_frame << IXGBE_MHADD_MFS_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - if (adapter->netdev->mtu <= ETH_DATA_LEN) - hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; - else - hlreg0 |= IXGBE_HLREG0_JUMBOEN; -#ifdef IXGBE_FCOE - if (netdev->features & NETIF_F_FCOE_MTU) - hlreg0 |= IXGBE_HLREG0_JUMBOEN; -#endif + /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ + hlreg0 |= IXGBE_HLREG0_JUMBOEN; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc); - /* disable receives while setting up the descriptors */ - rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); - /* * Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; - rdba = rx_ring->dma; - j = rx_ring->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); - IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); - IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); - IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); - IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); - rx_ring->head = IXGBE_RDH(j); - rx_ring->tail = IXGBE_RDT(j); rx_ring->rx_buf_len = rx_buf_len; if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) @@ -2729,15 +2967,21 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) rx_ring->rx_buf_len = - IXGBE_FCOE_JUMBO_FRAME_SIZE; + IXGBE_FCOE_JUMBO_FRAME_SIZE; } } - #endif /* IXGBE_FCOE */ - ixgbe_configure_srrctl(adapter, rx_ring); } - if (hw->mac.type == ixgbe_mac_82598EB) { +} + +static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: /* * For VMDq support of different descriptor types or * buffer sizes through the use of multiple SRRCTL @@ -2748,110 +2992,66 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) * effects of setting this bit are only that SRRCTL must be * fully programmed [0..15] */ - rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); rdrxctl |= IXGBE_RDRXCTL_MVMEN; - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + break; + case ixgbe_mac_82599EB: + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + /* hardware requires some bits to be set by default */ + rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + break; + default: + /* We should do nothing since we don't know this hardware */ + return; } - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - u32 vt_reg_bits; - u32 reg_offset, vf_shift; - u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN - | IXGBE_VT_CTL_REPLEN; - vt_reg_bits |= (adapter->num_vfs << - IXGBE_VT_CTL_POOL_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); - IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0); - - vf_shift = adapter->num_vfs % 32; - reg_offset = adapter->num_vfs / 32; - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0); - /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << 
vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - ixgbe_set_vmolr(hw, adapter->num_vfs, true); - } - - /* Program MRQC for the distribution of queues */ - mrqc = ixgbe_setup_mrqc(adapter); - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - /* Fill out redirection table */ - for (i = 0, j = 0; i < 128; i++, j++) { - if (j == adapter->ring_feature[RING_F_RSS].indices) - j = 0; - /* reta = 4-byte sliding window of - * 0x00..(indices-1)(indices-1)00..etc. */ - reta = (reta << 8) | (j * 0x11); - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); - } - - /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); - - if (hw->mac.type == ixgbe_mac_82598EB) - mrqc |= IXGBE_MRQC_RSSEN; - /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; - } - IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); +} - if (adapter->num_vfs) { - u32 reg; +/** + * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int i; + u32 rxctrl; - /* Map PF MAC address in RAR Entry 0 to first pool - * following VFs */ - hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); + /* disable receives while setting up the descriptors */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); - /* Set up VF register offsets for selected VT Mode, i.e. 
- * 64 VFs for SR-IOV */ - reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - reg |= IXGBE_GCR_EXT_SRIOV; - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg); - } + ixgbe_setup_psrtype(adapter); + ixgbe_setup_rdrxctl(adapter); - rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + /* Program registers for the distribution of queues */ + ixgbe_setup_mrqc(adapter); - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED || - adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) { - /* Disable indicating checksum in descriptor, enables - * RSS hash */ - rxcsum |= IXGBE_RXCSUM_PCSD; - } - if (!(rxcsum & IXGBE_RXCSUM_PCSD)) { - /* Enable IPv4 payload checksum for UDP fragments - * if PCSD is not set */ - rxcsum |= IXGBE_RXCSUM_IPPCSE; - } + ixgbe_set_uta(adapter); - IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + /* set_rx_buffer_len must be called before ring initialization */ + ixgbe_set_rx_buffer_len(adapter); - if (hw->mac.type == ixgbe_mac_82599EB) { - rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); - rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; - rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; - IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); - } + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - /* Enable 82599 HW-RSC */ - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_configure_rscctl(adapter, i); + /* disable drop enable for 82598 parts */ + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; - /* Disable RSC for ACK packets */ - IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, - (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); - } + /* enable all receives */ + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); } static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) @@ -2862,6 +3062,7 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) /* add VID to filter table */ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); + set_bit(vid, adapter->active_vlans); } static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) @@ -2870,16 +3071,9 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) struct ixgbe_hw *hw = &adapter->hw; int pool_ndx = adapter->num_vfs; - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_disable(adapter); - - vlan_group_set_device(adapter->vlgrp, vid, NULL); - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter); - /* remove VID from filter table */ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); + clear_bit(vid, adapter->active_vlans); } /** @@ -2889,27 +3083,45 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + u32 vlnctrl; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * @adapter: driver data + */ +static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; + + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VFE; + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +/** + * 
ixgbe_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; int i, j; switch (hw->mac.type) { case ixgbe_mac_82598EB: - vlnctrl &= ~IXGBE_VLNCTRL_VFE; -#ifdef CONFIG_IXGBE_DCB - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - vlnctrl &= ~IXGBE_VLNCTRL_VME; -#endif - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); break; case ixgbe_mac_82599EB: - vlnctrl &= ~IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - break; -#endif for (i = 0; i < adapter->num_rx_queues; i++) { j = adapter->rx_ring[i]->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); @@ -2923,25 +3135,22 @@ static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) } /** - * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering + * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping * @adapter: driver data */ -static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) +static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + u32 vlnctrl; int i, j; switch (hw->mac.type) { case ixgbe_mac_82598EB: - vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl |= IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); break; case ixgbe_mac_82599EB: - vlnctrl |= IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); for (i = 0; i < adapter->num_rx_queues; i++) { j = adapter->rx_ring[i]->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); @@ -2954,40 +3163,14 @@ static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) } } -static void ixgbe_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_disable(adapter); - adapter->vlgrp = grp; - - /* - * For a DCB driver, always enable VLAN tag stripping so we can - * still receive traffic from a DCB-enabled host even if we're - * not in DCB mode. 
- */ - ixgbe_vlan_filter_enable(adapter); - - ixgbe_vlan_rx_add_vid(netdev, 0); - - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable(adapter); -} - static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { - ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); + u16 vid; - if (adapter->vlgrp) { - u16 vid; - for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { - if (!vlan_group_get_device(adapter->vlgrp, vid)) - continue; - ixgbe_vlan_rx_add_vid(adapter->netdev, vid); - } - } + ixgbe_vlan_rx_add_vid(adapter->netdev, 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgbe_vlan_rx_add_vid(adapter->netdev, vid); } /** @@ -3052,6 +3235,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev) fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + /* set all bits that we expect to always be set */ + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ + fctrl |= IXGBE_FCTRL_PMCF; + /* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); @@ -3097,6 +3285,11 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + if (netdev->features & NETIF_F_HW_VLAN_RX) + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -3157,7 +3350,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) u32 txdctl; int i, j; - ixgbe_dcb_check_config(&adapter->dcb_cfg); + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 65536); + return; + } + + if (hw->mac.type == ixgbe_mac_82598EB) + netif_set_gso_max_size(adapter->netdev, 32768); + ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); @@ -3172,7 +3373,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); } /* Enable VLAN tag insert/strip */ - ixgbe_vlan_filter_enable(adapter); + adapter->netdev->features |= NETIF_F_HW_VLAN_RX; hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); } @@ -3184,23 +3385,13 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; int i; - ixgbe_set_rx_mode(netdev); - - ixgbe_restore_vlan(adapter); #ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - if (hw->mac.type == ixgbe_mac_82598EB) - netif_set_gso_max_size(netdev, 32768); - else - netif_set_gso_max_size(netdev, 65536); - ixgbe_configure_dcb(adapter); - } else { - netif_set_gso_max_size(netdev, 65536); - } -#else - netif_set_gso_max_size(netdev, 65536); + ixgbe_configure_dcb(adapter); #endif + ixgbe_set_rx_mode(netdev); + ixgbe_restore_vlan(adapter); + #ifdef IXGBE_FCOE if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ixgbe_configure_fcoe(adapter); @@ -3209,17 +3400,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i]->atr_sample_rate = - adapter->atr_sample_rate; + adapter->atr_sample_rate; ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc); } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc); } + ixgbe_configure_virtualization(adapter); ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); - for (i = 0; i < adapter->num_rx_queues; i++) - 
ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i], - (adapter->rx_ring[i]->count - 1)); } static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) @@ -3290,7 +3479,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) goto link_cfg_out; if (hw->mac.ops.get_link_capabilities) - ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); + ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, + &negotiation); if (ret) goto link_cfg_out; @@ -3300,62 +3490,15 @@ link_cfg_out: return ret; } -#define IXGBE_MAX_RX_DESC_POLL 10 -static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, - int rxr) +static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) { - int j = adapter->rx_ring[rxr]->reg_idx; - int k; - - for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { - if (IXGBE_READ_REG(&adapter->hw, - IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE) - break; - else - msleep(1); - } - if (k >= IXGBE_MAX_RX_DESC_POLL) { - e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " - "the polling period\n", rxr); - } - ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr], - (adapter->rx_ring[rxr]->count - 1)); -} - -static int ixgbe_up_complete(struct ixgbe_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - int i, j = 0; - int num_rx_rings = adapter->num_rx_queues; - int err; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - u32 txdctl, rxdctl, mhadd; - u32 dmatxctl; - u32 gpie; - u32 ctrl_ext; - - ixgbe_get_hw_control(adapter); - - if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) || - (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | - IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); - } else { - /* MSI only */ - gpie = 0; - } - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - gpie |= IXGBE_GPIE_VTMODE_64; - } - /* XXX: to interrupt immediately for EICS writes, enable this */ - /* gpie |= IXGBE_GPIE_EIMEN; */ - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - } + u32 gpie = 0; if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD; + gpie |= IXGBE_GPIE_EIAME; /* * use EIAM to auto-mask when MSI-X interrupt is asserted * this saves a register write for every interrupt @@ -3376,98 +3519,33 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); } - /* Enable Thermal over heat sensor interrupt */ - if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); - gpie |= IXGBE_SDP0_GPIEN; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + /* XXX: to interrupt immediately for EICS writes, enable this */ + /* gpie |= IXGBE_GPIE_EIMEN; */ + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + gpie |= IXGBE_GPIE_VTMODE_64; } - /* Enable fan failure interrupt if media type is copper */ - if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + /* Enable fan failure interrupt */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) gpie |= IXGBE_SDP1_GPIEN; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - } - if (hw->mac.type == ixgbe_mac_82599EB) { - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (hw->mac.type == ixgbe_mac_82599EB) gpie |= IXGBE_SDP1_GPIEN; gpie |= IXGBE_SDP2_GPIEN; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - } -#ifdef IXGBE_FCOE - /* adjust max frame to be able to do baby jumbo 
for FCoE */ - if ((netdev->features & NETIF_F_FCOE_MTU) && - (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) - max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; - -#endif /* IXGBE_FCOE */ - mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); - if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { - mhadd &= ~IXGBE_MHADD_MFS_MASK; - mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; - - IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); - } - - for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i]->reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); - if (adapter->rx_itr_setting == 0) { - /* cannot set wthresh when itr==0 */ - txdctl &= ~0x007F0000; - } else { - /* enable WTHRESH=8 descriptors, to encourage burst writeback */ - txdctl |= (8 << 16); - } - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); - } + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +} - if (hw->mac.type == ixgbe_mac_82599EB) { - /* DMATXCTL.EN must be set after all Tx queue config is done */ - dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); - dmatxctl |= IXGBE_DMATXCTL_TE; - IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); - } - for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i]->reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); - txdctl |= IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); - if (hw->mac.type == ixgbe_mac_82599EB) { - int wait_loop = 10; - /* poll for Tx Enable ready */ - do { - msleep(1); - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); - } while (--wait_loop && - !(txdctl & IXGBE_TXDCTL_ENABLE)); - if (!wait_loop) - e_err(drv, "Could not enable Tx Queue %d\n", j); - } - } +static int ixgbe_up_complete(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err; + u32 ctrl_ext; - for (i = 0; i < num_rx_rings; i++) { - j = adapter->rx_ring[i]->reg_idx; - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); - /* enable PTHRESH=32 descriptors (half the internal cache) - * and HTHRESH=0 descriptors (to minimize latency on fetch), - * this also removes a pesky rx_no_buffer_count increment */ - rxdctl |= 0x0020; - rxdctl |= IXGBE_RXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl); - if (hw->mac.type == ixgbe_mac_82599EB) - ixgbe_rx_desc_queue_enable(adapter, i); - } - /* enable all receives */ - rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); - if (hw->mac.type == ixgbe_mac_82598EB) - rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN); - else - rxdctl |= IXGBE_RXCTRL_RXEN; - hw->mac.ops.enable_rx_dma(hw, rxdctl); + ixgbe_get_hw_control(adapter); + ixgbe_setup_gpie(adapter); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ixgbe_configure_msix(adapter); @@ -3483,8 +3561,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_EICR); - - ixgbe_irq_enable(adapter); + ixgbe_irq_enable(adapter, true, true); /* * If this adapter has a fan, check to see if we had a failure @@ -3525,12 +3602,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) e_err(probe, "link_config FAILED %d\n", err); } - for (i = 0; i < adapter->num_tx_queues; i++) - set_bit(__IXGBE_FDIR_INIT_DONE, - &(adapter->tx_ring[i]->reinit_state)); - /* enable transmits */ - netif_tx_start_all_queues(netdev); + netif_tx_start_all_queues(adapter->netdev); /* bring the link up in the watchdog, this could race with our first * link up interrupt but shouldn't be a problem */ @@ -3609,21 +3682,24 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) * @rx_ring: ring to free buffers from **/ static void ixgbe_clean_rx_ring(struct 
ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; - /* Free all the Rx ring sk_buffs */ + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct ixgbe_rx_buffer *rx_buffer_info; rx_buffer_info = &rx_ring->rx_buffer_info[i]; if (rx_buffer_info->dma) { dma_unmap_single(&pdev->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); rx_buffer_info->dma = 0; } @@ -3635,7 +3711,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, if (IXGBE_RSC_CB(this)->delay_unmap) { dma_unmap_single(&pdev->dev, IXGBE_RSC_CB(this)->dma, - rx_ring->rx_buf_len, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); IXGBE_RSC_CB(this)->dma = 0; IXGBE_RSC_CB(skb)->delay_unmap = false; @@ -3677,14 +3753,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, * @tx_ring: ring to be cleaned **/ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct ixgbe_tx_buffer *tx_buffer_info; unsigned long size; unsigned int i; - /* Free all the Tx ring sk_buffs */ + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); @@ -3736,6 +3815,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) u32 rxctrl; u32 txdctl; int i, j; + int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; /* signal that we are down to the interrupt handler */ set_bit(__IXGBE_DOWN, &adapter->state); @@ -3774,6 +3854,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ixgbe_napi_disable_all(adapter); + /* Cleanup the affinity_hint CPU mask memory and callback */ + for (i = 0; i < num_q_vectors; i++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(adapter->msix_entries[i]. vector, NULL); + /* release the CPU mask memory */ + free_cpumask_var(q_vector->affinity_mask); + } + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) cancel_work_sync(&adapter->fdir_reinit_task); @@ -3786,13 +3875,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter) j = adapter->tx_ring[i]->reg_idx; txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), - (txdctl & ~IXGBE_TXDCTL_ENABLE)); + (txdctl & ~IXGBE_TXDCTL_ENABLE)); } /* Disable the Tx DMA engine on 82599 */ if (hw->mac.type == ixgbe_mac_82599EB) IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, - (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & - ~IXGBE_DMATXCTL_TE)); + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); /* power down the optics */ if (hw->phy.multispeed_fiber) @@ -3822,7 +3911,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) static int ixgbe_poll(struct napi_struct *napi, int budget) { struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); + container_of(napi, struct ixgbe_q_vector, napi); struct ixgbe_adapter *adapter = q_vector->adapter; int tx_clean_complete, work_done = 0; @@ -3932,7 +4021,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) * Rx load across CPUs using RSS. 
* **/ -static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) { bool ret = false; struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; @@ -4024,7 +4113,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) * fallthrough conditions. * **/ -static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) { /* Start with base case */ adapter->num_rx_queues = 1; @@ -4033,7 +4122,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues_per_pool = 1; if (ixgbe_set_sriov_queues(adapter)) - return; + goto done; #ifdef IXGBE_FCOE if (ixgbe_set_fcoe_queues(adapter)) @@ -4056,12 +4145,14 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_tx_queues = 1; done: - /* Notify the stack of the (possibly) reduced Tx Queue count. */ + /* Notify the stack of the (possibly) reduced queue counts. */ netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); + return netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); } static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) + int vectors) { int err, vector_threshold; @@ -4080,7 +4171,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, */ while (vectors >= vector_threshold) { err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); + vectors); if (!err) /* Success in acquiring all requested vectors. */ break; else if (err < 0) @@ -4107,7 +4198,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, * vectors we were allocated. */ adapter->num_msix_vectors = min(vectors, - adapter->max_msix_q_vectors + NON_Q_VECTORS); + adapter->max_msix_q_vectors + NON_Q_VECTORS); } } @@ -4178,12 +4269,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) } for ( ; i < 5; i++) { adapter->tx_ring[i]->reg_idx = - ((i + 2) << 4); + ((i + 2) << 4); adapter->rx_ring[i]->reg_idx = i << 4; } for ( ; i < dcb_i; i++) { adapter->tx_ring[i]->reg_idx = - ((i + 8) << 3); + ((i + 8) << 3); adapter->rx_ring[i]->reg_idx = i << 4; } @@ -4226,7 +4317,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) * Cache the descriptor ring offsets for Flow Director to the assigned rings. * **/ -static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) { int i; bool ret = false; @@ -4383,7 +4474,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) adapter->node = cur_node; } ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, - adapter->node); + adapter->node); if (!ring) ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); if (!ring) @@ -4407,7 +4498,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) adapter->node = cur_node; } ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, - adapter->node); + adapter->node); if (!ring) ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); if (!ring) @@ -4453,7 +4544,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) * (roughly) the same number of vectors as there are CPU's. 
*/ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)num_online_cpus()) + NON_Q_VECTORS; + (int)num_online_cpus()) + NON_Q_VECTORS; /* * At the same time, hardware can only support a maximum of @@ -4467,7 +4558,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. */ adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); + sizeof(struct msix_entry), GFP_KERNEL); if (adapter->msix_entries) { for (vector = 0; vector < v_budget; vector++) adapter->msix_entries[vector].entry = vector; @@ -4486,7 +4577,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ixgbe_disable_sriov(adapter); - ixgbe_set_num_queues(adapter); + err = ixgbe_set_num_queues(adapter); + if (err) + return err; err = pci_enable_msi(adapter->pdev); if (!err) { @@ -4529,10 +4622,10 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), - GFP_KERNEL, adapter->node); + GFP_KERNEL, adapter->node); if (!q_vector) q_vector = kzalloc(sizeof(struct ixgbe_q_vector), - GFP_KERNEL); + GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; @@ -4611,7 +4704,9 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) int err; /* Number of supported queues */ - ixgbe_set_num_queues(adapter); + err = ixgbe_set_num_queues(adapter); + if (err) + return err; err = ixgbe_set_interrupt_capability(adapter); if (err) { @@ -4693,8 +4788,8 @@ static void ixgbe_sfp_timer(unsigned long data) static void ixgbe_sfp_task(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - sfp_task); + struct ixgbe_adapter, + sfp_task); struct ixgbe_hw *hw = &adapter->hw; if ((hw->phy.type == ixgbe_phy_nl) && @@ -4719,7 +4814,7 @@ static void ixgbe_sfp_task(struct work_struct *work) reschedule: if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state)) mod_timer(&adapter->sfp_timer, - round_jiffies(jiffies + (2 * HZ))); + round_jiffies(jiffies + (2 * HZ))); } /** @@ -4775,7 +4870,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->atr_sample_rate = 20; } adapter->ring_feature[RING_F_FDIR].indices = - IXGBE_MAX_FDIR_INDICES; + IXGBE_MAX_FDIR_INDICES; adapter->fdir_pballoc = 0; #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; @@ -4806,7 +4901,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_cfg.round_robin_enable = false; adapter->dcb_set_bitmap = 0x00; ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, - adapter->ring_feature[RING_F_DCB].indices); + adapter->ring_feature[RING_F_DCB].indices); #endif @@ -4861,7 +4956,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) * Return 0 on success, negative on failure **/ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -4928,7 +5023,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) * Returns 0 on success, negative on failure **/ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; int size; @@ -5001,7 +5096,7 @@ static int 
ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) * Free all transmit software resources **/ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring) { struct pci_dev *pdev = adapter->pdev; @@ -5039,7 +5134,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) * Free all receive software resources **/ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring) { struct pci_dev *pdev = adapter->pdev; @@ -5333,6 +5428,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u64 total_mpc = 0; u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; u64 non_eop_descs = 0, restart_queue = 0; + struct ixgbe_hw_stats *hwstats = &adapter->stats; if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) @@ -5343,7 +5439,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u64 rsc_flush = 0; for (i = 0; i < 16; i++) adapter->hw_rx_no_dma_resources += - IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); for (i = 0; i < adapter->num_rx_queues; i++) { rsc_count += adapter->rx_ring[i]->rsc_count; rsc_flush += adapter->rx_ring[i]->rsc_flush; @@ -5361,119 +5457,118 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) non_eop_descs += adapter->rx_ring[i]->non_eop_descs; adapter->non_eop_descs = non_eop_descs; - adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); for (i = 0; i < 8; i++) { /* for packet buffers not used, the register should read 0 */ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i)); missed_rx += mpc; - adapter->stats.mpc[i] += mpc; - total_mpc += adapter->stats.mpc[i]; + hwstats->mpc[i] += mpc; + total_mpc += hwstats->mpc[i]; if (hw->mac.type == ixgbe_mac_82598EB) - adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); - adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); - adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); if (hw->mac.type == ixgbe_mac_82599EB) { - adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, - IXGBE_PXONRXCNT(i)); - adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, - IXGBE_PXOFFRXCNT(i)); - adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + hwstats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); } else { - adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw, - IXGBE_PXONRXC(i)); - adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw, - IXGBE_PXOFFRXC(i)); + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + hwstats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); } - adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw, - IXGBE_PXONTXC(i)); - adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw, - IXGBE_PXOFFTXC(i)); + hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); } - adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + 
hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); /* work around hardware counting issue */ - adapter->stats.gprc -= missed_rx; + hwstats->gprc -= missed_rx; /* 82598 hardware only has a 32 bit counter in the high register */ if (hw->mac.type == ixgbe_mac_82599EB) { u64 tmp; - adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); - tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */ - adapter->stats.gorc += (tmp << 32); - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); - tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */ - adapter->stats.gotc += (tmp << 32); - adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL); - IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ - adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); - adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); - adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); - adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); + tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; + /* 4 high bits of GORC */ + hwstats->gorc += (tmp << 32); + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); + tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; + /* 4 high bits of GOTC */ + hwstats->gotc += (tmp << 32); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); #ifdef IXGBE_FCOE - adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); - adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); - adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); - adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); - adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); - adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); #endif /* IXGBE_FCOE */ } else { - adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); - adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); } bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); - adapter->stats.bprc += bprc; - adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + hwstats->bprc += bprc; + hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); if (hw->mac.type == ixgbe_mac_82598EB) - adapter->stats.mprc -= bprc; - adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); - adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); - adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); - adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); - adapter->stats.prc511 += IXGBE_READ_REG(hw, 
IXGBE_PRC511); - adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); - adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); - adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + hwstats->mprc -= bprc; + hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); - adapter->stats.lxontxc += lxon; + hwstats->lxontxc += lxon; lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - adapter->stats.lxofftxc += lxoff; - adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); - adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); - adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + hwstats->lxofftxc += lxoff; + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); /* * 82598 errata - tx of flow control packets is included in tx counters */ xon_off_tot = lxon + lxoff; - adapter->stats.gptc -= xon_off_tot; - adapter->stats.mptc -= xon_off_tot; - adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); - adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); - adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); - adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); - adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); - adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); - adapter->stats.ptc64 -= xon_off_tot; - adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); - adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); - adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); - adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); - adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); - adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + hwstats->gptc -= xon_off_tot; + hwstats->mptc -= xon_off_tot; + hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN)); + hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hwstats->ptc64 -= xon_off_tot; + hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); /* Fill out the OS statistics structure */ - netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.multicast = hwstats->mprc; /* Rx Errors */ - netdev->stats.rx_errors = adapter->stats.crcerrs + - adapter->stats.rlec; + netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; netdev->stats.rx_dropped = 0; - netdev->stats.rx_length_errors = adapter->stats.rlec; - netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_length_errors = hwstats->rlec; + netdev->stats.rx_crc_errors = hwstats->crcerrs; netdev->stats.rx_missed_errors = total_mpc; } @@ -5532,8 +5627,8 @@ watchdog_short_circuit: static void ixgbe_multispeed_fiber_task(struct work_struct *work) { 
struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - multispeed_fiber_task); + struct ixgbe_adapter, + multispeed_fiber_task); struct ixgbe_hw *hw = &adapter->hw; u32 autoneg; bool negotiation; @@ -5556,8 +5651,8 @@ static void ixgbe_multispeed_fiber_task(struct work_struct *work) static void ixgbe_sfp_config_module_task(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - sfp_config_module_task); + struct ixgbe_adapter, + sfp_config_module_task); struct ixgbe_hw *hw = &adapter->hw; u32 err; @@ -5590,15 +5685,15 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) static void ixgbe_fdir_reinit_task(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - fdir_reinit_task); + struct ixgbe_adapter, + fdir_reinit_task); struct ixgbe_hw *hw = &adapter->hw; int i; if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_FDIR_INIT_DONE, - &(adapter->tx_ring[i]->reinit_state)); + &(adapter->tx_ring[i]->reinit_state)); } else { e_err(probe, "failed to finish FDIR re-initialization, " "ignored adding FDIR ATR filters\n"); @@ -5616,8 +5711,8 @@ static DEFINE_MUTEX(ixgbe_watchdog_lock); static void ixgbe_watchdog_task(struct work_struct *work) { struct ixgbe_adapter *adapter = container_of(work, - struct ixgbe_adapter, - watchdog_task); + struct ixgbe_adapter, + watchdog_task); struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; u32 link_speed; @@ -5648,7 +5743,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) if (link_up || time_after(jiffies, (adapter->link_check_timeout + - IXGBE_TRY_LINK_TIMEOUT))) { + IXGBE_TRY_LINK_TIMEOUT))) { adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); } @@ -5719,8 +5814,8 @@ static void ixgbe_watchdog_task(struct work_struct *work) } static int ixgbe_tso(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len) + struct ixgbe_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, u8 *hdr_len) { struct ixgbe_adv_tx_context_desc *context_desc; unsigned int i; @@ -5743,28 +5838,28 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, iph->tot_len = 0; iph->check = 0; tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + iph->daddr, 0, + IPPROTO_TCP, + 0); } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); } i = tx_ring->next_to_use; tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); /* VLAN MACLEN IPLEN */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); vlan_macip_lens |= ((skb_network_offset(skb)) << - IXGBE_ADVTXD_MACLEN_SHIFT); + IXGBE_ADVTXD_MACLEN_SHIFT); *hdr_len += skb_network_offset(skb); vlan_macip_lens |= (skb_transport_header(skb) - skb_network_header(skb)); @@ -5775,7 +5870,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); + IXGBE_ADVTXD_DTYP_CTXT); if (skb->protocol == htons(ETH_P_IP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; @@ -5803,9 +5898,53 @@ static int 
ixgbe_tso(struct ixgbe_adapter *adapter, return false; } +static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) +{ + u32 rtn = 0; + __be16 protocol; + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) + protocol = ((const struct vlan_ethhdr *)skb->data)-> + h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + rtn |= IXGBE_ADVTXD_TUCMD_IPV4; + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + case IPPROTO_SCTP: + rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; + } + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX what about other V6 headers?? */ + switch (ipv6_hdr(skb)->nexthdr) { + case IPPROTO_TCP: + rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + case IPPROTO_SCTP: + rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; + } + break; + default: + if (unlikely(net_ratelimit())) + e_warn(probe, "partial checksum but proto=%x!\n", + skb->protocol); + break; + } + + return rtn; +} + static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) { struct ixgbe_adv_tx_context_desc *context_desc; unsigned int i; @@ -5816,63 +5955,25 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, (tx_flags & IXGBE_TX_FLAGS_VLAN)) { i = tx_ring->next_to_use; tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); if (tx_flags & IXGBE_TX_FLAGS_VLAN) vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); vlan_macip_lens |= (skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT); + IXGBE_ADVTXD_MACLEN_SHIFT); if (skb->ip_summed == CHECKSUM_PARTIAL) vlan_macip_lens |= (skb_transport_header(skb) - - skb_network_header(skb)); + skb_network_header(skb)); context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); context_desc->seqnum_seed = 0; type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - __be16 protocol; - - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { - const struct vlan_ethhdr *vhdr = - (const struct vlan_ethhdr *)skb->data; - - protocol = vhdr->h_vlan_encapsulated_proto; - } else { - protocol = skb->protocol; - } + IXGBE_ADVTXD_DTYP_CTXT); - switch (protocol) { - case cpu_to_be16(ETH_P_IP): - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; - else if (ip_hdr(skb)->protocol == IPPROTO_SCTP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_SCTP; - break; - case cpu_to_be16(ETH_P_IPV6): - /* XXX what about other V6 headers?? 
*/ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; - else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_SCTP; - break; - default: - if (unlikely(net_ratelimit())) { - e_warn(probe, "partial checksum " - "but proto=%x!\n", - skb->protocol); - } - break; - } - } + if (skb->ip_summed == CHECKSUM_PARTIAL) + type_tucmd_mlhl |= ixgbe_psum(adapter, skb); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); /* use index zero for tx checksum offload */ @@ -5893,9 +5994,9 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, } static int ixgbe_tx_map(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - unsigned int first) + struct ixgbe_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, + unsigned int first) { struct pci_dev *pdev = adapter->pdev; struct ixgbe_tx_buffer *tx_buffer_info; @@ -5990,7 +6091,7 @@ dma_error: /* clear timestamp and dma mappings for remaining portion of packet */ while (count--) { - if (i==0) + if (i == 0) i += tx_ring->count; i--; tx_buffer_info = &tx_ring->tx_buffer_info[i]; @@ -6001,8 +6102,8 @@ dma_error: } static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - int tx_flags, int count, u32 paylen, u8 hdr_len) + struct ixgbe_ring *tx_ring, + int tx_flags, int count, u32 paylen, u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; struct ixgbe_tx_buffer *tx_buffer_info; @@ -6021,17 +6122,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + IXGBE_ADVTXD_POPTS_SHIFT; /* use index 1 context for tso */ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= IXGBE_TXD_POPTS_IXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + IXGBE_ADVTXD_POPTS_SHIFT; } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + IXGBE_ADVTXD_POPTS_SHIFT; if (tx_flags & IXGBE_TX_FLAGS_FCOE) { olinfo_status |= IXGBE_ADVTXD_CC; @@ -6045,10 +6146,10 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, i = tx_ring->next_to_use; while (count--) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); tx_desc->read.cmd_type_len = - cpu_to_le32(cmd_type_len | tx_buffer_info->length); + cpu_to_le32(cmd_type_len | tx_buffer_info->length); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); i++; if (i == tx_ring->count) @@ -6070,7 +6171,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, } static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, - int queue, u32 tx_flags) + int queue, u32 tx_flags) { struct ixgbe_atr_input atr_input; struct tcphdr *th; @@ -6098,7 +6199,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> - IXGBE_TX_FLAGS_VLAN_SHIFT; + IXGBE_TX_FLAGS_VLAN_SHIFT; src_ipv4_addr = iph->saddr; dst_ipv4_addr = iph->daddr; flex_bytes = eth->h_proto; @@ -6117,7 +6218,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, } static int __ixgbe_maybe_stop_tx(struct net_device *netdev, - struct ixgbe_ring *tx_ring, int size) + struct ixgbe_ring *tx_ring, int size) { 
netif_stop_subqueue(netdev, tx_ring->queue_index); /* Herbert's original patch had: @@ -6137,7 +6238,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, } static int ixgbe_maybe_stop_tx(struct net_device *netdev, - struct ixgbe_ring *tx_ring, int size) + struct ixgbe_ring *tx_ring, int size) { if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) return 0; @@ -6183,11 +6284,10 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) return skb_tx_hash(dev, skb); } -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, - struct net_device *netdev) +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, + struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *tx_ring; struct netdev_queue *txq; unsigned int first; unsigned int tx_flags = 0; @@ -6196,7 +6296,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, int count = 0; unsigned int f; - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + if (vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; @@ -6211,8 +6311,6 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, tx_flags |= IXGBE_TX_FLAGS_VLAN; } - tx_ring = adapter->tx_ring[skb->queue_mapping]; - #ifdef IXGBE_FCOE /* for FCoE with DCB, we force the priority to what * was specified by the switch */ @@ -6283,10 +6381,10 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, if (tx_ring->atr_sample_rate) { ++tx_ring->atr_count; if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && - test_bit(__IXGBE_FDIR_INIT_DONE, - &tx_ring->reinit_state)) { + test_bit(__IXGBE_FDIR_INIT_DONE, + &tx_ring->reinit_state)) { ixgbe_atr(adapter, skb, tx_ring->queue_index, - tx_flags); + tx_flags); tx_ring->atr_count = 0; } } @@ -6294,7 +6392,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, txq->tx_bytes += skb->len; txq->tx_packets++; ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, - hdr_len); + hdr_len); ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); } else { @@ -6306,6 +6404,15 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; + + tx_ring = adapter->tx_ring[skb->queue_mapping]; + return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); +} + /** * ixgbe_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure @@ -6436,8 +6543,40 @@ static void ixgbe_netpoll(struct net_device *netdev) } #endif +static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int i; + + /* accurate rx/tx bytes/packets stats */ + dev_txq_stats_fold(netdev, stats); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = adapter->rx_ring[i]; + u64 bytes, packets; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + /* following stats updated by ixgbe_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = 
netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + return stats; +} + + static const struct net_device_ops ixgbe_netdev_ops = { - .ndo_open = ixgbe_open, + .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, .ndo_select_queue = ixgbe_select_queue, @@ -6447,7 +6586,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, .ndo_tx_timeout = ixgbe_tx_timeout, - .ndo_vlan_rx_register = ixgbe_vlan_rx_register, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, .ndo_do_ioctl = ixgbe_ioctl, @@ -6455,6 +6593,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, + .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif @@ -6532,7 +6671,7 @@ err_novfs: * and a hardware reset occur. **/ static int __devinit ixgbe_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; @@ -6577,7 +6716,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + IORESOURCE_MEM), ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@ -6617,7 +6756,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; hw->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + pci_resource_len(pdev, 0)); if (!hw->hw_addr) { err = -EIO; goto err_ioremap; @@ -6661,7 +6800,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, * which might start the timer */ init_timer(&adapter->sfp_timer); - adapter->sfp_timer.function = &ixgbe_sfp_timer; + adapter->sfp_timer.function = ixgbe_sfp_timer; adapter->sfp_timer.data = (unsigned long) adapter; INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task); @@ -6671,7 +6810,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* a new SFP+ module arrival, called from GPI SDP2 context */ INIT_WORK(&adapter->sfp_config_module_task, - ixgbe_sfp_config_module_task); + ixgbe_sfp_config_module_task); ii->get_invariants(hw); @@ -6723,10 +6862,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, ixgbe_probe_vf(adapter, ii); netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_HW_VLAN_TX | - NETIF_F_HW_VLAN_RX | - NETIF_F_HW_VLAN_FILTER; + NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; netdev->features |= NETIF_F_IPV6_CSUM; netdev->features |= NETIF_F_TSO; @@ -6766,8 +6905,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->vlan_features |= NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ - if (pci_using_dac) + if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; @@ -6793,7 +6934,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, hw->mac.ops.disable_tx_laser(hw); init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = &ixgbe_watchdog; + 
adapter->watchdog_timer.function = ixgbe_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; INIT_WORK(&adapter->reset_task, ixgbe_reset_task); @@ -6806,7 +6947,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, switch (pdev->device) { case IXGBE_DEV_ID_82599_KX4: adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | - IXGBE_WUFC_MC | IXGBE_WUFC_BC); + IXGBE_WUFC_MC | IXGBE_WUFC_BC); break; default: adapter->wol = 0; @@ -6819,13 +6960,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* print bus type/speed/width info */ e_dev_info("(PCI Express:%s:%s) %pM\n", - ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": - (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), - ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : - (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : - (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : - "Unknown"), - netdev->dev_addr); + (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" : + hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" : + "Unknown"), + (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" : + hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" : + hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : + "Unknown"), + netdev->dev_addr); ixgbe_read_pba_num_generic(hw, &part_num); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " @@ -6872,7 +7014,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task); if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) - INIT_WORK(&adapter->check_overtemp_task, ixgbe_check_overtemp_task); + INIT_WORK(&adapter->check_overtemp_task, + ixgbe_check_overtemp_task); #ifdef CONFIG_IXGBE_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IXGBE_FLAG_DCA_ENABLED; @@ -6908,8 +7051,8 @@ err_eeprom: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -6976,7 +7119,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) iounmap(adapter->hw.hw_addr); pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + IORESOURCE_MEM)); e_dev_info("complete\n"); @@ -6996,7 +7139,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) * this device has been detected. */ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -7102,8 +7245,7 @@ static struct pci_driver ixgbe_driver = { static int __init ixgbe_init_module(void) { int ret; - pr_info("%s - version %s\n", ixgbe_driver_string, - ixgbe_driver_version); + pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright); #ifdef CONFIG_IXGBE_DCA @@ -7132,12 +7274,12 @@ static void __exit ixgbe_exit_module(void) #ifdef CONFIG_IXGBE_DCA static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, - void *p) + void *p) { int ret_val; ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, - __ixgbe_notify_dca); + __ixgbe_notify_dca); return ret_val ? NOTIFY_BAD : NOTIFY_DONE; } |
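
The refactored ixgbe_configure_virtualization() above enables only the PF's pool in VFRE/VFTE. Pool enables are kept as one bit per pool spread across a pair of 32-bit registers, so a pool index has to be split into a register select and a bit position. The standalone sketch below reproduces that math; pf_pool_enable_bits() and the two-element array standing in for the register pair are invented for illustration, not driver API.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: enable[] stands in for the VFRE (or VFTE) register
 * pair.  The PF owns pool number num_vfs; VFs occupy pools 0..num_vfs-1. */
static void pf_pool_enable_bits(unsigned int num_vfs, uint32_t enable[2])
{
    unsigned int vf_shift = num_vfs % 32;             /* bit within a register */
    unsigned int reg_offset = (num_vfs > 32) ? 1 : 0; /* register select, as in the hunk */

    enable[reg_offset] = 1u << vf_shift; /* only the PF's pool on */
    enable[reg_offset ^ 1] = 0;          /* partner register fully off */
}

int main(void)
{
    uint32_t vfre[2];

    pf_pool_enable_bits(8, vfre);
    printf("VFRE[0]=%08x VFRE[1]=%08x\n",
           (unsigned)vfre[0], (unsigned)vfre[1]);
    return 0;
}

One edge worth noting: at exactly num_vfs == 32 the PF's pool bit has wrapped into the second register while the (num_vfs > 32) test still selects the first, so the num_vfs / 32 form used by the code this hunk replaces appears to be the more general one.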
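
ixgbe_set_rx_buffer_len() above sizes the non-packet-split receive buffer by rounding the frame up to the next 1 KB boundary, and programs the maximum frame size into the MFS field of MHADD with a read-modify-write. A minimal sketch follows, assuming MFS occupies the upper 16 bits of the register (which is what the hunk's mask-and-shift pattern implies); ALIGN mirrors the kernel macro for power-of-two alignment, and the helper names are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define MHADD_MFS_MASK  0xFFFF0000u  /* assumed: MFS in the upper half */
#define MHADD_MFS_SHIFT 16
#define VLAN_HLEN       4
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

/* Buffer length for the non-packet-split path: leave room for a VLAN
 * tag, then round up to the next 1 KB boundary. */
static unsigned int rx_buf_len(unsigned int max_frame)
{
    return ALIGN(max_frame + VLAN_HLEN, 1024u);
}

/* Read-modify-write of the MFS field, as done on IXGBE_MHADD. */
static uint32_t set_mfs(uint32_t mhadd, uint32_t max_frame)
{
    if (max_frame != (mhadd >> MHADD_MFS_SHIFT)) {
        mhadd &= ~MHADD_MFS_MASK;
        mhadd |= max_frame << MHADD_MFS_SHIFT;
    }
    return mhadd;
}

int main(void)
{
    printf("buf=%u mhadd=%08x\n", rx_buf_len(1518),
           (unsigned)set_mfs(0x00010000u, 1518));
    return 0;
}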
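
The RSS redirection-table fill that this patch moves out of ixgbe_configure_rx() (its job now lives in ixgbe_setup_mrqc()) packs four one-byte queue entries per 32-bit RETA register with a sliding window. A self-contained version of that removed loop, with fill_reta() and the output array invented for the sketch:

#include <stdint.h>
#include <stdio.h>

/* 128 one-byte entries, four per 32-bit register; j cycles over
 * 0..indices-1 and the original's j * 0x11 replicates the queue index
 * into both nibbles of the entry. */
static void fill_reta(unsigned int indices, uint32_t reta_regs[32])
{
    uint32_t reta = 0;
    unsigned int i, j;

    for (i = 0, j = 0; i < 128; i++, j++) {
        if (j == indices)
            j = 0;
        reta = (reta << 8) | (j * 0x11); /* slide one entry in */
        if ((i & 3) == 3)                /* every 4th entry: flush */
            reta_regs[i >> 2] = reta;
    }
}

int main(void)
{
    uint32_t regs[32];

    fill_reta(4, regs);
    printf("RETA[0]=%08x\n", (unsigned)regs[0]); /* prints 00112233 */
    return 0;
}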
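
On the VLAN side, the patch drops the vlgrp/ndo_vlan_rx_register bookkeeping and instead records VIDs in an active_vlans bitmap via set_bit()/clear_bit(), replaying them in ixgbe_restore_vlan() with for_each_set_bit(). A portable sketch of the same bookkeeping; the vid_* helpers here are stand-ins for the kernel bitmap API, not equivalents of it.

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID    4096
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long active_vlans[VLAN_N_VID / BITS_PER_LONG];

static void vid_set(unsigned int vid)
{
    active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static void vid_clear(unsigned int vid)
{
    active_vlans[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
}

static bool vid_test(unsigned int vid)
{
    return (active_vlans[vid / BITS_PER_LONG] >> (vid % BITS_PER_LONG)) & 1;
}

/* Equivalent of the for_each_set_bit() walk in ixgbe_restore_vlan():
 * after a reset, re-add VID 0 and every VID recorded while up. */
static void restore_vlans(void (*add_vid)(unsigned int))
{
    unsigned int vid;

    add_vid(0);
    for (vid = 0; vid < VLAN_N_VID; vid++)
        if (vid_test(vid))
            add_vid(vid);
}

static void print_vid(unsigned int vid)
{
    printf("re-add VID %u\n", vid);
}

int main(void)
{
    vid_set(100);
    vid_set(200);
    vid_clear(200); /* the kill_vid path */
    restore_vlans(print_vid);
    return 0;
}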
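
ixgbe_acquire_msix_vectors() relies on the pci_enable_msix() contract of this era: a negative return is a hard failure, zero is success, and a positive return is the number of vectors the platform could actually grant, so the driver retries with that smaller count until it falls below its minimum. The shape of that loop, with fake_enable_msix() standing in for the PCI call:

#include <stdio.h>

/* Stand-in for pci_enable_msix(): <0 hard error, 0 success, >0 the
 * number of vectors that could have been allocated instead. */
static int fake_enable_msix(int requested, int available)
{
    return requested <= available ? 0 : available;
}

/* Retry-with-fewer loop from ixgbe_acquire_msix_vectors(). */
static int acquire_vectors(int vectors, int threshold, int available)
{
    while (vectors >= threshold) {
        int err = fake_enable_msix(vectors, available);

        if (!err)
            return vectors; /* got everything requested */
        if (err < 0)
            return err;     /* hard failure */
        vectors = err;      /* retry with the platform's offer */
    }
    return 0;               /* below the usable minimum */
}

int main(void)
{
    printf("granted %d vectors\n", acquire_vectors(16, 2, 9));
    return 0;
}

If the loop gives up, ixgbe_set_interrupt_capability() above falls back to plain MSI and then to legacy interrupts.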
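
In ixgbe_update_stats(), the 82599 good-octets counters are 36 bits wide, split between a 32-bit low register and the low nibble of the high register, which is why the code reads GORCL and then folds in the "4 high bits of GORCH". A sketch of that reconstruction; read_36bit_counter() is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* lo is the full 32-bit low register (e.g. GORCL); only the low nibble
 * of the high register carries bits 32..35. */
static uint64_t read_36bit_counter(uint32_t lo, uint32_t hi)
{
    return (uint64_t)lo + ((uint64_t)(hi & 0xF) << 32);
}

int main(void)
{
    printf("%llu\n",
           (unsigned long long)read_36bit_counter(0xFFFFFFFFu, 0x3));
    return 0;
}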
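
The Tx checksum refactor pulls the protocol dispatch out into the new ixgbe_psum() helper: the EtherType (after unwrapping one 802.1Q header) selects the IPv4 flag, and the L4 protocol selects the TCP or SCTP type. A condensed sketch of that dispatch follows; the names and flag values are placeholders for the example, not the IXGBE_ADVTXD_TUCMD_* hardware encoding.

#include <stdint.h>
#include <stdio.h>

#define TUCMD_IPV4     0x1u
#define TUCMD_L4T_TCP  0x2u
#define TUCMD_L4T_SCTP 0x4u

#define ETHERTYPE_IP   0x0800
#define ETHERTYPE_IPV6 0x86DD
#define PROTO_TCP      6
#define PROTO_SCTP     132

/* Shape of ixgbe_psum(): EtherType picks the IPv4 flag, the L4 protocol
 * picks the L4 type; anything unrecognized gets no flags (the driver
 * rate-limits a warning in that case). */
static uint32_t psum_flags(uint16_t ethertype, uint8_t l4proto)
{
    uint32_t rtn = 0;

    switch (ethertype) {
    case ETHERTYPE_IP:
        rtn |= TUCMD_IPV4;
        /* fall through: IPv4 and IPv6 share the L4 dispatch */
    case ETHERTYPE_IPV6:
        switch (l4proto) {
        case PROTO_TCP:
            rtn |= TUCMD_L4T_TCP;
            break;
        case PROTO_SCTP:
            rtn |= TUCMD_L4T_SCTP;
            break;
        }
        break;
    }
    return rtn;
}

int main(void)
{
    printf("tcp/ipv4 flags: %x\n",
           (unsigned)psum_flags(ETHERTYPE_IP, PROTO_TCP));
    return 0;
}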
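
The removed ixgbe_rx_desc_queue_enable() helper shows the pattern whose job ring bring-up now handles elsewhere: poll a status bit with a bounded sleep loop and report a timeout instead of spinning forever. A generic sketch, with read_reg and the fake register supplied only for the demo; the bit value mimics RXDCTL.ENABLE but is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_POLL 10

/* Bounded poll-until-set; read_reg stands in for the MMIO read. */
static bool wait_for_bit(uint32_t (*read_reg)(void), uint32_t bit)
{
    int tries;

    for (tries = 0; tries < MAX_POLL; tries++) {
        if (read_reg() & bit)
            return true;
        usleep(1000); /* the driver uses msleep(1) */
    }
    return false; /* caller logs "not set within the polling period" */
}

static uint32_t polls;

static uint32_t fake_rxdctl(void)
{
    return ++polls >= 3 ? 0x02000000u : 0; /* "ENABLE" appears on the 3rd read */
}

int main(void)
{
    printf("%s\n", wait_for_bit(fake_rxdctl, 0x02000000u) ?
           "enabled" : "timed out");
    return 0;
}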
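
Finally, the new ixgbe_get_stats64() samples per-ring counters under u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh(), a sequence counter: the writer makes the count odd while updating, and the reader retries if it saw an odd count or the count changed mid-copy. A simplified C11 sketch of that retry shape; the ordering details and 32-bit handling that the kernel helpers provide are glossed over, and on 64-bit the kernel mechanism largely compiles away.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
    atomic_uint seq; /* even: stable, odd: writer mid-update */
    uint64_t packets;
    uint64_t bytes;
};

/* Writer: make seq odd, update, make it even again. */
static void stats_update(struct ring_stats *s, uint64_t pkts, uint64_t b)
{
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
    s->packets += pkts;
    s->bytes += b;
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

/* Reader: the retry loop behind the u64_stats fetch/retry pair. */
static void stats_read(struct ring_stats *s, uint64_t *pkts, uint64_t *b)
{
    unsigned int start;

    do {
        start = atomic_load_explicit(&s->seq, memory_order_acquire);
        *pkts = s->packets;
        *b = s->bytes;
    } while ((start & 1) ||
             start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
    struct ring_stats s = {0};
    uint64_t pkts, bytes;

    stats_update(&s, 10, 15000);
    stats_read(&s, &pkts, &bytes);
    printf("%llu packets, %llu bytes\n",
           (unsigned long long)pkts, (unsigned long long)bytes);
    return 0;
}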