Diffstat (limited to 'drivers/net/ethernet/intel')
85 files changed, 5633 insertions, 4506 deletions
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 9436397e572..e498effb85d 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -412,6 +412,10 @@ enum cb_status { cb_ok = 0x2000, }; +/** + * cb_command - Command Block flags + * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory + */ enum cb_command { cb_nop = 0x0000, cb_iaaddr = 0x0001, @@ -421,6 +425,7 @@ enum cb_command { cb_ucode = 0x0005, cb_dump = 0x0006, cb_tx_sf = 0x0008, + cb_tx_nc = 0x0010, cb_cid = 0x1f00, cb_i = 0x2000, cb_s = 0x4000, @@ -457,7 +462,7 @@ struct config { /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), - rx_discard_overruns:1), rx_save_bad_frames:1); + rx_save_overruns : 1), rx_save_bad_frames : 1); /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), tx_dynamic_tbd:1); @@ -617,6 +622,7 @@ struct nic { u32 rx_fc_pause; u32 rx_fc_unsupported; u32 rx_tco_frames; + u32 rx_short_frame_errors; u32 rx_over_length_errors; u16 eeprom_wc; @@ -1075,7 +1081,7 @@ static void e100_get_defaults(struct nic *nic) /* Template for a freshly allocated RFD */ nic->blank_rfd.command = 0; nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); - nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN); + nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); /* MII setup */ nic->mii.phy_id_mask = 0x1F; @@ -1089,6 +1095,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) { struct config *config = &cb->u.config; u8 *c = (u8 *)config; + struct net_device *netdev = nic->netdev; cb->command = cpu_to_le16(cb_config); @@ -1132,6 +1139,9 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) config->promiscuous_mode = 0x1; /* 1=on, 0=off */ } + if (unlikely(netdev->features & NETIF_F_RXFCS)) + config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ + if (nic->flags & multicast_all) config->multicast_all = 0x1; /* 1=accept, 0=no */ @@ -1156,6 +1166,12 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) } } + if (netdev->features & NETIF_F_RXALL) { + config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ + config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ + config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ + } + netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); @@ -1607,7 +1623,9 @@ static void e100_update_stats(struct nic *nic) ns->collisions += nic->tx_collisions; ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + le32_to_cpu(s->tx_lost_crs); - ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) + + nic->rx_short_frame_errors += + le32_to_cpu(s->rx_short_frame_errors); + ns->rx_length_errors = nic->rx_short_frame_errors + nic->rx_over_length_errors; ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); @@ -1720,6 +1738,16 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, struct sk_buff *skb) { cb->command = nic->tx_command; + + /* + * Use the last 4 bytes of the SKB payload as the CRC; used for + * testing, i.e. sending frames with a bad CRC.
+ */ + if (unlikely(skb->no_fcs)) + cb->command |= __constant_cpu_to_le16(cb_tx_nc); + else + cb->command &= ~__constant_cpu_to_le16(cb_tx_nc); + /* interrupt every 16 packets regardless of delay */ if ((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cpu_to_le16(cb_i); @@ -1881,7 +1909,7 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx) } } -#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) +#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) { if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN))) @@ -1919,6 +1947,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, struct sk_buff *skb = rx->skb; struct rfd *rfd = (struct rfd *)skb->data; u16 rfd_status, actual_size; + u16 fcs_pad = 0; if (unlikely(work_done && *work_done >= work_to_do)) return -EAGAIN; @@ -1951,6 +1980,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, } /* Get actual data size */ + if (unlikely(dev->features & NETIF_F_RXFCS)) + fcs_pad = 4; actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) actual_size = RFD_BUF_LEN - sizeof(struct rfd); @@ -1977,16 +2008,27 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, skb_put(skb, actual_size); skb->protocol = eth_type_trans(skb, nic->netdev); + /* If we are receiving all frames, then don't bother + * checking for errors. + */ + if (unlikely(dev->features & NETIF_F_RXALL)) { + if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) + /* Received oversized frame, but keep it. */ + nic->rx_over_length_errors++; + goto process_skb; + } + if (unlikely(!(rfd_status & cb_ok))) { /* Don't indicate if hardware indicates errors */ dev_kfree_skb_any(skb); - } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) { + } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { /* Don't indicate oversized frames */ nic->rx_over_length_errors++; dev_kfree_skb_any(skb); } else { +process_skb: dev->stats.rx_packets++; - dev->stats.rx_bytes += actual_size; + dev->stats.rx_bytes += (actual_size - fcs_pad); netif_receive_skb(skb); if (work_done) (*work_done)++; @@ -2058,7 +2100,8 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done, pci_dma_sync_single_for_device(nic->pdev, old_before_last_rx->dma_addr, sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); - old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN); + old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN + + ETH_FCS_LEN); pci_dma_sync_single_for_device(nic->pdev, old_before_last_rx->dma_addr, sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); @@ -2618,6 +2661,7 @@ static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { "tx_deferred", "tx_single_collisions", "tx_multi_collisions", "tx_flow_control_pause", "rx_flow_control_pause", "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets", + "rx_short_frame_errors", "rx_over_length_errors", }; #define E100_NET_STATS_LEN 21 #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) @@ -2651,6 +2695,8 @@ static void e100_get_ethtool_stats(struct net_device *netdev, data[i++] = nic->rx_fc_unsupported; data[i++] = nic->tx_tco_frames; data[i++] = nic->rx_tco_frames; + data[i++] = nic->rx_short_frame_errors; + data[i++] = nic->rx_over_length_errors; } static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) @@ -2729,6 +2775,20 @@ static int e100_close(struct net_device *netdev) return 0; } +static 
int e100_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct nic *nic = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + e100_exec_cb(nic, NULL, e100_configure); + return 0; +} + static const struct net_device_ops e100_netdev_ops = { .ndo_open = e100_open, .ndo_stop = e100_close, @@ -2742,6 +2802,7 @@ static const struct net_device_ops e100_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = e100_netpoll, #endif + .ndo_set_features = e100_set_features, }; static int __devinit e100_probe(struct pci_dev *pdev, @@ -2751,11 +2812,12 @@ static int __devinit e100_probe(struct pci_dev *pdev, struct nic *nic; int err; - if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { - if (((1 << debug) - 1) & NETIF_MSG_PROBE) - pr_err("Etherdev alloc failed, aborting\n"); + if (!(netdev = alloc_etherdev(sizeof(struct nic)))) return -ENOMEM; - } + + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; netdev->netdev_ops = &e100_netdev_ops; SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 1e1596990b5..2b6cd02bfba 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -254,6 +254,7 @@ struct e1000_adapter { atomic_t tx_fifo_stall; bool pcix_82544; bool detect_tx_hung; + bool dump_buffers; /* RX */ bool (*clean_rx)(struct e1000_adapter *adapter, diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 36ee76bf4cb..c526279e492 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -5253,6 +5253,78 @@ static s32 e1000_check_downshift(struct e1000_hw *hw) return E1000_SUCCESS; } +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if 
(ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + /** * e1000_config_dsp_after_link_change * @hw: Struct containing variables accessed by shared code @@ -5269,13 +5341,6 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) { s32 ret_val; u16 phy_data, phy_saved_data, speed, duplex, i; - static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { - IGP01E1000_PHY_AGC_PARAM_A, - IGP01E1000_PHY_AGC_PARAM_B, - IGP01E1000_PHY_AGC_PARAM_C, - IGP01E1000_PHY_AGC_PARAM_D - }; - u16 min_length, max_length; e_dbg("e1000_config_dsp_after_link_change"); @@ -5290,84 +5355,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) } if (speed == SPEED_1000) { - - ret_val = - e1000_get_cable_length(hw, &min_length, - &max_length); + ret_val = e1000_1000Mb_check_cable_length(hw); if (ret_val) return ret_val; - - if ((hw->dsp_config_state == e1000_dsp_config_enabled) - && min_length >= e1000_igp_cable_length_50) { - - for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { - ret_val = - e1000_read_phy_reg(hw, - dsp_reg_array[i], - &phy_data); - if (ret_val) - return ret_val; - - phy_data &= - ~IGP01E1000_PHY_EDAC_MU_INDEX; - - ret_val = - e1000_write_phy_reg(hw, - dsp_reg_array - [i], phy_data); - if (ret_val) - return ret_val; - } - hw->dsp_config_state = - e1000_dsp_config_activated; - } - - if ((hw->ffe_config_state == e1000_ffe_config_enabled) - && (min_length < e1000_igp_cable_length_50)) { - - u16 ffe_idle_err_timeout = - FFE_IDLE_ERR_COUNT_TIMEOUT_20; - u32 idle_errs = 0; - - /* clear previous idle error counts */ - ret_val = - e1000_read_phy_reg(hw, PHY_1000T_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - for (i = 0; i < ffe_idle_err_timeout; i++) { - udelay(1000); - ret_val = - e1000_read_phy_reg(hw, - PHY_1000T_STATUS, - &phy_data); - if (ret_val) - return ret_val; - - idle_errs += - (phy_data & - SR_1000T_IDLE_ERROR_CNT); - if (idle_errs > - SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) - { - hw->ffe_config_state = - e1000_ffe_config_active; - - ret_val = - e1000_write_phy_reg(hw, - IGP01E1000_PHY_DSP_FFE, - IGP01E1000_PHY_DSP_FFE_CM_CP); - if (ret_val) - return ret_val; - break; - } - - if (idle_errs) - ffe_idle_err_timeout = - FFE_IDLE_ERR_COUNT_TIMEOUT_100; - } - } } } else { if (hw->dsp_config_state == e1000_dsp_config_activated) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index f6c4d7e2560..11578c8978d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -895,6 +895,11 @@ struct e1000_ffvt_entry { #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ #define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ @@ -1074,6 +1079,11 @@ struct e1000_ffvt_entry { #define E1000_82542_IMC E1000_IMC #define E1000_82542_RCTL 
E1000_RCTL #define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC #define E1000_82542_RDBAL 0x00110 #define E1000_82542_RDBAH 0x00114 #define E1000_82542_RDLEN 0x00118 diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 669ca3800c0..0e9aec8f691 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -730,10 +730,8 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter) eeprom.offset = 0; data = kmalloc(eeprom.len, GFP_KERNEL); - if (!data) { - pr_err("Unable to allocate memory to dump EEPROM data\n"); + if (!data) return; - } ops->get_eeprom(netdev, &eeprom, data); @@ -1069,8 +1067,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, (hw->mac_type != e1000_82547)) netdev->hw_features |= NETIF_F_TSO; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->features |= netdev->hw_features; netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_RXFCS; if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; @@ -2694,6 +2695,7 @@ set_itr_now: #define E1000_TX_FLAGS_VLAN 0x00000002 #define E1000_TX_FLAGS_TSO 0x00000004 #define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 @@ -2995,6 +2997,9 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); } + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + i = tx_ring->next_to_use; while (count--) { @@ -3009,6 +3014,10 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only * applicable for weak-ordered memory model archs, @@ -3224,6 +3233,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (likely(skb->protocol == htons(ETH_P_IP))) tx_flags |= E1000_TX_FLAGS_IPV4; + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, nr_frags, mss); @@ -3241,6 +3253,215 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* + * transmit dump + */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * 
+----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { u64 a; u64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* + * receive dump + */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { u64 a; u64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->skb, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + /** * e1000_tx_timeout - Respond to a Tx Hang 
* @netdev: network interface device structure @@ -3262,6 +3483,7 @@ static void e1000_reset_task(struct work_struct *work) if (test_bit(__E1000_DOWN, &adapter->flags)) return; + e_err(drv, "Reset adapter\n"); e1000_reinit_safe(adapter); } @@ -3679,6 +3901,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, eop, jiffies, eop_desc->upper.fields.status); + e1000_dump(adapter); netif_stop_queue(netdev); } } @@ -3878,11 +4101,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (length <= copybreak && skb_tailroom(skb) >= length) { u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page, - KM_SKB_DATA_SOFTIRQ); + vaddr = kmap_atomic(buffer_info->page); memcpy(skb_tail_pointer(skb), vaddr, length); - kunmap_atomic(vaddr, - KM_SKB_DATA_SOFTIRQ); + kunmap_atomic(vaddr); /* re-use the page, so don't erase * buffer_info->page */ skb_put(skb, length); @@ -3902,10 +4123,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, ((u32)(rx_desc->errors) << 24), le16_to_cpu(rx_desc->csum), skb); - pskb_trim(skb, skb->len - 4); - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); total_rx_packets++; /* eth type trans needs skb->data to point to something */ @@ -4059,14 +4279,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, } } - /* adjust length to remove Ethernet CRC, this must be - * done after the TBI_ACCEPT workaround above */ - length -= 4; - - /* probably a little skewed due to removing CRC */ - total_rx_bytes += length; + total_rx_bytes += (length - 4); /* don't count FCS */ total_rx_packets++; + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + e1000_check_copybreak(netdev, buffer_info, length, &skb); skb_put(skb, length); @@ -4740,12 +4961,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) e1000_setup_rctl(adapter); e1000_set_rx_mode(netdev); + rctl = er32(RCTL); + /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & E1000_WUFC_MC) { - rctl = er32(RCTL); + if (wufc & E1000_WUFC_MC) rctl |= E1000_RCTL_MPE; - ew32(RCTL, rctl); - } + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); if (hw->mac_type >= e1000_82540) { ctrl = er32(CTRL); diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index e1159e54334..bac9dda31b6 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -201,19 +201,23 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
* @hw: pointer to the HW structure **/ -static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) +static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) { - struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; - struct e1000_mac_operations *func = &mac->ops; - /* Set media type */ - switch (adapter->pdev->device) { + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.check_for_link = e1000e_check_for_serdes_link; + mac->ops.setup_physical_interface = + e1000e_setup_fiber_serdes_link; break; default: hw->phy.media_type = e1000_media_type_copper; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_80003es2lan; break; } @@ -230,25 +234,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) /* Adaptive IFS not supported */ mac->adaptive_ifs = false; - /* check for link */ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; - func->check_for_link = e1000e_check_for_copper_link; - break; - case e1000_media_type_fiber: - func->setup_physical_interface = e1000e_setup_fiber_serdes_link; - func->check_for_link = e1000e_check_for_fiber_link; - break; - case e1000_media_type_internal_serdes: - func->setup_physical_interface = e1000e_setup_fiber_serdes_link; - func->check_for_link = e1000e_check_for_serdes_link; - break; - default: - return -E1000_ERR_CONFIG; - break; - } - /* set lan id for port to determine which phy lock to use */ hw->mac.ops.set_lan_id(hw); @@ -260,7 +245,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; s32 rc; - rc = e1000_init_mac_params_80003es2lan(adapter); + rc = e1000_init_mac_params_80003es2lan(hw); if (rc) return rc; @@ -304,7 +289,7 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) } /** - * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register + * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register * @hw: pointer to the HW structure * * Acquire the semaphore to access the Kumeran interface. 
@@ -320,7 +305,7 @@ static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) } /** - * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register + * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register * @hw: pointer to the HW structure * * Release the semaphore used to access the Kumeran interface @@ -473,7 +458,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, return ret_val; } - if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { /* * The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI @@ -485,9 +470,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { - ret_val = -E1000_ERR_PHY; e1000_release_phy_80003es2lan(hw); - return ret_val; + return -E1000_ERR_PHY; } udelay(200); @@ -545,7 +529,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, return ret_val; } - if (hw->dev_spec.e80003es2lan.mdic_wa_enable == true) { + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { /* * The "ready" bit in the MDIC register may be incorrectly set * before the device has completed the "Page Select" MDI @@ -667,8 +651,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) udelay(1); if (hw->phy.autoneg_wait_to_complete) { - e_dbg("Waiting for forced speed/duplex link " - "on GG82563 phy.\n"); + e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); @@ -731,22 +714,19 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); if (ret_val) - goto out; + return ret_val; index = phy_data & GG82563_DSPD_CABLE_LENGTH; - if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) { - ret_val = -E1000_ERR_PHY; - goto out; - } + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; phy->min_cable_length = e1000_gg82563_cable_length_table[index]; phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: - return ret_val; + return 0; } /** @@ -820,9 +800,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ew32(IMC, 0xffffffff); er32(ICR); - ret_val = e1000_check_alt_mac_addr_generic(hw); - - return ret_val; + return e1000_check_alt_mac_addr_generic(hw); } /** @@ -842,7 +820,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) e1000_initialize_hw_bits_80003es2lan(hw); /* Initialize identification LED */ - ret_val = e1000e_id_led_init(hw); + ret_val = mac->ops.id_led_init(hw); if (ret_val) e_dbg("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ @@ -860,7 +838,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* Setup link and flow control */ - ret_val = e1000e_setup_link(hw); + ret_val = mac->ops.setup_link(hw); /* Disable IBIST slave mode (far-end loopback) */ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, @@ -1078,7 +1056,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) * firmware will have already initialized them. We only initialize * them if the HW is not in IAMT mode. 
*/ - if (!e1000e_check_mng_mode(hw)) { + if (!hw->mac.ops.check_mng_mode(hw)) { /* Enable Electrical Idle on the PHY */ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data); @@ -1163,9 +1141,7 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw) if (ret_val) return ret_val; - ret_val = e1000e_setup_copper_link(hw); - - return 0; + return e1000e_setup_copper_link(hw); } /** @@ -1241,9 +1217,7 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex) else reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); - - return 0; + return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); } /** @@ -1285,9 +1259,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw) } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY)); reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; - ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); - return ret_val; + return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data); } /** @@ -1372,12 +1345,9 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) */ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) - goto out; - - ret_val = e1000_read_mac_addr_generic(hw); + return ret_val; -out: - return ret_val; + return e1000_read_mac_addr_generic(hw); } /** @@ -1443,7 +1413,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) static const struct e1000_mac_operations es2_mac_ops = { .read_mac_addr = e1000_read_mac_addr_80003es2lan, - .id_led_init = e1000e_id_led_init, + .id_led_init = e1000e_id_led_init_generic, .blink_led = e1000e_blink_led_generic, .check_mng_mode = e1000e_check_mng_mode_generic, /* check_for_link dependent on media type */ @@ -1459,9 +1429,10 @@ static const struct e1000_mac_operations es2_mac_ops = { .clear_vfta = e1000_clear_vfta_generic, .reset_hw = e1000_reset_hw_80003es2lan, .init_hw = e1000_init_hw_80003es2lan, - .setup_link = e1000e_setup_link, + .setup_link = e1000e_setup_link_generic, /* setup_physical_interface dependent on media type */ .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, }; static const struct e1000_phy_operations es2_phy_ops = { @@ -1486,6 +1457,7 @@ static const struct e1000_nvm_operations es2_nvm_ops = { .acquire = e1000_acquire_nvm_80003es2lan, .read = e1000e_read_nvm_eerd, .release = e1000_release_nvm_80003es2lan, + .reload = e1000e_reload_nvm_generic, .update = e1000e_update_nvm_checksum_generic, .valid_led_default = e1000e_valid_led_default, .validate = e1000e_validate_nvm_checksum_generic, @@ -1502,8 +1474,7 @@ const struct e1000_info e1000_es2_info = { | FLAG_RX_NEEDS_RESTART /* errata */ | FLAG_TARC_SET_BIT_ZERO /* errata */ | FLAG_APME_CHECK_PORT_B - | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ - | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, + | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */ .flags2 = FLAG2_DMA_BURST, .pba = 38, .max_hw_frame_size = DEFAULT_JUMBO, diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index a3e65fd26e0..b3fdc6977f2 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -235,30 +235,42 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) * e1000_init_mac_params_82571 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ -static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) +static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) { - struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; - struct e1000_mac_operations *func = &mac->ops; u32 swsm = 0; u32 swsm2 = 0; bool force_clear_smbi = false; - /* Set media type */ - switch (adapter->pdev->device) { + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { case E1000_DEV_ID_82571EB_FIBER: case E1000_DEV_ID_82572EI_FIBER: case E1000_DEV_ID_82571EB_QUAD_FIBER: hw->phy.media_type = e1000_media_type_fiber; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000e_check_for_fiber_link; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; break; case E1000_DEV_ID_82571EB_SERDES: - case E1000_DEV_ID_82572EI_SERDES: case E1000_DEV_ID_82571EB_SERDES_DUAL: case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_serdes_link_82571; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; break; default: hw->phy.media_type = e1000_media_type_copper; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_82571; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; break; } @@ -269,38 +281,13 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) /* Adaptive IFS supported */ mac->adaptive_ifs = true; - /* check for link */ - switch (hw->phy.media_type) { - case e1000_media_type_copper: - func->setup_physical_interface = e1000_setup_copper_link_82571; - func->check_for_link = e1000e_check_for_copper_link; - func->get_link_up_info = e1000e_get_speed_and_duplex_copper; - break; - case e1000_media_type_fiber: - func->setup_physical_interface = - e1000_setup_fiber_serdes_link_82571; - func->check_for_link = e1000e_check_for_fiber_link; - func->get_link_up_info = - e1000e_get_speed_and_duplex_fiber_serdes; - break; - case e1000_media_type_internal_serdes: - func->setup_physical_interface = - e1000_setup_fiber_serdes_link_82571; - func->check_for_link = e1000_check_for_serdes_link_82571; - func->get_link_up_info = - e1000e_get_speed_and_duplex_fiber_serdes; - break; - default: - return -E1000_ERR_CONFIG; - break; - } - + /* MAC-specific function pointers */ switch (hw->mac.type) { case e1000_82573: - func->set_lan_id = e1000_set_lan_id_single_port; - func->check_mng_mode = e1000e_check_mng_mode_generic; - func->led_on = e1000e_led_on_generic; - func->blink_led = e1000e_blink_led_generic; + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; /* FWSM register */ mac->has_fwsm = true; @@ -314,14 +301,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) break; case e1000_82574: case e1000_82583: - func->set_lan_id = e1000_set_lan_id_single_port; 
- func->check_mng_mode = e1000_check_mng_mode_82574; - func->led_on = e1000_led_on_82574; + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_82574; + mac->ops.led_on = e1000_led_on_82574; break; default: - func->check_mng_mode = e1000e_check_mng_mode_generic; - func->led_on = e1000e_led_on_generic; - func->blink_led = e1000e_blink_led_generic; + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; /* FWSM register */ mac->has_fwsm = true; @@ -342,11 +329,11 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) if (!(swsm2 & E1000_SWSM2_LOCK)) { /* Only do this for the first interface on this card */ - ew32(SWSM2, - swsm2 | E1000_SWSM2_LOCK); + ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); force_clear_smbi = true; - } else + } else { force_clear_smbi = false; + } break; default: force_clear_smbi = true; @@ -383,7 +370,7 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; s32 rc; - rc = e1000_init_mac_params_82571(adapter); + rc = e1000_init_mac_params_82571(hw); if (rc) return rc; @@ -577,7 +564,6 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) { u32 extcnf_ctrl; - s32 ret_val = 0; s32 i = 0; extcnf_ctrl = er32(EXTCNF_CTRL); @@ -599,12 +585,10 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) /* Release semaphores */ e1000_put_hw_semaphore_82573(hw); e_dbg("Driver can't access the PHY\n"); - ret_val = -E1000_ERR_PHY; - goto out; + return -E1000_ERR_PHY; } -out: - return ret_val; + return 0; } /** @@ -809,7 +793,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) * otherwise, commit the checksum to the flash NVM. */ if (hw->nvm.type != e1000_nvm_flash_hw) - return ret_val; + return 0; /* Check for pending operations. */ for (i = 0; i < E1000_FLASH_UPDATES; i++) { @@ -1134,7 +1118,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) e1000_initialize_hw_bits_82571(hw); /* Initialize identification LED */ - ret_val = e1000e_id_led_init(hw); + ret_val = mac->ops.id_led_init(hw); if (ret_val) e_dbg("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ @@ -1159,7 +1143,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* Setup link and flow control */ - ret_val = e1000_setup_link_82571(hw); + ret_val = mac->ops.setup_link(hw); /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); @@ -1227,6 +1211,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82572: reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); break; + case e1000_82574: + case e1000_82583: + reg |= (1 << 26); + break; default: break; } @@ -1281,18 +1269,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) reg |= E1000_PBA_ECC_CORR_EN; ew32(PBA_ECC, reg); } + /* * Workaround for hardware errata. 
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 */ - - if ((hw->mac.type == e1000_82571) || - (hw->mac.type == e1000_82572)) { - reg = er32(CTRL_EXT); - reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; - ew32(CTRL_EXT, reg); - } - + if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, reg); + } /* PCI-Ex Control Registers */ switch (hw->mac.type) { @@ -1418,7 +1404,6 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) { u16 status_1kbt = 0; u16 receive_errors = 0; - bool phy_hung = false; s32 ret_val = 0; /* @@ -1426,19 +1411,18 @@ bool e1000_check_phy_82574(struct e1000_hw *hw) * read the Base1000T status register If both are max then PHY is hung. */ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); - if (ret_val) - goto out; + return false; if (receive_errors == E1000_RECEIVE_ERROR_MAX) { ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); if (ret_val) - goto out; + return false; if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == E1000_IDLE_ERROR_COUNT_MASK) - phy_hung = true; + return true; } -out: - return phy_hung; + + return false; } /** @@ -1469,7 +1453,7 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw) break; } - return e1000e_setup_link(hw); + return e1000e_setup_link_generic(hw); } /** @@ -1506,9 +1490,7 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) if (ret_val) return ret_val; - ret_val = e1000e_setup_copper_link(hw); - - return ret_val; + return e1000e_setup_copper_link(hw); } /** @@ -1842,9 +1824,9 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) **/ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) { - s32 ret_val = 0; - if (hw->mac.type == e1000_82571) { + s32 ret_val = 0; + /* * If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm @@ -1852,13 +1834,10 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) */ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) - goto out; + return ret_val; } - ret_val = e1000_read_mac_addr_generic(hw); - -out: - return ret_val; + return e1000_read_mac_addr_generic(hw); } /** @@ -1873,7 +1852,7 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) struct e1000_phy_info *phy = &hw->phy; struct e1000_mac_info *mac = &hw->mac; - if (!(phy->ops.check_reset_block)) + if (!phy->ops.check_reset_block) return; /* If the management interface is not enabled, then power down */ @@ -1930,7 +1909,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) static const struct e1000_mac_operations e82571_mac_ops = { /* .check_mng_mode: mac type dependent */ /* .check_for_link: media type dependent */ - .id_led_init = e1000e_id_led_init, + .id_led_init = e1000e_id_led_init_generic, .cleanup_led = e1000e_cleanup_led_generic, .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, .get_bus_info = e1000e_get_bus_info_pcie, @@ -1946,6 +1925,7 @@ static const struct e1000_mac_operations e82571_mac_ops = { .setup_link = e1000_setup_link_82571, /* .setup_physical_interface: media type dependent */ .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, .read_mac_addr = e1000_read_mac_addr_82571, }; @@ -2007,6 +1987,7 @@ static const struct e1000_nvm_operations e82571_nvm_ops = { .acquire = e1000_acquire_nvm_82571, .read = e1000e_read_nvm_eerd, .release = e1000_release_nvm_82571, + .reload = e1000e_reload_nvm_generic, .update = 
e1000_update_nvm_checksum_82571, .valid_led_default = e1000_valid_led_default_82571, .validate = e1000_validate_nvm_checksum_82571, diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 948c05db5d6..591b7132450 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2011 Intel Corporation. +# Copyright(c) 1999 - 2012 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -33,5 +33,6 @@ obj-$(CONFIG_E1000E) += e1000e.o e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ - lib.o phy.o param.o ethtool.o netdev.o + mac.o manage.o nvm.o phy.o \ + param.o ethtool.o netdev.o diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index c516a7440be..3a502591716 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -126,6 +126,13 @@ E1000_RXDEXT_STATERR_CXE | \ E1000_RXDEXT_STATERR_RXE) +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + #define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 /* Management Control */ @@ -170,6 +177,7 @@ #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ @@ -326,6 +334,7 @@ /* Receive Checksum Control */ #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ #define E1000_RFCTL_NFSW_DIS 0x00000040 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index f478a22ed57..86cdd479399 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -234,6 +234,7 @@ struct e1000_buffer { }; struct e1000_ring { + struct e1000_adapter *adapter; /* back pointer to adapter */ void *desc; /* pointer to ring memory */ dma_addr_t dma; /* phys address of ring */ unsigned int size; /* length of ring in bytes */ @@ -242,8 +243,8 @@ struct e1000_ring { u16 next_to_use; u16 next_to_clean; - u16 head; - u16 tail; + void __iomem *head; + void __iomem *tail; /* array of buffer information structs */ struct e1000_buffer *buffer_info; @@ -251,7 +252,7 @@ struct e1000_ring { char name[IFNAMSIZ + 5]; u32 ims_val; u32 itr_val; - u16 itr_register; + void __iomem *itr_register; int set_itr; struct sk_buff *rx_skb_top; @@ -334,11 +335,10 @@ struct e1000_adapter { /* * Rx */ - bool (*clean_rx) (struct e1000_adapter *adapter, - int *work_done, int work_to_do) - ____cacheline_aligned_in_smp; - void (*alloc_rx_buf) (struct e1000_adapter *adapter, - int cleaned_count, gfp_t gfp); + bool (*clean_rx) (struct e1000_ring *ring, int *work_done, + int work_to_do) ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count, + gfp_t gfp); struct e1000_ring *rx_ring; u32 rx_int_delay; @@ -398,6 +398,9 @@ struct e1000_adapter { bool idle_check; int phy_hang_count; + + u16 tx_ring_count; + u16 rx_ring_count; }; struct e1000_info { @@ -417,7 +420,7 @@ struct e1000_info { #define FLAG_HAS_FLASH (1 << 1) #define FLAG_HAS_HW_VLAN_FILTER (1 << 2) #define FLAG_HAS_WOL (1 << 3) -#define FLAG_HAS_ERT (1 << 4) +/* reserved bit4 */ #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) #define FLAG_HAS_JUMBO_FRAMES (1 << 7) @@ -427,7 +430,7 @@ struct e1000_info { #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) #define FLAG_IS_QUAD_PORT_A (1 << 12) #define FLAG_IS_QUAD_PORT (1 << 13) -#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN (1 << 14) +/* reserved bit14 */ #define FLAG_APME_IN_WUC (1 << 15) #define FLAG_APME_IN_CTRL3 (1 << 16) #define FLAG_APME_CHECK_PORT_B (1 << 17) @@ -458,6 +461,7 @@ struct e1000_info { #define FLAG2_CHECK_PHY_HANG (1 << 9) #define FLAG2_NO_DISABLE_RX (1 << 10) #define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) +#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) @@ -492,10 +496,10 @@ extern void e1000e_down(struct e1000_adapter *adapter); extern void e1000e_reinit_locked(struct e1000_adapter *adapter); extern void e1000e_reset(struct e1000_adapter *adapter); extern void e1000e_power_up_phy(struct e1000_adapter *adapter); -extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); -extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); -extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); -extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_ring *ring); +extern int e1000e_setup_tx_resources(struct e1000_ring *ring); +extern void e1000e_free_rx_resources(struct e1000_ring *ring); +extern void e1000e_free_tx_resources(struct e1000_ring *ring); extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); @@ -555,12 +559,12 @@ extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex); extern s32 
e1000e_disable_pcie_master(struct e1000_hw *hw); extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); -extern s32 e1000e_id_led_init(struct e1000_hw *hw); +extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw); extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); -extern s32 e1000e_setup_link(struct e1000_hw *hw); +extern s32 e1000e_setup_link_generic(struct e1000_hw *hw); extern void e1000_clear_vfta_generic(struct e1000_hw *hw); extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, @@ -571,7 +575,7 @@ extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); -extern void e1000e_config_collision_dist(struct e1000_hw *hw); +extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw); extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); extern s32 e1000e_force_mac_fc(struct e1000_hw *hw); extern s32 e1000e_blink_led_generic(struct e1000_hw *hw); @@ -658,11 +662,6 @@ static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) return hw->phy.ops.reset(hw); } -static inline s32 e1000_check_reset_block(struct e1000_hw *hw) -{ - return hw->phy.ops.check_reset_block(hw); -} - static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) { return hw->phy.ops.read_reg(hw, offset, data); @@ -685,7 +684,7 @@ extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); extern void e1000e_release_nvm(struct e1000_hw *hw); -extern void e1000e_reload_nvm(struct e1000_hw *hw); +extern void e1000e_reload_nvm_generic(struct e1000_hw *hw); extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) @@ -721,11 +720,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw) return hw->phy.ops.get_info(hw); } -static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) -{ - return hw->mac.ops.check_mng_mode(hw); -} - extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index fb2c28e799a..db35dd5d96d 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -34,6 +34,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/vmalloc.h> #include "e1000.h" @@ -257,7 +258,7 @@ static int e1000_set_settings(struct net_device *netdev, * When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ - if (e1000_check_reset_block(hw)) { + if (hw->phy.ops.check_reset_block(hw)) { e_err("Cannot change link characteristics when SoL/IDER is " "active.\n"); return -EINVAL; @@ -536,7 +537,7 @@ static int e1000_set_eeprom(struct net_device *netdev, ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); ptr++; } - if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) + if (((eeprom->offset + eeprom->len) & 1) && (!ret_val)) /* need read/modify/write of last changed EEPROM word */ /* only the first byte of the word is being modified */ ret_val = e1000_read_nvm(hw, last_word, 1, @@ -552,7 +553,7 @@ static int e1000_set_eeprom(struct net_device *netdev, memcpy(ptr, bytes, eeprom->len); for (i = 0; i < last_word - first_word + 1; i++) - eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + cpu_to_le16s(&eeprom_buff[i]); ret_val = e1000_write_nvm(hw, first_word, last_word - first_word + 1, eeprom_buff); @@ -605,94 +606,112 @@ static void e1000_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_ring *tx_ring = adapter->tx_ring; - struct e1000_ring *rx_ring = adapter->rx_ring; ring->rx_max_pending = E1000_MAX_RXD; ring->tx_max_pending = E1000_MAX_TXD; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; } static int e1000_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_ring *tx_ring, *tx_old; - struct e1000_ring *rx_ring, *rx_old; - int err; + struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; + int err = 0, size = sizeof(struct e1000_ring); + bool set_tx = false, set_rx = false; + u16 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) - usleep_range(1000, 2000); + new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD, + E1000_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); - if (netif_running(adapter->netdev)) - e1000e_down(adapter); + new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD, + E1000_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); - tx_old = adapter->tx_ring; - rx_old = adapter->rx_ring; + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + /* nothing to do */ + return 0; - err = -ENOMEM; - tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); - if (!tx_ring) - goto err_alloc_tx; + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); - rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); - if (!rx_ring) - goto err_alloc_rx; + if (!netif_running(adapter->netdev)) { + /* Set counts now and allocate resources during open() */ + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = 
new_rx_count; + goto clear_reset; + } - adapter->tx_ring = tx_ring; - adapter->rx_ring = rx_ring; + set_tx = (new_tx_count != adapter->tx_ring_count); + set_rx = (new_rx_count != adapter->rx_ring_count); - rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); - rx_ring->count = min(rx_ring->count, (u32)(E1000_MAX_RXD)); - rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE); + /* Allocate temporary storage for ring updates */ + if (set_tx) { + temp_tx = vmalloc(size); + if (!temp_tx) { + err = -ENOMEM; + goto free_temp; + } + } + if (set_rx) { + temp_rx = vmalloc(size); + if (!temp_rx) { + err = -ENOMEM; + goto free_temp; + } + } - tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); - tx_ring->count = min(tx_ring->count, (u32)(E1000_MAX_TXD)); - tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE); + e1000e_down(adapter); - if (netif_running(adapter->netdev)) { - /* Try to get new resources before deleting old */ - err = e1000e_setup_rx_resources(adapter); + /* + * We can't just free everything and then setup again, because the + * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring + * structs. First, attempt to allocate new resources... + */ + if (set_tx) { + memcpy(temp_tx, adapter->tx_ring, size); + temp_tx->count = new_tx_count; + err = e1000e_setup_tx_resources(temp_tx); if (err) - goto err_setup_rx; - err = e1000e_setup_tx_resources(adapter); + goto err_setup; + } + if (set_rx) { + memcpy(temp_rx, adapter->rx_ring, size); + temp_rx->count = new_rx_count; + err = e1000e_setup_rx_resources(temp_rx); if (err) - goto err_setup_tx; + goto err_setup_rx; + } - /* - * restore the old in order to free it, - * then add in the new - */ - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - e1000e_free_rx_resources(adapter); - e1000e_free_tx_resources(adapter); - kfree(tx_old); - kfree(rx_old); - adapter->rx_ring = rx_ring; - adapter->tx_ring = tx_ring; - err = e1000e_up(adapter); - if (err) - goto err_setup; + /* ...then free the old resources and copy back any new ring data */ + if (set_tx) { + e1000e_free_tx_resources(adapter->tx_ring); + memcpy(adapter->tx_ring, temp_tx, size); + adapter->tx_ring_count = new_tx_count; + } + if (set_rx) { + e1000e_free_rx_resources(adapter->rx_ring); + memcpy(adapter->rx_ring, temp_rx, size); + adapter->rx_ring_count = new_rx_count; } - clear_bit(__E1000_RESETTING, &adapter->state); - return 0; -err_setup_tx: - e1000e_free_rx_resources(adapter); err_setup_rx: - adapter->rx_ring = rx_old; - adapter->tx_ring = tx_old; - kfree(rx_ring); -err_alloc_rx: - kfree(tx_ring); -err_alloc_tx: - e1000e_up(adapter); + if (err && set_tx) + e1000e_free_tx_resources(temp_tx); err_setup: + e1000e_up(adapter); +free_temp: + vfree(temp_tx); + vfree(temp_rx); +clear_reset: clear_bit(__E1000_RESETTING, &adapter->state); return err; } @@ -1069,7 +1088,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) tx_ring->buffer_info = kcalloc(tx_ring->count, sizeof(struct e1000_buffer), GFP_KERNEL); - if (!(tx_ring->buffer_info)) { + if (!tx_ring->buffer_info) { ret_val = 1; goto err_nomem; } @@ -1131,7 +1150,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rx_ring->buffer_info = kcalloc(rx_ring->count, sizeof(struct e1000_buffer), GFP_KERNEL); - if (!(rx_ring->buffer_info)) { + if (!rx_ring->buffer_info) { ret_val = 5; goto err_nomem; } @@ -1579,11 +1598,13 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) 
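The rewritten ring-resize path above hinges on the constraint its comment spells out: in MSI-X mode the interrupt handlers were handed pointers to adapter->tx_ring and adapter->rx_ring, so those structs must keep their addresses and only their contents may change. A self-contained sketch of the resulting allocate-first, copy-back-in-place strategy, with calloc/free standing in for the driver's e1000e_setup_*_resources() and e1000e_free_*_resources() helpers:

#include <stdlib.h>

struct ring {
	void *desc;		/* descriptor memory */
	unsigned int count;	/* number of descriptors */
};

/* Allocate descriptor memory for r->count entries of 'size' bytes each. */
static int ring_setup(struct ring *r, size_t size)
{
	r->desc = calloc(r->count, size);
	return r->desc ? 0 : -1;
}

static int ring_resize(struct ring *live, unsigned int new_count, size_t size)
{
	struct ring tmp = *live;	/* start from the live ring's settings */

	tmp.count = new_count;
	if (ring_setup(&tmp, size))	/* get new resources before... */
		return -1;		/* ...touching the old ones */

	free(live->desc);		/* now drop the old descriptors */
	*live = tmp;			/* copy back in place: the struct's
					 * address never changes, so a pointer
					 * held elsewhere stays valid */
	return 0;
}

int main(void)
{
	struct ring rx = { .desc = NULL, .count = 256 };

	if (ring_setup(&rx, 16))		/* initial allocation */
		return 1;
	if (ring_resize(&rx, 4096, 16))	/* grow; old memory freed on success */
		return 1;
	free(rx.desc);
	return 0;
}

On any allocation or setup failure the live ring is untouched, which is why the new error paths can simply bring the interface back up.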
{ + struct e1000_hw *hw = &adapter->hw; + /* * PHY loopback cannot be performed if SoL/IDER * sessions are active */ - if (e1000_check_reset_block(&adapter->hw)) { + if (hw->phy.ops.check_reset_block(hw)) { e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; @@ -1837,11 +1858,11 @@ static int e1000_set_phys_id(struct net_device *netdev, break; case ETHTOOL_ID_ON: - adapter->hw.mac.ops.led_on(&adapter->hw); + hw->mac.ops.led_on(hw); break; case ETHTOOL_ID_OFF: - adapter->hw.mac.ops.led_off(&adapter->hw); + hw->mac.ops.led_off(hw); break; } return 0; @@ -1955,6 +1976,53 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, } } +static int e1000_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + info->data = 0; + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = er32(MRQC); + + if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) + return 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } + return 0; + } + default: + return -EOPNOTSUPP; + } +} + static const struct ethtool_ops e1000_ethtool_ops = { .get_settings = e1000_get_settings, .set_settings = e1000_set_settings, @@ -1981,6 +2049,7 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_sset_count = e1000e_get_sset_count, .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, + .get_rxnfc = e1000_get_rxnfc, }; void e1000e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 29670397079..f82ecf536c8 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
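The new e1000_get_rxnfc() above answers ETHTOOL_GRXFH queries, i.e. which packet header fields feed the RSS hash, by decoding MRQC bits; the switch falls through deliberately so TCP flows report the L4 port fields on top of the IP fields. A reduced, standalone model of that decode for TCP over IPv4 (the bit values here are illustrative, not the real MRQC or RXH_* encodings):

#include <stdint.h>
#include <stdio.h>

/* Bits modeled on the MRQC fields the driver checks (values illustrative). */
#define RSS_FIELD_IPV4      (1u << 0)
#define RSS_FIELD_IPV4_TCP  (1u << 1)

#define RXH_IP_SRC   (1u << 0)
#define RXH_IP_DST   (1u << 1)
#define RXH_L4_B_0_1 (1u << 2)	/* source port */
#define RXH_L4_B_2_3 (1u << 3)	/* destination port */

/* TCP/IPv4 report: L4 ports first, then fall through to the IP fields,
 * mirroring the deliberate switch fall-through in e1000_get_rxnfc(). */
static uint32_t tcp4_hash_fields(uint32_t mrqc)
{
	uint32_t data = 0;

	if (mrqc & RSS_FIELD_IPV4_TCP)
		data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	if (mrqc & RSS_FIELD_IPV4)
		data |= RXH_IP_SRC | RXH_IP_DST;
	return data;
}

int main(void)
{
	printf("0x%x\n", tcp4_hash_fields(RSS_FIELD_IPV4 | RSS_FIELD_IPV4_TCP));
	return 0;	/* prints 0xf: both IPs and both port halves hashed */
}

Userspace reaches this path through, for example, "ethtool -n eth0 rx-flow-hash tcp4".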
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -204,6 +204,7 @@ enum e1e_registers { E1000_WUC = 0x05800, /* Wakeup Control - RW */ E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ E1000_WUS = 0x05810, /* Wakeup Status - RO */ + E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */ E1000_MANC = 0x05820, /* Management Control - RW */ E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */ E1000_HOST_IF = 0x08800, /* Host Interface */ @@ -219,6 +220,10 @@ enum e1e_registers { E1000_SWSM = 0x05B50, /* SW Semaphore */ E1000_FWSM = 0x05B54, /* FW Semaphore */ E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */ + E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */ +#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4)) + E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */ +#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4)) E1000_FFLT_DBG = 0x05F04, /* Debug Register */ E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */ #define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4)) @@ -776,6 +781,7 @@ struct e1000_mac_operations { s32 (*setup_physical_interface)(struct e1000_hw *); s32 (*setup_led)(struct e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); s32 (*read_mac_addr)(struct e1000_hw *); }; @@ -824,6 +830,7 @@ struct e1000_nvm_operations { s32 (*acquire)(struct e1000_hw *); s32 (*read)(struct e1000_hw *, u16, u16, u16 *); void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); s32 (*update)(struct e1000_hw *); s32 (*valid_led_default)(struct e1000_hw *, u16 *); s32 (*validate)(struct e1000_hw *); @@ -964,8 +971,8 @@ struct e1000_dev_spec_ich8lan { struct e1000_hw { struct e1000_adapter *adapter; - u8 __iomem *hw_addr; - u8 __iomem *flash_address; + void __iomem *hw_addr; + void __iomem *flash_address; struct e1000_mac_info mac; struct e1000_fc_info fc; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index e2a80a283fd..64c76443a7a 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
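Among the register-map additions above, RETA and RSSRK are register arrays reached through index macros that step one 32-bit register (4 bytes) per index; the 0x80-byte gap between the two bases leaves room for 32 RETA registers. The arithmetic can be checked standalone:

#include <stdio.h>

#define E1000_RETA_BASE  0x05C00
#define E1000_RETA(_n)   (E1000_RETA_BASE + ((_n) * 4))
#define E1000_RSSRK_BASE 0x05C80
#define E1000_RSSRK(_n)  (E1000_RSSRK_BASE + ((_n) * 4))

int main(void)
{
	/* Each index advances by one 32-bit register. */
	printf("RETA[0]=0x%05X RETA[1]=0x%05X RSSRK[0]=0x%05X\n",
	       (unsigned)E1000_RETA(0), (unsigned)E1000_RETA(1),
	       (unsigned)E1000_RSSRK(0));
	return 0;	/* prints 0x05C00, 0x05C04 and 0x05C80 */
}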
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -145,6 +145,8 @@ #define I82579_EMI_ADDR 0x10 #define I82579_EMI_DATA 0x11 #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C @@ -278,8 +280,8 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) #define er16flash(reg) __er16flash(hw, (reg)) #define er32flash(reg) __er32flash(hw, (reg)) -#define ew16flash(reg,val) __ew16flash(hw, (reg), (val)) -#define ew32flash(reg,val) __ew32flash(hw, (reg), (val)) +#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) { @@ -304,7 +306,6 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - u32 fwsm; s32 ret_val = 0; phy->addr = 1; @@ -323,14 +324,14 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; - /* - * The MAC-PHY interconnect may still be in SMBus mode - * after Sx->S0. If the manageability engine (ME) is - * disabled, then toggle the LANPHYPC Value bit to force - * the interconnect to PCIe mode. - */ - fwsm = er32(FWSM); - if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { + if (!hw->phy.ops.check_reset_block(hw)) { + u32 fwsm = er32(FWSM); + + /* + * The MAC-PHY interconnect may still be in SMBus mode after + * Sx->S0. If resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ e1000_toggle_lanphypc_value_ich8lan(hw); msleep(50); @@ -338,25 +339,26 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) * Gate automatic PHY configuration by hardware on * non-managed 82579 */ - if (hw->mac.type == e1000_pch2lan) + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, true); - } - /* - * Reset the PHY before any access to it. Doing so, ensures that - * the PHY is in a known good state before we read/write PHY registers. - * The generic reset is sufficient here, because we haven't determined - * the PHY type yet. - */ - ret_val = e1000e_phy_hw_reset_generic(hw); - if (ret_val) - goto out; + /* + * Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. 
+ */ + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; - /* Ungate automatic PHY configuration on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && - !(fwsm & E1000_ICH_FWSM_FW_VALID)) { - usleep_range(10000, 20000); - e1000_gate_hw_phy_config_ich8lan(hw, false); + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } } phy->id = e1000_phy_unknown; @@ -364,7 +366,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) default: ret_val = e1000e_get_phy_id(hw); if (ret_val) - goto out; + return ret_val; if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) break; /* fall-through */ @@ -375,10 +377,10 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) - goto out; + return ret_val; ret_val = e1000e_get_phy_id(hw); if (ret_val) - goto out; + return ret_val; break; } phy->type = e1000e_get_phy_type_from_id(phy->id); @@ -404,7 +406,6 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) break; } -out: return ret_val; } @@ -551,9 +552,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) * Initialize family-specific MAC parameters and function * pointers. **/ -static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) +static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) { - struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; /* Set media type function pointer */ @@ -580,7 +580,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; /* ID LED init */ - mac->ops.id_led_init = e1000e_id_led_init; + mac->ops.id_led_init = e1000e_id_led_init_generic; /* blink LED */ mac->ops.blink_led = e1000e_blink_led_generic; /* setup LED */ @@ -634,20 +634,18 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) u16 phy_reg; if (hw->phy.type != e1000_phy_82579) - goto out; + return 0; ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); if (ret_val) - goto out; + return ret_val; if (hw->dev_spec.ich8lan.eee_disable) phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; else phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; - ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); -out: - return ret_val; + return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); } /** @@ -671,10 +669,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. 
*/ - if (!mac->get_link_status) { - ret_val = 0; - goto out; - } + if (!mac->get_link_status) + return 0; /* * First we want to see if the MII Status Register reports @@ -683,16 +679,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) */ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) - goto out; + return ret_val; } if (!link) - goto out; /* No link detected */ + return 0; /* No link detected */ mac->get_link_status = false; @@ -700,13 +696,13 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) case e1000_pch2lan: ret_val = e1000_k1_workaround_lv(hw); if (ret_val) - goto out; + return ret_val; /* fall-thru */ case e1000_pchlan: if (hw->phy.type == e1000_phy_82578) { ret_val = e1000_link_stall_workaround_hv(hw); if (ret_val) - goto out; + return ret_val; } /* @@ -736,23 +732,21 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) /* Enable/Disable EEE after link up */ ret_val = e1000_set_eee_pchlan(hw); if (ret_val) - goto out; + return ret_val; /* * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - goto out; - } + if (!mac->autoneg) + return -E1000_ERR_CONFIG; /* * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ - e1000e_config_collision_dist(hw); + mac->ops.config_collision_dist(hw); /* * Configure Flow Control now that Auto-Neg has completed. @@ -764,7 +758,6 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) e_dbg("Error configuring flow control\n"); -out: return ret_val; } @@ -773,7 +766,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; s32 rc; - rc = e1000_init_mac_params_ich8lan(adapter); + rc = e1000_init_mac_params_ich8lan(hw); if (rc) return rc; @@ -900,8 +893,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) } if (!timeout) { - e_dbg("Failed to acquire the semaphore, FW or HW has it: " - "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", er32(FWSM), extcnf_ctrl); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); @@ -1008,15 +1000,13 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data &= ~HV_SMB_ADDR_MASK; phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); -out: - return ret_val; + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); } /** @@ -1065,7 +1055,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) data = er32(FEXTNVM); if (!(data & sw_cfg_mask)) - goto out; + goto release; /* * Make sure HW does not configure LCD from PHY @@ -1074,14 +1064,14 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) data = er32(EXTCNF_CTRL); if (!(hw->mac.type == e1000_pch2lan)) { if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - goto out; + goto release; } cnf_size = er32(EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; cnf_size >>= 
E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; if (!cnf_size) - goto out; + goto release; cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; @@ -1097,13 +1087,13 @@ */ ret_val = e1000_write_smbus_addr(hw); if (ret_val) - goto out; + goto release; data = er32(LEDCTL); ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, (u16)data); if (ret_val) - goto out; + goto release; } /* Configure LCD from extended configuration region. */ @@ -1115,12 +1105,12 @@ */ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data); if (ret_val) - goto out; + goto release; ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), 1, &reg_addr); if (ret_val) - goto out; + goto release; /* Save off the PHY page for future writes. */ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { @@ -1134,10 +1124,10 @@ ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, reg_data); if (ret_val) - goto out; + goto release; } -out: +release: hw->phy.ops.release(hw); return ret_val; } @@ -1159,12 +1149,12 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; if (hw->mac.type != e1000_pchlan) - goto out; + return 0; /* Wrap the whole flow with the sw flag */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ if (link) { @@ -1218,7 +1208,7 @@ release: hw->phy.ops.release(hw); -out: + return ret_val; } @@ -1240,22 +1230,20 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) u32 reg = 0; u16 kmrn_reg = 0; - ret_val = e1000e_read_kmrn_reg_locked(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - &kmrn_reg); + ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); if (ret_val) - goto out; + return ret_val; if (k1_enable) kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; else kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; - ret_val = e1000e_write_kmrn_reg_locked(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - kmrn_reg); + ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); if (ret_val) - goto out; + return ret_val; udelay(20); ctrl_ext = er32(CTRL_EXT); @@ -1273,8 +1261,7 @@ e1e_flush(); udelay(20); -out: - return ret_val; + return 0; } /** @@ -1302,18 +1289,18 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) if (!(hw->mac.type == e1000_pch2lan)) { mac_reg = er32(EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) - goto out; + goto release; } mac_reg = er32(FEXTNVM); if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) - goto out; + goto release; mac_reg = er32(PHY_CTRL); ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) - goto out; + goto release; oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); @@ -1325,7 +1312,7 @@ oem_reg |= HV_OEM_BITS_LPLU; /* Set Restart auto-neg to activate the bits */ - if (!e1000_check_reset_block(hw)) + if (!hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; } else { if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | @@ -1339,7 +1326,7 @@ static s32 
e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); -out: +release: hw->phy.ops.release(hw); return ret_val; @@ -1376,13 +1363,13 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) u16 phy_data; if (hw->mac.type != e1000_pchlan) - return ret_val; + return 0; /* Set MDIO slow mode before any other MDIO access */ if (hw->phy.type == e1000_phy_82577) { ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) - goto out; + return ret_val; } if (((hw->phy.type == e1000_phy_82577) && @@ -1419,7 +1406,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); hw->phy.ops.release(hw); if (ret_val) - goto out; + return ret_val; /* * Configure the K1 Si workaround during phy reset assuming there is @@ -1427,12 +1414,12 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) */ ret_val = e1000_k1_gig_workaround_hv(hw, true); if (ret_val) - goto out; + return ret_val; /* Workaround for link disconnects on a busy hub in half duplex */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); if (ret_val) goto release; @@ -1440,7 +1427,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) phy_data & 0x00FF); release: hw->phy.ops.release(hw); -out: + return ret_val; } @@ -1497,13 +1484,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) u16 i; if (hw->mac.type != e1000_pch2lan) - goto out; + return 0; /* disable Rx path while enabling/disabling workaround */ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); if (ret_val) - goto out; + return ret_val; if (enable) { /* @@ -1545,24 +1532,24 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) - goto out; + return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data | (1 << 0)); if (ret_val) - goto out; + return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) - goto out; + return ret_val; /* Enable jumbo frame workaround in the PHY */ e1e_rphy(hw, PHY_REG(769, 23), &data); @@ -1570,25 +1557,25 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) data |= (0x37 << 5); ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); if (ret_val) - goto out; + return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); data &= ~(1 << 13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) - goto out; + return ret_val; e1e_rphy(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (0x1A << 2); ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); if (ret_val) - goto out; + return ret_val; ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); if (ret_val) - goto out; + return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); if (ret_val) - goto out; + return ret_val; } else { /* Write MAC register values back to h/w defaults */ mac_reg = er32(FFLT_DBG); @@ -1603,56 +1590,53 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) - goto out; + return ret_val; ret_val = 
e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data & ~(1 << 0)); if (ret_val) - goto out; + return ret_val; ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) - goto out; + return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) - goto out; + return ret_val; /* Write PHY register values back to h/w defaults */ e1e_rphy(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); if (ret_val) - goto out; + return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); data |= (1 << 13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) - goto out; + return ret_val; e1e_rphy(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (0x8 << 2); ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); if (ret_val) - goto out; + return ret_val; ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); if (ret_val) - goto out; + return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); if (ret_val) - goto out; + return ret_val; } /* re-enable Rx path after enabling/disabling workaround */ - ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); - -out: - return ret_val; + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); } /** @@ -1664,12 +1648,31 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) s32 ret_val = 0; if (hw->mac.type != e1000_pch2lan) - goto out; + return 0; /* Set MDIO slow mode before any other MDIO access */ ret_val = e1000_set_mdio_slow_mode_hv(hw); -out: + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, + I82579_MSE_THRESHOLD); + if (ret_val) + goto release; + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, + I82579_MSE_LINK_DOWN); + if (ret_val) + goto release; + /* drop link after 5 times MSE threshold was reached */ + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); +release: + hw->phy.ops.release(hw); + return ret_val; } @@ -1687,12 +1690,12 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) u16 phy_reg; if (hw->mac.type != e1000_pch2lan) - goto out; + return 0; /* Set K1 beacon duration based on 1Gbps speed or otherwise */ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); if (ret_val) - goto out; + return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { @@ -1701,7 +1704,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); if (ret_val) - goto out; + return ret_val; if (status_reg & HV_M_STATUS_SPEED_1000) { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; @@ -1714,7 +1717,6 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); } -out: return ret_val; } @@ -1741,7 +1743,6 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; ew32(EXTCNF_CTRL, extcnf_ctrl); - return; } /** @@ -1785,8 +1786,8 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) s32 ret_val = 0; u16 reg; - if (e1000_check_reset_block(hw)) - goto out; + if (hw->phy.ops.check_reset_block(hw)) + return 0; /* 
Allow time for h/w to get to quiescent state after reset */ usleep_range(10000, 20000); @@ -1796,12 +1797,12 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) case e1000_pchlan: ret_val = e1000_hv_phy_workarounds_ich8lan(hw); if (ret_val) - goto out; + return ret_val; break; case e1000_pch2lan: ret_val = e1000_lv_phy_workarounds_ich8lan(hw); if (ret_val) - goto out; + return ret_val; break; default: break; @@ -1817,7 +1818,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) /* Configure the LCD with the extended configuration region in NVM */ ret_val = e1000_sw_lcd_config_ich8lan(hw); if (ret_val) - goto out; + return ret_val; /* Configure the LCD with the OEM bits in NVM */ ret_val = e1000_oem_bits_config_ich8lan(hw, true); @@ -1832,18 +1833,16 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) /* Set EEE LPI Update Timer to 200usec */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, I82579_LPI_UPDATE_TIMER); - if (ret_val) - goto release; - ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, - 0x1387); -release: + if (!ret_val) + ret_val = hw->phy.ops.write_reg_locked(hw, + I82579_EMI_DATA, + 0x1387); hw->phy.ops.release(hw); } -out: return ret_val; } @@ -1866,12 +1865,9 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) - goto out; - - ret_val = e1000_post_phy_reset_ich8lan(hw); + return ret_val; -out: - return ret_val; + return e1000_post_phy_reset_ich8lan(hw); } /** @@ -1892,18 +1888,17 @@ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); if (ret_val) - goto out; + return ret_val; if (active) oem_reg |= HV_OEM_BITS_LPLU; else oem_reg &= ~HV_OEM_BITS_LPLU; - oem_reg |= HV_OEM_BITS_RESTART_AN; - ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); + if (!hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; -out: - return ret_val; + return e1e_wphy(hw, HV_OEM_BITS, oem_reg); } /** @@ -1927,7 +1922,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) u16 data; if (phy->type == e1000_phy_ife) - return ret_val; + return 0; phy_ctrl = er32(PHY_CTRL); @@ -2009,7 +2004,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; - s32 ret_val; + s32 ret_val = 0; u16 data; phy_ctrl = er32(PHY_CTRL); @@ -2075,7 +2070,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); } - return 0; + return ret_val; } /** @@ -2093,7 +2088,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; u8 sig_byte = 0; - s32 ret_val = 0; + s32 ret_val; switch (hw->mac.type) { case e1000_ich8lan: @@ -2108,8 +2103,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) return 0; } - e_dbg("Unable to determine valid NVM bank via EEC - " - "reading flash signature\n"); + e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); /* fall-thru */ default: /* set bank to 0 in case flash read fails */ @@ -2141,8 +2135,6 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) e_dbg("ERROR: No valid NVM bank present\n"); return -E1000_ERR_NVM; } - - return 
0; } /** @@ -2221,8 +2213,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) /* Check if the flash descriptor is valid */ if (hsfsts.hsf_status.fldesvalid == 0) { - e_dbg("Flash descriptor invalid. " - "SW Sequencing must be used.\n"); + e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } @@ -2251,21 +2242,21 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { - s32 i = 0; + s32 i; /* * Otherwise poll for sometime so the current * cycle has a chance to end before giving up. */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { - hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcinprog == 0) { ret_val = 0; break; } udelay(1); } - if (ret_val == 0) { + if (!ret_val) { /* * Successful in waiting for previous cycle to timeout, * now set the Flash Cycle Done. @@ -2291,7 +2282,6 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) { union ich8_hws_flash_ctrl hsflctl; union ich8_hws_flash_status hsfsts; - s32 ret_val = -E1000_ERR_NVM; u32 i = 0; /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ @@ -2310,7 +2300,7 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) return 0; - return ret_val; + return -E1000_ERR_NVM; } /** @@ -2383,7 +2373,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, udelay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); - if (ret_val != 0) + if (ret_val) break; hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); @@ -2403,7 +2393,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, * read in (shift in) the Flash Data0, the order is * least significant byte first msb to lsb */ - if (ret_val == 0) { + if (!ret_val) { flash_data = er32flash(ICH_FLASH_FDATA0); if (size == 1) *data = (u8)(flash_data & 0x000000FF); @@ -2422,8 +2412,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* Repeat for some time before giving up. */ continue; } else if (hsfsts.hsf_status.flcdone == 0) { - e_dbg("Timeout error - flash cycle " - "did not complete.\n"); + e_dbg("Timeout error - flash cycle did not complete.\n"); break; } } @@ -2618,7 +2607,7 @@ release: * until after the next adapter reset. */ if (!ret_val) { - e1000e_reload_nvm(hw); + nvm->ops.reload(hw); usleep_range(10000, 20000); } @@ -2774,8 +2763,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, /* Repeat for some time before giving up. */ continue; if (hsfsts.hsf_status.flcdone == 0) { - e_dbg("Timeout error - flash cycle " - "did not complete."); + e_dbg("Timeout error - flash cycle did not complete.\n"); break; } } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); @@ -2917,7 +2905,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_ERASE_COMMAND_TIMEOUT); - if (ret_val == 0) + if (!ret_val) break; /* @@ -2972,7 +2960,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) * * PCH also does not have an "always on" or "always off" mode which * complicates the ID feature. 
Instead of using the "on" mode to indicate - * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()), + * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), * use "link_up" mode. The LEDs will still ID on request if there is no * link based on logic in e1000_led_[on|off]_pchlan(). **/ @@ -2987,7 +2975,7 @@ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) /* Get default ID LED modes */ ret_val = hw->nvm.ops.valid_led_default(hw, &data); if (ret_val) - goto out; + return ret_val; mac->ledctl_default = er32(LEDCTL); mac->ledctl_mode1 = mac->ledctl_default; @@ -3032,8 +3020,7 @@ static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) } } -out: - return ret_val; + return 0; } /** @@ -3120,7 +3107,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ctrl = er32(CTRL); - if (!e1000_check_reset_block(hw)) { + if (!hw->phy.ops.check_reset_block(hw)) { /* * Full-chip reset requires MAC and PHY reset at the same * time to make sure the interface between MAC and the @@ -3148,11 +3135,11 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) if (ctrl & E1000_CTRL_PHY_RST) { ret_val = hw->phy.ops.get_cfg_done(hw); if (ret_val) - goto out; + return ret_val; ret_val = e1000_post_phy_reset_ich8lan(hw); if (ret_val) - goto out; + return ret_val; } /* @@ -3170,8 +3157,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) kab |= E1000_KABGTXD_BGSQLBIAS; ew32(KABGTXD, kab); -out: - return ret_val; + return 0; } /** @@ -3224,7 +3210,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) } /* Setup link and flow control */ - ret_val = e1000_setup_link_ich8lan(hw); + ret_val = mac->ops.setup_link(hw); /* Set the transmit descriptor write-back policy for both queues */ txdctl = er32(TXDCTL(0)); @@ -3262,7 +3248,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) */ e1000_clear_hw_cntrs_ich8lan(hw); - return 0; + return ret_val; } /** * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits @@ -3339,7 +3325,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) { s32 ret_val; - if (e1000_check_reset_block(hw)) + if (hw->phy.ops.check_reset_block(hw)) return 0; /* @@ -3365,7 +3351,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) hw->fc.current_mode); /* Continue to configure the copper link. */ - ret_val = e1000_setup_copper_link_ich8lan(hw); + ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; @@ -3465,6 +3451,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) default: break; } + return e1000e_setup_copper_link(hw); } @@ -3566,7 +3553,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) } /** - * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state * @hw: pointer to the HW structure * @state: boolean value used to set the current Kumeran workaround state * @@ -3676,9 +3663,10 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) * * During S0 to Sx transition, it is possible the link remains at gig * instead of negotiating to a lower speed. Before going to Sx, set - * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation - * to a lower speed. For PCH and newer parts, the OEM bits PHY register - * (LED, GbE disable and LPLU configurations) also needs to be written. + * 'Gig Disable' to force link speed negotiation to a lower speed based on + * the LPLU setting in the NVM or custom setting. 
For PCH and newer parts, + * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also + * needs to be written. **/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { @@ -3686,7 +3674,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) s32 ret_val; phy_ctrl = er32(PHY_CTRL); - phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; + phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; ew32(PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) @@ -3714,47 +3702,41 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) **/ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { - u32 fwsm; + u16 phy_id1, phy_id2; + s32 ret_val; - if (hw->mac.type != e1000_pch2lan) + if ((hw->mac.type != e1000_pch2lan) || + hw->phy.ops.check_reset_block(hw)) return; - fwsm = er32(FWSM); - if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) { - u16 phy_id1, phy_id2; - s32 ret_val; - - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) { - e_dbg("Failed to acquire PHY semaphore in resume\n"); - return; - } + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to acquire PHY semaphore in resume\n"); + return; + } - /* Test access to the PHY registers by reading the ID regs */ - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); - if (ret_val) - goto release; - ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); - if (ret_val) - goto release; + /* Test access to the PHY registers by reading the ID regs */ + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); + if (ret_val) + goto release; + ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); + if (ret_val) + goto release; - if (hw->phy.id == ((u32)(phy_id1 << 16) | - (u32)(phy_id2 & PHY_REVISION_MASK))) - goto release; + if (hw->phy.id == ((u32)(phy_id1 << 16) | + (u32)(phy_id2 & PHY_REVISION_MASK))) + goto release; - e1000_toggle_lanphypc_value_ich8lan(hw); + e1000_toggle_lanphypc_value_ich8lan(hw); - hw->phy.ops.release(hw); - msleep(50); - e1000_phy_hw_reset(hw); - msleep(50); - return; - } + hw->phy.ops.release(hw); + msleep(50); + e1000_phy_hw_reset(hw); + msleep(50); + return; release: hw->phy.ops.release(hw); - - return; } /** @@ -4023,7 +4005,6 @@ release: } static const struct e1000_mac_operations ich8_mac_ops = { - .id_led_init = e1000e_id_led_init, /* check_mng_mode dependent on mac type */ .check_for_link = e1000_check_for_copper_link_ich8lan, /* cleanup_led dependent on mac type */ @@ -4039,6 +4020,7 @@ static const struct e1000_mac_operations ich8_mac_ops = { .setup_link = e1000_setup_link_ich8lan, .setup_physical_interface= e1000_setup_copper_link_ich8lan, /* id_led_init dependent on mac type */ + .config_collision_dist = e1000e_config_collision_dist_generic, }; static const struct e1000_phy_operations ich8_phy_ops = { @@ -4059,6 +4041,7 @@ static const struct e1000_nvm_operations ich8_nvm_ops = { .acquire = e1000_acquire_nvm_ich8lan, .read = e1000_read_nvm_ich8lan, .release = e1000_release_nvm_ich8lan, + .reload = e1000e_reload_nvm_generic, .update = e1000_update_nvm_checksum_ich8lan, .valid_led_default = e1000_valid_led_default_ich8lan, .validate = e1000_validate_nvm_checksum_ich8lan, @@ -4088,10 +4071,9 @@ const struct e1000_info e1000_ich9_info = { | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT - | FLAG_HAS_ERT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, - .pba = 10, + .pba = 18, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, @@ -4106,10 +4088,9 @@ const 
struct e1000_info e1000_ich10_info = { | FLAG_HAS_WOL | FLAG_HAS_CTRLEXT_ON_LOAD | FLAG_HAS_AMT - | FLAG_HAS_ERT | FLAG_HAS_FLASH | FLAG_APME_IN_WUC, - .pba = 10, + .pba = 18, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_ich8lan, .mac_ops = &ich8_mac_ops, diff --git a/drivers/net/ethernet/intel/e1000e/lib.c b/drivers/net/ethernet/intel/e1000e/mac.c index 0893ab107ad..decad98c105 100644 --- a/drivers/net/ethernet/intel/e1000e/lib.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -28,19 +28,6 @@ #include "e1000.h" -enum e1000_mng_mode { - e1000_mng_mode_none = 0, - e1000_mng_mode_asf, - e1000_mng_mode_pt, - e1000_mng_mode_ipmi, - e1000_mng_mode_host_if_only -}; - -#define E1000_FACTPS_MNGCG 0x20000000 - -/* Intel(R) Active Management Technology signature */ -#define E1000_IAMT_SIGNATURE 0x544D4149 - /** * e1000e_get_bus_info_pcie - Get PCIe bus information * @hw: pointer to the HW structure @@ -151,7 +138,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) { u32 i; - u8 mac_addr[ETH_ALEN] = {0}; + u8 mac_addr[ETH_ALEN] = { 0 }; /* Setup the receive address */ e_dbg("Programming MAC Address into RAR[0]\n"); @@ -159,7 +146,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) e1000e_rar_set(hw, hw->mac.addr, 0); /* Zero out the other (rar_entry_count - 1) receive addresses */ - e_dbg("Clearing RAR[1-%u]\n", rar_count-1); + e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); for (i = 1; i < rar_count; i++) e1000e_rar_set(hw, mac_addr, i); } @@ -185,26 +172,23 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); if (ret_val) - goto out; + return ret_val; - /* Check for LOM (vs. 
NIC) or one of two valid mezzanine cards */ - if (!((nvm_data & NVM_COMPAT_LOM) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES))) - goto out; + /* not supported on 82573 */ + if (hw->mac.type == e1000_82573) + return 0; ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, - &nvm_alt_mac_addr_offset); + &nvm_alt_mac_addr_offset); if (ret_val) { e_dbg("NVM Read Error\n"); - goto out; + return ret_val; } if ((nvm_alt_mac_addr_offset == 0xFFFF) || (nvm_alt_mac_addr_offset == 0x0000)) /* There is no Alternate MAC Address */ - goto out; + return 0; if (hw->bus.func == E1000_FUNC_1) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; @@ -213,7 +197,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); if (ret_val) { e_dbg("NVM Read Error\n"); - goto out; + return ret_val; } alt_mac_addr[i] = (u8)(nvm_data & 0xFF); @@ -223,7 +207,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) /* if multicast bit is set, the alternate address will not be used */ if (is_multicast_ether_addr(alt_mac_addr)) { e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); - goto out; + return 0; } /* @@ -233,8 +217,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) */ e1000e_rar_set(hw, alt_mac_addr, 0); -out: - return ret_val; + return 0; } /** @@ -254,11 +237,10 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ - rar_low = ((u32) addr[0] | - ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) @@ -281,8 +263,7 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) * @mc_addr: pointer to a multicast address * * Generates a multicast address hash value which is used to determine - * the multicast filter table array address and new table value. See - * e1000_mta_set_generic() + * the multicast filter table array address and new table value. **/ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { @@ -318,7 +299,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) * values resulting from each mc_filter_type... 
* [0] [1] [2] [3] [4] [5] * 01 AA 00 12 34 56 - * LSB MSB + * LSB MSB * * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 @@ -341,7 +322,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) } hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | - (((u16) mc_addr[5]) << bit_shift))); + (((u16)mc_addr[5]) << bit_shift))); return hash_value; } @@ -365,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); /* update mta_shadow from mc_addr_list */ - for (i = 0; (u32) i < mc_addr_count; i++) { + for (i = 0; (u32)i < mc_addr_count; i++) { hash_value = e1000_hash_mc_addr(hw, mc_addr_list); hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); @@ -461,7 +442,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) return ret_val; if (!link) - return ret_val; /* No link detected */ + return 0; /* No link detected */ mac->get_link_status = false; @@ -475,17 +456,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ - if (!mac->autoneg) { - ret_val = -E1000_ERR_CONFIG; - return ret_val; - } + if (!mac->autoneg) + return -E1000_ERR_CONFIG; /* * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ - e1000e_config_collision_dist(hw); + mac->ops.config_collision_dist(hw); /* * Configure Flow Control now that Auto-Neg has completed. @@ -528,10 +507,10 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) * was just plugged in. The autoneg_failed flag does this. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && - (!(rxcw & E1000_RXCW_C))) { - if (mac->autoneg_failed == 0) { - mac->autoneg_failed = 1; + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; return 0; } e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); @@ -594,9 +573,9 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) * time to complete. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ - if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { - if (mac->autoneg_failed == 0) { - mac->autoneg_failed = 1; + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; return 0; } e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); @@ -650,18 +629,16 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) if (E1000_TXCW_ANE & er32(TXCW)) { status = er32(STATUS); if (status & E1000_STATUS_LU) { - /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + /* SYNCH bit and IV bit are sticky, so reread rxcw. 
*/ udelay(10); rxcw = er32(RXCW); if (rxcw & E1000_RXCW_SYNCH) { if (!(rxcw & E1000_RXCW_IV)) { mac->serdes_has_link = true; - e_dbg("SERDES: Link up - autoneg " - "completed successfully.\n"); + e_dbg("SERDES: Link up - autoneg completed successfully.\n"); } else { mac->serdes_has_link = false; - e_dbg("SERDES: Link down - invalid" - "codewords detected in autoneg.\n"); + e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n"); } } else { mac->serdes_has_link = false; @@ -706,8 +683,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) hw->fc.requested_mode = e1000_fc_none; - else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == - NVM_WORD0F_ASM_DIR) + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) hw->fc.requested_mode = e1000_fc_tx_pause; else hw->fc.requested_mode = e1000_fc_full; @@ -716,7 +692,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) } /** - * e1000e_setup_link - Setup flow control and link settings + * e1000e_setup_link_generic - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow @@ -725,16 +701,15 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ -s32 e1000e_setup_link(struct e1000_hw *hw) +s32 e1000e_setup_link_generic(struct e1000_hw *hw) { - struct e1000_mac_info *mac = &hw->mac; s32 ret_val; /* * In the case of the phy reset being blocked, we already have a link. * We do not need to set it up again. */ - if (e1000_check_reset_block(hw)) + if (hw->phy.ops.check_reset_block(hw)) return 0; /* @@ -753,11 +728,10 @@ s32 e1000e_setup_link(struct e1000_hw *hw) */ hw->fc.current_mode = hw->fc.requested_mode; - e_dbg("After fix-ups FlowControl is now = %x\n", - hw->fc.current_mode); + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Call the necessary media_type subroutine to configure the link. */ - ret_val = mac->ops.setup_physical_interface(hw); + ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; @@ -876,7 +850,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) } if (i == FIBER_LINK_UP_LIMIT) { e_dbg("Never got a valid link from auto-neg!!!\n"); - mac->autoneg_failed = 1; + mac->autoneg_failed = true; /* * AutoNeg failed to achieve a link, so we'll call * mac->check_for_link. 
This routine will force the @@ -888,9 +862,9 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) e_dbg("Error while checking for link\n"); return ret_val; } - mac->autoneg_failed = 0; + mac->autoneg_failed = false; } else { - mac->autoneg_failed = 0; + mac->autoneg_failed = false; e_dbg("Valid Link Found\n"); } @@ -914,7 +888,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) /* Take the link out of reset */ ctrl &= ~E1000_CTRL_LRST; - e1000e_config_collision_dist(hw); + hw->mac.ops.config_collision_dist(hw); ret_val = e1000_commit_fc_settings_generic(hw); if (ret_val) @@ -945,18 +919,17 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) e_dbg("No signal detected\n"); } - return 0; + return ret_val; } /** - * e1000e_config_collision_dist - Configure collision distance + * e1000e_config_collision_dist_generic - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used - * during link setup. Currently no func pointer exists and all - * implementations are handled in the generic version of this function. + * during link setup. **/ -void e1000e_config_collision_dist(struct e1000_hw *hw) +void e1000e_config_collision_dist_generic(struct e1000_hw *hw) { u32 tctl; @@ -995,7 +968,9 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) * XON frames. */ fcrtl = hw->fc.low_water; - fcrtl |= E1000_FCRTL_XONE; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + fcrth = hw->fc.high_water; } ew32(FCRTL, fcrtl); @@ -1121,8 +1096,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) return ret_val; if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { - e_dbg("Copper PHY and Auto Neg " - "has not completed.\n"); + e_dbg("Copper PHY and Auto Neg has not completed.\n"); return ret_val; } @@ -1186,11 +1160,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; - e_dbg("Flow Control = FULL.\r\n"); + e_dbg("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - e_dbg("Flow Control = " - "Rx PAUSE frames only.\r\n"); + e_dbg("Flow Control = Rx PAUSE frames only.\n"); } } /* @@ -1202,11 +1175,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * 0 | 1 | 1 | 1 | e1000_fc_tx_pause */ else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && - (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && - (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && - (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; - e_dbg("Flow Control = Tx PAUSE frames only.\r\n"); + e_dbg("Flow Control = Tx PAUSE frames only.\n"); } /* * For transmitting PAUSE frames ONLY. @@ -1221,14 +1194,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; - e_dbg("Flow Control = Rx PAUSE frames only.\r\n"); + e_dbg("Flow Control = Rx PAUSE frames only.\n"); } else { /* * Per the IEEE spec, at this point flow control * should be disabled. 
*/ hw->fc.current_mode = e1000_fc_none; - e_dbg("Flow Control = NONE.\r\n"); + e_dbg("Flow Control = NONE.\n"); } /* @@ -1268,7 +1241,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) * Read the status register for the current speed/duplex and store the current * speed and duplex for copper connections. **/ -s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex) +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) { u32 status; @@ -1301,7 +1275,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup * Sets the speed and duplex to gigabit full duplex (the only possible option) * for fiber/serdes links. **/ -s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex) +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, + u16 *duplex) { *speed = SPEED_1000; *duplex = FULL_DUPLEX; @@ -1423,11 +1398,11 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) } /** - * e1000e_id_led_init - + * e1000e_id_led_init_generic - * @hw: pointer to the HW structure * **/ -s32 e1000e_id_led_init(struct e1000_hw *hw) +s32 e1000e_id_led_init_generic(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; @@ -1504,11 +1479,10 @@ s32 e1000e_setup_led_generic(struct e1000_hw *hw) ledctl = er32(LEDCTL); hw->mac.ledctl_default = ledctl; /* Turn off LED0 */ - ledctl &= ~(E1000_LEDCTL_LED0_IVRT | - E1000_LEDCTL_LED0_BLINK | - E1000_LEDCTL_LED0_MODE_MASK); + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); ledctl |= (E1000_LEDCTL_MODE_LED_OFF << - E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_LEDCTL_LED0_MODE_SHIFT); ew32(LEDCTL, ledctl); } else if (hw->phy.media_type == e1000_media_type_copper) { ew32(LEDCTL, hw->mac.ledctl_mode1); @@ -1544,7 +1518,7 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw) if (hw->phy.media_type == e1000_media_type_fiber) { /* always blink LED0 for PCI-E fiber */ ledctl_blink = E1000_LEDCTL_LED0_BLINK | - (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); } else { /* * set the blink bit for each LED that's "on" (0x0E) @@ -1657,8 +1631,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw) ew32(CTRL, ctrl); while (timeout) { - if (!(er32(STATUS) & - E1000_STATUS_GIO_MASTER_ENABLE)) + if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) break; udelay(100); timeout--; @@ -1684,7 +1657,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) if (!mac->adaptive_ifs) { e_dbg("Not in Adaptive IFS mode!\n"); - goto out; + return; } mac->current_ifs_val = 0; @@ -1695,8 +1668,6 @@ void e1000e_reset_adaptive(struct e1000_hw *hw) mac->in_ifs_mode = false; ew32(AIT, 0); -out: - return; } /** @@ -1712,7 +1683,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) if (!mac->adaptive_ifs) { e_dbg("Not in Adaptive IFS mode!\n"); - goto out; + return; } if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { @@ -1723,7 +1694,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw) mac->current_ifs_val = mac->ifs_min_val; else mac->current_ifs_val += - mac->ifs_step_size; + mac->ifs_step_size; ew32(AIT, mac->current_ifs_val); } } @@ -1735,959 +1706,4 @@ void e1000e_update_adaptive(struct e1000_hw *hw) ew32(AIT, 0); } } -out: - return; -} - -/** - * e1000_raise_eec_clk - Raise EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Enable/Raise the EEPROM clock bit. 
- **/ -static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) -{ - *eecd = *eecd | E1000_EECD_SK; - ew32(EECD, *eecd); - e1e_flush(); - udelay(hw->nvm.delay_usec); -} - -/** - * e1000_lower_eec_clk - Lower EEPROM clock - * @hw: pointer to the HW structure - * @eecd: pointer to the EEPROM - * - * Clear/Lower the EEPROM clock bit. - **/ -static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) -{ - *eecd = *eecd & ~E1000_EECD_SK; - ew32(EECD, *eecd); - e1e_flush(); - udelay(hw->nvm.delay_usec); -} - -/** - * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM - * @hw: pointer to the HW structure - * @data: data to send to the EEPROM - * @count: number of bits to shift out - * - * We need to shift 'count' bits out to the EEPROM. So, the value in the - * "data" parameter will be shifted out to the EEPROM one bit at a time. - * In order to do this, "data" must be broken down into bits. - **/ -static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - u32 mask; - - mask = 0x01 << (count - 1); - if (nvm->type == e1000_nvm_eeprom_spi) - eecd |= E1000_EECD_DO; - - do { - eecd &= ~E1000_EECD_DI; - - if (data & mask) - eecd |= E1000_EECD_DI; - - ew32(EECD, eecd); - e1e_flush(); - - udelay(nvm->delay_usec); - - e1000_raise_eec_clk(hw, &eecd); - e1000_lower_eec_clk(hw, &eecd); - - mask >>= 1; - } while (mask); - - eecd &= ~E1000_EECD_DI; - ew32(EECD, eecd); -} - -/** - * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM - * @hw: pointer to the HW structure - * @count: number of bits to shift in - * - * In order to read a register from the EEPROM, we need to shift 'count' bits - * in from the EEPROM. Bits are "shifted in" by raising the clock input to - * the EEPROM (setting the SK bit), and then reading the value of the data out - * "DO" bit. During this "shifting in" process the data in "DI" bit should - * always be clear. - **/ -static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) -{ - u32 eecd; - u32 i; - u16 data; - - eecd = er32(EECD); - - eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); - data = 0; - - for (i = 0; i < count; i++) { - data <<= 1; - e1000_raise_eec_clk(hw, &eecd); - - eecd = er32(EECD); - - eecd &= ~E1000_EECD_DI; - if (eecd & E1000_EECD_DO) - data |= 1; - - e1000_lower_eec_clk(hw, &eecd); - } - - return data; -} - -/** - * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion - * @hw: pointer to the HW structure - * @ee_reg: EEPROM flag for polling - * - * Polls the EEPROM status bit for either read or write completion based - * upon the value of 'ee_reg'. - **/ -s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) -{ - u32 attempts = 100000; - u32 i, reg = 0; - - for (i = 0; i < attempts; i++) { - if (ee_reg == E1000_NVM_POLL_READ) - reg = er32(EERD); - else - reg = er32(EEWR); - - if (reg & E1000_NVM_RW_REG_DONE) - return 0; - - udelay(5); - } - - return -E1000_ERR_NVM; -} - -/** - * e1000e_acquire_nvm - Generic request for access to EEPROM - * @hw: pointer to the HW structure - * - * Set the EEPROM access request bit and wait for EEPROM access grant bit. - * Return successful if access grant bit set, else clear the request for - * EEPROM access and return -E1000_ERR_NVM (-1). 
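
[Editor's note, not part of the patch] The shift helpers above move one bit per SK pulse, most significant bit first. A standalone model of that loop structure; the "device" here simply echoes DI back on DO, which is an assumption made only so the shift-in path is visible:

#include <stdint.h>
#include <stdio.h>

/* One clock pulse per bit, MSB first, as in e1000_shift_out_eec_bits(). */
static uint16_t shift_bits(uint16_t data, unsigned count)
{
        uint16_t echoed = 0;

        for (uint16_t mask = 1u << (count - 1); mask; mask >>= 1) {
                unsigned di = !!(data & mask);  /* drive DI */
                /* raise SK ... device samples DI, we sample DO ... lower SK */
                echoed = (uint16_t)((echoed << 1) | di);
        }
        return echoed;
}

int main(void)
{
        printf("0x%04x\n", shift_bits(0xBEEF, 16));     /* prints 0xbeef */
        return 0;
}
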
- **/ -s32 e1000e_acquire_nvm(struct e1000_hw *hw) -{ - u32 eecd = er32(EECD); - s32 timeout = E1000_NVM_GRANT_ATTEMPTS; - - ew32(EECD, eecd | E1000_EECD_REQ); - eecd = er32(EECD); - - while (timeout) { - if (eecd & E1000_EECD_GNT) - break; - udelay(5); - eecd = er32(EECD); - timeout--; - } - - if (!timeout) { - eecd &= ~E1000_EECD_REQ; - ew32(EECD, eecd); - e_dbg("Could not acquire NVM grant\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000_standby_nvm - Return EEPROM to standby state - * @hw: pointer to the HW structure - * - * Return the EEPROM to a standby state. - **/ -static void e1000_standby_nvm(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - - if (nvm->type == e1000_nvm_eeprom_spi) { - /* Toggle CS to flush commands */ - eecd |= E1000_EECD_CS; - ew32(EECD, eecd); - e1e_flush(); - udelay(nvm->delay_usec); - eecd &= ~E1000_EECD_CS; - ew32(EECD, eecd); - e1e_flush(); - udelay(nvm->delay_usec); - } -} - -/** - * e1000_stop_nvm - Terminate EEPROM command - * @hw: pointer to the HW structure - * - * Terminates the current command by inverting the EEPROM's chip select pin. - **/ -static void e1000_stop_nvm(struct e1000_hw *hw) -{ - u32 eecd; - - eecd = er32(EECD); - if (hw->nvm.type == e1000_nvm_eeprom_spi) { - /* Pull CS high */ - eecd |= E1000_EECD_CS; - e1000_lower_eec_clk(hw, &eecd); - } -} - -/** - * e1000e_release_nvm - Release exclusive access to EEPROM - * @hw: pointer to the HW structure - * - * Stop any current commands to the EEPROM and clear the EEPROM request bit. - **/ -void e1000e_release_nvm(struct e1000_hw *hw) -{ - u32 eecd; - - e1000_stop_nvm(hw); - - eecd = er32(EECD); - eecd &= ~E1000_EECD_REQ; - ew32(EECD, eecd); -} - -/** - * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write - * @hw: pointer to the HW structure - * - * Setups the EEPROM for reading and writing. - **/ -static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 eecd = er32(EECD); - u8 spi_stat_reg; - - if (nvm->type == e1000_nvm_eeprom_spi) { - u16 timeout = NVM_MAX_RETRY_SPI; - - /* Clear SK and CS */ - eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); - ew32(EECD, eecd); - e1e_flush(); - udelay(1); - - /* - * Read "Status Register" repeatedly until the LSB is cleared. - * The EEPROM will signal that the command has been completed - * by clearing bit 0 of the internal status register. If it's - * not cleared within 'timeout', then error out. - */ - while (timeout) { - e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, - hw->nvm.opcode_bits); - spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); - if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) - break; - - udelay(5); - e1000_standby_nvm(hw); - timeout--; - } - - if (!timeout) { - e_dbg("SPI NVM Status error\n"); - return -E1000_ERR_NVM; - } - } - - return 0; -} - -/** - * e1000e_read_nvm_eerd - Reads EEPROM using EERD register - * @hw: pointer to the HW structure - * @offset: offset of word in the EEPROM to read - * @words: number of words to read - * @data: word read from the EEPROM - * - * Reads a 16 bit word from the EEPROM using the EERD register. - **/ -s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - u32 i, eerd = 0; - s32 ret_val = 0; - - /* - * A check for invalid values: offset too large, too many words, - * too many words for the offset, and not enough words. 
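
[Editor's note, not part of the patch] The "out of bounds" test reads densely in the driver; as a standalone predicate it is just three conditions. The word size and sample values below are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* offset must lie inside the part, the count must be non-zero, and the
 * run must not extend past the last word - the same three tests made in
 * the read/write helpers. */
static bool nvm_args_ok(uint16_t word_size, uint16_t offset, uint16_t words)
{
        return offset < word_size && words != 0 &&
               words <= (uint16_t)(word_size - offset);
}

int main(void)
{
        printf("%d %d %d\n",
               nvm_args_ok(64, 63, 1),  /* 1: last word is readable */
               nvm_args_ok(64, 63, 2),  /* 0: runs off the end */
               nvm_args_ok(64, 10, 0)); /* 0: zero-length request */
        return 0;
}
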
- */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; - } - - for (i = 0; i < words; i++) { - eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + - E1000_NVM_RW_REG_START; - - ew32(EERD, eerd); - ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); - if (ret_val) - break; - - data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); - } - - return ret_val; -} - -/** - * e1000e_write_nvm_spi - Write to EEPROM using SPI - * @hw: pointer to the HW structure - * @offset: offset within the EEPROM to be written to - * @words: number of words to write - * @data: 16 bit word(s) to be written to the EEPROM - * - * Writes data to EEPROM at offset using SPI interface. - * - * If e1000e_update_nvm_checksum is not called after this function , the - * EEPROM will most likely contain an invalid checksum. - **/ -s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) -{ - struct e1000_nvm_info *nvm = &hw->nvm; - s32 ret_val; - u16 widx = 0; - - /* - * A check for invalid values: offset too large, too many words, - * and not enough words. - */ - if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || - (words == 0)) { - e_dbg("nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; - } - - ret_val = nvm->ops.acquire(hw); - if (ret_val) - return ret_val; - - while (widx < words) { - u8 write_opcode = NVM_WRITE_OPCODE_SPI; - - ret_val = e1000_ready_nvm_eeprom(hw); - if (ret_val) { - nvm->ops.release(hw); - return ret_val; - } - - e1000_standby_nvm(hw); - - /* Send the WRITE ENABLE command (8 bit opcode) */ - e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, - nvm->opcode_bits); - - e1000_standby_nvm(hw); - - /* - * Some SPI eeproms use the 8th address bit embedded in the - * opcode - */ - if ((nvm->address_bits == 8) && (offset >= 128)) - write_opcode |= NVM_A8_OPCODE_SPI; - - /* Send the Write command (8-bit opcode + addr) */ - e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); - e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), - nvm->address_bits); - - /* Loop to allow for up to whole page write of eeprom */ - while (widx < words) { - u16 word_out = data[widx]; - word_out = (word_out >> 8) | (word_out << 8); - e1000_shift_out_eec_bits(hw, word_out, 16); - widx++; - - if ((((offset + widx) * 2) % nvm->page_size) == 0) { - e1000_standby_nvm(hw); - break; - } - } - } - - usleep_range(10000, 20000); - nvm->ops.release(hw); - return 0; -} - -/** - * e1000_read_pba_string_generic - Read device part number - * @hw: pointer to the HW structure - * @pba_num: pointer to device part number - * @pba_num_size: size of part number buffer - * - * Reads the product board assembly (PBA) number from the EEPROM and stores - * the value in pba_num. 
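
[Editor's note, not part of the patch] The legacy branch of the function below turns two NVM words into the string "XXXXXX-0XX": hex nibbles with a fixed dash and '0' inserted. A standalone model of that decode, with made-up input words:

#include <stdint.h>
#include <stdio.h>

/* Same nibble layout as the legacy decode: word 0 supplies the first
 * four digits, word 1 the next two and last two, with "-0" in between. */
static void pba_to_string(uint16_t w0, uint16_t w1, char out[11])
{
        static const char hex[] = "0123456789ABCDEF";

        out[0] = hex[(w0 >> 12) & 0xF];
        out[1] = hex[(w0 >> 8) & 0xF];
        out[2] = hex[(w0 >> 4) & 0xF];
        out[3] = hex[w0 & 0xF];
        out[4] = hex[(w1 >> 12) & 0xF];
        out[5] = hex[(w1 >> 8) & 0xF];
        out[6] = '-';
        out[7] = '0';
        out[8] = hex[(w1 >> 4) & 0xF];
        out[9] = hex[w1 & 0xF];
        out[10] = '\0';
}

int main(void)
{
        char pba[11];

        pba_to_string(0x1234, 0x5678, pba);
        printf("%s\n", pba);    /* prints 123456-078 */
        return 0;
}
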
- **/ -s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, - u32 pba_num_size) -{ - s32 ret_val; - u16 nvm_data; - u16 pba_ptr; - u16 offset; - u16 length; - - if (pba_num == NULL) { - e_dbg("PBA string buffer was null\n"); - ret_val = E1000_ERR_INVALID_ARGUMENT; - goto out; - } - - ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - /* - * if nvm_data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (nvm_data != NVM_PBA_PTR_GUARD) { - e_dbg("NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - e_dbg("PBA string buffer too small\n"); - return E1000_ERR_NO_SPACE; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (nvm_data >> 12) & 0xF; - pba_num[1] = (nvm_data >> 8) & 0xF; - pba_num[2] = (nvm_data >> 4) & 0xF; - pba_num[3] = nvm_data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - goto out; - } - - ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - - if (length == 0xFFFF || length == 0) { - e_dbg("NVM PBA number section invalid length\n"); - ret_val = E1000_ERR_NVM_PBA_SECTION; - goto out; - } - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - e_dbg("PBA string buffer too small\n"); - ret_val = E1000_ERR_NO_SPACE; - goto out; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - goto out; - } - pba_num[offset * 2] = (u8)(nvm_data >> 8); - pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); - } - pba_num[offset * 2] = '\0'; - -out: - return ret_val; -} - -/** - * e1000_read_mac_addr_generic - Read device MAC address - * @hw: pointer to the HW structure - * - * Reads the device MAC address from the EEPROM and stores the value. - * Since devices with two ports use the same EEPROM, we increment the - * last bit in the MAC address for the second port. 
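
[Editor's note, not part of the patch] The function below unpacks the station address from the first receive-address register pair, least significant byte first. A standalone model with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

/* RAL holds MAC bytes 0-3; the low half of RAH holds bytes 4-5. */
static void mac_from_rar(uint32_t ral, uint32_t rah, uint8_t mac[6])
{
        int i;

        for (i = 0; i < 4; i++)
                mac[i] = (uint8_t)(ral >> (i * 8));
        for (i = 0; i < 2; i++)
                mac[i + 4] = (uint8_t)(rah >> (i * 8));
}

int main(void)
{
        uint8_t mac[6];

        mac_from_rar(0x563412AB, 0x0000BC9A, mac);      /* made-up values */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* prints ab:12:34:56:9a:bc */
        return 0;
}
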
- **/ -s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) -{ - u32 rar_high; - u32 rar_low; - u16 i; - - rar_high = er32(RAH(0)); - rar_low = er32(RAL(0)); - - for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); - - for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) - hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); - - for (i = 0; i < ETH_ALEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i]; - - return 0; -} - -/** - * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum - * @hw: pointer to the HW structure - * - * Calculates the EEPROM checksum by reading/adding each word of the EEPROM - * and then verifies that the sum of the EEPROM is equal to 0xBABA. - **/ -s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) -{ - s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { - ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error\n"); - return ret_val; - } - checksum += nvm_data; - } - - if (checksum != (u16) NVM_SUM) { - e_dbg("NVM Checksum Invalid\n"); - return -E1000_ERR_NVM; - } - - return 0; -} - -/** - * e1000e_update_nvm_checksum_generic - Update EEPROM checksum - * @hw: pointer to the HW structure - * - * Updates the EEPROM checksum by reading/adding each word of the EEPROM - * up to the checksum. Then calculates the EEPROM checksum and writes the - * value to the EEPROM. - **/ -s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) -{ - s32 ret_val; - u16 checksum = 0; - u16 i, nvm_data; - - for (i = 0; i < NVM_CHECKSUM_REG; i++) { - ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); - if (ret_val) { - e_dbg("NVM Read Error while updating checksum.\n"); - return ret_val; - } - checksum += nvm_data; - } - checksum = (u16) NVM_SUM - checksum; - ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); - if (ret_val) - e_dbg("NVM Write Error while updating checksum.\n"); - - return ret_val; -} - -/** - * e1000e_reload_nvm - Reloads EEPROM - * @hw: pointer to the HW structure - * - * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the - * extended control register. - **/ -void e1000e_reload_nvm(struct e1000_hw *hw) -{ - u32 ctrl_ext; - - udelay(10); - ctrl_ext = er32(CTRL_EXT); - ctrl_ext |= E1000_CTRL_EXT_EE_RST; - ew32(CTRL_EXT, ctrl_ext); - e1e_flush(); -} - -/** - * e1000_calculate_checksum - Calculate checksum for buffer - * @buffer: pointer to EEPROM - * @length: size of EEPROM to calculate a checksum for - * - * Calculates the checksum for some buffer on a specified length. The - * checksum calculated is returned. - **/ -static u8 e1000_calculate_checksum(u8 *buffer, u32 length) -{ - u32 i; - u8 sum = 0; - - if (!buffer) - return 0; - - for (i = 0; i < length; i++) - sum += buffer[i]; - - return (u8) (0 - sum); -} - -/** - * e1000_mng_enable_host_if - Checks host interface is enabled - * @hw: pointer to the HW structure - * - * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND - * - * This function checks whether the HOST IF is enabled for command operation - * and also checks whether the previous command is completed. It busy waits - * in case of previous command is not completed. - **/ -static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) -{ - u32 hicr; - u8 i; - - if (!(hw->mac.arc_subsystem_valid)) { - e_dbg("ARC subsystem not valid.\n"); - return -E1000_ERR_HOST_INTERFACE_COMMAND; - } - - /* Check that the host interface is enabled. 
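
[Editor's note, not part of the patch] The validate/update pair above enforces a single invariant: the first 0x40 EEPROM words sum to 0xBABA, with the last of those words reserved for the checksum itself. A standalone model; the 0x3F index is the conventional value of NVM_CHECKSUM_REG, stated here as an assumption since the constant is not defined in this hunk:

#include <stdint.h>
#include <stdio.h>

#define NVM_SUM                 0xBABAu
#define NVM_CHECKSUM_REG        0x3F    /* assumed driver constant */

/* Pick the checksum word so words 0x00..0x3F sum to 0xBABA (mod 2^16). */
static uint16_t nvm_fixup(uint16_t *image)
{
        uint16_t sum = 0;
        int i;

        for (i = 0; i < NVM_CHECKSUM_REG; i++)
                sum += image[i];
        image[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);

        return (uint16_t)(sum + image[NVM_CHECKSUM_REG]);
}

int main(void)
{
        uint16_t image[64] = { 0x8086, 0x10D3, 0x0042 };        /* arbitrary */

        printf("0x%04x\n", nvm_fixup(image));   /* prints 0xbaba */
        return 0;
}
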
*/ - hicr = er32(HICR); - if ((hicr & E1000_HICR_EN) == 0) { - e_dbg("E1000_HOST_EN bit disabled.\n"); - return -E1000_ERR_HOST_INTERFACE_COMMAND; - } - /* check the previous command is completed */ - for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { - hicr = er32(HICR); - if (!(hicr & E1000_HICR_C)) - break; - mdelay(1); - } - - if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { - e_dbg("Previous command timeout failed .\n"); - return -E1000_ERR_HOST_INTERFACE_COMMAND; - } - - return 0; -} - -/** - * e1000e_check_mng_mode_generic - check management mode - * @hw: pointer to the HW structure - * - * Reads the firmware semaphore register and returns true (>0) if - * manageability is enabled, else false (0). - **/ -bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) -{ - u32 fwsm = er32(FWSM); - - return (fwsm & E1000_FWSM_MODE_MASK) == - (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); -} - -/** - * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx - * @hw: pointer to the HW structure - * - * Enables packet filtering on transmit packets if manageability is enabled - * and host interface is enabled. - **/ -bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) -{ - struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; - u32 *buffer = (u32 *)&hw->mng_cookie; - u32 offset; - s32 ret_val, hdr_csum, csum; - u8 i, len; - - hw->mac.tx_pkt_filtering = true; - - /* No manageability, no filtering */ - if (!e1000e_check_mng_mode(hw)) { - hw->mac.tx_pkt_filtering = false; - goto out; - } - - /* - * If we can't read from the host interface for whatever - * reason, disable filtering. - */ - ret_val = e1000_mng_enable_host_if(hw); - if (ret_val) { - hw->mac.tx_pkt_filtering = false; - goto out; - } - - /* Read in the header. Length and offset are in dwords. */ - len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; - offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; - for (i = 0; i < len; i++) - *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i); - hdr_csum = hdr->checksum; - hdr->checksum = 0; - csum = e1000_calculate_checksum((u8 *)hdr, - E1000_MNG_DHCP_COOKIE_LENGTH); - /* - * If either the checksums or signature don't match, then - * the cookie area isn't considered valid, in which case we - * take the safe route of assuming Tx filtering is enabled. - */ - if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { - hw->mac.tx_pkt_filtering = true; - goto out; - } - - /* Cookie area is valid, make the final check for filtering. */ - if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { - hw->mac.tx_pkt_filtering = false; - goto out; - } - -out: - return hw->mac.tx_pkt_filtering; -} - -/** - * e1000_mng_write_cmd_header - Writes manageability command header - * @hw: pointer to the HW structure - * @hdr: pointer to the host interface command header - * - * Writes the command header after does the checksum calculation. - **/ -static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, - struct e1000_host_mng_command_header *hdr) -{ - u16 i, length = sizeof(struct e1000_host_mng_command_header); - - /* Write the whole command header structure with new checksum. */ - - hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); - - length >>= 2; - /* Write the relevant command block into the ram area. 
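
[Editor's note, not part of the patch] Both the DHCP cookie check above and the command-header write here rely on the same 8-bit checksum: the stored byte is the two's complement of the byte sum, so re-summing the whole structure, checksum included, yields zero. A standalone model with made-up cookie contents:

#include <stdint.h>
#include <stdio.h>

/* Same rule as e1000_calculate_checksum(): return 0 minus the sum. */
static uint8_t csum8(const uint8_t *buf, uint32_t len)
{
        uint8_t sum = 0;

        while (len--)
                sum += *buf++;
        return (uint8_t)(0 - sum);
}

int main(void)
{
        uint8_t cookie[8] = { 0x49, 0x41, 0x4D, 0x54, 1, 2, 3, 0 };
        uint8_t check = 0;
        unsigned i;

        cookie[7] = csum8(cookie, sizeof(cookie));      /* fill checksum slot */
        for (i = 0; i < sizeof(cookie); i++)
                check += cookie[i];
        printf("%u\n", check);  /* prints 0: the structure verifies */
        return 0;
}
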
*/ - for (i = 0; i < length; i++) { - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, - *((u32 *) hdr + i)); - e1e_flush(); - } - - return 0; -} - -/** - * e1000_mng_host_if_write - Write to the manageability host interface - * @hw: pointer to the HW structure - * @buffer: pointer to the host interface buffer - * @length: size of the buffer - * @offset: location in the buffer to write to - * @sum: sum of the data (not checksum) - * - * This function writes the buffer content at the offset given on the host if. - * It also does alignment considerations to do the writes in most efficient - * way. Also fills up the sum of the buffer in *buffer parameter. - **/ -static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, - u16 length, u16 offset, u8 *sum) -{ - u8 *tmp; - u8 *bufptr = buffer; - u32 data = 0; - u16 remaining, i, j, prev_bytes; - - /* sum = only sum of the data and it is not checksum */ - - if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) - return -E1000_ERR_PARAM; - - tmp = (u8 *)&data; - prev_bytes = offset & 0x3; - offset >>= 2; - - if (prev_bytes) { - data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); - for (j = prev_bytes; j < sizeof(u32); j++) { - *(tmp + j) = *bufptr++; - *sum += *(tmp + j); - } - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); - length -= j - prev_bytes; - offset++; - } - - remaining = length & 0x3; - length -= remaining; - - /* Calculate length in DWORDs */ - length >>= 2; - - /* - * The device driver writes the relevant command block into the - * ram area. - */ - for (i = 0; i < length; i++) { - for (j = 0; j < sizeof(u32); j++) { - *(tmp + j) = *bufptr++; - *sum += *(tmp + j); - } - - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); - } - if (remaining) { - for (j = 0; j < sizeof(u32); j++) { - if (j < remaining) - *(tmp + j) = *bufptr++; - else - *(tmp + j) = 0; - - *sum += *(tmp + j); - } - E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); - } - - return 0; -} - -/** - * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface - * @hw: pointer to the HW structure - * @buffer: pointer to the host interface - * @length: size of the buffer - * - * Writes the DHCP information to the host interface. - **/ -s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) -{ - struct e1000_host_mng_command_header hdr; - s32 ret_val; - u32 hicr; - - hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; - hdr.command_length = length; - hdr.reserved1 = 0; - hdr.reserved2 = 0; - hdr.checksum = 0; - - /* Enable the host interface */ - ret_val = e1000_mng_enable_host_if(hw); - if (ret_val) - return ret_val; - - /* Populate the host interface with the contents of "buffer". */ - ret_val = e1000_mng_host_if_write(hw, buffer, length, - sizeof(hdr), &(hdr.checksum)); - if (ret_val) - return ret_val; - - /* Write the manageability command header */ - ret_val = e1000_mng_write_cmd_header(hw, &hdr); - if (ret_val) - return ret_val; - - /* Tell the ARC a new command is pending. */ - hicr = er32(HICR); - ew32(HICR, hicr | E1000_HICR_C); - - return 0; -} - -/** - * e1000e_enable_mng_pass_thru - Check if management passthrough is needed - * @hw: pointer to the HW structure - * - * Verifies the hardware needs to leave interface enabled so that frames can - * be directed to and from the management interface. 
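
[Editor's note, not part of the patch] The host-interface write helper above has three phases because the interface is dword-wide: merge into a leading partial dword, copy whole dwords, then zero-pad the tail. A simplified standalone model; the running-sum accumulation and register accessors of the real helper are omitted, and the output is host-endian:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Split a byte buffer at an arbitrary byte offset into dword writes. */
static void hi_write(uint32_t *ram, const uint8_t *buf, uint16_t len,
                     uint16_t offset)
{
        uint16_t lead = offset & 3;
        uint32_t word = offset >> 2;

        if (lead) {                     /* leading partial dword */
                uint16_t n = 4 - lead;

                if (n > len)
                        n = len;
                memcpy((uint8_t *)&ram[word] + lead, buf, n);
                buf += n;
                len -= n;
                word++;
        }
        while (len >= 4) {              /* whole dwords */
                memcpy(&ram[word++], buf, 4);
                buf += 4;
                len -= 4;
        }
        if (len) {                      /* zero-padded remainder */
                ram[word] = 0;
                memcpy(&ram[word], buf, len);
        }
}

int main(void)
{
        uint32_t ram[4] = { 0 };

        hi_write(ram, (const uint8_t *)"abcdefgh", 8, 2);
        printf("%08x %08x %08x\n", ram[0], ram[1], ram[2]);
        return 0;
}
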
- **/ -bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) -{ - u32 manc; - u32 fwsm, factps; - bool ret_val = false; - - manc = er32(MANC); - - if (!(manc & E1000_MANC_RCV_TCO_EN)) - goto out; - - if (hw->mac.has_fwsm) { - fwsm = er32(FWSM); - factps = er32(FACTPS); - - if (!(factps & E1000_FACTPS_MNGCG) && - ((fwsm & E1000_FWSM_MODE_MASK) == - (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { - ret_val = true; - goto out; - } - } else if ((hw->mac.type == e1000_82574) || - (hw->mac.type == e1000_82583)) { - u16 data; - - factps = er32(FACTPS); - e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); - - if (!(factps & E1000_FACTPS_MNGCG) && - ((data & E1000_NVM_INIT_CTRL2_MNGM) == - (e1000_mng_mode_pt << 13))) { - ret_val = true; - goto out; - } - } else if ((manc & E1000_MANC_SMBUS_EN) && - !(manc & E1000_MANC_ASF_EN)) { - ret_val = true; - goto out; - } - -out: - return ret_val; } diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c new file mode 100644 index 00000000000..473f8e71151 --- /dev/null +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -0,0 +1,367 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8)(0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. 
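
[Editor's note, not part of the patch] The busy wait described above is a bounded poll: up to E1000_MNG_DHCP_COMMAND_TIMEOUT one-millisecond steps waiting for the command-in-progress bit to clear. Reduced to its shape, with a callback standing in for reading HICR:

#include <stdbool.h>
#include <stdio.h>

/* Poll a busy predicate once per step; give up after timeout_ms steps. */
static bool wait_cmd_done(bool (*busy)(void), unsigned timeout_ms)
{
        unsigned i;

        for (i = 0; i < timeout_ms; i++) {
                if (!busy())
                        return true;
                /* the driver sleeps here: mdelay(1) */
        }
        return false;
}

static bool always_busy(void)
{
        return true;
}

int main(void)
{
        printf("%d\n", wait_cmd_done(always_busy, 10)); /* 0: timed out */
        return 0;
}
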
+ **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!hw->mac.arc_subsystem_valid) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if ((hicr & E1000_HICR_EN) == 0) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed .\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. 
*/ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. 
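
[Editor's note, not part of the patch] Collapsed to a single predicate, the check below says pass-through is required only when the ARC can receive TCO packets and either the firmware is in pass-through mode with its management clock running, or legacy SMBus management is enabled without ASF. This sketch folds the FWSM and 82574/82583 NVM variants into one fw_mode field, which is a simplification of the per-MAC branches:

#include <stdbool.h>
#include <stdio.h>

enum fw_mode { FW_NONE, FW_ASF, FW_PT };        /* mirrors e1000_mng_mode_* */

struct mng_state {
        bool rcv_tco_en;        /* MANC.RCV_TCO_EN */
        bool mngcg;             /* FACTPS.MNGCG: management clock gated */
        bool smbus_en, asf_en;  /* MANC.SMBUS_EN / MANC.ASF_EN */
        enum fw_mode mode;
};

static bool mng_pass_thru(const struct mng_state *s)
{
        if (!s->rcv_tco_en)
                return false;
        if (!s->mngcg && s->mode == FW_PT)
                return true;
        return s->smbus_en && !s->asf_en;
}

int main(void)
{
        struct mng_state s = { .rcv_tco_en = true, .mode = FW_PT };

        printf("%d\n", mng_pass_thru(&s));      /* prints 1 */
        return 0;
}
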
+ **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + + factps = er32(FACTPS); + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3911401ed65..7152eb11b7b 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -56,7 +56,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION +#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -137,7 +137,7 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = { {E1000_TDFPC, "TDFPC"}, /* List Terminator */ - {} + {0, NULL} }; /* @@ -183,18 +183,18 @@ static void e1000e_dump(struct e1000_adapter *adapter) struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_tx_desc *tx_desc; struct my_u0 { - u64 a; - u64 b; + __le64 a; + __le64 b; } *u0; struct e1000_buffer *buffer_info; struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_packet_split *rx_desc_ps; union e1000_rx_desc_extended *rx_desc; struct my_u1 { - u64 a; - u64 b; - u64 c; - u64 d; + __le64 a; + __le64 b; + __le64 c; + __le64 d; } *u1; u32 staterr; int i = 0; @@ -221,7 +221,7 @@ static void e1000e_dump(struct e1000_adapter *adapter) /* Print Tx Ring Summary */ if (!netdev || !netif_running(netdev)) - goto exit; + return; dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); @@ -308,7 +308,7 @@ rx_ring_summary: /* Print Rx Ring */ if (!netif_msg_rx_status(adapter)) - goto exit; + return; dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); switch (adapter->rx_ps_pages) { @@ -449,9 +449,6 @@ rx_ring_summary: } } } - -exit: - return; } /** @@ -487,22 +484,27 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, /** * e1000_rx_checksum - Receive Checksum Offload - * @adapter: board private structure - * @status_err: receive descriptor status and error fields - * @csum: receive descriptor csum field - * @sk_buff: socket buffer with received data + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data **/ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, - u32 csum, struct sk_buff *skb) + __le16 csum, struct sk_buff *skb) { u16 status = (u16)status_err; u8 errors = 
(u8)(status_err >> 24); skb_checksum_none_assert(skb); + /* Rx checksum disabled */ + if (!(adapter->netdev->features & NETIF_F_RXCSUM)) + return; + /* Ignore Checksum bit is set */ if (status & E1000_RXD_STAT_IXSM) return; + /* TCP/UDP checksum error bit is set */ if (errors & E1000_RXD_ERR_TCPE) { /* let the stack verify checksum errors */ @@ -524,7 +526,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, * Hardware complements the payload checksum, so we undo it * and then put the value in host order for further stack use. */ - __sum16 sum = (__force __sum16)htons(csum); + __sum16 sum = (__force __sum16)swab16((__force u16)csum); skb->csum = csum_unfold(~sum); skb->ip_summed = CHECKSUM_COMPLETE; } @@ -545,7 +547,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, * which has bit 24 set while ME is accessing Host CSR registers, wait * if it is set and try again a number of times. **/ -static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, +static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail, unsigned int i) { unsigned int j = 0; @@ -562,12 +564,12 @@ static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, return 0; } -static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) +static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) { - u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); + struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - if (e1000e_update_tail_wa(hw, tail, i)) { + if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) { u32 rctl = er32(RCTL); ew32(RCTL, rctl & ~E1000_RCTL_EN); e_err("ME firmware caused invalid RDT - resetting\n"); @@ -575,12 +577,12 @@ static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) } } -static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) +static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) { - u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); + struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_hw *hw = &adapter->hw; - if (e1000e_update_tail_wa(hw, tail, i)) { + if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) { u32 tctl = er32(TCTL); ew32(TCTL, tctl & ~E1000_TCTL_EN); e_err("ME firmware caused invalid TDT - resetting\n"); @@ -590,14 +592,14 @@ static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) /** * e1000_alloc_rx_buffers - Replace used receive buffers - * @adapter: address of board private structure + * @rx_ring: Rx descriptor ring **/ -static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, +static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, int cleaned_count, gfp_t gfp) { + struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_extended *rx_desc; struct e1000_buffer *buffer_info; struct sk_buff *skb; @@ -644,9 +646,9 @@ map_skb: */ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_rdt_wa(adapter, i); + e1000e_update_rdt_wa(rx_ring, i); else - writel(i, adapter->hw.hw_addr + rx_ring->tail); + writel(i, rx_ring->tail); } i++; if (i == rx_ring->count) @@ -659,15 +661,15 @@ map_skb: /** * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @adapter: address of board private 
structure + * @rx_ring: Rx descriptor ring **/ -static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, +static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, int cleaned_count, gfp_t gfp) { + struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_packet_split *rx_desc; - struct e1000_ring *rx_ring = adapter->rx_ring; struct e1000_buffer *buffer_info; struct e1000_ps_page *ps_page; struct sk_buff *skb; @@ -747,10 +749,9 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, */ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_rdt_wa(adapter, i << 1); + e1000e_update_rdt_wa(rx_ring, i << 1); else - writel(i << 1, - adapter->hw.hw_addr + rx_ring->tail); + writel(i << 1, rx_ring->tail); } i++; @@ -765,17 +766,17 @@ no_buffers: /** * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers - * @adapter: address of board private structure + * @rx_ring: Rx descriptor ring * @cleaned_count: number of buffers to allocate this pass **/ -static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, +static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, int cleaned_count, gfp_t gfp) { + struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_extended *rx_desc; - struct e1000_ring *rx_ring = adapter->rx_ring; struct e1000_buffer *buffer_info; struct sk_buff *skb; unsigned int i; @@ -834,26 +835,33 @@ check_page: * such as IA-64). */ wmb(); if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_rdt_wa(adapter, i); + e1000e_update_rdt_wa(rx_ring, i); else - writel(i, adapter->hw.hw_addr + rx_ring->tail); + writel(i, rx_ring->tail); } } +static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, + struct sk_buff *skb) +{ + if (netdev->features & NETIF_F_RXHASH) + skb->rxhash = le32_to_cpu(rss); +} + /** - * e1000_clean_rx_irq - Send received data up the network stack; legacy - * @adapter: board private structure + * e1000_clean_rx_irq - Send received data up the network stack + * @rx_ring: Rx descriptor ring * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ -static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, - int *work_done, int work_to_do) +static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) { + struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_extended *rx_desc, *next_rxd; struct e1000_buffer *buffer_info, *next_buffer; u32 length, staterr; @@ -918,15 +926,24 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, goto next_desc; } - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { /* recycle */ buffer_info->skb = skb; goto next_desc; } /* adjust length to remove Ethernet CRC */ - if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) - length -= 4; + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + /* If configured to store CRC, don't subtract FCS, + * but keep the FCS bytes out of the total_rx_bytes + * counter + */ + if (netdev->features & NETIF_F_RXFCS) + total_rx_bytes -= 4; + 
else + length -= 4; + } total_rx_bytes += length; total_rx_packets++; @@ -957,8 +974,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, /* Receive Checksum Offload */ e1000_rx_checksum(adapter, staterr, - le16_to_cpu(rx_desc->wb.lower.hi_dword. - csum_ip.csum), skb); + rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); e1000_receive_skb(adapter, netdev, skb, staterr, rx_desc->wb.upper.vlan); @@ -968,7 +986,7 @@ next_desc: /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) { - adapter->alloc_rx_buf(adapter, cleaned_count, + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); cleaned_count = 0; } @@ -983,16 +1001,18 @@ next_desc: cleaned_count = e1000_desc_unused(rx_ring); if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; return cleaned; } -static void e1000_put_txbuf(struct e1000_adapter *adapter, - struct e1000_buffer *buffer_info) +static void e1000_put_txbuf(struct e1000_ring *tx_ring, + struct e1000_buffer *buffer_info) { + struct e1000_adapter *adapter = tx_ring->adapter; + if (buffer_info->dma) { if (buffer_info->mapped_as_page) dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, @@ -1063,8 +1083,8 @@ static void e1000_print_hw_hang(struct work_struct *work) "PHY 1000BASE-T Status <%x>\n" "PHY Extended Status <%x>\n" "PCI Status <%x>\n", - readl(adapter->hw.hw_addr + tx_ring->head), - readl(adapter->hw.hw_addr + tx_ring->tail), + readl(tx_ring->head), + readl(tx_ring->tail), tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, @@ -1080,16 +1100,16 @@ static void e1000_print_hw_hang(struct work_struct *work) /** * e1000_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure + * @tx_ring: Tx descriptor ring * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ -static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) +static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) { + struct e1000_adapter *adapter = tx_ring->adapter; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; - struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_tx_desc *tx_desc, *eop_desc; struct e1000_buffer *buffer_info; unsigned int i, eop; @@ -1119,7 +1139,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) } } - e1000_put_txbuf(adapter, buffer_info); + e1000_put_txbuf(tx_ring, buffer_info); tx_desc->upper.data = 0; i++; @@ -1173,19 +1193,19 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) /** * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split - * @adapter: board private structure + * @rx_ring: Rx descriptor ring * * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ -static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, - int *work_done, int work_to_do) +static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) { + struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_hw *hw = &adapter->hw; union e1000_rx_desc_packet_split *rx_desc, *next_rxd; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct 
e1000_ring *rx_ring = adapter->rx_ring; struct e1000_buffer *buffer_info, *next_buffer; struct e1000_ps_page *ps_page; struct sk_buff *skb; @@ -1236,7 +1256,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, goto next_desc; } - if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { dev_kfree_skb_irq(skb); goto next_desc; } @@ -1253,43 +1274,50 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, skb_put(skb, length); { - /* - * this looks ugly, but it seems compiler issues make it - * more efficient than reusing j - */ - int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); - - /* - * page alloc/put takes too long and effects small packet - * throughput, so unsplit small packets and save the alloc/put - * only valid in softirq (napi) context to call kmap_* - */ - if (l1 && (l1 <= copybreak) && - ((length + l1) <= adapter->rx_ps_bsize0)) { - u8 *vaddr; - - ps_page = &buffer_info->ps_pages[0]; + /* + * this looks ugly, but it seems compiler issues make + * it more efficient than reusing j + */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); /* - * there is no documentation about how to call - * kmap_atomic, so we can't hold the mapping - * very long + * page alloc/put takes too long and effects small + * packet throughput, so unsplit small packets and + * save the alloc/put only valid in softirq (napi) + * context to call kmap_* */ - dma_sync_single_for_cpu(&pdev->dev, ps_page->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); - memcpy(skb_tail_pointer(skb), vaddr, l1); - kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); - dma_sync_single_for_device(&pdev->dev, ps_page->dma, - PAGE_SIZE, DMA_FROM_DEVICE); - - /* remove the CRC */ - if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) - l1 -= 4; - - skb_put(skb, l1); - goto copydone; - } /* if */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &buffer_info->ps_pages[0]; + + /* + * there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long + */ + dma_sync_single_for_cpu(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + vaddr = kmap_atomic(ps_page->page); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr); + dma_sync_single_for_device(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + + /* remove the CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + l1 -= 4; + } + + skb_put(skb, l1); + goto copydone; + } /* if */ } for (j = 0; j < PS_PAGE_BUFFERS; j++) { @@ -1311,15 +1339,19 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, /* strip the ethernet crc, problem is we're using pages now so * this whole operation can get a little cpu intensive */ - if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) - pskb_trim(skb, skb->len - 4); + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + pskb_trim(skb, skb->len - 4); + } copydone: total_rx_bytes += skb->len; total_rx_packets++; - e1000_rx_checksum(adapter, staterr, le16_to_cpu( - rx_desc->wb.lower.hi_dword.csum_ip.csum), skb); + e1000_rx_checksum(adapter, staterr, + rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); if (rx_desc->wb.upper.header_status & cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) @@ -1334,7 +1366,7 @@ next_desc: /* return 
some buffers to hardware, one at a time is too slow */ if (cleaned_count >= E1000_RX_BUFFER_WRITE) { - adapter->alloc_rx_buf(adapter, cleaned_count, + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); cleaned_count = 0; } @@ -1349,7 +1381,7 @@ next_desc: cleaned_count = e1000_desc_unused(rx_ring); if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; @@ -1375,13 +1407,12 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, * the return value indicates whether actual cleaning was done, there * is no guarantee that everything was cleaned **/ - -static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, - int *work_done, int work_to_do) +static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) { + struct e1000_adapter *adapter = rx_ring->adapter; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; union e1000_rx_desc_extended *rx_desc, *next_rxd; struct e1000_buffer *buffer_info, *next_buffer; u32 length, staterr; @@ -1424,7 +1455,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* errors is only valid for DD + EOP descriptors */ if (unlikely((staterr & E1000_RXD_STAT_EOP) && - (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK))) { + ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL)))) { /* recycle both page and skb */ buffer_info->skb = skb; /* an error means any chain goes out the window too */ @@ -1470,12 +1502,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (length <= copybreak && skb_tailroom(skb) >= length) { u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page, - KM_SKB_DATA_SOFTIRQ); + vaddr = kmap_atomic(buffer_info->page); memcpy(skb_tail_pointer(skb), vaddr, length); - kunmap_atomic(vaddr, - KM_SKB_DATA_SOFTIRQ); + kunmap_atomic(vaddr); /* re-use the page, so don't erase * buffer_info->page */ skb_put(skb, length); @@ -1491,8 +1521,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* Receive Checksum Offload XXX recompute due to CRC strip? */ e1000_rx_checksum(adapter, staterr, - le16_to_cpu(rx_desc->wb.lower.hi_dword. 
- csum_ip.csum), skb); + rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1513,7 +1544,7 @@ next_desc: /* return some buffers to hardware, one at a time is too slow */ if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { - adapter->alloc_rx_buf(adapter, cleaned_count, + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); cleaned_count = 0; } @@ -1528,7 +1559,7 @@ next_desc: cleaned_count = e1000_desc_unused(rx_ring); if (cleaned_count) - adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC); + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; @@ -1537,11 +1568,11 @@ next_desc: /** * e1000_clean_rx_ring - Free Rx Buffers per Queue - * @adapter: board private structure + * @rx_ring: Rx descriptor ring **/ -static void e1000_clean_rx_ring(struct e1000_adapter *adapter) +static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) { - struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_buffer *buffer_info; struct e1000_ps_page *ps_page; struct pci_dev *pdev = adapter->pdev; @@ -1601,8 +1632,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) rx_ring->next_to_use = 0; adapter->flags2 &= ~FLAG2_IS_DISCARDING; - writel(0, adapter->hw.hw_addr + rx_ring->head); - writel(0, adapter->hw.hw_addr + rx_ring->tail); + writel(0, rx_ring->head); + writel(0, rx_ring->tail); } static void e1000e_downshift_workaround(struct work_struct *work) @@ -1633,7 +1664,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) */ if (icr & E1000_ICR_LSC) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* * ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers @@ -1699,7 +1730,7 @@ static irqreturn_t e1000_intr(int irq, void *data) */ if (icr & E1000_ICR_LSC) { - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* * ICH8 workaround-- Call gig speed drop workaround on cable * disconnect (LSC) before accessing any PHY registers @@ -1756,7 +1787,7 @@ static irqreturn_t e1000_msix_other(int irq, void *data) if (icr & E1000_ICR_OTHER) { if (!(icr & E1000_ICR_LSC)) goto no_link_interrupt; - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; /* guard against interrupt when we're going down */ if (!test_bit(__E1000_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); @@ -1781,7 +1812,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data) adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; - if (!e1000_clean_tx_irq(adapter)) + if (!e1000_clean_tx_irq(tx_ring)) /* Ring was not completely cleaned, so fire another interrupt */ ew32(ICS, tx_ring->ims_val); @@ -1792,14 +1823,15 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *rx_ring = adapter->rx_ring; /* Write the ITR value calculated at the end of the * previous interrupt. 
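
[Editor's note, not part of the patch] The writel() just below converts a target interrupt rate into register units. Reading the expression 1000000000 / (itr_val * 256): itr_val is interrupts per second, and the register takes the minimum gap between interrupts in 256 ns increments (the conventional reading of these parts' ITR/EITR granularity). A worked example:

#include <stdio.h>

/* 10^9 ns per second divided by (rate * 256 ns) gives the gap between
 * interrupts expressed in 256 ns register units. */
static unsigned eitr_from_rate(unsigned ints_per_sec)
{
        return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
        /* 20000 ints/sec is a 50 us gap, i.e. 195 units of 256 ns */
        printf("%u\n", eitr_from_rate(20000));  /* prints 195 */
        return 0;
}
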
*/ - if (adapter->rx_ring->set_itr) { - writel(1000000000 / (adapter->rx_ring->itr_val * 256), - adapter->hw.hw_addr + adapter->rx_ring->itr_register); - adapter->rx_ring->set_itr = 0; + if (rx_ring->set_itr) { + writel(1000000000 / (rx_ring->itr_val * 256), + rx_ring->itr_register); + rx_ring->set_itr = 0; } if (napi_schedule_prep(&adapter->napi)) { @@ -1839,9 +1871,9 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) adapter->eiac_mask |= rx_ring->ims_val; if (rx_ring->itr_val) writel(1000000000 / (rx_ring->itr_val * 256), - hw->hw_addr + rx_ring->itr_register); + rx_ring->itr_register); else - writel(1, hw->hw_addr + rx_ring->itr_register); + writel(1, rx_ring->itr_register); ivar = E1000_IVAR_INT_ALLOC_VALID | vector; /* Configure Tx vector */ @@ -1849,9 +1881,9 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) vector++; if (tx_ring->itr_val) writel(1000000000 / (tx_ring->itr_val * 256), - hw->hw_addr + tx_ring->itr_register); + tx_ring->itr_register); else - writel(1, hw->hw_addr + tx_ring->itr_register); + writel(1, tx_ring->itr_register); adapter->eiac_mask |= tx_ring->ims_val; ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); @@ -1965,8 +1997,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter) e1000_intr_msix_rx, 0, adapter->rx_ring->name, netdev); if (err) - goto out; - adapter->rx_ring->itr_register = E1000_EITR_82574(vector); + return err; + adapter->rx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); adapter->rx_ring->itr_val = adapter->itr; vector++; @@ -1980,20 +2013,20 @@ static int e1000_request_msix(struct e1000_adapter *adapter) e1000_intr_msix_tx, 0, adapter->tx_ring->name, netdev); if (err) - goto out; - adapter->tx_ring->itr_register = E1000_EITR_82574(vector); + return err; + adapter->tx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); adapter->tx_ring->itr_val = adapter->itr; vector++; err = request_irq(adapter->msix_entries[vector].vector, e1000_msix_other, 0, netdev->name, netdev); if (err) - goto out; + return err; e1000_configure_msix(adapter); + return 0; -out: - return err; } /** @@ -2162,13 +2195,13 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, /** * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure + * @tx_ring: Tx descriptor ring * * Return 0 on success, negative on failure **/ -int e1000e_setup_tx_resources(struct e1000_adapter *adapter) +int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) { - struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_adapter *adapter = tx_ring->adapter; int err = -ENOMEM, size; size = sizeof(struct e1000_buffer) * tx_ring->count; @@ -2196,13 +2229,13 @@ err: /** * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure + * @rx_ring: Rx descriptor ring * * Returns 0 on success, negative on failure **/ -int e1000e_setup_rx_resources(struct e1000_adapter *adapter) +int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) { - struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_adapter *adapter = rx_ring->adapter; struct e1000_buffer *buffer_info; int i, size, desc_len, err = -ENOMEM; @@ -2249,18 +2282,18 @@ err: /** * e1000_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure + * @tx_ring: Tx descriptor ring **/ -static void e1000_clean_tx_ring(struct e1000_adapter *adapter) +static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) { - struct e1000_ring *tx_ring = 
adapter->tx_ring; + struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_buffer *buffer_info; unsigned long size; unsigned int i; for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; - e1000_put_txbuf(adapter, buffer_info); + e1000_put_txbuf(tx_ring, buffer_info); } netdev_reset_queue(adapter->netdev); @@ -2272,22 +2305,22 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - writel(0, adapter->hw.hw_addr + tx_ring->head); - writel(0, adapter->hw.hw_addr + tx_ring->tail); + writel(0, tx_ring->head); + writel(0, tx_ring->tail); } /** * e1000e_free_tx_resources - Free Tx Resources per Queue - * @adapter: board private structure + * @tx_ring: Tx descriptor ring * * Free all transmit software resources **/ -void e1000e_free_tx_resources(struct e1000_adapter *adapter) +void e1000e_free_tx_resources(struct e1000_ring *tx_ring) { + struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *tx_ring = adapter->tx_ring; - e1000_clean_tx_ring(adapter); + e1000_clean_tx_ring(tx_ring); vfree(tx_ring->buffer_info); tx_ring->buffer_info = NULL; @@ -2299,18 +2332,17 @@ void e1000e_free_tx_resources(struct e1000_adapter *adapter) /** * e1000e_free_rx_resources - Free Rx Resources - * @adapter: board private structure + * @rx_ring: Rx descriptor ring * * Free all receive software resources **/ - -void e1000e_free_rx_resources(struct e1000_adapter *adapter) +void e1000e_free_rx_resources(struct e1000_ring *rx_ring) { + struct e1000_adapter *adapter = rx_ring->adapter; struct pci_dev *pdev = adapter->pdev; - struct e1000_ring *rx_ring = adapter->rx_ring; int i; - e1000_clean_rx_ring(adapter); + e1000_clean_rx_ring(rx_ring); for (i = 0; i < rx_ring->count; i++) kfree(rx_ring->buffer_info[i].ps_pages); @@ -2346,7 +2378,7 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, unsigned int retval = itr_setting; if (packets == 0) - goto update_itr_done; + return itr_setting; switch (itr_setting) { case lowest_latency: @@ -2381,7 +2413,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter, break; } -update_itr_done: return retval; } @@ -2464,13 +2495,19 @@ set_itr_now: **/ static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) { - adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + int size = sizeof(struct e1000_ring); + + adapter->tx_ring = kzalloc(size, GFP_KERNEL); if (!adapter->tx_ring) goto err; + adapter->tx_ring->count = adapter->tx_ring_count; + adapter->tx_ring->adapter = adapter; - adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + adapter->rx_ring = kzalloc(size, GFP_KERNEL); if (!adapter->rx_ring) goto err; + adapter->rx_ring->count = adapter->rx_ring_count; + adapter->rx_ring->adapter = adapter; return 0; err: @@ -2498,10 +2535,10 @@ static int e1000_clean(struct napi_struct *napi, int budget) !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) goto clean_rx; - tx_cleaned = e1000_clean_tx_irq(adapter); + tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); clean_rx: - adapter->clean_rx(adapter, &work_done, budget); + adapter->clean_rx(adapter->rx_ring, &work_done, budget); if (!tx_cleaned) work_done = budget; @@ -2746,8 +2783,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_ring *tx_ring = adapter->tx_ring; u64 tdba; - u32 tdlen, tctl, tipg, tarc; - u32 ipgr1, ipgr2; + u32 tdlen, tarc; /* Setup the 
HW Tx Head and Tail descriptor pointers */ tdba = tx_ring->dma; @@ -2757,20 +2793,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(TDLEN, tdlen); ew32(TDH, 0); ew32(TDT, 0); - tx_ring->head = E1000_TDH; - tx_ring->tail = E1000_TDT; - - /* Set the default values for the Tx Inter Packet Gap timer */ - tipg = DEFAULT_82543_TIPG_IPGT_COPPER; /* 8 */ - ipgr1 = DEFAULT_82543_TIPG_IPGR1; /* 8 */ - ipgr2 = DEFAULT_82543_TIPG_IPGR2; /* 6 */ - - if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN) - ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /* 7 */ - - tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; - tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; - ew32(TIPG, tipg); + tx_ring->head = adapter->hw.hw_addr + E1000_TDH; + tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; /* Set the Tx Interrupt Delay register */ ew32(TIDV, adapter->tx_int_delay); @@ -2793,15 +2817,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) */ txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; ew32(TXDCTL(0), txdctl); - /* erratum work around: set txdctl the same for both queues */ - ew32(TXDCTL(1), txdctl); } - - /* Program the Transmit Control Register */ - tctl = er32(TCTL); - tctl &= ~E1000_TCTL_CT; - tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | - (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), er32(TXDCTL(0))); if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { tarc = er32(TARC(0)); @@ -2834,9 +2852,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* enable Report Status bit */ adapter->txd_cmd |= E1000_TXD_CMD_RS; - ew32(TCTL, tctl); - - e1000e_config_collision_dist(hw); + hw->mac.ops.config_collision_dist(hw); } /** @@ -2944,8 +2960,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) * per packet. */ pages = PAGE_USE_COUNT(adapter->netdev->mtu); - if (!(adapter->flags & FLAG_HAS_ERT) && (pages <= 3) && - (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) adapter->rx_ps_pages = pages; else adapter->rx_ps_pages = 0; @@ -2982,6 +2997,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) ew32(PSRCTL, psrctl); } + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + ew32(RFCTL, rfctl); ew32(RCTL, rctl); /* just started the receive unit, no need to restart */ @@ -3072,8 +3103,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) ew32(RDLEN, rdlen); ew32(RDH, 0); ew32(RDT, 0); - rx_ring->head = E1000_RDH; - rx_ring->tail = E1000_RDT; + rx_ring->head = adapter->hw.hw_addr + E1000_RDH; + rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); @@ -3092,23 +3123,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) } ew32(RXCSUM, rxcsum); - /* - * Enable early receives on supported devices, only takes effect when - * packet size is equal or larger than the specified value (in 8 byte - * units), e.g. 
using jumbo frames when setting to E1000_ERT_2048 - */ - if ((adapter->flags & FLAG_HAS_ERT) || - (adapter->hw.mac.type == e1000_pch2lan)) { + if (adapter->hw.mac.type == e1000_pch2lan) { + /* + * With jumbo frames, excessive C-state transition + * latencies result in dropped transactions. + */ if (adapter->netdev->mtu > ETH_DATA_LEN) { u32 rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl | 0x3); - if (adapter->flags & FLAG_HAS_ERT) - ew32(ERT, E1000_ERT_2048 | (1 << 13)); - /* - * With jumbo frames and early-receive enabled, - * excessive C-state transition latencies result in - * dropped transactions. - */ pm_qos_update_request(&adapter->netdev->pm_qos_req, 55); } else { pm_qos_update_request(&adapter->netdev->pm_qos_req, @@ -3237,6 +3259,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev) e1000e_vlan_filter_disable(adapter); } else { int count; + if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; } else { @@ -3268,22 +3291,62 @@ static void e1000e_set_rx_mode(struct net_device *netdev) e1000e_vlan_strip_disable(adapter); } +static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + int i; + static const u32 rsskey[10] = { + 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, + 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe + }; + + /* Fill out hash function seed */ + for (i = 0; i < 10; i++) + ew32(RSSRK(i), rsskey[i]); + + /* Direct all traffic to queue 0 */ + for (i = 0; i < 32; i++) + ew32(RETA(i), 0); + + /* + * Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. + */ + rxcsum = er32(RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + ew32(RXCSUM, rxcsum); + + mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + ew32(MRQC, mrqc); +} + /** * e1000_configure - configure the hardware for Rx and Tx * @adapter: private board structure **/ static void e1000_configure(struct e1000_adapter *adapter) { + struct e1000_ring *rx_ring = adapter->rx_ring; + e1000e_set_rx_mode(adapter->netdev); e1000_restore_vlan(adapter); e1000_init_manageability_pt(adapter); e1000_configure_tx(adapter); + + if (adapter->netdev->features & NETIF_F_RXHASH) + e1000e_setup_rss_hash(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); - adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring), - GFP_KERNEL); + adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); } /** @@ -3379,9 +3442,7 @@ void e1000e_reset(struct e1000_adapter *adapter) * if short on Rx space, Rx wins and must trump Tx * adjustment or use Early Receive if available */ - if ((pba < min_rx_space) && - (!(adapter->flags & FLAG_HAS_ERT))) - /* ERT enabled in e1000_configure_rx */ + if (pba < min_rx_space) pba = min_rx_space; } @@ -3395,26 +3456,29 @@ void e1000e_reset(struct e1000_adapter *adapter) * (or the size used for early receive) above it in the Rx FIFO. 
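 * (Editor's worked example for the hwm computed below, illustration
 * only: with pba = 20, i.e. a 20 KB Rx FIFO, and max_frame_size =
 * 1522, hwm = min(20480 * 9 / 10, 20480 - 1522) = min(18432, 18958)
 * = 18432, so XOFF is signalled once less than about 2 KB of the
 * FIFO remains free.)
 *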
* Set it to the lower of: * - 90% of the Rx FIFO size, and - * - the full Rx FIFO size minus the early receive size (for parts - * with ERT support assuming ERT set to E1000_ERT_2048), or * - the full Rx FIFO size minus one full frame */ if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) fc->pause_time = 0xFFFF; else fc->pause_time = E1000_FC_PAUSE_TIME; - fc->send_xon = 1; + fc->send_xon = true; fc->current_mode = fc->requested_mode; switch (hw->mac.type) { + case e1000_ich9lan: + case e1000_ich10lan: + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + fc->high_water = 0x2800; + fc->low_water = fc->high_water - 8; + break; + } + /* fall-through */ default: - if ((adapter->flags & FLAG_HAS_ERT) && - (adapter->netdev->mtu > ETH_DATA_LEN)) - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - (E1000_ERT_2048 << 3))); - else - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - adapter->max_frame_size)); + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; @@ -3447,11 +3511,10 @@ void e1000e_reset(struct e1000_adapter *adapter) /* * Disable Adaptive Interrupt Moderation if 2 full packets cannot - * fit in receive buffer and early-receive not supported. + * fit in receive buffer. */ if (adapter->itr_setting & 0x3) { - if (((adapter->max_frame_size * 2) > (pba << 10)) && - !(adapter->flags & FLAG_HAS_ERT)) { + if ((adapter->max_frame_size * 2) > (pba << 10)) { if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { dev_info(&adapter->pdev->dev, "Interrupt Throttle Rate turned off\n"); @@ -3593,8 +3656,8 @@ void e1000e_down(struct e1000_adapter *adapter) spin_unlock(&adapter->stats64_lock); e1000e_flush_descriptors(adapter); - e1000_clean_tx_ring(adapter); - e1000_clean_rx_ring(adapter); + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3634,6 +3697,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) adapter->rx_ps_bsize0 = 128; adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + adapter->tx_ring_count = E1000_DEFAULT_TXD; + adapter->rx_ring_count = E1000_DEFAULT_RXD; spin_lock_init(&adapter->stats64_lock); @@ -3721,8 +3786,9 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) if (adapter->flags & FLAG_MSI_TEST_FAILED) { adapter->int_mode = E1000E_INT_MODE_LEGACY; e_info("MSI interrupt test failed, using legacy interrupt.\n"); - } else + } else { e_dbg("MSI interrupt test succeeded!\n"); + } free_irq(adapter->pdev->irq, netdev); pci_disable_msi(adapter->pdev); @@ -3792,12 +3858,12 @@ static int e1000_open(struct net_device *netdev) netif_carrier_off(netdev); /* allocate transmit descriptors */ - err = e1000e_setup_tx_resources(adapter); + err = e1000e_setup_tx_resources(adapter->tx_ring); if (err) goto err_setup_tx; /* allocate receive descriptors */ - err = e1000e_setup_rx_resources(adapter); + err = e1000e_setup_rx_resources(adapter->rx_ring); if (err) goto err_setup_rx; @@ -3817,9 +3883,8 @@ static int e1000_open(struct net_device *netdev) E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) e1000_update_mng_vlan(adapter); - /* DMA latency requirement to workaround early-receive/jumbo issue */ - if ((adapter->flags & FLAG_HAS_ERT) || - (adapter->hw.mac.type == e1000_pch2lan)) + /* DMA latency requirement to workaround jumbo issue */ + if (adapter->hw.mac.type == e1000_pch2lan) 
pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); @@ -3873,9 +3938,9 @@ static int e1000_open(struct net_device *netdev) err_req_irq: e1000e_release_hw_control(adapter); e1000_power_down_phy(adapter); - e1000e_free_rx_resources(adapter); + e1000e_free_rx_resources(adapter->rx_ring); err_setup_rx: - e1000e_free_tx_resources(adapter); + e1000e_free_tx_resources(adapter->tx_ring); err_setup_tx: e1000e_reset(adapter); pm_runtime_put_sync(&pdev->dev); @@ -3911,8 +3976,8 @@ static int e1000_close(struct net_device *netdev) } e1000_power_down_phy(adapter); - e1000e_free_tx_resources(adapter); - e1000e_free_rx_resources(adapter); + e1000e_free_tx_resources(adapter->tx_ring); + e1000e_free_rx_resources(adapter->rx_ring); /* * kill manageability vlan ID if supported, but not if a vlan with @@ -3930,8 +3995,7 @@ static int e1000_close(struct net_device *netdev) !test_bit(__E1000_TESTING, &adapter->state)) e1000e_release_hw_control(adapter); - if ((adapter->flags & FLAG_HAS_ERT) || - (adapter->hw.mac.type == e1000_pch2lan)) + if (adapter->hw.mac.type == e1000_pch2lan) pm_qos_remove_request(&adapter->netdev->pm_qos_req); pm_runtime_put_sync(&pdev->dev); @@ -4566,13 +4630,12 @@ link_up: #define E1000_TX_FLAGS_VLAN 0x00000002 #define E1000_TX_FLAGS_TSO 0x00000004 #define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 #define E1000_TX_FLAGS_VLAN_SHIFT 16 -static int e1000_tso(struct e1000_adapter *adapter, - struct sk_buff *skb) +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) { - struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; unsigned int i; @@ -4641,9 +4704,9 @@ static int e1000_tso(struct e1000_adapter *adapter, return 1; } -static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) { - struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_context_desc *context_desc; struct e1000_buffer *buffer_info; unsigned int i; @@ -4704,12 +4767,11 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) #define E1000_MAX_PER_TXD 8192 #define E1000_MAX_TXD_PWR 12 -static int e1000_tx_map(struct e1000_adapter *adapter, - struct sk_buff *skb, unsigned int first, - unsigned int max_per_txd, unsigned int nr_frags, - unsigned int mss) +static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, + unsigned int first, unsigned int max_per_txd, + unsigned int nr_frags, unsigned int mss) { - struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; struct e1000_buffer *buffer_info; unsigned int len = skb_headlen(skb); @@ -4795,16 +4857,15 @@ dma_error: i += tx_ring->count; i--; buffer_info = &tx_ring->buffer_info[i]; - e1000_put_txbuf(adapter, buffer_info); + e1000_put_txbuf(tx_ring, buffer_info); } return 0; } -static void e1000_tx_queue(struct e1000_adapter *adapter, - int tx_flags, int count) +static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) { - struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_adapter *adapter = tx_ring->adapter; struct e1000_tx_desc *tx_desc = NULL; struct e1000_buffer *buffer_info; u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; @@ -4829,6 +4890,9 @@ static void 
e1000_tx_queue(struct e1000_adapter *adapter, txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); } + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + i = tx_ring->next_to_use; do { @@ -4846,6 +4910,10 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only @@ -4857,9 +4925,9 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, tx_ring->next_to_use = i; if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) - e1000e_update_tdt_wa(adapter, i); + e1000e_update_tdt_wa(tx_ring, i); else - writel(i, adapter->hw.hw_addr + tx_ring->tail); + writel(i, tx_ring->tail); /* * we need this if more than one processor can write to our tail @@ -4907,11 +4975,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, return 0; } -static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { - struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_adapter *adapter = tx_ring->adapter; - netif_stop_queue(netdev); + netif_stop_queue(adapter->netdev); /* * Herbert's original patch had: * smp_mb__after_netif_stop_queue(); @@ -4923,25 +4991,23 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) * We need to check again in a case another CPU has just * made room available. */ - if (e1000_desc_unused(adapter->tx_ring) < size) + if (e1000_desc_unused(tx_ring) < size) return -EBUSY; /* A reprieve! 
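 *
 * (Editor's note on the stop/re-check sequence above: the memory
 * barrier orders the queue-stop store before the re-read of
 * e1000_desc_unused(), so a racing e1000_clean_tx_irq() either sees
 * the stopped queue and wakes it, or frees descriptors that the
 * re-check observes -- either way the queue cannot stay stopped
 * while room is available.)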
*/ - netif_start_queue(netdev); + netif_start_queue(adapter->netdev); ++adapter->restart_queue; return 0; } -static int e1000_maybe_stop_tx(struct net_device *netdev, int size) +static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { - struct e1000_adapter *adapter = netdev_priv(netdev); - - if (e1000_desc_unused(adapter->tx_ring) >= size) + if (e1000_desc_unused(tx_ring) >= size) return 0; - return __e1000_maybe_stop_tx(netdev, size); + return __e1000_maybe_stop_tx(tx_ring, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -4995,7 +5061,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (skb->data_len && (hdr_len == len)) { unsigned int pull_size; - pull_size = min((unsigned int)4, skb->data_len); + pull_size = min_t(unsigned int, 4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { e_err("__pskb_pull_tail failed.\n"); dev_kfree_skb_any(skb); @@ -5024,7 +5090,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, * need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if (e1000_maybe_stop_tx(netdev, count + 2)) + if (e1000_maybe_stop_tx(tx_ring, count + 2)) return NETDEV_TX_BUSY; if (vlan_tx_tag_present(skb)) { @@ -5034,7 +5100,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = e1000_tso(adapter, skb); + tso = e1000_tso(tx_ring, skb); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -5042,7 +5108,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (tso) tx_flags |= E1000_TX_FLAGS_TSO; - else if (e1000_tx_csum(adapter, skb)) + else if (e1000_tx_csum(tx_ring, skb)) tx_flags |= E1000_TX_FLAGS_CSUM; /* @@ -5053,13 +5119,16 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if (skb->protocol == htons(ETH_P_IP)) tx_flags |= E1000_TX_FLAGS_IPV4; + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + /* if count is 0 then mapping error has occurred */ - count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss); + count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); if (count) { netdev_sent_queue(netdev, skb->len); - e1000_tx_queue(adapter, tx_flags, count); + e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. */ - e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); + e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); } else { dev_kfree_skb_any(skb); @@ -5165,10 +5234,22 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; /* Jumbo frame support */ - if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && - !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { - e_err("Jumbo Frames not supported.\n"); - return -EINVAL; + if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { + if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames not supported.\n"); + return -EINVAL; + } + + /* + * IP payload checksum (enabled with jumbos/packet-split when + * Rx checksum is enabled) and generation of RSS hash is + * mutually exclusive in the hardware. + */ + if ((netdev->features & NETIF_F_RXCSUM) && + (netdev->features & NETIF_F_RXHASH)) { + e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. 
Disable one of the receive offload features before enabling jumbos.\n"); + return -EINVAL; + } } /* Supported frame sizes */ @@ -5322,7 +5403,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); if (retval) - goto out; + goto release; /* copy MAC MTA to PHY MTA - only needed for pchlan */ for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { @@ -5366,7 +5447,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); if (retval) e_err("Could not set PHY Host Wakeup bit\n"); -out: +release: hw->phy.ops.release(hw); return retval; @@ -5908,7 +5989,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) ret_val = e1000_read_pba_string_generic(hw, pba_str, E1000_PBANUM_LENGTH); if (ret_val) - strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1); + strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); e_info("MAC: %d, PHY: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, pba_str); } @@ -5923,7 +6004,8 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) return; ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); - if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) { + le16_to_cpus(&buf); + if (!ret_val && (!(buf & (1 << 0)))) { /* Deep Smart Power Down (DSPD) */ dev_warn(&adapter->pdev->dev, "Warning: detected DSPD enabled in EEPROM\n"); @@ -5931,7 +6013,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) } static int e1000_set_features(struct net_device *netdev, - netdev_features_t features) + netdev_features_t features) { struct e1000_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; @@ -5940,9 +6022,37 @@ static int e1000_set_features(struct net_device *netdev, adapter->flags |= FLAG_TSO_FORCE; if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | - NETIF_F_RXCSUM))) + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | + NETIF_F_RXALL))) return 0; + /* + * IP payload checksum (enabled with jumbos/packet-split when Rx + * checksum is enabled) and generation of RSS hash is mutually + * exclusive in the hardware. + */ + if (adapter->rx_ps_pages && + (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) { + e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n"); + return -EINVAL; + } + + if (changed & NETIF_F_RXFCS) { + if (features & NETIF_F_RXFCS) { + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } else { + /* We need to take it back to defaults, which might mean + * stripping is still disabled at the adapter level. 
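+ *
+ * (Editor's usage sketch, assuming the standard ethtool feature
+ * strings: "ethtool -K ethX rx-fcs on" reaches the branch above
+ * and clears FLAG2_CRC_STRIPPING, while "rx-fcs off" restores
+ * whichever default the CrcStripping module parameter recorded
+ * in FLAG2_DFLT_CRC_STRIPPING below.)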
+ */ + if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) + adapter->flags2 |= FLAG2_CRC_STRIPPING; + else + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } + } + + netdev->features = features; + if (netif_running(netdev)) e1000e_reinit_locked(adapter); else @@ -5991,7 +6101,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; resource_size_t mmio_start, mmio_len; resource_size_t flash_start, flash_len; - static int cards_found; u16 aspm_disable_flag = 0; int i, err, pci_using_dac; @@ -6087,7 +6196,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, e1000e_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); - strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len; @@ -6124,7 +6233,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->hw.phy.ms_type = e1000_ms_hw_default; } - if (e1000_check_reset_block(&adapter->hw)) + if (hw->phy.ops.check_reset_block(hw)) e_info("PHY reset is blocked due to SOL/IDER session.\n"); /* Set initial default active device features */ @@ -6133,11 +6242,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev, NETIF_F_HW_VLAN_TX | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_CSUM); /* Set user-changeable features (subset of all device features) */ netdev->hw_features = netdev->features; + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) netdev->features |= NETIF_F_HW_VLAN_FILTER; @@ -6231,11 +6344,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && (adapter->hw.bus.func == 1)) - e1000_read_nvm(&adapter->hw, - NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, + 1, &eeprom_data); else - e1000_read_nvm(&adapter->hw, - NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, + 1, &eeprom_data); } /* fetch WoL from EEPROM */ @@ -6268,7 +6381,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_get_hw_control(adapter); - strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1); + strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); err = register_netdev(netdev); if (err) goto err_register; @@ -6287,7 +6400,7 @@ err_register: if (!(adapter->flags & FLAG_HAS_AMT)) e1000e_release_hw_control(adapter); err_eeprom: - if (!e1000_check_reset_block(&adapter->hw)) + if (!hw->phy.ops.check_reset_block(hw)) e1000_phy_hw_reset(&adapter->hw); err_hw_init: kfree(adapter->tx_ring); @@ -6449,7 +6562,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, - { } /* terminate list */ + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); @@ -6468,7 +6581,9 @@ static struct pci_driver e1000_driver = { .probe = e1000_probe, .remove = __devexit_p(e1000_remove), #ifdef CONFIG_PM - .driver.pm = &e1000_pm_ops, + .driver = { + .pm = &e1000_pm_ops, + }, #endif .shutdown = e1000_shutdown, .err_handler = &e1000_err_handler @@ -6485,7 +6600,7 @@ static 
int __init e1000_init_module(void)
 	int ret;
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
+	pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
@@ -6510,4 +6625,4 @@
 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
-/* e1000_main.c */
+/* netdev.c */
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
new file mode 100644
index 00000000000..a969f1af1b4
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -0,0 +1,643 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/**
+ * e1000_raise_eec_clk - Raise EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_lower_eec_clk - Lower EEPROM clock
+ * @hw: pointer to the HW structure
+ * @eecd: pointer to the EEPROM
+ *
+ * Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ * @hw: pointer to the HW structure
+ * @data: data to send to the EEPROM
+ * @count: number of bits to shift out
+ *
+ * We need to shift 'count' bits out to the EEPROM. So, the value in the
+ * "data" parameter will be shifted out to the EEPROM one bit at a time.
+ * In order to do this, "data" must be broken down into bits.
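+ *
+ * (Editor's illustration, not part of the patch: with data = 0xA5 and
+ * count = 8, the loop below presents the bits 1,0,1,0,0,1,0,1 on DI,
+ * MSB first -- mask starts at 1 << (count - 1) = 0x80 and shifts
+ * right once per SK clock cycle.)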
+ **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. 
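+ *
+ * (Editor's note: for SPI parts, taking CS high between commands
+ * resets the EEPROM's internal command state machine; the body below
+ * raises CS and clears SK in a single EECD write by way of
+ * e1000_lower_eec_clk().)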
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	eecd = er32(EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ * e1000e_release_nvm - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000e_release_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	e1000_stop_nvm(hw);
+
+	eecd = er32(EECD);
+	eecd &= ~E1000_EECD_REQ;
+	ew32(EECD, eecd);
+}
+
+/**
+ * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u8 spi_stat_reg;
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		u16 timeout = NVM_MAX_RETRY_SPI;
+
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(1);
+
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register. If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+						 hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			e_dbg("SPI NVM Status error\n");
+			return -E1000_ERR_NVM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
+ * @hw: pointer to the HW structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * A check for invalid values: offset too large, too many words,
+	 * too many words for the offset, and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		ew32(EERD, eerd);
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000e_write_nvm_spi - Write to EEPROM using SPI
+ * @hw: pointer to the HW structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * Writes data to EEPROM at offset using SPI interface.
+ *
+ * If e1000e_update_nvm_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+	/*
+	 * A check for invalid values: offset too large, too many words,
+	 * and not enough words.
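+	 *
+	 * (Editor's bounds illustration: with a 64-word part, offset = 60
+	 * and words = 8 is rejected because words > word_size - offset,
+	 * i.e. 8 > 4; offset = 60 with words = 4 is the largest write
+	 * that still fits.)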
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + usleep_range(10000, 20000); +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + e_dbg("NVM PBA number section invalid 
length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +void e1000e_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 20e93b08e7f..ff796e42c3e 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -113,11 +113,20 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); #define MAX_ITR 100000 #define MIN_ITR 100 -/* IntMode (Interrupt Mode) +/* + * IntMode (Interrupt Mode) + * + * Valid Range: varies depending on kernel configuration & hardware support + * + * legacy=0, MSI=1, MSI-X=2 * - * Valid Range: 0 - 2 + * When MSI/MSI-X support is enabled in kernel- + * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise + * When MSI/MSI-X support is not enabled in kernel- + * Default Value: 0 (legacy) * - * Default Value: 2 (MSI-X) + * When a mode is specified that is not allowed/supported, it will be + * demoted to the most advanced interrupt mode available. */ E1000_PARAM(IntMode, "Interrupt Mode"); #define MAX_INTMODE 2 @@ -388,12 +397,33 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) static struct e1000_option opt = { .type = range_option, .name = "Interrupt Mode", - .err = "defaulting to 2 (MSI-X)", - .def = E1000E_INT_MODE_MSIX, - .arg = { .r = { .min = MIN_INTMODE, - .max = MAX_INTMODE } } +#ifndef CONFIG_PCI_MSI + .err = "defaulting to 0 (legacy)", + .def = E1000E_INT_MODE_LEGACY, + .arg = { .r = { .min = 0, + .max = 0 } } +#endif }; +#ifdef CONFIG_PCI_MSI + if (adapter->flags & FLAG_HAS_MSIX) { + opt.err = kstrdup("defaulting to 2 (MSI-X)", + GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSIX; + opt.arg.r.max = E1000E_INT_MODE_MSIX; + } else { + opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSI; + opt.arg.r.max = E1000E_INT_MODE_MSI; + } + + if (!opt.err) { + dev_err(&adapter->pdev->dev, + "Failed to allocate memory\n"); + return; + } +#endif + if (num_IntMode > bd) { unsigned int int_mode = IntMode[bd]; e1000_validate_option(&int_mode, &opt, adapter); @@ -401,6 +431,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) } else { adapter->int_mode = opt.def; } + +#ifdef CONFIG_PCI_MSI + kfree(opt.err); +#endif } { /* Smart Power Down */ static const struct e1000_option opt = { @@ -429,10 +463,13 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) if (num_CrcStripping > bd) { unsigned int crc_stripping = CrcStripping[bd]; e1000_validate_option(&crc_stripping, &opt, adapter); - if (crc_stripping == OPTION_ENABLED) + if (crc_stripping == OPTION_ENABLED) { adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } } else { adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; } } { /* Kumeran Lock Loss Workaround */ diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 8666476cb9b..35b45578c60 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ 
b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -26,8 +26,6 @@ *******************************************************************************/ -#include <linux/delay.h> - #include "e1000.h" static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); @@ -132,30 +130,30 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) u16 phy_id; u16 retry_count = 0; - if (!(phy->ops.read_reg)) - goto out; + if (!phy->ops.read_reg) + return 0; while (retry_count < 2) { ret_val = e1e_rphy(hw, PHY_ID1, &phy_id); if (ret_val) - goto out; + return ret_val; phy->id = (u32)(phy_id << 16); udelay(20); ret_val = e1e_rphy(hw, PHY_ID2, &phy_id); if (ret_val) - goto out; + return ret_val; phy->id |= (u32)(phy_id & PHY_REVISION_MASK); phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); if (phy->id != 0 && phy->id != PHY_REVISION_MASK) - goto out; + return 0; retry_count++; } -out: - return ret_val; + + return 0; } /** @@ -382,29 +380,25 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, s32 ret_val = 0; if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + if (!hw->phy.ops.acquire) + return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } - if (offset > MAX_PHY_MULTI_PAGE_REG) { + if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) - goto release; - } - - ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -release: + if (!ret_val) + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); if (!locked) hw->phy.ops.release(hw); -out: + return ret_val; } @@ -453,30 +447,25 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, s32 ret_val = 0; if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + if (!hw->phy.ops.acquire) + return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } - if (offset > MAX_PHY_MULTI_PAGE_REG) { + if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) - goto release; - } - - ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - -release: + if (!ret_val) + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); if (!locked) hw->phy.ops.release(hw); -out: return ret_val; } @@ -523,15 +512,16 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { u32 kmrnctrlsta; - s32 ret_val = 0; if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & @@ -547,8 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, if (!locked) hw->phy.ops.release(hw); -out: - return ret_val; + return 0; } /** @@ -596,15 +585,16 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { u32 kmrnctrlsta; - s32 ret_val = 0; if (!locked) { - if (!(hw->phy.ops.acquire)) - goto out; + s32 ret_val = 
0; + + if (!hw->phy.ops.acquire) + return 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) - goto out; + return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & @@ -617,8 +607,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, if (!locked) hw->phy.ops.release(hw); -out: - return ret_val; + return 0; } /** @@ -663,17 +652,14 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); if (ret_val) - goto out; + return ret_val; phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; - ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); - -out: - return ret_val; + return e1e_wphy(hw, I82577_CFG_REG, phy_data); } /** @@ -1019,12 +1005,12 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames - * but not send pause frames). + * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames - * but we do not support receiving pause frames). + * but we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: No software override. The flow control configuration - * in the EEPROM is used. + * in the EEPROM is used. */ switch (hw->fc.current_mode) { case e1000_fc_none: @@ -1064,8 +1050,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) break; default: e_dbg("Flow control param set incorrectly\n"); - ret_val = -E1000_ERR_CONFIG; - return ret_val; + return -E1000_ERR_CONFIG; } ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); @@ -1136,13 +1121,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) if (phy->autoneg_wait_to_complete) { ret_val = e1000_wait_autoneg(hw); if (ret_val) { - e_dbg("Error while waiting for " - "autoneg to complete\n"); + e_dbg("Error while waiting for autoneg to complete\n"); return ret_val; } } - hw->mac.get_link_status = 1; + hw->mac.get_link_status = true; return ret_val; } @@ -1186,16 +1170,14 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw) * Check link status. Wait up to 100 microseconds for link to become * valid. 
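 *
 * (Editor's arithmetic: the call below polls COPPER_LINK_UP_LIMIT
 * times at 10 us intervals; assuming COPPER_LINK_UP_LIMIT is 10,
 * that matches the 100 us budget quoted above.)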
*/ - ret_val = e1000e_phy_has_link_generic(hw, - COPPER_LINK_UP_LIMIT, - 10, - &link); + ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); if (ret_val) return ret_val; if (link) { e_dbg("Valid link established!!!\n"); - e1000e_config_collision_dist(hw); + hw->mac.ops.config_collision_dist(hw); ret_val = e1000e_config_fc_after_link_up(hw); } else { e_dbg("Unable to establish link!!!\n"); @@ -1251,10 +1233,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) return ret_val; @@ -1262,12 +1242,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) e_dbg("Link taking longer than expected.\n"); /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - return ret_val; + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); } return ret_val; @@ -1401,25 +1377,25 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) ret_val = e1e_rphy(hw, PHY_CONTROL, &data); if (ret_val) - goto out; + return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &data); ret_val = e1e_wphy(hw, PHY_CONTROL, data); if (ret_val) - goto out; + return ret_val; /* Disable MDI-X support for 10/100 */ ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) - goto out; + return ret_val; data &= ~IFE_PMC_AUTO_MDIX; data &= ~IFE_PMC_FORCE_MDIX; ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); if (ret_val) - goto out; + return ret_val; e_dbg("IFE PMC: %X\n", data); @@ -1428,27 +1404,22 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; } -out: - return ret_val; + return 0; } /** @@ -1506,7 +1477,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) e_dbg("Forcing 10mb\n"); } - e1000e_config_collision_dist(hw); + hw->mac.ops.config_collision_dist(hw); ew32(CTRL, ctrl); } @@ -1833,22 +1804,20 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; - if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { - ret_val = -E1000_ERR_PHY; - goto out; - } + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; -out: - return ret_val; + return 0; } /** @@ -1918,7 +1887,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; - return 
ret_val; + return 0; } /** @@ -2073,24 +2042,23 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); if (ret_val) - goto out; + return ret_val; phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) ? false : true; if (phy->polarity_correction) { ret_val = e1000_check_polarity_ife(hw); if (ret_val) - goto out; + return ret_val; } else { /* Polarity is forced */ phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) @@ -2100,7 +2068,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) - goto out; + return ret_val; phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; @@ -2109,8 +2077,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; -out: - return ret_val; + return 0; } /** @@ -2154,7 +2121,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) s32 ret_val; u32 ctrl; - ret_val = e1000_check_reset_block(hw); + ret_val = phy->ops.check_reset_block(hw); if (ret_val) return 0; @@ -2188,6 +2155,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) s32 e1000e_get_cfg_done(struct e1000_hw *hw) { mdelay(10); + return 0; } @@ -2369,7 +2337,6 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) **/ s32 e1000e_determine_phy_address(struct e1000_hw *hw) { - s32 ret_val = -E1000_ERR_PHY_TYPE; u32 phy_addr = 0; u32 i; enum e1000_phy_type phy_type = e1000_phy_unknown; @@ -2388,17 +2355,15 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw) * If phy_type is valid, break - we found our * PHY address */ - if (phy_type != e1000_phy_unknown) { - ret_val = 0; - goto out; - } + if (phy_type != e1000_phy_unknown) + return 0; + usleep_range(1000, 2000); i++; } while (i < 10); } -out: - return ret_val; + return -E1000_ERR_PHY_TYPE; } /** @@ -2439,7 +2404,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, false); - goto out; + goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); @@ -2464,13 +2429,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) - goto out; + goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); -out: +release: hw->phy.ops.release(hw); return ret_val; } @@ -2498,7 +2463,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, false); - goto out; + goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); @@ -2523,12 +2488,12 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) - goto out; + goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); -out: +release: hw->phy.ops.release(hw); return ret_val; } @@ -2556,7 +2521,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) if (page == BM_WUC_PAGE) { ret_val = 
e1000_access_phy_wakeup_reg_bm(hw, offset, data, true, false); - goto out; + goto release; } hw->phy.addr = 1; @@ -2568,12 +2533,12 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) page); if (ret_val) - goto out; + goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); -out: +release: hw->phy.ops.release(hw); return ret_val; } @@ -2600,7 +2565,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false, false); - goto out; + goto release; } hw->phy.addr = 1; @@ -2611,13 +2576,13 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) page); if (ret_val) - goto out; + goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); -out: +release: hw->phy.ops.release(hw); return ret_val; } @@ -2642,14 +2607,14 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); if (ret_val) { e_dbg("Could not set Port Control page\n"); - goto out; + return ret_val; } ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); if (ret_val) { e_dbg("Could not read PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); - goto out; + return ret_val; } /* @@ -2664,15 +2629,14 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) if (ret_val) { e_dbg("Could not write PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); - goto out; + return ret_val; } - /* Select Host Wakeup Registers page */ - ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); - - /* caller now able to write registers on the Wakeup registers page */ -out: - return ret_val; + /* + * Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); } /** @@ -2694,7 +2658,7 @@ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); if (ret_val) { e_dbg("Could not set Port Control page\n"); - goto out; + return ret_val; } /* Restore 769.17 to its original value */ @@ -2702,7 +2666,7 @@ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) if (ret_val) e_dbg("Could not restore PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); -out: + return ret_val; } @@ -2750,7 +2714,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) { e_dbg("Could not enable PHY wakeup reg access\n"); - goto out; + return ret_val; } } @@ -2760,7 +2724,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); if (ret_val) { e_dbg("Could not write address opcode to page %d\n", page); - goto out; + return ret_val; } if (read) { @@ -2775,13 +2739,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, if (ret_val) { e_dbg("Could not access PHY reg %d.%d\n", page, reg); - goto out; + return ret_val; } if (!page_set) ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); -out: return ret_val; } @@ -3137,7 +3100,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, 
(u16)offset & 0x3F); if (ret_val) { e_dbg("Could not write the Address Offset port register\n"); - goto out; + return ret_val; } /* Read or write the data value next */ @@ -3146,12 +3109,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, else ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); - if (ret_val) { + if (ret_val) e_dbg("Could not access the Data port register\n"); - goto out; - } -out: return ret_val; } @@ -3172,39 +3132,34 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) u16 data; if (hw->phy.type != e1000_phy_82578) - goto out; + return 0; /* Do not apply workaround if in PHY loopback bit 14 set */ e1e_rphy(hw, PHY_CONTROL, &data); if (data & PHY_CONTROL_LB) - goto out; + return 0; /* check if link is up and at 1Gbps */ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); if (ret_val) - goto out; + return ret_val; - data &= BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_MASK; + data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; - if (data != (BM_CS_STATUS_LINK_UP | - BM_CS_STATUS_RESOLVED | - BM_CS_STATUS_SPEED_1000)) - goto out; + if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + return 0; - mdelay(200); + msleep(200); /* flush the packets in the fifo buffer */ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED); if (ret_val) - goto out; - - ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); + return ret_val; -out: - return ret_val; + return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); } /** @@ -3246,39 +3201,32 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); if (ret_val) - goto out; + return ret_val; e1000e_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); if (ret_val) - goto out; + return ret_val; udelay(1); if (phy->autoneg_wait_to_complete) { e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); if (ret_val) - goto out; + return ret_val; if (!link) e_dbg("Link taking longer than expected.\n"); /* Try once more */ - ret_val = e1000e_phy_has_link_generic(hw, - PHY_FORCE_LIMIT, - 100000, - &link); - if (ret_val) - goto out; + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); } -out: return ret_val; } @@ -3300,23 +3248,22 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) - goto out; + return ret_val; if (!link) { e_dbg("Phy info is only valid if link is up\n"); - ret_val = -E1000_ERR_CONFIG; - goto out; + return -E1000_ERR_CONFIG; } phy->polarity_correction = true; ret_val = e1000_check_polarity_82577(hw); if (ret_val) - goto out; + return ret_val; ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); if (ret_val) - goto out; + return ret_val; phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; @@ -3324,11 +3271,11 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) I82577_PHY_STATUS2_SPEED_1000MBPS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) - goto out; + return ret_val; ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data); if (ret_val) - goto out; + return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) ? 
e1000_1000t_rx_status_ok @@ -3343,8 +3290,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw) phy->remote_rx = e1000_1000t_rx_status_undefined; } -out: - return ret_val; + return 0; } /** @@ -3362,7 +3308,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); if (ret_val) - goto out; + return ret_val; length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> I82577_DSTATUS_CABLE_LENGTH_SHIFT; @@ -3372,6 +3318,5 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw) phy->cable_length = length; -out: - return ret_val; + return 0; } diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index c6e4621b626..6565c463185 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 82575 PCI-Express Ethernet Linux driver -# Copyright(c) 1999 - 2011 Intel Corporation. +# Copyright(c) 1999 - 2012 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b8e20f037d0..08bdc33715e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 08a757eb660..b927d79ab53 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index f5fc5725ea9..89eb1f85b9f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. 
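The phy.c hunks above all apply one mechanical transformation: where an "out:" label did nothing but "return ret_val;", the label is removed, every error check returns directly, and the final operation's status is returned as-is (or a literal 0 once all failure paths have been handled). A minimal before/after sketch of that pattern; demo_hw, read_reg(), write_reg(), DEMO_REG and DEMO_FLAG are hypothetical stand-ins, not driver symbols:

/* Before: single exit through a label that performs no cleanup. */
static s32 demo_update_reg_old(struct demo_hw *hw)
{
	s32 ret_val;
	u16 data;

	ret_val = read_reg(hw, DEMO_REG, &data);
	if (ret_val)
		goto out;

	data |= DEMO_FLAG;
	ret_val = write_reg(hw, DEMO_REG, data);
out:
	return ret_val;
}

/* After: direct returns; the tail call's status propagates unchanged. */
static s32 demo_update_reg_new(struct demo_hw *hw)
{
	s32 ret_val;
	u16 data;

	ret_val = read_reg(hw, DEMO_REG, &data);
	if (ret_val)
		return ret_val;

	data |= DEMO_FLAG;
	return write_reg(hw, DEMO_REG, data);
}

Functions that really do need cleanup on error — the _bm register accessors that must call hw->phy.ops.release() — keep their label, renamed from "out" to "release" so the name states what the exit path actually does.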
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -134,6 +134,8 @@ #define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ /* diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 4519a136717..f67cbd3fa30 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 73aac082c44..f57338afd71 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -151,7 +151,7 @@ void igb_clear_vfta_i350(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ -void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) { int i; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index e45996b4ea3..cbddc4e51e3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 469d95eaa15..5988b8958ba 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. 
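The two RCTL bits added above, E1000_RCTL_DPF (Discard Pause Frames) and E1000_RCTL_PMCF (pass MAC control frames), exist for the igb RXALL mode introduced further down in this series: a sniffing interface wants pause and other MAC control frames delivered rather than consumed by the MAC. A reduced sketch of the intended read-modify-write, assuming the igb-style rd32()/wr32() accessors with "hw" in scope; demo_rctl_rx_all() itself is hypothetical:

static void demo_rctl_rx_all(struct e1000_hw *hw)
{
	u32 rctl = rd32(E1000_RCTL);

	rctl |= E1000_RCTL_SBP |	/* keep bad (e.g. bad-FCS) packets */
		E1000_RCTL_PMCF;	/* deliver MAC control frames */
	rctl &= ~E1000_RCTL_DPF;	/* do not discard pause frames */

	wr32(E1000_RCTL, rctl);
}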
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index eddb0f83dce..dbcfa3d5cae 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 40407124e72..fa2c6ba6213 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index a2a7ca9fa73..825b0228cac 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2011 Intel Corporation. + Copyright(c) 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index b17d7c20f81..789de5b83aa 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 8510797b9d8..4c32ac66ff3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 0a860bc1198..ccdf36d503f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. 
+ Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 3d12e67eebb..8e33bdd33ee 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7998bf4d594..e10821a0f24 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1577,7 +1577,9 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc; struct igb_rx_buffer *rx_buffer_info; struct igb_tx_buffer *tx_buffer_info; + struct netdev_queue *txq; u16 rx_ntc, tx_ntc, count = 0; + unsigned int total_bytes = 0, total_packets = 0; /* initialize next to clean and descriptor values */ rx_ntc = rx_ring->next_to_clean; @@ -1601,6 +1603,8 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, /* unmap buffer on tx side */ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; + total_bytes += tx_buffer_info->bytecount; + total_packets += tx_buffer_info->gso_segs; igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); /* increment rx/tx next to clean counters */ @@ -1615,6 +1619,9 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring, rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); } + txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); + netdev_tx_completed_queue(txq, total_packets, total_bytes); + /* re-map buffers to ring, store next to clean values */ igb_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 01e5e89ef95..c4902411d74 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2011 Intel Corporation. + Copyright(c) 2007-2012 Intel Corporation. 
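igb_clean_test_rings() above now totals bytecount/gso_segs while unmapping and reports them via netdev_tx_completed_queue(); without this, the byte queue limits (BQL) accounting primed by the transmit path would never be credited back during the ethtool loopback test, leaving the queue looking busy. The BQL contract is a strict pairing, sketched below with hypothetical demo_ wrappers (the netdev_tx_* calls are the real API):

#include <linux/netdevice.h>

/* Transmit side: charge BQL for bytes handed to hardware. */
static void demo_tx_sent(struct netdev_queue *txq, unsigned int bytes)
{
	netdev_tx_sent_queue(txq, bytes);
}

/* Completion side: credit back exactly what was charged. */
static void demo_tx_done(struct netdev_queue *txq,
			 unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(txq, pkts, bytes);
}

The igb_main.c hunks further down complete the picture by moving netdev_tx_reset_queue() from igb_clean_tx_ring() into igb_configure_tx_ring(), so the counters are re-armed whenever the ring is (re)configured.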
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -68,7 +68,7 @@ char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; +static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, @@ -173,7 +173,9 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter); #endif #ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int igb_suspend(struct device *); +#endif static int igb_resume(struct device *); #ifdef CONFIG_PM_RUNTIME static int igb_runtime_suspend(struct device *dev); @@ -1767,10 +1769,21 @@ static int igb_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; + struct igb_adapter *adapter = netdev_priv(netdev); if (changed & NETIF_F_HW_VLAN_RX) igb_vlan_mode(netdev, features); + if (!(changed & NETIF_F_RXALL)) + return 0; + + netdev->features = features; + + if (netif_running(netdev)) + igb_reinit_locked(adapter); + else + igb_reset(adapter); + return 0; } @@ -1952,6 +1965,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; + netdev->hw_features |= NETIF_F_RXALL; /* set this bit last since it cannot be part of hw_features */ netdev->features |= NETIF_F_HW_VLAN_FILTER; @@ -1962,6 +1976,8 @@ static int __devinit igb_probe(struct pci_dev *pdev, NETIF_F_IPV6_CSUM | NETIF_F_SG; + netdev->priv_flags |= IFF_SUPP_NOFCS; + if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; @@ -2750,6 +2766,8 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); + + netdev_tx_reset_queue(txring_txq(ring)); } /** @@ -2999,6 +3017,22 @@ void igb_setup_rctl(struct igb_adapter *adapter) wr32(E1000_QDE, ALL_QUEUES); } + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. 
+ */ + } + wr32(E1000_RCTL, rctl); } @@ -3242,7 +3276,6 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) buffer_info = &tx_ring->tx_buffer_info[i]; igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } - netdev_tx_reset_queue(txring_txq(tx_ring)); size = sizeof(struct igb_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); @@ -4003,8 +4036,8 @@ set_itr_now: } } -void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, - u32 type_tucmd, u32 mss_l4len_idx) +static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) { struct e1000_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -4290,6 +4323,8 @@ static void igb_tx_map(struct igb_ring *tx_ring, /* write last descriptor with RS and EOP bits */ cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); + if (unlikely(skb->no_fcs)) + cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS)); tx_desc->read.cmd_type_len = cmd_type; /* set the timestamp */ @@ -5012,7 +5047,8 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter) vf_devfn = pdev->devfn + 0x80; pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); while (pvfdev) { - if (pvfdev->devfn == vf_devfn) + if (pvfdev->devfn == vf_devfn && + (pvfdev->bus->number >= pdev->bus->number)) vfs_found++; vf_devfn += vf_stride; pvfdev = pci_get_device(hw->vendor_id, @@ -5623,7 +5659,7 @@ static irqreturn_t igb_intr(int irq, void *data) return IRQ_HANDLED; } -void igb_ring_irq_enable(struct igb_q_vector *q_vector) +static void igb_ring_irq_enable(struct igb_q_vector *q_vector) { struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; @@ -6094,8 +6130,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) goto next_desc; } - if (igb_test_staterr(rx_desc, - E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { + if (unlikely((igb_test_staterr(rx_desc, + E1000_RXDEXT_ERR_FRAME_ERR_MASK)) + && !(rx_ring->netdev->features & NETIF_F_RXALL))) { dev_kfree_skb_any(skb); goto next_desc; } @@ -6709,6 +6746,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, } #ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int igb_suspend(struct device *dev) { int retval; @@ -6728,6 +6766,7 @@ static int igb_suspend(struct device *dev) return 0; } +#endif /* CONFIG_PM_SLEEP */ static int igb_resume(struct device *dev) { diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile index 0fa3db3dd8b..044b0ad5fcb 100644 --- a/drivers/net/ethernet/intel/igbvf/Makefile +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel(R) 82576 Virtual Function Linux driver -# Copyright(c) 2009 - 2010 Intel Corporation. +# Copyright(c) 2009 - 2012 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index 79f2604673f..3e18045d8f8 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
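The igb_clean_rx_irq() hunk above reduces the drop decision to a single predicate: a frame carrying hardware error bits is freed unless the user has switched the interface into RXALL mode. Restated as a standalone helper (demo_should_drop() is illustrative; igb_test_staterr(), the error mask and NETIF_F_RXALL are the real symbols):

static bool demo_should_drop(struct igb_ring *rx_ring,
			     union e1000_adv_rx_desc *rx_desc)
{
	/* clean frame: always keep */
	if (!igb_test_staterr(rx_desc, E1000_RXDEXT_ERR_FRAME_ERR_MASK))
		return false;

	/* errored frame: keep it only when the user asked for everything */
	return !(rx_ring->netdev->features & NETIF_F_RXALL);
}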
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -97,10 +97,6 @@ #define E1000_ERR_MAC_INIT 5 #define E1000_ERR_MBX 15 -#ifndef ETH_ADDR_LEN -#define ETH_ADDR_LEN 6 -#endif - /* SRRCTL bit definitions */ #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 7b600a1f636..8ce67064b9c 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -343,10 +343,10 @@ static int igbvf_get_coalesce(struct net_device *netdev, { struct igbvf_adapter *adapter = netdev_priv(netdev); - if (adapter->itr_setting <= 3) - ec->rx_coalesce_usecs = adapter->itr_setting; + if (adapter->requested_itr <= 3) + ec->rx_coalesce_usecs = adapter->requested_itr; else - ec->rx_coalesce_usecs = adapter->itr_setting >> 2; + ec->rx_coalesce_usecs = adapter->current_itr >> 2; return 0; } @@ -365,15 +365,16 @@ static int igbvf_set_coalesce(struct net_device *netdev, /* convert to rate of irq's per second */ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { - adapter->itr = IGBVF_START_ITR; - adapter->itr_setting = ec->rx_coalesce_usecs; + adapter->current_itr = IGBVF_START_ITR; + adapter->requested_itr = ec->rx_coalesce_usecs; } else { - adapter->itr = ec->rx_coalesce_usecs << 2; - adapter->itr_setting = adapter->itr; + adapter->current_itr = ec->rx_coalesce_usecs << 2; + adapter->requested_itr = 1000000000 / + (adapter->current_itr * 256); } - writel(adapter->itr, - hw->hw_addr + adapter->rx_ring[0].itr_register); + writel(adapter->current_itr, + hw->hw_addr + adapter->rx_ring->itr_register); return 0; } @@ -468,6 +469,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = { void igbvf_set_ethtool_ops(struct net_device *netdev) { - /* have to "undeclare" const on this struct to remove warnings */ - SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); + SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops); } diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index fd4a7b780fd..a895e2f7b34 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. 
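The igbvf_set_coalesce() hunk above leans on the 82576 EITR encoding: the register holds the inter-interrupt gap in 256 ns units, so "usecs << 2" (x4, approximately x1000/256) converts a microsecond setting into register units, and 10^9 / (itr * 256) recovers an interrupt rate. A hedged sketch of both directions; the demo_ helpers are not driver functions:

/* EITR counts the interrupt interval in 256 ns units. */
static u32 demo_usecs_to_itr(u32 usecs)
{
	return usecs << 2;			/* ~= usecs * 1000 / 256 */
}

static u32 demo_itr_to_rate(u32 itr)
{
	return 1000000000 / (itr * 256);	/* interrupts per second */
}

/* Example: 250 us -> itr 1000 -> ~3906 interrupts/sec. */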
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -43,7 +43,18 @@ struct igbvf_info; struct igbvf_adapter; /* Interrupt defines */ -#define IGBVF_START_ITR 648 /* ~6000 ints/sec */ +#define IGBVF_START_ITR 488 /* ~8000 ints/sec */ +#define IGBVF_4K_ITR 980 +#define IGBVF_20K_ITR 196 +#define IGBVF_70K_ITR 56 + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + /* Interrupt modes, as used by the IntMode parameter */ #define IGBVF_INT_MODE_LEGACY 0 @@ -155,6 +166,7 @@ struct igbvf_ring { char name[IFNAMSIZ + 5]; u32 eims_value; u32 itr_val; + enum latency_range itr_range; u16 itr_register; int set_itr; @@ -187,10 +199,8 @@ struct igbvf_adapter { unsigned long state; /* Interrupt Throttle Rate */ - u32 itr; - u32 itr_setting; - u16 tx_itr; - u16 rx_itr; + u32 requested_itr; /* ints/sec or adaptive */ + u32 current_itr; /* Actual ITR register value, not ints/sec */ /* * Tx @@ -299,13 +309,6 @@ enum igbvf_state_t { __IGBVF_DOWN }; -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - extern char igbvf_driver_name[]; extern const char igbvf_driver_version[]; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 048aae248d0..b4b65bc9fc5 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index c2883c45d47..24370bcb0e2 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index fd3da3076c2..217c143686d 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. 
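The replacement ITR constants above fall out of that same 256 ns encoding: itr = 10^9 / (target_rate * 256). So 8000 ints/sec gives 488 (IGBVF_START_ITR, hence the "~8000 ints/sec" comment), 20000 gives 195 (IGBVF_20K_ITR is 196), 70000 gives 55 (56), and 4000 gives 976 (IGBVF_4K_ITR is 980, slightly rounded up). A small check of the derivation; demo_rate_to_itr() is illustrative only:

static u32 demo_rate_to_itr(u32 ints_per_sec)
{
	return 1000000000 / (ints_per_sec * 256);
}

/*
 * demo_rate_to_itr(8000)  == 488
 * demo_rate_to_itr(20000) == 195	(define rounds to 196)
 * demo_rate_to_itr(70000) == 55	(define rounds to 56)
 * demo_rate_to_itr(4000)  == 976	(define rounds to 980)
 */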
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -53,7 +53,7 @@ const char igbvf_driver_version[] = DRV_VERSION; static const char igbvf_driver_string[] = "Intel(R) Gigabit Virtual Function Network Driver"; static const char igbvf_copyright[] = - "Copyright (c) 2009 - 2011 Intel Corporation."; + "Copyright (c) 2009 - 2012 Intel Corporation."; static int igbvf_poll(struct napi_struct *napi, int budget); static void igbvf_reset(struct igbvf_adapter *); @@ -632,14 +632,13 @@ void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) * traffic pattern. Constants in this function were computed * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time - * while increasing bulk throughput. This functionality is controlled - * by the InterruptThrottleRate module parameter. + * while increasing bulk throughput. **/ -static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, - u16 itr_setting, int packets, - int bytes) +static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter, + enum latency_range itr_setting, + int packets, int bytes) { - unsigned int retval = itr_setting; + enum latency_range retval = itr_setting; if (packets == 0) goto update_itr_done; @@ -675,65 +674,87 @@ static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, retval = low_latency; } break; + default: + break; } update_itr_done: return retval; } -static void igbvf_set_itr(struct igbvf_adapter *adapter) +static int igbvf_range_to_itr(enum latency_range current_range) { - struct e1000_hw *hw = &adapter->hw; - u16 current_itr; - u32 new_itr = adapter->itr; - - adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, - adapter->total_tx_packets, - adapter->total_tx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) - adapter->tx_itr = low_latency; - - adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, - adapter->total_rx_packets, - adapter->total_rx_bytes); - /* conservative mode (itr 3) eliminates the lowest_latency setting */ - if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) - adapter->rx_itr = low_latency; + int new_itr; - current_itr = max(adapter->rx_itr, adapter->tx_itr); - - switch (current_itr) { + switch (current_range) { /* counts and packets in update_itr are dependent on these numbers */ case lowest_latency: - new_itr = 70000; + new_itr = IGBVF_70K_ITR; break; case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ + new_itr = IGBVF_20K_ITR; break; case bulk_latency: - new_itr = 4000; + new_itr = IGBVF_4K_ITR; break; default: + new_itr = IGBVF_START_ITR; break; } + return new_itr; +} - if (new_itr != adapter->itr) { +static void igbvf_set_itr(struct igbvf_adapter *adapter) +{ + u32 new_itr; + + adapter->tx_ring->itr_range = + igbvf_update_itr(adapter, + adapter->tx_ring->itr_val, + adapter->total_tx_packets, + adapter->total_tx_bytes); + + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->requested_itr == 3 && + adapter->tx_ring->itr_range == lowest_latency) + adapter->tx_ring->itr_range = low_latency; + + new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range); + + + if (new_itr != adapter->tx_ring->itr_val) { + u32 current_itr = adapter->tx_ring->itr_val; /* * this attempts to bias the interrupt rate towards Bulk * by adding intermediate steps 
when interrupt rate is * increasing */ - new_itr = new_itr > adapter->itr ? - min(adapter->itr + (new_itr >> 2), new_itr) : - new_itr; - adapter->itr = new_itr; - adapter->rx_ring->itr_val = 1952; - - if (adapter->msix_entries) - adapter->rx_ring->set_itr = 1; - else - ew32(ITR, 1952); + new_itr = new_itr > current_itr ? + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->tx_ring->itr_val = new_itr; + + adapter->tx_ring->set_itr = 1; + } + + adapter->rx_ring->itr_range = + igbvf_update_itr(adapter, adapter->rx_ring->itr_val, + adapter->total_rx_packets, + adapter->total_rx_bytes); + if (adapter->requested_itr == 3 && + adapter->rx_ring->itr_range == lowest_latency) + adapter->rx_ring->itr_range = low_latency; + + new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range); + + if (new_itr != adapter->rx_ring->itr_val) { + u32 current_itr = adapter->rx_ring->itr_val; + new_itr = new_itr > current_itr ? + min(current_itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->rx_ring->itr_val = new_itr; + + adapter->rx_ring->set_itr = 1; } } @@ -835,6 +856,11 @@ static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *tx_ring = adapter->tx_ring; + if (tx_ring->set_itr) { + writel(tx_ring->itr_val, + adapter->hw.hw_addr + tx_ring->itr_register); + adapter->tx_ring->set_itr = 0; + } adapter->total_tx_bytes = 0; adapter->total_tx_packets = 0; @@ -937,19 +963,10 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter) igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); adapter->eims_enable_mask |= tx_ring->eims_value; - if (tx_ring->itr_val) - writel(tx_ring->itr_val, - hw->hw_addr + tx_ring->itr_register); - else - writel(1952, hw->hw_addr + tx_ring->itr_register); - + writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); adapter->eims_enable_mask |= rx_ring->eims_value; - if (rx_ring->itr_val) - writel(rx_ring->itr_val, - hw->hw_addr + rx_ring->itr_register); - else - writel(1952, hw->hw_addr + rx_ring->itr_register); + writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); /* set vector for other causes, i.e. 
link changes */ @@ -1027,7 +1044,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) goto out; adapter->tx_ring->itr_register = E1000_EITR(vector); - adapter->tx_ring->itr_val = 1952; + adapter->tx_ring->itr_val = adapter->current_itr; vector++; err = request_irq(adapter->msix_entries[vector].vector, @@ -1037,7 +1054,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) goto out; adapter->rx_ring->itr_register = E1000_EITR(vector); - adapter->rx_ring->itr_val = 1952; + adapter->rx_ring->itr_val = adapter->current_itr; vector++; err = request_irq(adapter->msix_entries[vector].vector, @@ -1151,7 +1168,7 @@ static int igbvf_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete(napi); - if (adapter->itr_setting & 3) + if (adapter->requested_itr & 3) igbvf_set_itr(adapter); if (!test_bit(__IGBVF_DOWN, &adapter->state)) @@ -1194,11 +1211,6 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - igbvf_irq_disable(adapter); - - if (!test_bit(__IGBVF_DOWN, &adapter->state)) - igbvf_irq_enable(adapter); - if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); @@ -1526,8 +1538,8 @@ static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) adapter->tx_abs_int_delay = 32; adapter->rx_int_delay = 0; adapter->rx_abs_int_delay = 8; - adapter->itr_setting = 3; - adapter->itr = 20000; + adapter->requested_itr = 3; + adapter->current_itr = IGBVF_START_ITR; /* Set various function pointers */ adapter->ei->init_ops(&adapter->hw); @@ -1700,6 +1712,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + netdev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } @@ -2700,18 +2713,19 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "PF still in reset state, assigning new address." " Is the PF interface up?\n"); - dev_hw_addr_random(adapter->netdev, hw->mac.addr); + eth_hw_addr_random(netdev); + memcpy(adapter->hw.mac.addr, netdev->dev_addr, + netdev->addr_len); } else { err = hw->mac.ops.read_mac_addr(hw); if (err) { dev_err(&pdev->dev, "Error reading MAC address\n"); goto err_hw_init; } + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); } - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - if (!is_valid_ether_addr(netdev->perm_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", netdev->dev_addr); @@ -2719,6 +2733,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, goto err_hw_init; } + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, (unsigned long) adapter); diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 77e18d3d6b1..7dc6341715d 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. 
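The per-ring igbvf_set_itr() rework above keeps the old damping rule but applies it to tx_ring and rx_ring independently: when the computed target is a larger ITR register value (a longer interrupt interval), the code steps only a quarter of the target per pass; a smaller target (a faster interrupt rate) takes effect immediately. Isolated, the step looks like this — demo_damp_itr() is a hypothetical name, min() is the kernel macro:

static u32 demo_damp_itr(u32 cur, u32 target)
{
	if (target > cur)
		return min(cur + (target >> 2), target); /* ramp gradually */

	return target;					 /* drop at once */
}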
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index af3822f9ea9..30a6cc42603 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -246,7 +246,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, for (i = 0; i < cnt; i++) { hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); hash_list[i] = hash_value & 0x0FFFF; - mc_addr_list += ETH_ADDR_LEN; + mc_addr_list += ETH_ALEN; } mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); @@ -333,10 +333,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) **/ static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) { - int i; - - for (i = 0; i < ETH_ADDR_LEN; i++) - hw->mac.addr[i] = hw->mac.perm_addr[i]; + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); return E1000_SUCCESS; } diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index d7ed58fcd9b..57db3c68dfc 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2010 Intel Corporation. + Copyright(c) 2009 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index cb23448fe2f..4d2ae97ff1b 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -75,18 +75,6 @@ struct ixgb_adapter; #include "ixgb_ee.h" #include "ixgb_ids.h" -#define PFX "ixgb: " - -#ifdef _DEBUG_DRIVER_ -#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args) -#else -#define IXGB_DBG(fmt, args...) 
\ -do { \ - if (0) \ - printk(KERN_DEBUG PFX fmt, ##args); \ -} while (0) -#endif - /* TX/RX descriptor defines */ #define DEFAULT_TXD 256 #define MAX_TXD 4096 diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c index 2ed925f3881..eca216b9b85 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c @@ -533,10 +533,8 @@ __le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index) { - if ((index < IXGB_EEPROM_SIZE) && - (ixgb_check_and_get_eeprom_data(hw) == true)) { - return hw->eeprom[index]; - } + if (index < IXGB_EEPROM_SIZE && ixgb_check_and_get_eeprom_data(hw)) + return hw->eeprom[index]; return 0; } @@ -558,7 +556,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw, ENTER(); - if (ixgb_check_and_get_eeprom_data(hw) == true) { + if (ixgb_check_and_get_eeprom_data(hw)) { for (i = 0; i < ETH_ALEN; i++) { mac_addr[i] = ee_map->mac_addr[i]; } @@ -578,7 +576,7 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw) { - if (ixgb_check_and_get_eeprom_data(hw) == true) + if (ixgb_check_and_get_eeprom_data(hw)) return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16); @@ -599,7 +597,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw) { struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; - if (ixgb_check_and_get_eeprom_data(hw) == true) + if (ixgb_check_and_get_eeprom_data(hw)) return le16_to_cpu(ee_map->device_id); return 0; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 9bd5faf64a8..82aaa792cbf 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1136,10 +1136,8 @@ ixgb_set_multi(struct net_device *netdev) u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES * ETH_ALEN, GFP_ATOMIC); u8 *addr; - if (!mta) { - pr_err("allocation of multicast memory failed\n"); + if (!mta) goto alloc_failed; - } IXGB_WRITE_REG(hw, RCTL, rctl); @@ -2070,8 +2068,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) /* All receives must fit into a single buffer */ - IXGB_DBG("Receive packet consumed multiple buffers " - "length<%x>\n", length); + pr_debug("Receive packet consumed multiple buffers length<%x>\n", + length); dev_kfree_skb_irq(skb); goto rxdesc_done; diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 7d7387fbdec..8be1d1b2132 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2010 Intel Corporation. +# Copyright(c) 1999 - 2012 Intel Corporation. 
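The ixgb hunks above retire the driver-private IXGB_DBG()/_DEBUG_DRIVER_ machinery in favour of the generic pr_debug(), which compiles away unless DEBUG is defined and, with CONFIG_DYNAMIC_DEBUG, can be enabled per call site at runtime instead of rebuilding the driver. A sketch of the replacement pattern; demo_log_rx() is illustrative, and pr_fmt must be defined before printk.h is pulled in (in practice, before any include in the .c file):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/printk.h>

static void demo_log_rx(unsigned int length)
{
	/* replaces IXGB_DBG("... length<%x>\n", length) */
	pr_debug("Receive packet consumed multiple buffers length<%x>\n",
		 length);
}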
# # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 258164d6d45..80e26ff30eb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -72,12 +72,6 @@ /* Supported Rx Buffer Sizes */ #define IXGBE_RXBUFFER_512 512 /* Used for packet split */ -#define IXGBE_RXBUFFER_2K 2048 -#define IXGBE_RXBUFFER_3K 3072 -#define IXGBE_RXBUFFER_4K 4096 -#define IXGBE_RXBUFFER_7K 7168 -#define IXGBE_RXBUFFER_8K 8192 -#define IXGBE_RXBUFFER_15K 15360 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ /* @@ -102,14 +96,11 @@ #define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) -#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 -#define IXGBE_MAX_RSC_INT_RATE 162760 - #define IXGBE_MAX_VF_MC_ENTRIES 30 #define IXGBE_MAX_VF_FUNCTIONS 64 #define IXGBE_MAX_VFTA_ENTRIES 128 @@ -156,19 +147,19 @@ struct vf_macvlans { struct ixgbe_tx_buffer { union ixgbe_adv_tx_desc *next_to_watch; unsigned long time_stamp; - dma_addr_t dma; - u32 length; - u32 tx_flags; struct sk_buff *skb; - u32 bytecount; - u16 gso_segs; + unsigned int bytecount; + unsigned short gso_segs; + __be16 protocol; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; }; struct ixgbe_rx_buffer { struct sk_buff *skb; dma_addr_t dma; struct page *page; - dma_addr_t page_dma; unsigned int page_offset; }; @@ -180,7 +171,6 @@ struct ixgbe_queue_stats { struct ixgbe_tx_queue_stats { u64 restart_queue; u64 tx_busy; - u64 completed; u64 tx_done_old; }; @@ -190,22 +180,18 @@ struct ixgbe_rx_queue_stats { u64 non_eop_descs; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; + u64 csum_err; }; -enum ixbge_ring_state_t { +enum ixgbe_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_DETECT_HANG, __IXGBE_HANG_CHECK_ARMED, - __IXGBE_RX_PS_ENABLED, __IXGBE_RX_RSC_ENABLED, + __IXGBE_RX_CSUM_UDP_ZERO_ERR, + __IXGBE_RX_FCOE_BUFSZ, }; -#define ring_is_ps_enabled(ring) \ - test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) #define check_for_tx_hang(ring) \ test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) #define set_check_for_tx_hang(ring) \ @@ -220,18 +206,20 @@ enum ixbge_ring_state_t { clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) struct ixgbe_ring { struct ixgbe_ring *next; /* pointer to 
next ring in q_vector */ + struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ void *desc; /* descriptor ring memory */ - struct device *dev; /* device for DMA mapping */ - struct net_device *netdev; /* netdev ring belongs to */ union { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; }; unsigned long state; u8 __iomem *tail; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ u16 count; /* amount of descriptors */ - u16 rx_buf_len; u8 queue_index; /* needed for multiqueue queue management */ u8 reg_idx; /* holds the special value that gets @@ -239,12 +227,17 @@ struct ixgbe_ring { * associated with this ring, which is * different for DCB and RSS modes */ - u8 atr_sample_rate; - u8 atr_count; - u16 next_to_use; u16 next_to_clean; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + u8 dcb_tc; struct ixgbe_queue_stats stats; struct u64_stats_sync syncp; @@ -252,11 +245,6 @@ struct ixgbe_ring { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; }; - int numa_node; - unsigned int size; /* length in bytes */ - dma_addr_t dma; /* phys. address of descriptor ring */ - struct rcu_head rcu; - struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */ } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -287,6 +275,22 @@ struct ixgbe_ring_feature { int mask; } ____cacheline_internodealigned_in_smp; +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +{ + return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0; +} +#else +#define ixgbe_rx_pg_order(_ring) 0 +#endif +#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) +#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring)) + struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -296,6 +300,10 @@ struct ixgbe_ring_container { u8 itr; /* current ITR setting for ring */ }; +/* iterator for handling rings in ring container */ +#define ixgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ ? 
8 : 1) #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS @@ -315,8 +323,13 @@ struct ixgbe_q_vector { struct ixgbe_ring_container rx, tx; struct napi_struct napi; - cpumask_var_t affinity_mask; + cpumask_t affinity_mask; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; /* @@ -329,6 +342,13 @@ struct ixgbe_q_vector { #define IXGBE_10K_ITR 400 #define IXGBE_8K_ITR 500 +/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ +static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) { u16 ntc = ring->next_to_clean; @@ -337,11 +357,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; } -#define IXGBE_RX_DESC_ADV(R, i) \ +#define IXGBE_RX_DESC(R, i) \ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) -#define IXGBE_TX_DESC_ADV(R, i) \ +#define IXGBE_TX_DESC(R, i) \ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) -#define IXGBE_TX_CTXTDESC_ADV(R, i) \ +#define IXGBE_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 @@ -361,18 +381,25 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) #define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 -#define MIN_MSIX_Q_VECTORS 2 +#define MIN_MSIX_Q_VECTORS 1 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) +/* default to trying for four seconds */ +#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) + /* board specific private data structure */ struct ixgbe_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + unsigned long state; /* Some features need tri-state capability, * thus the additional *_CAPABLE flags. 
*/ u32 flags; -#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) #define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) #define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) #define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) @@ -409,60 +436,52 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) #define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - u16 bd_number; - struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - - /* DCB parameters */ - struct ieee_pfc *ixgbe_ieee_pfc; - struct ieee_ets *ixgbe_ieee_ets; - struct ixgbe_dcb_config dcb_cfg; - struct ixgbe_dcb_config temp_dcb_cfg; - u8 dcb_set_bitmap; - u8 dcbx_cap; - enum ixgbe_fc_mode last_lfc_mode; - - /* Interrupt Throttle Rate */ - u32 rx_itr_setting; - u32 tx_itr_setting; - u16 eitr_low; - u16 eitr_high; - - /* Work limits */ + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; u16 tx_work_limit; + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + /* TX */ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; - int num_tx_queues; - u32 tx_timeout_count; - bool detect_tx_hung; u64 restart_queue; u64 lsc_int; + u32 tx_timeout_count; /* RX */ - struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp; - int num_rx_queues; + struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; int num_rx_pools; /* == num_rx_queues in 82598 */ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; u64 non_eop_descs; - int num_msix_vectors; - int max_msix_q_vectors; /* true count of q_vectors for device */ - struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; - struct msix_entry *msix_entries; - u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; -/* default to trying for four seconds */ -#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) + struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - /* OS defined structs */ - struct net_device *netdev; - struct pci_dev *pdev; + /* DCB parameters */ + struct ieee_pfc *ixgbe_ieee_pfc; + struct ieee_ets *ixgbe_ieee_ets; + struct ixgbe_dcb_config dcb_cfg; + struct ixgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; + enum ixgbe_fc_mode last_lfc_mode; + + int num_msix_vectors; + int max_msix_q_vectors; /* true count of q_vectors for device */ + struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; u32 test_icr; struct ixgbe_ring test_tx_ring; @@ -473,10 +492,6 @@ struct ixgbe_adapter { u16 msg_enable; struct ixgbe_hw_stats stats; - /* Interrupt Throttle Rate */ - u32 rx_eitr_param; - u32 tx_eitr_param; - u64 tx_busy; unsigned int tx_ring_count; unsigned int rx_ring_count; @@ -485,25 +500,30 @@ struct ixgbe_adapter { bool link_up; unsigned long link_check_timeout; - struct work_struct service_task; struct timer_list service_timer; + struct work_struct service_task; + + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union ixgbe_atr_input fdir_mask; + int fdir_filter_count; u32 fdir_pballoc; u32 atr_sample_rate; - unsigned long fdir_overflow; /* number of times ATR was backed off */ spinlock_t fdir_perfect_lock; + #ifdef IXGBE_FCOE struct ixgbe_fcoe fcoe; #endif /* IXGBE_FCOE */ - u64 rsc_total_count; - u64 rsc_total_flush; u32 wol; + 
+ u16 bd_number; + u16 eeprom_verh; u16 eeprom_verl; u16 eeprom_cap; - int node; - u32 led_reg; u32 interrupt_event; + u32 led_reg; /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); @@ -513,9 +533,6 @@ struct ixgbe_adapter { struct vf_macvlans vf_mvs; struct vf_macvlans *mv_list; - struct hlist_head fdir_filter_list; - union ixgbe_atr_input fdir_mask; - int fdir_filter_count; u32 timer_event_accumulator; u32 vferr_refcount; }; @@ -535,12 +552,16 @@ enum ixbge_state_t { __IXGBE_IN_SFP_INIT, }; -struct ixgbe_rsc_cb { +struct ixgbe_cb { + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; dma_addr_t dma; - u16 skb_cnt; - bool delay_unmap; + u16 append_cnt; + bool page_released; }; -#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) +#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) enum ixgbe_boards { board_82598, @@ -560,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, extern char ixgbe_driver_name[]; extern const char ixgbe_driver_version[]; +#ifdef IXGBE_FCOE extern char ixgbe_default_device_descr[]; +#endif /* IXGBE_FCOE */ extern void ixgbe_up(struct ixgbe_adapter *adapter); extern void ixgbe_down(struct ixgbe_adapter *adapter); @@ -585,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, struct ixgbe_tx_buffer *); extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); extern void ixgbe_write_eitr(struct ixgbe_q_vector *); +extern int ixgbe_poll(struct napi_struct *napi, int budget); extern int ethtool_ioctl(struct ifreq *ifr); extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); @@ -604,18 +628,20 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); extern void ixgbe_set_rx_mode(struct net_device *netdev); +#ifdef CONFIG_IXGBE_DCB extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); +#endif extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); extern void ixgbe_do_reset(struct net_device *netdev); #ifdef IXGBE_FCOE extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); -extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len); +extern int ixgbe_fso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len); extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb, - u32 staterr); + struct sk_buff *skb); extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc); extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, @@ -632,4 +658,9 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info); #endif /* IXGBE_FCOE */ +static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index ef2afefb0cd..85d2e2c4ce4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* 
Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -213,15 +213,15 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) for (i = 0; ((i < hw->mac.max_tx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); - regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); } for (i = 0; ((i < hw->mac.max_rx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | + IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } @@ -617,7 +617,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, *link_up = false; } - if (*link_up == false) + if (!*link_up) goto out; } @@ -645,7 +645,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, else *speed = IXGBE_LINK_SPEED_1GB_FULL; - if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && (ixgbe_validate_link_ready(hw) != 0)) *link_up = false; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 772072147be..9c14685358e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -779,7 +779,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; /* Check to see if speed passed in is supported. */ - hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); + status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, + &autoneg); if (status != 0) goto out; @@ -1906,38 +1907,17 @@ out: **/ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) { -#define IXGBE_MAX_SECRX_POLL 30 - int i; - int secrxreg; - /* * Workaround for 82599 silicon errata when enabling the Rx datapath. * If traffic is incoming before we enable the Rx unit, it could hang * the Rx DMA unit. Therefore, make sure the security engine is * completely disabled prior to enabling the Rx unit. */ - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - secrxreg |= IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); - for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); - if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) - break; - else - /* Use interrupt-safe sleep just in case */ - udelay(10); - } - - /* For informational purposes only */ - if (i >= IXGBE_MAX_SECRX_POLL) - hw_dbg(hw, "Rx unit being enabled before security " - "path fully disabled. 
Continuing with init.\n"); + hw->mac.ops.disable_rx_buff(hw); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); - secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); - secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; - IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); - IXGBE_WRITE_FLUSH(hw); + + hw->mac.ops.enable_rx_buff(hw); return 0; } @@ -2102,6 +2082,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .get_media_type = &ixgbe_get_media_type_82599, .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, .enable_rx_dma = &ixgbe_enable_rx_dma_82599, + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_mac_addr = &ixgbe_get_mac_addr_generic, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, .get_device_caps = &ixgbe_get_device_caps_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index a3aa6333073..49aa41fe7b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -128,14 +128,14 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); - regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); - regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | - IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } @@ -2011,13 +2011,20 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); - fcrth = hw->fc.high_water[packetbuf_num] << 10; fcrtl = hw->fc.low_water << 10; if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + fcrth = hw->fc.high_water[packetbuf_num] << 10; fcrth |= IXGBE_FCRTH_FCEN; if (hw->fc.send_xon) fcrtl |= IXGBE_FCRTL_XONE; + } else { + /* + * If Tx flow control is disabled, set our high water mark + * to Rx FIFO size minus 32 in order to prevent Tx switch + * loopback from stalling on DMA. + */ + fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32; } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); @@ -2578,6 +2585,58 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) } /** + * ixgbe_disable_rx_buff_generic - Stops the receive data path + * @hw: pointer to hardware structure + * + * Stops the receive data path and waits for the HW to internally + * empty the Rx security block. 
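 *
 * A minimal caller-side sketch (illustrative, hedged; the pairing is
 * taken from how the reworked ixgbe_enable_rx_dma_82599() in
 * ixgbe_82599.c earlier in this diff consumes these helpers through
 * the mac ops):
 *
 *	hw->mac.ops.disable_rx_buff(hw);
 *	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
 *	hw->mac.ops.enable_rx_buff(hw);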
+ **/ +s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) +{ +#define IXGBE_MAX_SECRX_POLL 40 + int i; + int secrxreg; + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + udelay(10); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + hw_dbg(hw, "Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + return 0; + +} + +/** + * ixgbe_enable_rx_buff_generic - Enables the receive data path + * @hw: pointer to hardware structure + * + * Enables the receive data path + **/ +s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) +{ + int secrxreg; + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL @@ -3336,7 +3395,7 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * @hw: pointer to the HW structure * @buffer: contains the command to write and where the return status will * be placed - * @lenght: lenght of buffer, must be multiple of 4 bytes + * @length: length of buffer, must be multiple of 4 bytes * * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 863f9c1f145..204f06235b4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -74,8 +74,10 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num); s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); s32 ixgbe_validate_mac_addr(u8 *mac_addr); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 318caf4bf62..8bfaaee5ac5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index e162775064d..24333b71816 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index fcd0e479721..d3695edfcb8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index 2f318935561..ba835708fca 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 32cd97bc794..888a419dc3d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index a59d5dc59d0..4dec47faeb0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index da31735311f..dde65f95140 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -111,7 +111,9 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { - u8 err = 0; + int err = 0; + u8 prio_tc[MAX_USER_PRIORITY] = {0}; + int i; struct ixgbe_adapter *adapter = netdev_priv(netdev); /* Fail command if not in CEE mode */ @@ -120,14 +122,23 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) /* verify there is something to do, if not then exit */ if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return err; + goto out; - if (state > 0) + if (state > 0) { err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs); - else + ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); + } else { err = ixgbe_setup_tc(netdev, 0); + } - return err; + if (err) + goto out; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); + +out: + return err ? 1 : 0; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, @@ -479,7 +490,7 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) return 0; } -static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 0; @@ -503,7 +514,7 @@ static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) return rval; } -static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { return -EINVAL; } @@ -574,7 +585,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int i; + int i, err = 0; __u8 max_tc = 0; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -601,12 +612,17 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, return -EINVAL; if (max_tc != netdev_get_num_tc(dev)) - ixgbe_setup_tc(dev, max_tc); + err = ixgbe_setup_tc(dev, max_tc); + + if (err) + goto err_out; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]); - return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); + err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); +err_out: + return err; } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, @@ -719,6 +735,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets ets = {0}; struct ieee_pfc pfc = {0}; + int err = 0; /* no support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || @@ -749,10 +766,10 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device 
*dev, u8 mode) */ ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); - ixgbe_setup_tc(dev, 0); + err = ixgbe_setup_tc(dev, 0); } - return 0; + return err ? 1 : 0; } const struct dcbnl_rtnl_ops dcbnl_ops = { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index da7e580f517..31a2bf76a34 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -35,6 +35,7 @@ #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/vmalloc.h> +#include <linux/highmem.h> #include <linux/uaccess.h> #include "ixgbe.h" @@ -58,7 +59,7 @@ struct ixgbe_stats { sizeof(((struct rtnl_link_stats64 *)0)->m), \ offsetof(struct rtnl_link_stats64, m) -static struct ixgbe_stats ixgbe_gstrings_stats[] = { +static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, @@ -120,19 +121,23 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = { #endif /* IXGBE_FCOE */ }; -#define IXGBE_QUEUE_STATS_LEN \ - ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \ - ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \ +/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues + +#define IXGBE_QUEUE_STATS_LEN ( \ + (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) #define IXGBE_PB_STATS_LEN ( \ - (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \ - IXGBE_FLAG_DCB_ENABLED) ? 
\ - (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ - sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ - / sizeof(u64) : 0) + (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ + sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ + / sizeof(u64)) #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ IXGBE_PB_STATS_LEN + \ IXGBE_QUEUE_STATS_LEN) @@ -931,12 +936,12 @@ static int ixgbe_set_ringparam(struct net_device *netdev, if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD); + new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD); + new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); if ((new_tx_count == adapter->tx_ring[0]->count) && @@ -1078,8 +1083,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < adapter->num_tx_queues; j++) { + for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->tx_ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; + continue; + } + do { start = u64_stats_fetch_begin_bh(&ring->syncp); data[i] = ring->stats.packets; @@ -1087,8 +1099,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; } - for (j = 0; j < adapter->num_rx_queues; j++) { + for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { ring = adapter->rx_ring[j]; + if (!ring) { + data[i] = 0; + data[i+1] = 0; + i += 2; + continue; + } + do { start = u64_stats_fetch_begin_bh(&ring->syncp); data[i] = ring->stats.packets; @@ -1096,22 +1115,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); i += 2; } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) { - data[i++] = adapter->stats.pxontxc[j]; - data[i++] = adapter->stats.pxofftxc[j]; - } - for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) { - data[i++] = adapter->stats.pxonrxc[j]; - data[i++] = adapter->stats.pxoffrxc[j]; - } + + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; + } + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; } } static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct ixgbe_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; int i; @@ -1126,31 +1143,29 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->num_tx_queues; i++) { + for (i = 0; i < netdev->num_tx_queues; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, 
"tx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } - for (i = 0; i < adapter->num_rx_queues; i++) { + for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) { sprintf(p, "rx_queue_%u_packets", i); p += ETH_GSTRING_LEN; sprintf(p, "rx_queue_%u_bytes", i); p += ETH_GSTRING_LEN; } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { - sprintf(p, "tx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "tx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) { - sprintf(p, "rx_pb_%u_pxon", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rx_pb_%u_pxoff", i); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "tx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) { + sprintf(p, "rx_pb_%u_pxon", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; } /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ break; @@ -1577,7 +1592,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) tx_ring->dev = &adapter->pdev->dev; tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; - tx_ring->numa_node = adapter->node; err = ixgbe_setup_tx_resources(tx_ring); if (err) @@ -1602,8 +1616,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) rx_ring->dev = &adapter->pdev->dev; rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; - rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; - rx_ring->numa_node = adapter->node; err = ixgbe_setup_rx_resources(rx_ring); if (err) { @@ -1689,63 +1701,72 @@ static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter) } static void ixgbe_create_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) + unsigned int frame_size) { memset(skb->data, 0xFF, frame_size); - frame_size &= ~1; - memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); + frame_size >>= 1; + memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size + 10], 0xBE, 1); + memset(&skb->data[frame_size + 12], 0xAF, 1); } -static int ixgbe_check_lbtest_frame(struct sk_buff *skb, - unsigned int frame_size) +static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, + unsigned int frame_size) { - frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { - return 0; - } - } - return 13; + unsigned char *data; + bool match = true; + + frame_size >>= 1; + + data = kmap(rx_buffer->page) + rx_buffer->page_offset; + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + + kunmap(rx_buffer->page); + + return match; } static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, - struct ixgbe_ring *tx_ring, - unsigned int size) + struct ixgbe_ring *tx_ring, + unsigned int size) { union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer_info; - struct ixgbe_tx_buffer *tx_buffer_info; - const int bufsz = rx_ring->rx_buf_len; - u32 staterr; + struct ixgbe_rx_buffer *rx_buffer; + struct ixgbe_tx_buffer *tx_buffer; u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ rx_ntc = rx_ring->next_to_clean; tx_ntc = tx_ring->next_to_clean; - rx_desc = 
IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); - while (staterr & IXGBE_RXD_STAT_DD) { + while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { /* check Rx buffer */ - rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; - /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ - dma_unmap_single(rx_ring->dev, - rx_buffer_info->dma, - bufsz, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); /* verify contents of skb */ - if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size)) + if (ixgbe_check_lbtest_frame(rx_buffer, size)) count++; + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + /* unmap buffer on Tx side */ - tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); /* increment Rx/Tx next to clean counters */ rx_ntc++; @@ -1756,8 +1777,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, tx_ntc = 0; /* fetch next descriptor */ - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); } /* re-map buffers to ring, store next to clean values */ @@ -2094,8 +2114,6 @@ static int ixgbe_get_coalesce(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; - /* only valid if in constant ITR mode */ if (adapter->rx_itr_setting <= 1) ec->rx_coalesce_usecs = adapter->rx_itr_setting; @@ -2119,31 +2137,29 @@ static int ixgbe_get_coalesce(struct net_device *netdev, * this function must be called before setting the new value of * rx_itr_setting */ -static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, - struct ethtool_coalesce *ec) +static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + /* nothing to do if LRO or RSC are not enabled */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) || + !(netdev->features & NETIF_F_LRO)) return false; - /* if interrupt rate is too high then disable RSC */ - if (ec->rx_coalesce_usecs != 1 && - ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) { - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - e_info(probe, "rx-usecs set too low, disabling RSC\n"); - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - return true; - } - } else { - /* check the feature flag value and enable RSC if necessary */ - if ((netdev->features & NETIF_F_LRO) && - !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - e_info(probe, "rx-usecs set to %d, re-enabling RSC\n", - ec->rx_coalesce_usecs); + /* check the feature flag value and enable RSC if necessary */ + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs value high enough " + "to re-enable RSC\n"); return true; } + /* if interrupt rate is too high then disable RSC */ + } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + adapter->flags2 &= 
~IXGBE_FLAG2_RSC_ENABLED; + e_info(probe, "rx-usecs set too low, disabling RSC\n"); + return true; } return false; } @@ -2163,16 +2179,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev, && ec->tx_coalesce_usecs) return -EINVAL; - if (ec->tx_max_coalesced_frames_irq) - adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; - if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) return -EINVAL; - /* check the old value and enable RSC if necessary */ - need_reset = ixgbe_update_rsc(adapter, ec); - if (ec->rx_coalesce_usecs > 1) adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2; else @@ -2193,6 +2203,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev, else tx_itr_param = adapter->tx_itr_setting; + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; else @@ -2200,7 +2213,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, for (i = 0; i < num_vectors; i++) { q_vector = adapter->q_vector[i]; - q_vector->tx.work_limit = adapter->tx_work_limit; if (q_vector->tx.count && !q_vector->rx.count) /* tx only */ q_vector->itr = tx_itr_param; @@ -2314,6 +2326,48 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* if RSS is disabled then report no hashing */ + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + return 0; + + /* Report default options for RSS on ixgbe */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case UDP_V4_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case UDP_V6_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + + return 0; +} + static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { @@ -2335,6 +2389,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); break; + case ETHTOOL_GRXFH: + ret = ixgbe_get_rss_hash_opts(adapter, cmd); + break; default: break; } @@ -2569,6 +2626,111 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, return err; } +#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) +static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + u32 flags2 = adapter->flags2; + + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || + !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + 
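/*
 * Hedged usage note: a request such as "ethtool -N ethX rx-flow-hash
 * tcp4 sdfn" (ethX hypothetical) maps to RXH_IP_SRC | RXH_IP_DST |
 * RXH_L4_B_0_1 | RXH_L4_B_2_3, so TCP flows must keep the full
 * 4-tuple hash, while the UDP cases below make the L4 portion
 * optional via the IXGBE_FLAG2_RSS_FIELD_*_UDP bits.
 */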
break; + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP; + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST)) + return -EINVAL; + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP; + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || + !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ixgbe_hw *hw = &adapter->hw; + u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) + e_warn(drv, "enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + + /* Perform hash on these packet types */ + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP | + IXGBE_MRQC_RSS_FIELD_IPV6_UDP); + + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } + + return 0; +} + static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -2581,6 +2743,9 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXCLSRLDEL: ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd); break; + case ETHTOOL_SRXFH: + ret = ixgbe_set_rss_hash_opt(adapter, cmd); + break; default: break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index d18d6157dd2..77ea4b71653 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -357,22 +357,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, */ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb, - u32 staterr) + struct sk_buff *skb) { - u16 xid; - u32 fctl; - u32 fceofe, fcerr, fcstat; int rc = -EINVAL; struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe_ddp *ddp; struct fc_frame_header *fh; struct fcoe_crc_eof *crc; + __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); + __le32 ddp_err; + u32 fctl; + u16 xid; - fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR); - fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE); - if (fcerr == IXGBE_FCERR_BADCRC) - skb_checksum_none_assert(skb); + if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC)) + skb->ip_summed = CHECKSUM_NONE; else skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -382,6 +380,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); + fctl = ntoh24(fh->fh_f_ctl); if (fctl & FC_FC_EX_CTX) xid = be16_to_cpu(fh->fh_ox_id); @@ -396,27 +395,39 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, if (!ddp->udl) goto ddp_out; - if (fcerr | fceofe) + ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE | + IXGBE_RXDADV_ERR_FCERR); + if (ddp_err) goto ddp_out; - fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT); - if (fcstat) { + switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) { + /* return 0 to bypass going to ULD for DDPed data */ + case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP): /* update length of DDPed data */ ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); - /* unmap the sg list when FCP_RSP is received */ - if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { - pci_unmap_sg(adapter->pdev, ddp->sgl, - ddp->sgc, DMA_FROM_DEVICE); - ddp->err = (fcerr | fceofe); - ddp->sgl = NULL; - ddp->sgc = 0; - } - /* return 0 to bypass going to ULD for DDPed data */ - if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) - rc = 0; - else if (ddp->len) + rc = 0; + break; + /* unmap the sg list when FCPRSP is received */ + case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): + pci_unmap_sg(adapter->pdev, ddp->sgl, + ddp->sgc, DMA_FROM_DEVICE); + ddp->err = ddp_err; + ddp->sgl = NULL; + ddp->sgc = 0; + /* fall through */ + /* if DDP length is present pass it through to ULD */ + case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): + /* update length of DDPed data */ + ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + if (ddp->len) rc = ddp->len; + break; + /* no match will return as an error */ + case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH): + default: + break; } + /* In target mode, check the last data frame of the sequence. 
* For DDP in target mode, data is already DDPed but the header * indication of the last data frame would allow us to tell if we @@ -436,17 +447,18 @@ ddp_out: /** * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) * @tx_ring: tx desc ring - * @skb: associated skb - * @tx_flags: tx flags + * @first: first tx_buffer structure containing skb, tx_flags, and protocol * @hdr_len: hdr_len to be returned * * This sets up large send offload for FCoE * - * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error + * Returns : 0 indicates success, < 0 for error */ -int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len) +int ixgbe_fso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) { + struct sk_buff *skb = first->skb; struct fc_frame_header *fh; u32 vlan_macip_lens; u32 fcoe_sof_eof = 0; @@ -519,9 +531,18 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, *hdr_len = sizeof(struct fcoe_crc_eof); /* hdr_len includes fc_hdr if FCoE LSO is enabled */ - if (skb_is_gso(skb)) - *hdr_len += (skb_transport_offset(skb) + - sizeof(struct fc_frame_header)); + if (skb_is_gso(skb)) { + *hdr_len += skb_transport_offset(skb) + + sizeof(struct fc_frame_header); + /* update gso_segs and bytecount */ + first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, + skb_shinfo(skb)->gso_size); + first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= IXGBE_TX_FLAGS_FSO; + } + + /* set flag indicating FCOE to ixgbe_tx_map call */ + first->tx_flags |= IXGBE_TX_FLAGS_FCOE; /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; @@ -532,13 +553,13 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, sizeof(struct fc_frame_header); vlan_macip_lens |= (skb_transport_offset(skb) - 4) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; /* write context desc */ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); - return skb_is_gso(skb); + return 0; } static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 261fd62dda1..1dbed17c810 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c new file mode 100644 index 00000000000..027d7a75be3 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -0,0 +1,929 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. 
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "ixgbe.h" +#include "ixgbe_sriov.h" + +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + return false; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} +#ifdef CONFIG_IXGBE_DCB + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + *tx = tc << 2; + *rx = tc << 3; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (num_tcs > 4) { + if (tc < 3) { + *tx = tc << 5; + *rx = tc << 4; + } else if (tc < 5) { + *tx = ((tc + 2) << 4); + *rx = tc << 4; + } else if (tc < num_tcs) { + *tx = ((tc + 8) << 3); + *rx = tc << 4; + } + } else { + *rx = tc << 5; + switch (tc) { + case 0: + *tx = 0; + break; + case 1: + *tx = 64; + break; + case 2: + *tx = 96; + break; + case 3: + *tx = 112; + break; + default: + break; + } + } + break; + default: + break; + } +} + +/** + * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. + * + **/ +static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + int i, j, k; + u8 num_tcs = netdev_get_num_tc(dev); + + if (!num_tcs) + return false; + + for (i = 0, k = 0; i < num_tcs; i++) { + unsigned int tx_s, rx_s; + u16 count = dev->tc_to_txq[i].count; + + ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); + for (j = 0; j < count; j++, k++) { + adapter->tx_ring[k]->reg_idx = tx_s + j; + adapter->rx_ring[k]->reg_idx = rx_s + j; + adapter->tx_ring[k]->dcb_tc = i; + adapter->rx_ring[k]->dcb_tc = i; + } + } + + return true; +} +#endif + +/** + * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for Flow Director to the assigned rings. 
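 *
 * A worked example, for illustration only: with both RSS and Flow
 * Director hash filtering enabled, the mapping below is the identity,
 * so with four queues configured the rings use register indices 0..3:
 *
 *	adapter->rx_ring[i]->reg_idx == i	(i = 0..3)
 *	adapter->tx_ring[i]->reg_idx == i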
+ * + **/ +static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) +{ + int i; + bool ret = false; + + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + ret = true; + } + + return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for FCoE mode to the assigned rings. + * + */ +static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + int i; + u8 fcoe_rx_i = 0, fcoe_tx_i = 0; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + ixgbe_cache_ring_fdir(adapter); + else + ixgbe_cache_ring_rss(adapter); + + fcoe_rx_i = f->mask; + fcoe_tx_i = f->mask; + } + for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { + adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; + adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; + } + return true; +} + +#endif /* IXGBE_FCOE */ +/** + * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +{ + adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; + adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; + if (adapter->num_vfs) + return true; + else + return false; +} + +/** + * ixgbe_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order the various feature calls is important. It must start with + * the "most" features enabled at the same time, then trickle down to the + * least amount of features turned on at once. + **/ +static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) +{ + /* start with default case */ + adapter->rx_ring[0]->reg_idx = 0; + adapter->tx_ring[0]->reg_idx = 0; + + if (ixgbe_cache_ring_sriov(adapter)) + return; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb(adapter)) + return; +#endif + +#ifdef IXGBE_FCOE + if (ixgbe_cache_ring_fcoe(adapter)) + return; +#endif /* IXGBE_FCOE */ + + if (ixgbe_cache_ring_fdir(adapter)) + return; + + if (ixgbe_cache_ring_rss(adapter)) + return; +} + +/** + * ixgbe_set_sriov_queues: Allocate queues for IOV use + * @adapter: board private structure to initialize + * + * IOV doesn't actually use anything, so just NAK the + * request for now and let the other queue routines + * figure out what to do. + */ +static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ + return false; +} + +/** + * ixgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. 
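 *
 * A hedged worked example: assuming the feature's indices were capped
 * elsewhere along the lines of
 *
 *	f->indices = min_t(int, num_online_cpus(), IXGBE_MAX_RSS_INDICES);
 *
 * (the cap macro name is an assumption for illustration), an 8-core
 * system with IXGBE_FLAG_RSS_ENABLED set ends up with
 * num_rx_queues == num_tx_queues == 8 and f->mask == 0xF.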
+ * + **/ +static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + f->mask = 0xF; + adapter->num_rx_queues = f->indices; + adapter->num_tx_queues = f->indices; + ret = true; + } + + return ret; +} + +/** + * ixgbe_set_fdir_queues: Allocate queues for Flow Director + * @adapter: board private structure to initialize + * + * Flow Director is an advanced Rx filter, attempting to get Rx flows back + * to the original CPU that initiated the Tx session. This runs in addition + * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the + * Rx load across CPUs using RSS. + * + **/ +static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; + + f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices); + f_fdir->mask = 0; + + /* + * Use RSS in addition to Flow Director to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { + adapter->num_tx_queues = f_fdir->indices; + adapter->num_rx_queues = f_fdir->indices; + ret = true; + } else { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + return ret; +} + +#ifdef IXGBE_FCOE +/** + * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) + * @adapter: board private structure to initialize + * + * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. + * The ring feature mask is not used as a mask for FCoE, as it can take any 8 + * rx queues out of the max number of rx queues, instead, it is used as the + * index of the first rx queue used by FCoE. + * + **/ +static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) +{ + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + f->indices = min_t(int, num_online_cpus(), f->indices); + + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + e_info(probe, "FCoE enabled with RSS\n"); + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) + ixgbe_set_fdir_queues(adapter); + else + ixgbe_set_rss_queues(adapter); + } + + /* adding FCoE rx rings to the end */ + f->mask = adapter->num_rx_queues; + adapter->num_rx_queues += f->indices; + adapter->num_tx_queues += f->indices; + + return true; +} +#endif /* IXGBE_FCOE */ + +/* Artificial max queue cap per traffic class in DCB mode */ +#define DCB_QUEUE_CAP 8 + +#ifdef CONFIG_IXGBE_DCB +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + int per_tc_q, q, i, offset = 0; + struct net_device *dev = adapter->netdev; + int tcs = netdev_get_num_tc(dev); + + if (!tcs) + return false; + + /* Map queue offset and counts onto allocated tx queues */ + per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); + q = min_t(int, num_online_cpus(), per_tc_q); + + for (i = 0; i < tcs; i++) { + netdev_set_tc_queue(dev, i, q, offset); + offset += q; + } + + adapter->num_tx_queues = q * tcs; + adapter->num_rx_queues = q * tcs; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and mask. 
Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. + */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + u8 prio_tc[MAX_USER_PRIORITY] = {0}; + int tc; + struct ixgbe_ring_feature *f = + &adapter->ring_feature[RING_F_FCOE]; + + ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); + tc = prio_tc[adapter->fcoe.up]; + f->indices = dev->tc_to_txq[tc].count; + f->mask = dev->tc_to_txq[tc].offset; + } +#endif + + return true; +} +#endif + +/** + * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. + * + **/ +static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +{ + /* Start with base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + + if (ixgbe_set_sriov_queues(adapter)) + goto done; + +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_set_dcb_queues(adapter)) + goto done; + +#endif +#ifdef IXGBE_FCOE + if (ixgbe_set_fcoe_queues(adapter)) + goto done; + +#endif /* IXGBE_FCOE */ + if (ixgbe_set_fdir_queues(adapter)) + goto done; + + if (ixgbe_set_rss_queues(adapter)) + goto done; + + /* fallback to base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + +done: + if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || + (adapter->netdev->reg_state == NETREG_UNREGISTERING)) + return 0; + + /* Notify the stack of the (possibly) reduced queue counts. */ + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); + return netif_set_real_num_rx_queues(adapter->netdev, + adapter->num_rx_queues); +} + +static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, + int vectors) +{ + int err, vector_threshold; + + /* We'll want at least 2 (vector_threshold): + * 1) TxQ[0] + RxQ[0] handler + * 2) Other (Link Status Change, etc.) + */ + vector_threshold = MIN_MSIX_COUNT; + + /* + * The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + while (vectors >= vector_threshold) { + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, + vectors); + if (!err) /* Success in acquiring all requested vectors. */ + break; + else if (err < 0) + vectors = 0; /* Nasty failure, quit now */ + else /* err == number of vectors we should try again with */ + vectors = err; + } + + if (vectors < vector_threshold) { + /* Can't allocate enough MSI-X interrupts? Oh well. + * This just means we'll go with either a single MSI + * vector or fall back to legacy interrupts. + */ + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI-X interrupts\n"); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else { + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. 
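/*
 * [Editor's note] The pci_enable_msix() retry idiom above, isolated. A
 * positive return value under this (pre-3.14) API means "only this many
 * vectors are available", so the caller retries with that count until
 * success or until it drops below its floor. Sketch only; 'floor' plays
 * the role of vector_threshold.
 */
static int example_acquire_msix(struct pci_dev *pdev,
				struct msix_entry *entries,
				int want, int floor)
{
	while (want >= floor) {
		int err = pci_enable_msix(pdev, entries, want);

		if (!err)
			return want;	/* acquired all requested */
		if (err < 0)
			return err;	/* hard failure           */
		want = err;		/* retry with what exists */
	}
	return -ENOSPC;
}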
+ */ + adapter->num_msix_vectors = min(vectors, + adapter->max_msix_q_vectors + NON_Q_VECTORS); + } +} + +static void ixgbe_add_ring(struct ixgbe_ring *ring, + struct ixgbe_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @v_idx: index of vector in adapter struct + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, + int txr_count, int txr_idx, + int rxr_count, int rxr_idx) +{ + struct ixgbe_q_vector *q_vector; + struct ixgbe_ring *ring; + int node = -1; + int cpu = -1; + int ring_count, size; + + ring_count = txr_count + rxr_count; + size = sizeof(struct ixgbe_q_vector) + + (sizeof(struct ixgbe_ring) * ring_count); + + /* customize cpu for Flow Director mapping */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + if (cpu_online(v_idx)) { + cpu = v_idx; + node = cpu_to_node(cpu); + } + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + else + cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); + q_vector->numa_node = node; + + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbe_poll, 64); + + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + while (txr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + ixgbe_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; + ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; + + /* update count and index */ + txr_count--; + txr_idx++; + + /* push pointer to next ring */ + ring++; + } + + while (rxr_count) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + ixgbe_add_ring(ring, &q_vector->rx); + + /* + * 82599 errata, UDP frames with a 0 checksum + * can be marked as checksum errors. + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; + ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; + + /* update count and index */ + rxr_count--; + rxr_idx++; + + /* push pointer to next ring */ + ring++; + } + + return 0; +} + +/** + * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
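/*
 * [Editor's note] The single-allocation layout ixgbe_alloc_q_vector()
 * uses above, reduced to its essentials: the vector and its rings share
 * one kzalloc_node(), so both land on the chosen NUMA node and free
 * with one kfree. Struct names are simplified stand-ins for
 * illustration, not the driver's types.
 */
struct example_ring { int reg_idx; };
struct example_vector {
	int ring_count;
	struct example_ring ring[0];	/* rings trail the vector */
};

static struct example_vector *example_alloc_vector(int rings, int node)
{
	int size = sizeof(struct example_vector) +
		   rings * sizeof(struct example_ring);
	struct example_vector *v = kzalloc_node(size, GFP_KERNEL, node);

	if (v)
		v->ring_count = rings;
	return v;
}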
+ **/ +static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) +{ + struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ixgbe_ring *ring; + + ixgbe_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + ixgbe_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + + /* + * ixgbe_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int rxr_remaining = adapter->num_rx_queues; + int txr_remaining = adapter->num_tx_queues; + int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + + /* only one q_vector if MSI-X is disabled. */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + q_vectors = 1; + + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++, q_vectors--) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); + err = ixgbe_alloc_q_vector(adapter, v_idx, + 0, 0, rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + rxr_idx += rqpv; + } + } + + for (; q_vectors; v_idx++, q_vectors--) { + int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); + int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); + err = ixgbe_alloc_q_vector(adapter, v_idx, + tqpv, txr_idx, + rqpv, rxr_idx); + + if (err) + goto err_out; + + /* update counts and index */ + rxr_remaining -= rqpv; + rxr_idx += rqpv; + txr_remaining -= tqpv; + txr_idx += tqpv; + } + + return 0; + +err_out: + while (v_idx) { + v_idx--; + ixgbe_free_q_vector(adapter, v_idx); + } + + return -ENOMEM; +} + +/** + * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) +{ + int v_idx, q_vectors; + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + else + q_vectors = 1; + + for (v_idx = 0; v_idx < q_vectors; v_idx++) + ixgbe_free_q_vector(adapter, v_idx); +} + +static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) +{ + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; + pci_disable_msi(adapter->pdev); + } +} + +/** + * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. 
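/*
 * [Editor's note] The DIV_ROUND_UP() spread used by
 * ixgbe_alloc_q_vectors() above, standalone: taking
 * ceil(remaining / vectors-left) each step splits e.g. 10 rings over 4
 * vectors as 3-3-2-2 with nothing left over. Illustrative sketch.
 */
static void example_spread_rings(int rings, int vectors)
{
	int idx = 0;

	for (; vectors; vectors--) {
		int take = DIV_ROUND_UP(rings, vectors);

		pr_info("vector handles rings %d..%d\n", idx, idx + take - 1);
		rings -= take;
		idx += take;
	}
}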
+ **/ +static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int err = 0; + int vector, v_budget; + + /* + * It's easy to be greedy for MSI-X vectors, but it really + * doesn't do us much good if we have a lot more vectors + * than CPU's. So let's be conservative and only ask for + * (roughly) the same number of vectors as there are CPU's. + * The default is to use pairs of vectors. + */ + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; + + /* + * At the same time, hardware can only support a maximum of + * hw.mac->max_msix_vectors vectors. With features + * such as RSS and VMDq, we can easily surpass the number of Rx and Tx + * descriptor queues supported by our device. Thus, we cap it off in + * those rare cases where the cpu count also exceeds our vector limit. + */ + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + /* A failure in MSI-X entry allocation isn't fatal, but it does + * mean we disable MSI-X capabilities of the adapter. */ + adapter->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), GFP_KERNEL); + if (adapter->msix_entries) { + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + ixgbe_acquire_msix_vectors(adapter, v_budget); + + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + goto out; + } + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + e_err(probe, + "ATR is not supported while multiple " + "queues are disabled. Disabling Flow Director\n"); + } + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->atr_sample_rate = 0; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + ixgbe_disable_sriov(adapter); + + err = ixgbe_set_num_queues(adapter); + if (err) + return err; + + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + } else { + netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, + "Unable to allocate MSI interrupt, " + "falling back to legacy. Error: %d\n", err); + /* reset err */ + err = 0; + } + +out: + return err; +} + +/** + * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Kernel support (MSI, MSI-X) + * - which can be user-defined (via MODULE_PARAM) + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + err = ixgbe_set_num_queues(adapter); + if (err) + return err; + + err = ixgbe_set_interrupt_capability(adapter); + if (err) { + e_dev_err("Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ixgbe_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + ixgbe_cache_ring_register(adapter); + + e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", + (adapter->num_rx_queues > 1) ? 
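/*
 * [Editor's note] Worked example of the v_budget arithmetic above,
 * assuming NON_Q_VECTORS is 1 in this driver generation: 16 Rx and 16
 * Tx queues on an 8-CPU machine with a hardware ceiling of 64 request
 * min(min(max(16, 16), 8) + 1, 64) == 9 MSI-X vectors.
 */
static int example_v_budget(int rxq, int txq, int cpus,
			    int non_q, int hw_max)
{
	int v = max(rxq, txq);

	v = min(v, cpus);
	return min(v + non_q, hw_max);
}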
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + + set_bit(__IXGBE_DOWN, &adapter->state); + + return 0; + +err_alloc_q_vectors: + ixgbe_reset_interrupt_capability(adapter); +err_set_interrupt: + return err; +} + +/** + * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); +} + +void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, + u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) +{ + struct ixgbe_adv_tx_context_desc *context_desc; + u16 i = tx_ring->next_to_use; + + context_desc = IXGBE_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1ee5d0fbb90..398fc223cab 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -55,8 +55,13 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; +#ifdef IXGBE_FCOE char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; +#else +static char ixgbe_default_device_descr[] = + "Intel(R) 10 Gigabit Network Connection"; +#endif #define MAJ 3 #define MIN 6 #define BUILD 7 @@ -64,7 +69,7 @@ char ixgbe_default_device_descr[] = __stringify(BUILD) "-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2011 Intel Corporation."; + "Copyright (c) 1999-2012 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -131,6 +136,11 @@ MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); #endif /* CONFIG_PCI_IOV */ +static unsigned int allow_unsupported_sfp; +module_param(allow_unsupported_sfp, uint, 0); +MODULE_PARM_DESC(allow_unsupported_sfp, + "Allow unsupported and untested SFP+ modules on 82599-based adapters"); + MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL"); @@ -284,7 +294,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) struct ixgbe_reg_info *reginfo; int n = 0; struct ixgbe_ring *tx_ring; - struct ixgbe_tx_buffer *tx_buffer_info; + struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; struct my_u0 { u64 a; u64 b; } *u0; struct ixgbe_ring *rx_ring; @@ -324,14 +334,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; - tx_buffer_info = - &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; + tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)tx_buffer_info->dma, - tx_buffer_info->length, - tx_buffer_info->next_to_watch, - (u64)tx_buffer_info->time_stamp); + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp); } /* Print TX Rings */ @@ -361,18 +370,18 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) "leng ntw timestamp bi->skb\n"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); - tx_buffer_info = &tx_ring->tx_buffer_info[i]; + tx_desc = IXGBE_TX_DESC(tx_ring, i); + tx_buffer = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; pr_info("T [0x%03X] %016llX %016llX %016llX" " %04X %p %016llX %p", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)tx_buffer_info->dma, - tx_buffer_info->length, - tx_buffer_info->next_to_watch, - (u64)tx_buffer_info->time_stamp, - tx_buffer_info->skb); + (u64)dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) pr_cont(" NTC/U\n"); @@ -384,11 +393,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_cont("\n"); if (netif_msg_pktdata(adapter) && - tx_buffer_info->dma != 0) + dma_unmap_len(tx_buffer, len) != 0) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - 
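/*
 * [Editor's note] The dma_unmap_addr()/dma_unmap_len() accessors this
 * hunk converts ixgbe_dump() to. The backing fields exist only when the
 * platform needs unmap state, so stores must also go through the _set
 * variants. Minimal illustrative struct; not the driver's tx_buffer.
 */
struct example_tx_buffer {
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

static void example_record_mapping(struct example_tx_buffer *b,
				   dma_addr_t addr, unsigned int len)
{
	dma_unmap_addr_set(b, dma, addr);
	dma_unmap_len_set(b, len, len);
}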
phys_to_virt(tx_buffer_info->dma), - tx_buffer_info->length, true); + phys_to_virt(dma_unmap_addr(tx_buffer, + dma)), + dma_unmap_len(tx_buffer, len), + true); } } @@ -442,7 +453,7 @@ rx_ring_summary: for (i = 0; i < rx_ring->count; i++) { rx_buffer_info = &rx_ring->rx_buffer_info[i]; - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + rx_desc = IXGBE_RX_DESC(rx_ring, i); u0 = (struct my_u0 *)rx_desc; staterr = le32_to_cpu(rx_desc->wb.upper.status_error); if (staterr & IXGBE_RXD_STAT_DD) { @@ -464,17 +475,7 @@ rx_ring_summary: print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, phys_to_virt(rx_buffer_info->dma), - rx_ring->rx_buf_len, true); - - if (rx_ring->rx_buf_len - < IXGBE_RXBUFFER_2K) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - rx_buffer_info->page_dma + - rx_buffer_info->page_offset - ), - PAGE_SIZE/2, true); + ixgbe_rx_bufsz(rx_ring), true); } } @@ -584,32 +585,26 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring, - struct ixgbe_tx_buffer *tx_buffer) +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *tx_buffer) { - if (tx_buffer->dma) { - if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE) - dma_unmap_page(ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); - else + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, - tx_buffer->dma, - tx_buffer->length, - DMA_TO_DEVICE); + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); } - tx_buffer->dma = 0; -} - -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, - struct ixgbe_tx_buffer *tx_buffer_info) -{ - ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info); - if (tx_buffer_info->skb) - dev_kfree_skb_any(tx_buffer_info->skb); - tx_buffer_info->skb = NULL; - /* tx_buffer_info must be completely set up in the transmit path */ + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the transmit path */ } static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) @@ -666,7 +661,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) { - return ring->tx_stats.completed; + return ring->stats.packets; } static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) @@ -746,56 +741,88 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, union ixgbe_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = q_vector->tx.work_limit; - u16 i = tx_ring->next_to_clean; + unsigned int i = tx_ring->next_to_clean; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return true; tx_buffer = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_desc = IXGBE_TX_DESC(tx_ring, i); + i -= tx_ring->count; - for (; budget; budget--) { + do { union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; + /* prevent any other reads prior to eop_desc */ + rmb(); + /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 
break; - /* count the packet as being completed */ - tx_ring->tx_stats.completed++; - /* clear next_to_watch to prevent false hangs */ tx_buffer->next_to_watch = NULL; - /* prevent any other reads prior to eop_desc being verified */ - rmb(); + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; - do { - ixgbe_unmap_tx_resource(tx_ring, tx_buffer); - tx_desc->wb.status = 0; - if (likely(tx_desc == eop_desc)) { - eop_desc = NULL; - dev_kfree_skb_any(tx_buffer->skb); - tx_buffer->skb = NULL; + /* free the skb */ + dev_kfree_skb_any(tx_buffer->skb); - total_bytes += tx_buffer->bytecount; - total_packets += tx_buffer->gso_segs; - } + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { tx_buffer++; tx_desc++; i++; - if (unlikely(i == tx_ring->count)) { - i = 0; - + if (unlikely(!i)) { + i -= tx_ring->count; tx_buffer = tx_ring->tx_buffer_info; - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0); + tx_desc = IXGBE_TX_DESC(tx_ring, 0); } - } while (eop_desc); - } + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; tx_ring->next_to_clean = i; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; @@ -807,7 +834,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ struct ixgbe_hw *hw = &adapter->hw; - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); e_err(drv, "Detected Tx Unit Hang\n" " Tx Queue <%d>\n" " TDH, TDT <%x>, <%x>\n" @@ -835,6 +861,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, return true; } + netdev_tx_completed_queue(txring_txq(tx_ring), + total_packets, total_bytes); + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { @@ -842,9 +871,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, * sees the new next_to_clean. 
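/*
 * [Editor's note] The negative-bias walk the rewritten
 * ixgbe_clean_tx_irq() uses: subtracting the ring size up front turns
 * every wrap test into a cheap "reached zero" check. Unsigned underflow
 * is well defined (mod 2^32), which is what makes the bias safe. Sketch.
 */
static void example_biased_walk(unsigned int ntc, unsigned int count)
{
	unsigned int i = ntc - count;	/* bias index below zero     */
	unsigned int step;

	for (step = 0; step < count; step++) {
		i++;
		if (!i)			/* wrapped past ring end...  */
			i -= count;	/* ...rebias, reset pointers */
	}
	/* i + count recovers the real next_to_clean at loop exit */
}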
*/ smp_mb(); - if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__IXGBE_DOWN, &adapter->state)) { - netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) + && !test_bit(__IXGBE_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; } } @@ -853,63 +884,68 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, } #ifdef CONFIG_IXGBE_DCA -static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, +static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, int cpu) { struct ixgbe_hw *hw = &adapter->hw; - u32 rxctrl; - u8 reg_idx = rx_ring->reg_idx; + u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); + u16 reg_offset; - rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); switch (hw->mac.type) { case ixgbe_mac_82598EB: - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; - rxctrl |= dca3_get_tag(rx_ring->dev, cpu); + reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; - rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) << - IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); + reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx); + txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599; break; default: - break; + /* for unknown hardware do not write register */ + return; } - rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; - rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; - rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); - IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. + */ + txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN | + IXGBE_DCA_TXCTRL_DATA_RRO_EN | + IXGBE_DCA_TXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, reg_offset, txctrl); } -static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, +static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring, int cpu) { struct ixgbe_hw *hw = &adapter->hw; - u32 txctrl; - u8 reg_idx = tx_ring->reg_idx; + u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); + u8 reg_idx = rx_ring->reg_idx; + switch (hw->mac.type) { - case ixgbe_mac_82598EB: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; - txctrl |= dca3_get_tag(tx_ring->dev, cpu); - txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); - break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; - txctrl |= (dca3_get_tag(tx_ring->dev, cpu) << - IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); - txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); + rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599; break; default: break; } + + /* + * We can enable relaxed ordering for reads, but not writes when + * DCA is enabled. This is due to a known issue in some chipsets + * which will cause the DCA tag to be cleared. 
+ */ + rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_DCA_EN | + IXGBE_DCA_RXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); } static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) @@ -921,10 +957,10 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) if (q_vector->cpu == cpu) goto out_no_update; - for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next) + ixgbe_for_each_ring(ring, q_vector->tx) ixgbe_update_tx_dca(adapter, ring, cpu); - for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next) + ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_update_rx_dca(adapter, ring, cpu); q_vector->cpu = cpu; @@ -984,14 +1020,17 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) return 0; } -#endif /* CONFIG_IXGBE_DCA */ -static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc, +#endif /* CONFIG_IXGBE_DCA */ +static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); + if (ring->netdev->features & NETIF_F_RXHASH) + skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); } +#ifdef IXGBE_FCOE /** * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type * @adapter: address of board private structure @@ -1010,73 +1049,45 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); } -/** - * ixgbe_receive_skb - Send a completed packet up the stack - * @adapter: board private structure - * @skb: packet to send up - * @status: hardware indication of status of receive - * @rx_ring: rx descriptor ring (for a specific queue) to setup - * @rx_desc: rx descriptor - **/ -static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb, u8 status, - struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc) -{ - struct ixgbe_adapter *adapter = q_vector->adapter; - struct napi_struct *napi = &q_vector->napi; - bool is_vlan = (status & IXGBE_RXD_STAT_VP); - u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - - if (is_vlan && (tag & VLAN_VID_MASK)) - __vlan_hwaccel_put_tag(skb, tag); - - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(napi, skb); - else - netif_rx(skb); -} - +#endif /* IXGBE_FCOE */ /** * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum - * @adapter: address of board private structure - * @status_err: hardware indication of status of receive + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed * @skb: skb currently being received and modified - * @status_err: status error value of last descriptor in packet **/ -static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, +static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb, - u32 status_err) + struct sk_buff *skb) { - skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); /* Rx csum disabled */ - if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* if IP and error */ - if ((status_err & IXGBE_RXD_STAT_IPCS) && - (status_err & IXGBE_RXDADV_ERR_IPE)) { - adapter->hw_csum_rx_error++; + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { + ring->rx_stats.csum_err++; return; } - if (!(status_err & IXGBE_RXD_STAT_L4CS)) + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS)) return; - 
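/*
 * [Editor's note] The ixgbe_for_each_ring() iterator these hunks adopt
 * is, paraphrased from ixgbe.h of this era, a plain singly linked walk
 * along ring->next from the container head - roughly:
 */
#define example_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)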
if (status_err & IXGBE_RXDADV_ERR_TCPE) { - u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; /* * 82599 errata, UDP frames with a 0 checksum can be marked as * checksum errors. */ - if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) && - (adapter->hw.mac.type == ixgbe_mac_82599EB)) + if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) && + test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state)) return; - adapter->hw_csum_rx_error++; + ring->rx_stats.csum_err++; return; } @@ -1086,6 +1097,10 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) { + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only @@ -1096,8 +1111,51 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) writel(val, rx_ring->tail); } +static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma = bi->dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(dma)) + return true; + + /* alloc new page for storage */ + if (likely(!page)) { + page = alloc_pages(GFP_ATOMIC | __GFP_COLD, + ixgbe_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->page = page; + } + + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + put_page(page); + bi->page = NULL; + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->dma = dma; + bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); + + return true; +} + /** - * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split + * ixgbe_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace **/ @@ -1105,344 +1163,599 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; - struct sk_buff *skb; u16 i = rx_ring->next_to_use; - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev) + /* nothing to do */ + if (!cleaned_count) return; - while (cleaned_count--) { - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); - bi = &rx_ring->rx_buffer_info[i]; - skb = bi->skb; + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; - if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); - if (!skb) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } + do { + if (!ixgbe_alloc_mapped_page(rx_ring, bi)) + break; - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - bi->dma = 0; - goto no_buffers; - } + /* + * Refresh the desc even if buffer_addrs didn't change + * 
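/*
 * [Editor's note] The page_offset XOR in ixgbe_alloc_mapped_page()
 * above ping-pongs a buffer between the two halves of its page: with an
 * assumed 2048-byte buffer size the offset alternates 0, 2048, 0, ...
 * on successive refills, letting the stack keep one half while the
 * hardware fills the other. Trivial demonstration:
 */
static unsigned int example_half_flip(unsigned int offset,
				      unsigned int bufsz)
{
	return offset ^ bufsz;	/* 0 -> 2048 -> 0 for bufsz == 2048 */
}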
because each write-back erases this info. + */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = IXGBE_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; } - if (ring_is_ps_enabled(rx_ring)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); - if (!bi->page) { - rx_ring->rx_stats.alloc_rx_page_failed++; - goto no_buffers; - } - } + /* clear the hdr_addr for the next_to_use descriptor */ + rx_desc->read.hdr_addr = 0; - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { - rx_ring->rx_stats.alloc_rx_page_failed++; - bi->page_dma = 0; - goto no_buffers; - } - } + cleaned_count--; + } while (cleaned_count); - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - rx_desc->read.hdr_addr = 0; - } + i += rx_ring->count; - i++; - if (i == rx_ring->count) - i = 0; + if (rx_ring->next_to_use != i) + ixgbe_release_rx_desc(rx_ring, i); +} + +/** + * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE + * @data: pointer to the start of the headers + * @max_len: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
+ **/ +static unsigned int ixgbe_get_headlen(unsigned char *data, + unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == __constant_htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; } -no_buffers: - if (rx_ring->next_to_use != i) { - rx_ring->next_to_use = i; - ixgbe_release_rx_desc(rx_ring, i); + /* handle L3 protocols */ + if (protocol == __constant_htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol */ + nexthdr = hdr.ipv4->protocol; + hdr.network += hlen; +#ifdef IXGBE_FCOE + } else if (protocol == __constant_htons(ETH_P_FCOE)) { + if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) + return max_len; + hdr.network += FCOE_HEADER_LEN; +#endif + } else { + return hdr.network - data; } + + /* finally sort out TCP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; + + hdr.network += hlen; + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; } -static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) +static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - /* HW will not DMA in data larger than the given buffer, even if it - * parses the (NFS, of course) header to be larger. In that case, it - * fills the header buffer and spills the rest into the page. 
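/*
 * [Editor's note] Worked numbers for the header math in
 * ixgbe_get_headlen() above: an IPv4 first byte of 0x45 yields
 * (0x45 & 0x0F) << 2 == 20 bytes; a TCP data-offset byte of 0x50 yields
 * (0x50 & 0xF0) >> 2 == 20 bytes. Reading through u8 keeps the access
 * alignment-safe on ia64, as the function's comments note.
 */
static unsigned int example_ipv4_hlen(const unsigned char *hdr)
{
	return (hdr[0] & 0x0F) << 2;	/* 0x45 -> 20 */
}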
- */ - u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); - u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; - if (hlen > IXGBE_RX_HDR_SIZE) - hlen = IXGBE_RX_HDR_SIZE; - return hlen; + __le32 rsc_enabled; + u32 rsc_cnt; + + if (!ring_is_rsc_enabled(rx_ring)) + return; + + rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + /* If this is an RSC frame rsc_cnt should be non-zero */ + if (!rsc_enabled) + return; + + rsc_cnt = le32_to_cpu(rsc_enabled); + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; +} + +static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, + struct sk_buff *skb) +{ + u16 hdr_len = skb_headlen(skb); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), + IXGBE_CB(skb)->append_cnt); +} + +static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if append_cnt is 0 then frame is not RSC */ + if (!IXGBE_CB(skb)->append_cnt) + return; + + rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + + ixgbe_set_rsc_gso_size(rx_ring, skb); + + /* gso_size is computed using append_cnt so always clear it last */ + IXGBE_CB(skb)->append_cnt = 0; +} + +/** + * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + ixgbe_update_rsc_stats(rx_ring, skb); + + ixgbe_rx_hash(rx_ring, rx_desc, skb); + + ixgbe_rx_checksum(rx_ring, rx_desc, skb); + + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); + __vlan_hwaccel_put_tag(skb, vid); + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, + struct sk_buff *skb) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; + + if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) + napi_gro_receive(&q_vector->napi, skb); + else + netif_rx(skb); } /** - * ixgbe_transform_rsc_queue - change rsc queue into a full packet - * @skb: pointer to the last skb in the rsc queue + * ixgbe_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress * - * This function changes a queue full of hw rsc buffers into a completed - * packet. It uses the ->prev pointers to find the first packet and then - * turns it into the frag list owner. + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. 
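/*
 * [Editor's note] Arithmetic behind ixgbe_set_rsc_gso_size() above with
 * assumed values: an RSC-merged skb of len 4434 with a 66-byte header
 * and append_cnt == 3 gives DIV_ROUND_UP(4434 - 66, 3) == 1456, i.e.
 * the per-segment MSS the stack expects. Sketch only.
 */
static unsigned int example_rsc_gso_size(void)
{
	unsigned int len = 4434, hdr_len = 66, append_cnt = 3;

	return DIV_ROUND_UP(len - hdr_len, append_cnt);	/* 1456 */
}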
**/ -static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) +static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - unsigned int frag_list_size = 0; - unsigned int skb_cnt = 1; + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(IXGBE_RX_DESC(rx_ring, ntc)); + + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + + /* append_cnt indicates packet is RSC, if so fetch nextp */ + if (IXGBE_CB(skb)->append_cnt) { + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } + + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * ixgbe_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + struct net_device *netdev = rx_ring->netdev; + unsigned char *va; + unsigned int pull_len; + + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; + + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbe_test_staterr(rx_desc, + IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_any(skb); + return true; + } + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = skb_frag_size(frag); + if (pull_len > 256) + pull_len = ixgbe_get_headlen(va, pull_len); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + + /* + * if we sucked the frag empty then we should free it, + * if there are other frags here something is screwed up in hardware + */ + if (skb_frag_size(frag) == 0) { + BUG_ON(skb_shinfo(skb)->nr_frags != 1); + skb_shinfo(skb)->nr_frags = 0; + __skb_frag_unref(frag); + skb->truesize -= ixgbe_rx_bufsz(rx_ring); + } + + /* if skb_pad returns an error the skb was freed */ + if (unlikely(skb->len < 60)) { + int pad_len = 60 - skb->len; - while (skb->prev) { - struct sk_buff *prev = skb->prev; - frag_list_size += skb->len; - skb->prev = NULL; - skb = prev; - skb_cnt++; + if (skb_pad(skb, pad_len)) + return true; + __skb_put(skb, pad_len); } - skb_shinfo(skb)->frag_list = skb->next; - skb->next = NULL; - skb->len += frag_list_size; - skb->data_len += frag_list_size; - skb->truesize += frag_list_size; - IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; + return false; +} + +/** + * ixgbe_can_reuse_page - determine if we can reuse a page + * @rx_buffer: pointer to rx_buffer containing the page we want to reuse + * + * Returns true if page can be reused in another Rx buffer + **/ +static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer) +{ + struct page *page = rx_buffer->page; + + /* if we are only owner of page and it is local we can reuse it */ + return likely(page_count(page) == 1) && + likely(page_to_nid(page) == numa_node_id()); +} + +/** + * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Syncronizes page for reuse by the adapter + **/ +static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *old_buff) +{ + struct ixgbe_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + u16 bufsz = ixgbe_rx_bufsz(rx_ring); + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->page = old_buff->page; + new_buff->dma = old_buff->dma; - return skb; + /* flip page offset to other buffer and store to new_buff */ + new_buff->page_offset = old_buff->page_offset ^ bufsz; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, + new_buff->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* bump ref count on page before it is given to the stack */ + get_page(new_buff->page); } -static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) +/** + * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function is based on skb_add_rx_frag. I would have used that + * function however it doesn't handle the truesize case correctly since we + * are allocating more memory than might be used for a single receive. 
+ **/ +static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, + struct sk_buff *skb, int size) { - return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & - IXGBE_RXDADV_RSCCNT_MASK); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + rx_buffer->page, rx_buffer->page_offset, + size); + skb->len += size; + skb->data_len += size; + skb->truesize += ixgbe_rx_bufsz(rx_ring); } +/** + * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * + * Returns true if all work is completed without reaching budget + **/ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, int budget) { - struct ixgbe_adapter *adapter = q_vector->adapter; - union ixgbe_adv_rx_desc *rx_desc, *next_rxd; - struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; - struct sk_buff *skb; unsigned int total_rx_bytes = 0, total_rx_packets = 0; - const int current_node = numa_node_id(); #ifdef IXGBE_FCOE + struct ixgbe_adapter *adapter = q_vector->adapter; int ddp_bytes = 0; #endif /* IXGBE_FCOE */ - u32 staterr; - u16 i; - u16 cleaned_count = 0; - bool pkt_is_rsc = false; - - i = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - - while (staterr & IXGBE_RXD_STAT_DD) { - u32 upper_len = 0; - - rmb(); /* read descriptor and rx_buffer_info after status DD */ - - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - - skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - prefetch(skb->data); - - if (ring_is_rsc_enabled(rx_ring)) - pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); - - /* linear means we are building an skb from multiple pages */ - if (!skb_is_nonlinear(skb)) { - u16 hlen; - if (pkt_is_rsc && - !(staterr & IXGBE_RXD_STAT_EOP) && - !skb->prev) { - /* - * When HWRSC is enabled, delay unmapping - * of the first packet. It carries the - * header information, HW may still - * access the header after the writeback. 
- * Only unmap it when EOP is reached - */ - IXGBE_RSC_CB(skb)->delay_unmap = true; - IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; - } else { - dma_unmap_single(rx_ring->dev, - rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - } - rx_buffer_info->dma = 0; + u16 cleaned_count = ixgbe_desc_unused(rx_ring); - if (ring_is_ps_enabled(rx_ring)) { - hlen = ixgbe_get_hlen(rx_desc); - upper_len = le16_to_cpu(rx_desc->wb.upper.length); - } else { - hlen = le16_to_cpu(rx_desc->wb.upper.length); - } + do { + struct ixgbe_rx_buffer *rx_buffer; + union ixgbe_adv_rx_desc *rx_desc; + struct sk_buff *skb; + struct page *page; + u16 ntc; - skb_put(skb, hlen); - } else { - /* assume packet split since header is unmapped */ - upper_len = le16_to_cpu(rx_desc->wb.upper.length); + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; } - if (upper_len) { - dma_unmap_page(rx_ring->dev, - rx_buffer_info->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer_info->page, - rx_buffer_info->page_offset, - upper_len); - - if ((page_count(rx_buffer_info->page) == 1) && - (page_to_nid(rx_buffer_info->page) == current_node)) - get_page(rx_buffer_info->page); - else - rx_buffer_info->page = NULL; + ntc = rx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC(rx_ring, ntc); + rx_buffer = &rx_ring->rx_buffer_info[ntc]; - skb->len += upper_len; - skb->data_len += upper_len; - skb->truesize += PAGE_SIZE / 2; - } + if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) + break; - i++; - if (i == rx_ring->count) - i = 0; + /* + * This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * RXD_STAT_DD bit is set + */ + rmb(); - next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i); - prefetch(next_rxd); - cleaned_count++; + page = rx_buffer->page; + prefetchw(page); - if (pkt_is_rsc) { - u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> - IXGBE_RXDADV_NEXTP_SHIFT; - next_buffer = &rx_ring->rx_buffer_info[nextp]; - } else { - next_buffer = &rx_ring->rx_buffer_info[i]; - } + skb = rx_buffer->skb; - if (!(staterr & IXGBE_RXD_STAT_EOP)) { - if (ring_is_ps_enabled(rx_ring)) { - rx_buffer_info->skb = next_buffer->skb; - rx_buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } else { - skb->next = next_buffer->skb; - skb->next->prev = skb; - } - rx_ring->rx_stats.non_eop_descs++; - goto next_desc; - } + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; - if (skb->prev) { - skb = ixgbe_transform_rsc_queue(skb); - /* if we got here without RSC the packet is invalid */ - if (!pkt_is_rsc) { - __pskb_trim(skb, 0); - rx_buffer_info->skb = skb; - goto next_desc; - } - } + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif - if (ring_is_rsc_enabled(rx_ring)) { - if (IXGBE_RSC_CB(skb)->delay_unmap) { - dma_unmap_single(rx_ring->dev, - IXGBE_RSC_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - IXGBE_RSC_CB(skb)->dma = 0; - IXGBE_RSC_CB(skb)->delay_unmap = false; + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + break; } - } - if (pkt_is_rsc) { - if 
(ring_is_ps_enabled(rx_ring)) - rx_ring->rx_stats.rsc_count += - skb_shinfo(skb)->nr_frags; - else - rx_ring->rx_stats.rsc_count += - IXGBE_RSC_CB(skb)->skb_cnt; - rx_ring->rx_stats.rsc_flush++; + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. Only unmap it when EOP is + * reached + */ + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); } - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { - dev_kfree_skb_any(skb); - goto next_desc; + /* pull page into skb */ + ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, + le16_to_cpu(rx_desc->wb.upper.length)); + + if (ixgbe_can_reuse_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); } - ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); - if (adapter->netdev->features & NETIF_F_RXHASH) - ixgbe_rx_hash(rx_desc, skb); + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); + + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) + continue; /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; total_rx_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, timestamp, VLAN, and protocol */ + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + #ifdef IXGBE_FCOE /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { - ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, - staterr); + ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); if (!ddp_bytes) { dev_kfree_skb_any(skb); - goto next_desc; + continue; } } + #endif /* IXGBE_FCOE */ - ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + ixgbe_rx_skb(q_vector, skb); + /* update budget accounting */ budget--; -next_desc: - rx_desc->wb.upper.status_error = 0; - - if (!budget) - break; - - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - cleaned_count = 0; - } - - /* use prefetched values */ - rx_desc = next_rxd; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - } - - rx_ring->next_to_clean = i; - cleaned_count = ixgbe_desc_unused(rx_ring); - - if (cleaned_count) - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + } while (likely(budget)); #ifdef IXGBE_FCOE /* include DDPed FCoE data */ @@ -1457,8 +1770,8 @@ next_desc: total_rx_bytes += ddp_bytes; total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); } -#endif /* IXGBE_FCOE */ 
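/*
 * [Editor's note] Refill cadence of the rewritten ixgbe_clean_rx_irq():
 * buffers go back to hardware in batches (IXGBE_RX_BUFFER_WRITE, 16 in
 * this driver) because each refill ends in an MMIO tail write. A
 * counting sketch with no hardware access, illustrative only:
 */
static int example_tail_writes(int completed)
{
	int cleaned = 0, writes = 0, i;

	for (i = 0; i < completed; i++) {
		if (++cleaned >= 16) {	/* batch threshold */
			writes++;	/* one tail write  */
			cleaned = 0;
		}
	}
	if (cleaned)
		writes++;		/* final partial batch */
	return writes;
}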
+#endif /* IXGBE_FCOE */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -1466,6 +1779,9 @@ next_desc: q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; + if (cleaned_count) + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + return !!budget; } @@ -1498,10 +1814,10 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) struct ixgbe_ring *ring; q_vector = adapter->q_vector[v_idx]; - for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next) + ixgbe_for_each_ring(ring, q_vector->rx) ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); - for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next) + ixgbe_for_each_ring(ring, q_vector->tx) ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); if (q_vector->tx.ring && !q_vector->rx.ring) { @@ -1569,20 +1885,19 @@ enum latency_range { static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, struct ixgbe_ring_container *ring_container) { - u64 bytes_perint; - struct ixgbe_adapter *adapter = q_vector->adapter; int bytes = ring_container->total_bytes; int packets = ring_container->total_packets; u32 timepassed_us; + u64 bytes_perint; u8 itr_setting = ring_container->itr; if (packets == 0) return; /* simple throttlerate management - * 0-20MB/s lowest (100000 ints/s) - * 20-100MB/s low (20000 ints/s) - * 100-1249MB/s bulk (8000 ints/s) + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) + * 20-1249MB/s bulk (8000 ints/s) */ /* what was last interrupt timeslice? */ timepassed_us = q_vector->itr >> 2; @@ -1590,17 +1905,17 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, switch (itr_setting) { case lowest_latency: - if (bytes_perint > adapter->eitr_low) + if (bytes_perint > 10) itr_setting = low_latency; break; case low_latency: - if (bytes_perint > adapter->eitr_high) + if (bytes_perint > 20) itr_setting = bulk_latency; - else if (bytes_perint <= adapter->eitr_low) + else if (bytes_perint <= 10) itr_setting = lowest_latency; break; case bulk_latency: - if (bytes_perint <= adapter->eitr_high) + if (bytes_perint <= 20) itr_setting = low_latency; break; } @@ -1626,7 +1941,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; int v_idx = q_vector->v_idx; - u32 itr_reg = q_vector->itr; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: @@ -1678,14 +1993,14 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) ((9 * new_itr) + q_vector->itr); /* save the algorithm value here */ - q_vector->itr = new_itr & IXGBE_MAX_EITR; + q_vector->itr = new_itr; ixgbe_write_eitr(q_vector); } } /** - * ixgbe_check_overtemp_subtask - check for over tempurature + * ixgbe_check_overtemp_subtask - check for over temperature * @adapter: pointer to adapter **/ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) @@ -1997,76 +2312,53 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) return IRQ_HANDLED; } -static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, - int r_idx) -{ - struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; - struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; - - rx_ring->q_vector = q_vector; - rx_ring->next = q_vector->rx.ring; - q_vector->rx.ring = rx_ring; - q_vector->rx.count++; -} - -static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, - int t_idx) -{ - struct 
ixgbe_q_vector *q_vector = a->q_vector[v_idx]; - struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; - - tx_ring->q_vector = q_vector; - tx_ring->next = q_vector->tx.ring; - q_vector->tx.ring = tx_ring; - q_vector->tx.count++; - q_vector->tx.work_limit = a->tx_work_limit; -} - /** - * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors - * @adapter: board private structure to initialize + * ixgbe_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean * - * This function maps descriptor rings to the queue-specific vectors - * we were allotted through the MSI-X enabling code. Ideally, we'd have - * one vector per ring/queue, but on a constrained vector budget, we - * group the rings as "efficiently" as possible. You would add new - * mapping configurations in here. + * This function is used for legacy and MSI, NAPI mode **/ -static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) +int ixgbe_poll(struct napi_struct *napi, int budget) { - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0; - int txr_remaining = adapter->num_tx_queues, txr_idx = 0; - int v_start = 0; + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; + int per_ring_budget; + bool clean_complete = true; - /* only one q_vector if MSI-X is disabled. */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif - /* - * If we don't have enough vectors for a 1-to-1 mapping, we'll have to - * group them so there are multiple queues per vector. - * - * Re-adjusting *qpv takes care of the remainder. 
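The mapping helper whose comment ends above hands each vector DIV_ROUND_UP(rings_left, vectors_left) rings, which is what "re-adjusting *qpv takes care of the remainder" refers to: nothing is left unassigned and no vector gets more than one ring above its fair share. The same ceiling-division trick appears to carry over into the replacement allocation code (an assumption based on where these helpers move in this series). A standalone demonstration of the arithmetic:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int rings = 10, vectors = 4, ring = 0;

        for (int v = 0; v < vectors && rings; v++) {
            /* rings assigned to this vector; shrinks as vectors run out */
            int rqpv = DIV_ROUND_UP(rings, vectors - v);

            printf("vector %d:", v);
            for (; rqpv; rqpv--, rings--)
                printf(" ring %d", ring++);
            printf("\n");
        }
        return 0;   /* splits 10 rings over 4 vectors as 3/3/2/2 */
    }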
- */ - for (; v_start < q_vectors && rxr_remaining; v_start++) { - int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start); - for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--) - map_vector_to_rxq(adapter, v_start, rxr_idx); - } + ixgbe_for_each_ring(ring, q_vector->tx) + clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); - /* - * If there are not enough q_vectors for each ring to have it's own - * vector then we must pair up Rx/Tx on a each vector - */ - if ((v_start + txr_remaining) > q_vectors) - v_start = 0; + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; - for (; v_start < q_vectors && txr_remaining; v_start++) { - int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start); - for (; tqpv; tqpv--, txr_idx++, txr_remaining--) - map_vector_to_txq(adapter, v_start, txr_idx); - } + ixgbe_for_each_ring(ring, q_vector->rx) + clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, + per_ring_budget); + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + + return 0; } /** @@ -2112,14 +2404,14 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { /* assign the mask for this irq */ irq_set_affinity_hint(entry->vector, - q_vector->affinity_mask); + &q_vector->affinity_mask); } } err = request_irq(adapter->msix_entries[vector].vector, ixgbe_msix_other, 0, netdev->name, adapter); if (err) { - e_err(probe, "request_irq for msix_lsc failed: %d\n", err); + e_err(probe, "request_irq for msix_other failed: %d\n", err); goto free_queue_irqs; } @@ -2153,7 +2445,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) u32 eicr; /* - * Workaround for silicon errata on 82598. Mask the interrupts + * Workaround for silicon errata #26 on 82598. Mask the interrupt * before the read of EICR. */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); @@ -2193,47 +2485,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); - if (napi_schedule_prep(&(q_vector->napi))) { - /* would disable interrupts here but EIAM disabled it */ - __napi_schedule(&(q_vector->napi)); - } + /* would disable interrupts here but EIAM disabled it */ + napi_schedule(&q_vector->napi); /* * re-enable link(maybe) and non-queue interrupts, no flush. 
* ixgbe_poll will re-enable the queue interrupts */ - if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable(adapter, false, false); return IRQ_HANDLED; } -static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter) -{ - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - int i; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - - for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->rx_ring[i]->q_vector = NULL; - adapter->rx_ring[i]->next = NULL; - } - for (i = 0; i < adapter->num_tx_queues; i++) { - adapter->tx_ring[i]->q_vector = NULL; - adapter->tx_ring[i]->next = NULL; - } - - for (i = 0; i < q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container)); - memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container)); - } -} - /** * ixgbe_request_irq - initialize interrupts * @adapter: board private structure @@ -2246,9 +2510,6 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) struct net_device *netdev = adapter->netdev; int err; - /* map all of the rings to the q_vectors */ - ixgbe_map_rings_to_vectors(adapter); - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) err = ixgbe_request_msix_irqs(adapter); else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) @@ -2258,13 +2519,9 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, netdev->name, adapter); - if (err) { + if (err) e_err(probe, "request_irq failed, Error %d\n", err); - /* place q_vectors and rings back into a known good state */ - ixgbe_reset_q_vectors(adapter); - } - return err; } @@ -2294,9 +2551,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) } else { free_irq(adapter->pdev->irq, adapter); } - - /* clear q_vector state information */ - ixgbe_reset_q_vectors(adapter); } /** @@ -2387,12 +2641,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * to or less than the number of on chip descriptors, which is * currently 40. */ - if (!adapter->tx_itr_setting || !adapter->rx_itr_setting) + if (!ring->q_vector || (ring->q_vector->itr < 8)) txdctl |= (1 << 16); /* WTHRESH = 1 */ else txdctl |= (8 << 16); /* WTHRESH = 8 */ - /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. 
*/ + /* + * Setting PTHRESH to 32 both improves performance + * and avoids a TX hang with DFP enabled + */ txdctl |= (1 << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ @@ -2411,6 +2668,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); + netdev_tx_reset_queue(txring_txq(ring)); + /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) @@ -2527,18 +2786,12 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & IXGBE_SRRCTL_BSIZEHDR_MASK; - if (ring_is_ps_enabled(rx_ring)) { -#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER - srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; +#if PAGE_SIZE > IXGBE_MAX_RXBUFFER + srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #else - srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #endif - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - } else { - srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - } + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); } @@ -2608,6 +2861,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | IXGBE_MRQC_RSS_FIELD_IPV6 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } @@ -2621,38 +2879,25 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, { struct ixgbe_hw *hw = &adapter->hw; u32 rscctrl; - int rx_buf_len; u8 reg_idx = ring->reg_idx; if (!ring_is_rsc_enabled(ring)) return; - rx_buf_len = ring->rx_buf_len; rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); rscctrl |= IXGBE_RSCCTL_RSCEN; /* * we must limit the number of descriptors so that the * total size of max desc * buf_len is not greater - * than 65535 + * than 65536 */ - if (ring_is_ps_enabled(ring)) { -#if (MAX_SKB_FRAGS > 16) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; -#elif (MAX_SKB_FRAGS > 8) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; -#elif (MAX_SKB_FRAGS > 4) - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; +#if (PAGE_SIZE <= 8192) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; +#elif (PAGE_SIZE <= 16384) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; #else - rscctrl |= IXGBE_RSCCTL_MAXDESC_1; + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; #endif - } else { - if (rx_buf_len < IXGBE_RXBUFFER_4K) - rscctrl |= IXGBE_RSCCTL_MAXDESC_16; - else if (rx_buf_len < IXGBE_RXBUFFER_8K) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; - else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; - } IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } @@ -2830,7 +3075,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); vf_shift = adapter->num_vfs % 32; - reg_offset = (adapter->num_vfs > 32) ? 1 : 0; + reg_offset = (adapter->num_vfs >= 32) ? 
1 : 0; /* Enable only the PF's pool for Tx/Rx */ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); @@ -2869,23 +3114,10 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - int rx_buf_len; struct ixgbe_ring *rx_ring; int i; u32 mhadd, hlreg0; - /* Decide whether to use packet split mode or not */ - /* On by default */ - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - - /* Do not use packet split if we're in SR-IOV Mode */ - if (adapter->num_vfs) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - - /* Disable packet split due to 82599 erratum #45 */ - if (hw->mac.type == ixgbe_mac_82599EB) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - #ifdef IXGBE_FCOE /* adjust max frame to be able to do baby jumbo for FCoE */ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && @@ -2904,27 +3136,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ max_frame += VLAN_HLEN; - /* Set the RX buffer length according to the mode */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_buf_len = IXGBE_RX_HDR_SIZE; - } else { - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && - (netdev->mtu <= ETH_DATA_LEN)) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - /* - * Make best use of allocation by using all but 1K of a - * power of 2 allocation that will be used for skb->head. - */ - else if (max_frame <= IXGBE_RXBUFFER_3K) - rx_buf_len = IXGBE_RXBUFFER_3K; - else if (max_frame <= IXGBE_RXBUFFER_7K) - rx_buf_len = IXGBE_RXBUFFER_7K; - else if (max_frame <= IXGBE_RXBUFFER_15K) - rx_buf_len = IXGBE_RXBUFFER_15K; - else - rx_buf_len = IXGBE_MAX_RXBUFFER; - } - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ hlreg0 |= IXGBE_HLREG0_JUMBOEN; @@ -2936,32 +3147,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) */ for (i = 0; i < adapter->num_rx_queues; i++) { rx_ring = adapter->rx_ring[i]; - rx_ring->rx_buf_len = rx_buf_len; - - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) - set_ring_ps_enabled(rx_ring); - else - clear_ring_ps_enabled(rx_ring); - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) set_ring_rsc_enabled(rx_ring); else clear_ring_rsc_enabled(rx_ring); - #ifdef IXGBE_FCOE if (netdev->features & NETIF_F_FCOE_MTU) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; - if ((i >= f->mask) && (i < f->mask + f->indices)) { - clear_ring_ps_enabled(rx_ring); - if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) - rx_ring->rx_buf_len = - IXGBE_FCOE_JUMBO_FRAME_SIZE; - } else if (!ring_is_rsc_enabled(rx_ring) && - !ring_is_ps_enabled(rx_ring)) { - rx_ring->rx_buf_len = - IXGBE_FCOE_JUMBO_FRAME_SIZE; - } + if ((i >= f->mask) && (i < f->mask + f->indices)) + set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state); } #endif /* IXGBE_FCOE */ } @@ -3235,6 +3430,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); /* set all bits that we expect to always be set */ + fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ fctrl |= IXGBE_FCTRL_BAM; fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ fctrl |= IXGBE_FCTRL_PMCF; @@ -3283,6 +3479,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); } + /* This is useful for sniffing bad packets. 
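+ * (One way to exercise this path, assuming the standard netdev
+ * feature strings, is "ethtool -K ethX rx-all on"; setting
+ * NETIF_F_RXALL re-runs this function so SBP gets turned on.)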
*/
+ if (adapter->netdev->features & NETIF_F_RXALL) {
+ /* UPE and MPE will be handled by normal PROMISC logic
+ * in ixgbe_set_rx_mode */
+ fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
+ IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
+ IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
+
+ fctrl &= ~(IXGBE_FCTRL_DPF);
+ /* NOTE: VLAN filtering is disabled by setting PROMISC */
+ }
+
 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
 if (netdev->features & NETIF_F_HW_VLAN_RX)
@@ -3554,6 +3762,8 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
 
 static void ixgbe_configure(struct ixgbe_adapter *adapter)
 {
+ struct ixgbe_hw *hw = &adapter->hw;
+
 ixgbe_configure_pb(adapter);
 #ifdef CONFIG_IXGBE_DCB
 ixgbe_configure_dcb(adapter);
@@ -3567,6 +3777,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 ixgbe_configure_fcoe(adapter);
 
 #endif /* IXGBE_FCOE */
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hw->mac.ops.disable_rx_buff(hw);
+ break;
+ default:
+ break;
+ }
+
 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
 ixgbe_init_fdir_signature_82599(&adapter->hw,
 adapter->fdir_pballoc);
@@ -3576,6 +3796,15 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 ixgbe_fdir_filter_restore(adapter);
 }
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ hw->mac.ops.enable_rx_buff(hw);
+ break;
+ default:
+ break;
+ }
+
 ixgbe_configure_virtualization(adapter);
 
 ixgbe_configure_tx(adapter);
@@ -3849,6 +4078,27 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 }
 
 /**
+ * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers
+ * @rx_ring: ring to setup
+ *
+ * On many IA platforms the L1 cache has a critical stride of 4K; this
+ * results in each receive buffer starting in the same cache set. To help
+ * reduce the pressure on this cache set we can interleave the offsets so
+ * that only every other buffer will be in the same cache set. 
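+ *
+ * With the default 2K receive buffers on a 4K page (an assumption;
+ * the real size comes from ixgbe_rx_bufsz()), the loop below yields
+ * offsets 0, 2048, 0, 2048, ... so consecutive buffers start 2K
+ * apart instead of all landing on the same 4K-strided cache set.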
+ **/ +static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring) +{ + struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info; + u16 i; + + for (i = 0; i < rx_ring->count; i += 2) { + rx_buffer[0].page_offset = 0; + rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring); + rx_buffer = &rx_buffer[2]; + } +} + +/** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ @@ -3864,50 +4114,40 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - struct ixgbe_rx_buffer *rx_buffer_info; - - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - if (rx_buffer_info->dma) { - dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_buffer_info->dma = 0; - } - if (rx_buffer_info->skb) { - struct sk_buff *skb = rx_buffer_info->skb; - rx_buffer_info->skb = NULL; - do { - struct sk_buff *this = skb; - if (IXGBE_RSC_CB(this)->delay_unmap) { - dma_unmap_single(dev, - IXGBE_RSC_CB(this)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - IXGBE_RSC_CB(this)->dma = 0; - IXGBE_RSC_CB(skb)->delay_unmap = false; - } - skb = skb->prev; - dev_kfree_skb(this); - } while (skb); - } - if (!rx_buffer_info->page) - continue; - if (rx_buffer_info->page_dma) { - dma_unmap_page(dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; + struct ixgbe_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + if (IXGBE_CB(skb)->page_released) { + dma_unmap_page(dev, + IXGBE_CB(skb)->dma, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } + dev_kfree_skb(skb); } - put_page(rx_buffer_info->page); - rx_buffer_info->page = NULL; - rx_buffer_info->page_offset = 0; + rx_buffer->skb = NULL; + if (rx_buffer->dma) + dma_unmap_page(dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + rx_buffer->dma = 0; + if (rx_buffer->page) + put_page(rx_buffer->page); + rx_buffer->page = NULL; } size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size); + ixgbe_init_rx_page_offset(rx_ring); + /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -4073,55 +4313,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /** - * ixgbe_poll - NAPI Rx polling callback - * @napi: structure for representing this polling device - * @budget: how many packets driver is allowed to clean - * - * This function is used for legacy and MSI, NAPI mode - **/ -static int ixgbe_poll(struct napi_struct *napi, int budget) -{ - struct ixgbe_q_vector *q_vector = - container_of(napi, struct ixgbe_q_vector, napi); - struct ixgbe_adapter *adapter = q_vector->adapter; - struct ixgbe_ring *ring; - int per_ring_budget; - bool clean_complete = true; - -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_dca(q_vector); -#endif - - for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); - - /* attempt to distribute budget to each queue fairly, but don't allow - * the budget to go below 1 because we'll exit polling */ - if (q_vector->rx.count > 1) - per_ring_budget = max(budget/q_vector->rx.count, 1); - else - per_ring_budget = budget; - - for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next) - 
clean_complete &= ixgbe_clean_rx_irq(q_vector, ring, - per_ring_budget); - - /* If all work not completed, return budget and keep polling */ - if (!clean_complete) - return budget; - - /* all work done, exit the polling mode */ - napi_complete(napi); - if (adapter->rx_itr_setting & 1) - ixgbe_set_itr(q_vector); - if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); - - return 0; -} - -/** * ixgbe_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure **/ @@ -4134,798 +4325,6 @@ static void ixgbe_tx_timeout(struct net_device *netdev) } /** - * ixgbe_set_rss_queues: Allocate queues for RSS - * @adapter: board private structure to initialize - * - * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try - * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. - * - **/ -static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - f->mask = 0xF; - adapter->num_rx_queues = f->indices; - adapter->num_tx_queues = f->indices; - ret = true; - } else { - ret = false; - } - - return ret; -} - -/** - * ixgbe_set_fdir_queues: Allocate queues for Flow Director - * @adapter: board private structure to initialize - * - * Flow Director is an advanced Rx filter, attempting to get Rx flows back - * to the original CPU that initiated the Tx session. This runs in addition - * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the - * Rx load across CPUs using RSS. - * - **/ -static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; - - f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices); - f_fdir->mask = 0; - - /* Flow Director must have RSS enabled */ - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { - adapter->num_tx_queues = f_fdir->indices; - adapter->num_rx_queues = f_fdir->indices; - ret = true; - } else { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - } - return ret; -} - -#ifdef IXGBE_FCOE -/** - * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) - * @adapter: board private structure to initialize - * - * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. - * The ring feature mask is not used as a mask for FCoE, as it can take any 8 - * rx queues out of the max number of rx queues, instead, it is used as the - * index of the first rx queue used by FCoE. 
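As the sentence above says, RING_F_FCOE repurposes f->mask as the index of the first FCoE rx ring rather than as a bitmask: the FCoE rings are simply appended after the RSS/FDIR rings, which is why the cache-ring code further down indexes adapter->rx_ring[f->mask + i]. A toy illustration of that layout, with made-up counts:

    #include <stdio.h>

    int main(void)
    {
        int rss_queues = 4;          /* rings 0..3 carry ordinary traffic */
        int fcoe_indices = 2;        /* hardware allows up to 8 (FCRETA) */
        int fcoe_mask = rss_queues;  /* "mask" == first FCoE ring index */

        for (int i = 0; i < fcoe_indices; i++)
            printf("FCoE index %d -> rx ring %d\n", i, fcoe_mask + i);
        return 0;
    }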
- * - **/ -static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - f->indices = min((int)num_online_cpus(), f->indices); - - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - e_info(probe, "FCoE enabled with RSS\n"); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - ixgbe_set_fdir_queues(adapter); - else - ixgbe_set_rss_queues(adapter); - } - - /* adding FCoE rx rings to the end */ - f->mask = adapter->num_rx_queues; - adapter->num_rx_queues += f->indices; - adapter->num_tx_queues += f->indices; - - return true; -} -#endif /* IXGBE_FCOE */ - -/* Artificial max queue cap per traffic class in DCB mode */ -#define DCB_QUEUE_CAP 8 - -#ifdef CONFIG_IXGBE_DCB -static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) -{ - int per_tc_q, q, i, offset = 0; - struct net_device *dev = adapter->netdev; - int tcs = netdev_get_num_tc(dev); - - if (!tcs) - return false; - - /* Map queue offset and counts onto allocated tx queues */ - per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP); - q = min((int)num_online_cpus(), per_tc_q); - - for (i = 0; i < tcs; i++) { - netdev_set_tc_queue(dev, i, q, offset); - offset += q; - } - - adapter->num_tx_queues = q * tcs; - adapter->num_rx_queues = q * tcs; - -#ifdef IXGBE_FCOE - /* FCoE enabled queues require special configuration indexed - * by feature specific indices and mask. Here we map FCoE - * indices onto the DCB queue pairs allowing FCoE to own - * configuration later. - */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - int tc; - struct ixgbe_ring_feature *f = - &adapter->ring_feature[RING_F_FCOE]; - - tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); - f->indices = dev->tc_to_txq[tc].count; - f->mask = dev->tc_to_txq[tc].offset; - } -#endif - - return true; -} -#endif - -/** - * ixgbe_set_sriov_queues: Allocate queues for IOV use - * @adapter: board private structure to initialize - * - * IOV doesn't actually use anything, so just NAK the - * request for now and let the other queue routines - * figure out what to do. - */ -static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) -{ - return false; -} - -/* - * ixgbe_set_num_queues: Allocate queues for device, feature dependent - * @adapter: board private structure to initialize - * - * This is the top level queue allocation routine. The order here is very - * important, starting with the "most" number of features turned on at once, - * and ending with the smallest set of features. This way large combinations - * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. 
- * - **/ -static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) -{ - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; - - if (ixgbe_set_sriov_queues(adapter)) - goto done; - -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_set_dcb_queues(adapter)) - goto done; - -#endif -#ifdef IXGBE_FCOE - if (ixgbe_set_fcoe_queues(adapter)) - goto done; - -#endif /* IXGBE_FCOE */ - if (ixgbe_set_fdir_queues(adapter)) - goto done; - - if (ixgbe_set_rss_queues(adapter)) - goto done; - - /* fallback to base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - -done: - /* Notify the stack of the (possibly) reduced queue counts. */ - netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - return netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); -} - -static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) -{ - int err, vector_threshold; - - /* We'll want at least 3 (vector_threshold): - * 1) TxQ[0] Cleanup - * 2) RxQ[0] Cleanup - * 3) Other (Link Status Change, etc.) - * 4) TCP Timer (optional) - */ - vector_threshold = MIN_MSIX_COUNT; - - /* The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. - */ - while (vectors >= vector_threshold) { - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, - vectors); - if (!err) /* Success in acquiring all requested vectors. */ - break; - else if (err < 0) - vectors = 0; /* Nasty failure, quit now */ - else /* err == number of vectors we should try again with */ - vectors = err; - } - - if (vectors < vector_threshold) { - /* Can't allocate enough MSI-X interrupts? Oh well. - * This just means we'll go with either a single MSI - * vector or fall back to legacy interrupts. - */ - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI-X interrupts\n"); - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else { - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - adapter->num_msix_vectors = min(vectors, - adapter->max_msix_q_vectors + NON_Q_VECTORS); - } -} - -/** - * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for RSS to the assigned rings. 
- * - **/ -static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) -{ - int i; - - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return false; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - - return true; -} - -#ifdef CONFIG_IXGBE_DCB - -/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ -static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, - unsigned int *tx, unsigned int *rx) -{ - struct net_device *dev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; - u8 num_tcs = netdev_get_num_tc(dev); - - *tx = 0; - *rx = 0; - - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - *tx = tc << 2; - *rx = tc << 3; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (num_tcs > 4) { - if (tc < 3) { - *tx = tc << 5; - *rx = tc << 4; - } else if (tc < 5) { - *tx = ((tc + 2) << 4); - *rx = tc << 4; - } else if (tc < num_tcs) { - *tx = ((tc + 8) << 3); - *rx = tc << 4; - } - } else { - *rx = tc << 5; - switch (tc) { - case 0: - *tx = 0; - break; - case 1: - *tx = 64; - break; - case 2: - *tx = 96; - break; - case 3: - *tx = 112; - break; - default: - break; - } - } - break; - default: - break; - } -} - -/** - * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for DCB to the assigned rings. - * - **/ -static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) -{ - struct net_device *dev = adapter->netdev; - int i, j, k; - u8 num_tcs = netdev_get_num_tc(dev); - - if (!num_tcs) - return false; - - for (i = 0, k = 0; i < num_tcs; i++) { - unsigned int tx_s, rx_s; - u16 count = dev->tc_to_txq[i].count; - - ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); - for (j = 0; j < count; j++, k++) { - adapter->tx_ring[k]->reg_idx = tx_s + j; - adapter->rx_ring[k]->reg_idx = rx_s + j; - adapter->tx_ring[k]->dcb_tc = i; - adapter->rx_ring[k]->dcb_tc = i; - } - } - - return true; -} -#endif - -/** - * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for Flow Director to the assigned rings. - * - **/ -static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) -{ - int i; - bool ret = false; - - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - ret = true; - } - - return ret; -} - -#ifdef IXGBE_FCOE -/** - * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
- * - */ -static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - int i; - u8 fcoe_rx_i = 0, fcoe_tx_i = 0; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - ixgbe_cache_ring_fdir(adapter); - else - ixgbe_cache_ring_rss(adapter); - - fcoe_rx_i = f->mask; - fcoe_tx_i = f->mask; - } - for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { - adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; - adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; - } - return true; -} - -#endif /* IXGBE_FCOE */ -/** - * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov - * @adapter: board private structure to initialize - * - * SR-IOV doesn't use any descriptor rings but changes the default if - * no other mapping is used. - * - */ -static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) -{ - adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; - adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; - if (adapter->num_vfs) - return true; - else - return false; -} - -/** - * ixgbe_cache_ring_register - Descriptor ring to register mapping - * @adapter: board private structure to initialize - * - * Once we know the feature-set enabled for the device, we'll cache - * the register offset the descriptor ring is assigned to. - * - * Note, the order the various feature calls is important. It must start with - * the "most" features enabled at the same time, then trickle down to the - * least amount of features turned on at once. - **/ -static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) -{ - /* start with default case */ - adapter->rx_ring[0]->reg_idx = 0; - adapter->tx_ring[0]->reg_idx = 0; - - if (ixgbe_cache_ring_sriov(adapter)) - return; - -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_cache_ring_dcb(adapter)) - return; -#endif - -#ifdef IXGBE_FCOE - if (ixgbe_cache_ring_fcoe(adapter)) - return; -#endif /* IXGBE_FCOE */ - - if (ixgbe_cache_ring_fdir(adapter)) - return; - - if (ixgbe_cache_ring_rss(adapter)) - return; -} - -/** - * ixgbe_alloc_queues - Allocate memory for all rings - * @adapter: board private structure to initialize - * - * We allocate one ring per queue at run-time since we don't know the - * number of queues at compile-time. The polling_netdev array is - * intended for Multiqueue, but should work fine with a single queue. 
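The allocator whose comment ends above follows a try-local-then-fallback pattern (kzalloc_node, then plain kzalloc) and, on failure, unwinds every ring allocated so far through its err_allocation label. The unwind is the part that is easy to get wrong; a reduced user-space version, with malloc standing in for the kernel allocators:

    #include <stdlib.h>

    /* Allocate n zeroed objects of 'size' bytes into slots[].  On failure,
     * free everything already allocated -- mirroring the err_allocation
     * unwind below -- and report -1. */
    static int alloc_all(void **slots, int n, size_t size)
    {
        int i;

        for (i = 0; i < n; i++) {
            slots[i] = calloc(1, size);
            if (!slots[i]) {
                while (i--)
                    free(slots[i]);
                return -1;
            }
        }
        return 0;
    }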
- **/ -static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) -{ - int rx = 0, tx = 0, nid = adapter->node; - - if (nid < 0 || !node_online(nid)) - nid = first_online_node; - - for (; tx < adapter->num_tx_queues; tx++) { - struct ixgbe_ring *ring; - - ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); - if (!ring) - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_allocation; - ring->count = adapter->tx_ring_count; - ring->queue_index = tx; - ring->numa_node = nid; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - - adapter->tx_ring[tx] = ring; - } - - for (; rx < adapter->num_rx_queues; rx++) { - struct ixgbe_ring *ring; - - ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); - if (!ring) - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - goto err_allocation; - ring->count = adapter->rx_ring_count; - ring->queue_index = rx; - ring->numa_node = nid; - ring->dev = &adapter->pdev->dev; - ring->netdev = adapter->netdev; - - adapter->rx_ring[rx] = ring; - } - - ixgbe_cache_ring_register(adapter); - - return 0; - -err_allocation: - while (tx) - kfree(adapter->tx_ring[--tx]); - - while (rx) - kfree(adapter->rx_ring[--rx]); - return -ENOMEM; -} - -/** - * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported - * @adapter: board private structure to initialize - * - * Attempt to configure the interrupts using the best available - * capabilities of the hardware and the kernel. - **/ -static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - int err = 0; - int vector, v_budget; - - /* - * It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. - */ - v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)num_online_cpus()) + NON_Q_VECTORS; - - /* - * At the same time, hardware can only support a maximum of - * hw.mac->max_msix_vectors vectors. With features - * such as RSS and VMDq, we can easily surpass the number of Rx and Tx - * descriptor queues supported by our device. Thus, we cap it off in - * those rare cases where the cpu count also exceeds our vector limit. - */ - v_budget = min(v_budget, (int)hw->mac.max_msix_vectors); - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (adapter->msix_entries) { - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - ixgbe_acquire_msix_vectors(adapter, v_budget); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - goto out; - } - - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - e_err(probe, - "ATR is not supported while multiple " - "queues are disabled. Disabling Flow Director\n"); - } - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->atr_sample_rate = 0; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); - - err = ixgbe_set_num_queues(adapter); - if (err) - return err; - - err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; - } else { - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI interrupt, " - "falling back to legacy. 
Error: %d\n", err); - /* reset err */ - err = 0; - } - -out: - return err; -} - -/** - * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors - * @adapter: board private structure to initialize - * - * We allocate one q_vector per queue interrupt. If allocation fails we - * return -ENOMEM. - **/ -static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) -{ - int v_idx, num_q_vectors; - struct ixgbe_q_vector *q_vector; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_q_vectors = 1; - - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector), - GFP_KERNEL, adapter->node); - if (!q_vector) - q_vector = kzalloc(sizeof(struct ixgbe_q_vector), - GFP_KERNEL); - if (!q_vector) - goto err_out; - - q_vector->adapter = adapter; - q_vector->v_idx = v_idx; - - /* Allocate the affinity_hint cpumask, configure the mask */ - if (!alloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) - goto err_out; - cpumask_set_cpu(v_idx, q_vector->affinity_mask); - netif_napi_add(adapter->netdev, &q_vector->napi, - ixgbe_poll, 64); - adapter->q_vector[v_idx] = q_vector; - } - - return 0; - -err_out: - while (v_idx) { - v_idx--; - q_vector = adapter->q_vector[v_idx]; - netif_napi_del(&q_vector->napi); - free_cpumask_var(q_vector->affinity_mask); - kfree(q_vector); - adapter->q_vector[v_idx] = NULL; - } - return -ENOMEM; -} - -/** - * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors - * @adapter: board private structure to initialize - * - * This function frees the memory allocated to the q_vectors. In addition if - * NAPI is enabled it will delete any references to the NAPI struct prior - * to freeing the q_vector. - **/ -static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) -{ - int v_idx, num_q_vectors; - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_q_vectors = 1; - - for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; - adapter->q_vector[v_idx] = NULL; - netif_napi_del(&q_vector->napi); - free_cpumask_var(q_vector->affinity_mask); - kfree(q_vector); - } -} - -static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) -{ - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; - pci_disable_msix(adapter->pdev); - kfree(adapter->msix_entries); - adapter->msix_entries = NULL; - } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { - adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; - pci_disable_msi(adapter->pdev); - } -} - -/** - * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme - * @adapter: board private structure to initialize - * - * We determine which interrupt scheme to use based on... - * - Kernel support (MSI, MSI-X) - * - which can be user-defined (via MODULE_PARAM) - * - Hardware queue count (num_*_queues) - * - defined by miscellaneous hardware support/features (RSS, etc.) 
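The function documented above (removed here and, judging by the surrounding deletions, carried on in ixgbe_lib.c; treat that as an assumption) is a textbook case of the kernel's goto-ladder error handling: each failure label undoes only the steps that already succeeded, in reverse order. The idiom reduced to a compilable skeleton, with hypothetical step names:

    /* Hypothetical steps; only the control flow mirrors the driver. */
    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; }  /* pretend this step fails */
    static void undo_a(void) { }

    static int setup(void)
    {
        int err;

        err = step_a();
        if (err)
            goto err_a;

        err = step_b();
        if (err)
            goto err_b;

        return 0;

    err_b:
        undo_a();       /* reverse of step_a */
    err_a:
        return err;
    }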
- **/ -int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - int err; - - /* Number of supported queues */ - err = ixgbe_set_num_queues(adapter); - if (err) - return err; - - err = ixgbe_set_interrupt_capability(adapter); - if (err) { - e_dev_err("Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } - - err = ixgbe_alloc_q_vectors(adapter); - if (err) { - e_dev_err("Unable to allocate memory for queue vectors\n"); - goto err_alloc_q_vectors; - } - - err = ixgbe_alloc_queues(adapter); - if (err) { - e_dev_err("Unable to allocate memory for queues\n"); - goto err_alloc_queues; - } - - e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", - (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", - adapter->num_rx_queues, adapter->num_tx_queues); - - set_bit(__IXGBE_DOWN, &adapter->state); - - return 0; - -err_alloc_queues: - ixgbe_free_q_vectors(adapter); -err_alloc_q_vectors: - ixgbe_reset_interrupt_capability(adapter); -err_set_interrupt: - return err; -} - -/** - * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings - * @adapter: board private structure to clear interrupt scheme on - * - * We go through and clear interrupt specific resources and reset the structure - * to pre-load conditions - **/ -void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) -{ - int i; - - for (i = 0; i < adapter->num_tx_queues; i++) { - kfree(adapter->tx_ring[i]); - adapter->tx_ring[i] = NULL; - } - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; - - /* ixgbe_get_stats64() might access this ring, we must wait - * a grace period before freeing it. - */ - kfree_rcu(ring, rcu); - adapter->rx_ring[i] = NULL; - } - - adapter->num_tx_queues = 0; - adapter->num_rx_queues = 0; - - ixgbe_free_q_vectors(adapter); - ixgbe_reset_interrupt_capability(adapter); -} - -/** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize * @@ -4952,7 +4351,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->subsystem_device_id = pdev->subsystem_device; /* Set capability flags */ - rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); + rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_RSS].indices = rss; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; switch (hw->mac.type) { @@ -5044,10 +4443,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; - /* set defaults for eitr in MegaBytes */ - adapter->eitr_low = 10; - adapter->eitr_high = 20; - /* set default ring sizes */ adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; @@ -5061,12 +4456,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) return -EIO; } - /* enable rx csum by default */ - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - - /* get assigned NUMA node */ - adapter->node = dev_to_node(&pdev->dev); - set_bit(__IXGBE_DOWN, &adapter->state); return 0; @@ -5081,10 +4470,16 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); + + if (tx_ring->q_vector) + numa_node = 
tx_ring->q_vector->numa_node; + + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); if (!tx_ring->tx_buffer_info) tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) @@ -5094,8 +4489,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, + tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; @@ -5144,10 +4546,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = -1; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) @@ -5157,15 +4565,23 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, + rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) goto err; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + ixgbe_init_rx_page_offset(rx_ring); + return 0; err: vfree(rx_ring->rx_buffer_info); @@ -5285,20 +4701,24 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; /* MTU < 68 is an error and causes problems on some kernels */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && - hw->mac.type != ixgbe_mac_X540) { - if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) - return -EINVAL; - } else { - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + + /* + * For 82599EB we cannot allow PF to change MTU greater than 1500 + * in SR-IOV mode as it may cause buffer overruns in guest VFs that + * don't allocate and chain buffers correctly. 
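+ *
+ * Worked example: new_mtu == 1500 gives max_frame == 1518 (MTU +
+ * ETH_HLEN + ETH_FCS_LEN), within MAXIMUM_ETHERNET_VLAN_SIZE (1522),
+ * so it is allowed; a jumbo new_mtu of 9000 gives max_frame == 9018
+ * and is refused while SR-IOV is active on 82599.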
+ */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (adapter->hw.mac.type == ixgbe_mac_82599EB) && + (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) return -EINVAL; - } e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; @@ -5554,7 +4974,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; - u64 bytes = 0, packets = 0; + u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; #ifdef IXGBE_FCOE struct ixgbe_fcoe *fcoe = &adapter->fcoe; unsigned int cpu; @@ -5584,12 +5004,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) non_eop_descs += rx_ring->rx_stats.non_eop_descs; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; bytes += rx_ring->stats.bytes; packets += rx_ring->stats.packets; } adapter->non_eop_descs = non_eop_descs; adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; netdev->stats.rx_bytes = bytes; netdev->stats.rx_packets = packets; @@ -5941,7 +5363,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) * print link down message * @adapter - pointer to the adapter structure **/ -static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) +static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; @@ -6186,41 +5608,32 @@ static void ixgbe_service_timer(unsigned long data) unsigned long next_event_offset; bool ready = true; -#ifdef CONFIG_PCI_IOV - ready = false; + /* poll faster when waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ * 2; +#ifdef CONFIG_PCI_IOV /* * don't bother with SR-IOV VF DMA hang check if there are * no VFs or the link is down */ if (!adapter->num_vfs || - (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) { - ready = true; + (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) goto normal_timer_service; - } /* If we have VFs allocated then we must check for DMA hangs */ ixgbe_check_for_bad_vf(adapter); next_event_offset = HZ / 50; adapter->timer_event_accumulator++; - if (adapter->timer_event_accumulator >= 100) { - ready = true; + if (adapter->timer_event_accumulator >= 100) adapter->timer_event_accumulator = 0; - } - - goto schedule_event; - -normal_timer_service: -#endif - /* poll faster when waiting for link */ - if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) - next_event_offset = HZ / 10; else - next_event_offset = HZ * 2; + ready = false; -#ifdef CONFIG_PCI_IOV -schedule_event: +normal_timer_service: #endif /* Reset the timer */ mod_timer(&adapter->service_timer, next_event_offset + jiffies); @@ -6269,30 +5682,11 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); } -void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, - u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx) -{ - struct ixgbe_adv_tx_context_desc *context_desc; - u16 i = tx_ring->next_to_use; - - context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i); - - i++; - tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; - - /* set bits to identify this as an advanced context descriptor */ - type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof); - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); -} - -static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol, u8 *hdr_len) +static int ixgbe_tso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) { - int err; + struct sk_buff *skb = first->skb; u32 vlan_macip_lens, type_tucmd; u32 mss_l4len_idx, l4len; @@ -6300,7 +5694,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, return 0; if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) return err; } @@ -6308,7 +5702,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == __constant_htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -6317,17 +5711,27 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, IPPROTO_TCP, 0); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; } else if (skb_is_gso_v6(skb)) { ipv6_hdr(skb)->payload_len = 0; tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; } + /* compute header lengths */ l4len = tcp_hdrlen(skb); *hdr_len = skb_transport_offset(skb) + l4len; + /* update gso size and bytecount with header size */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + /* mss_l4len_id: use 1 as index for TSO */ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; @@ -6336,29 +5740,29 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = skb_network_header_len(skb); vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; - vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, - mss_l4len_idx); + mss_l4len_idx); return 1; } -static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - __be16 protocol) +static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first) { + struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(tx_flags & IXGBE_TX_FLAGS_TXSW)) - return false; + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) + return; } else { u8 l4_hdr = 0; - switch (protocol) { + switch (first->protocol) { case __constant_htons(ETH_P_IP): vlan_macip_lens |= skb_network_header_len(skb); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; @@ -6372,7 +5776,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, if 
(unlikely(net_ratelimit())) {
 dev_warn(tx_ring->dev,
 "partial checksum but proto=%x!\n",
- skb->protocol);
+ first->protocol);
 }
 break;
 }
@@ -6396,19 +5800,21 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 if (unlikely(net_ratelimit())) {
 dev_warn(tx_ring->dev,
 "partial checksum but l4 proto=%x!\n",
- skb->protocol);
+ l4_hdr);
 }
 break;
 }
+
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
 }
 
+ /* vlan_macip_lens: MACLEN, VLAN tag */
 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
- vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
 type_tucmd, mss_l4len_idx);
-
- return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
@@ -6424,7 +5830,7 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 
 /* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
- if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO))
+ if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
 #else
 if (tx_flags & IXGBE_TX_FLAGS_TSO)
 #endif
@@ -6433,200 +5839,192 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
 return cmd_type;
 }
 
-static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen)
+static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+ u32 tx_flags, unsigned int paylen)
 {
- __le32 olinfo_status =
- cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
- if (tx_flags & IXGBE_TX_FLAGS_TSO) {
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM |
- (1 << IXGBE_ADVTXD_IDX_SHIFT));
- /* enble IPv4 checksum for TSO */
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- }
+ __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
 /* enable L4 checksum for TSO and TX checksum offload */
 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
 
-#ifdef IXGBE_FCOE
- /* use index 1 context for FCOE/FSO */
- if (tx_flags & IXGBE_TX_FLAGS_FCOE)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC |
- (1 << IXGBE_ADVTXD_IDX_SHIFT));
+ /* enable IPv4 checksum for TSO */
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
 
+ /* use index 1 context for TSO/FSO/FCOE */
+#ifdef IXGBE_FCOE
+ if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
+#else
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
 #endif
+ olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
 /*
 * Check Context must be set if Tx switch is enabled, which it
 * always is for case where virtual functions are running
 */
+#ifdef IXGBE_FCOE
+ if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
+#else
 if (tx_flags & IXGBE_TX_FLAGS_TXSW)
+#endif
 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
 
- return olinfo_status;
+ tx_desc->read.olinfo_status = olinfo_status;
 }
 
 #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
 IXGBE_TXD_CMD_RS)
 
 static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
- struct sk_buff *skb,
 struct ixgbe_tx_buffer *first,
- u32 tx_flags,
 const u8 hdr_len)
 {
- struct device *dev = tx_ring->dev;
- struct ixgbe_tx_buffer *tx_buffer_info;
- union ixgbe_adv_tx_desc *tx_desc;
 dma_addr_t dma;
- __le32 cmd_type, olinfo_status;
- struct skb_frag_struct *frag;
- unsigned int f = 0;
+ struct sk_buff *skb = first->skb;
+ struct ixgbe_tx_buffer *tx_buffer;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 unsigned int data_len = 
skb->data_len; unsigned int size = skb_headlen(skb); - u32 offset = 0; - u32 paylen = skb->len - hdr_len; + unsigned int paylen = skb->len - hdr_len; + u32 tx_flags = first->tx_flags; + __le32 cmd_type; u16 i = tx_ring->next_to_use; - u16 gso_segs; + + tx_desc = IXGBE_TX_DESC(tx_ring, i); + + ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); + cmd_type = ixgbe_tx_cmd_type(tx_flags); #ifdef IXGBE_FCOE if (tx_flags & IXGBE_TX_FLAGS_FCOE) { - if (data_len >= sizeof(struct fcoe_crc_eof)) { - data_len -= sizeof(struct fcoe_crc_eof); - } else { + if (data_len < sizeof(struct fcoe_crc_eof)) { size -= sizeof(struct fcoe_crc_eof) - data_len; data_len = 0; + } else { + data_len -= sizeof(struct fcoe_crc_eof); } } #endif - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; - cmd_type = ixgbe_tx_cmd_type(tx_flags); - olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen); + /* record length, and DMA address */ + dma_unmap_len_set(first, len, size); + dma_unmap_addr_set(first, dma, dma); - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(dma); for (;;) { - while (size > IXGBE_MAX_DATA_PER_TXD) { - tx_desc->read.buffer_addr = cpu_to_le64(dma + offset); + while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); - tx_desc->read.olinfo_status = olinfo_status; - - offset += IXGBE_MAX_DATA_PER_TXD; - size -= IXGBE_MAX_DATA_PER_TXD; - tx_desc++; i++; + tx_desc++; if (i == tx_ring->count) { - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0); + tx_desc = IXGBE_TX_DESC(tx_ring, 0); i = 0; } + + dma += IXGBE_MAX_DATA_PER_TXD; + size -= IXGBE_MAX_DATA_PER_TXD; + + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.olinfo_status = 0; } - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_buffer_info->length = offset + size; - tx_buffer_info->tx_flags = tx_flags; - tx_buffer_info->dma = dma; + if (likely(!data_len)) + break; - tx_desc->read.buffer_addr = cpu_to_le64(dma + offset); + if (unlikely(skb->no_fcs)) + cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS)); tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); - tx_desc->read.olinfo_status = olinfo_status; - if (!data_len) - break; + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = IXGBE_TX_DESC(tx_ring, 0); + i = 0; + } - frag = &skb_shinfo(skb)->frags[f]; #ifdef IXGBE_FCOE size = min_t(unsigned int, data_len, skb_frag_size(frag)); #else size = skb_frag_size(frag); #endif data_len -= size; - f++; - - offset = 0; - tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE; - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; - tx_desc++; - i++; - if (i == tx_ring->count) { - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0); - i = 0; - } - } + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); - tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.olinfo_status = 0; - i++; - if (i == tx_ring->count) - i = 0; - - tx_ring->next_to_use = i; + frag++; + } - if (tx_flags & IXGBE_TX_FLAGS_TSO) - gso_segs = skb_shinfo(skb)->gso_segs; -#ifdef IXGBE_FCOE - /* adjust for FCoE Sequence 
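The inner while loop of ixgbe_tx_map() above carves any span longer than IXGBE_MAX_DATA_PER_TXD into descriptor-sized chunks, advancing the DMA address and wrapping the ring index as it goes. The same walk in isolation; the 16 KB limit and the ring size are placeholders, not values taken from the driver headers:

#include <stdio.h>

#define MAX_PER_TXD	(1u << 14)	/* placeholder for IXGBE_MAX_DATA_PER_TXD */
#define RING_COUNT	512u

int main(void)
{
	unsigned long long dma = 0x100000;	/* pretend mapped address */
	unsigned int size = 40000;		/* pretend buffer length */
	unsigned int i = 510;			/* pretend next_to_use near the wrap */

	while (size > MAX_PER_TXD) {
		printf("desc %u: addr 0x%llx len %u\n", i, dma, MAX_PER_TXD);
		if (++i == RING_COUNT)		/* ring wrap, as in the loop above */
			i = 0;
		dma += MAX_PER_TXD;
		size -= MAX_PER_TXD;
	}
	printf("desc %u: addr 0x%llx len %u (tail)\n", i, dma, size);
	return 0;
}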
Offload */ - else if (tx_flags & IXGBE_TX_FLAGS_FSO) - gso_segs = DIV_ROUND_UP(skb->len - hdr_len, - skb_shinfo(skb)->gso_size); -#endif /* IXGBE_FCOE */ - else - gso_segs = 1; + /* write last descriptor with RS and EOP bits */ + cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); + tx_desc->read.cmd_type_len = cmd_type; - /* multiply data chunks by size of headers */ - tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len); - tx_buffer_info->gso_segs = gso_segs; - tx_buffer_info->skb = skb; + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); /* set the timestamp */ first->time_stamp = jiffies; /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. */ wmb(); /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + /* notify HW of packet */ writel(i, tx_ring->tail); return; dma_error: - dev_err(dev, "TX DMA map failed\n"); + dev_err(tx_ring->dev, "TX DMA map failed\n"); /* clear dma mappings for failed tx_buffer_info map */ for (;;) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info); - if (tx_buffer_info == first) + tx_buffer = &tx_ring->tx_buffer_info[i]; + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) break; if (i == 0) i = tx_ring->count; i--; } - dev_kfree_skb_any(skb); - tx_ring->next_to_use = i; } -static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol) +static void ixgbe_atr(struct ixgbe_ring *ring, + struct ixgbe_tx_buffer *first) { struct ixgbe_q_vector *q_vector = ring->q_vector; union ixgbe_atr_hash_dword input = { .dword = 0 }; @@ -6650,16 +6048,16 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, ring->atr_count++; /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + hdr.network = skb_network_header(first->skb); /* Currently only IPv4/IPv6 with TCP is supported */ - if ((protocol != __constant_htons(ETH_P_IPV6) || + if ((first->protocol != __constant_htons(ETH_P_IPV6) || hdr.ipv6->nexthdr != IPPROTO_TCP) && - (protocol != __constant_htons(ETH_P_IP) || + (first->protocol != __constant_htons(ETH_P_IP) || hdr.ipv4->protocol != IPPROTO_TCP)) return; - th = tcp_hdr(skb); + th = tcp_hdr(first->skb); /* skip this packet since it is invalid or the socket is closing */ if (!th || th->fin) @@ -6672,7 +6070,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, /* reset sample count */ ring->atr_count = 0; - vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); + vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); /* * src and dst are inverted, think how the receiver sees them @@ -6687,13 +6085,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, * since src port and flex bytes occupy the same word XOR them together * and write the value to source port portion of compressed dword */ - if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) + if (first->tx_flags & 
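The dma_error path above walks backwards from the slot that failed to the packet's first buffer, unmapping each one, and the walk has to step across the ring wrap correctly. That backward traversal in isolation (slot numbers invented):

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int first = 6;	/* slot holding the packet's first buffer */
	int i = 1;	/* slot where mapping failed; the ring wrapped in between */

	for (;;) {
		printf("unmap slot %d\n", i);	/* stands in for the unmap call */
		if (i == first)
			break;
		if (i == 0)
			i = RING_COUNT;		/* step back across the wrap */
		i--;
	}
	return 0;
}

Visiting slots 1, 0, 7, 6 in that order mirrors the for (;;) cleanup loop above.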
(IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); else - common.port.src ^= th->dest ^ protocol; + common.port.src ^= th->dest ^ first->protocol; common.port.dst ^= th->source; - if (protocol == __constant_htons(ETH_P_IP)) { + if (first->protocol == __constant_htons(ETH_P_IP)) { input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; } else { @@ -6781,7 +6179,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, /* * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, * otherwise try next time @@ -6797,11 +6195,12 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, return NETDEV_TX_BUSY; } -#ifdef CONFIG_PCI_IOV - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - tx_flags |= IXGBE_TX_FLAGS_TXSW; + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; -#endif /* if we have a HW VLAN tag being added default to the HW one */ if (vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; @@ -6814,10 +6213,20 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, goto out_drop; protocol = vhdr->h_vlan_encapsulated_proto; - tx_flags |= ntohs(vhdr->h_vlan_TCI) << IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= ntohs(vhdr->h_vlan_TCI) << + IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } +#ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. + */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + tx_flags |= IXGBE_TX_FLAGS_TXSW; + +#endif /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. 
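A note on the ATR hunks above: the flow director hash input has no dedicated field for the EtherType or VLAN tag, so the code folds one of them into the source-port half of the compressed dword with XOR, and swaps source and destination so the filter matches frames as the receiver will see them. The folding alone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t th_source = 0x1234, th_dest = 0x0050;	/* sample TCP ports */
	uint16_t protocol = 0x0008;	/* stand-in for htons(ETH_P_IP) on a little-endian host */
	uint16_t src, dst;

	src = th_dest ^ protocol;	/* port and flex bytes share this word */
	dst = th_source;		/* inverted: hash the receive direction */
	printf("compressed ports: src=0x%04x dst=0x%04x\n", src, dst);
	return 0;
}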
*/ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || @@ -6838,61 +6247,69 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, } } - /* record the location of the first descriptor for this packet */ - first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ if ((protocol == __constant_htons(ETH_P_FCOE)) && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { - tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); + tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; - else if (tso) - tx_flags |= IXGBE_TX_FLAGS_FSO | - IXGBE_TX_FLAGS_FCOE; - else - tx_flags |= IXGBE_TX_FLAGS_FCOE; goto xmit_fcoe; } #endif /* IXGBE_FCOE */ - /* setup IPv4/IPv6 offloads */ - if (protocol == __constant_htons(ETH_P_IP)) - tx_flags |= IXGBE_TX_FLAGS_IPV4; - - tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len); + tso = ixgbe_tso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; - else if (tso) - tx_flags |= IXGBE_TX_FLAGS_TSO; - else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol)) - tx_flags |= IXGBE_TX_FLAGS_CSUM; + else if (!tso) + ixgbe_tx_csum(tx_ring, first); /* add the ATR filter if ATR is on */ if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) - ixgbe_atr(tx_ring, skb, tx_flags, protocol); + ixgbe_atr(tx_ring, first); #ifdef IXGBE_FCOE xmit_fcoe: #endif /* IXGBE_FCOE */ - ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len); + ixgbe_tx_map(tx_ring, first, hdr_len); ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; out_drop: - dev_kfree_skb_any(skb); + dev_kfree_skb_any(first->skb); + first->skb = NULL; + return NETDEV_TX_OK; } -static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ + if (skb->len < 17) { + if (skb_padto(skb, 17)) + return NETDEV_TX_OK; + skb->len = 17; + } + tx_ring = adapter->tx_ring[skb->queue_mapping]; return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } @@ -7025,8 +6442,8 @@ static void ixgbe_netpoll(struct net_device *netdev) } adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; } -#endif +#endif static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { @@ -7075,6 +6492,7 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, return stats; } +#ifdef CONFIG_IXGBE_DCB /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. * #adapter: pointer to ixgbe_adapter * @tc: number of traffic classes currently enabled @@ -7111,7 +6529,6 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) return; } - /* ixgbe_setup_tc - routine to configure net_device for multiple traffic * classes. 
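The new ixgbe_xmit_frame() wrapper above enforces a 17-byte floor because the olinfo paylen field cannot describe anything shorter. Two details are easy to miss: skb_padto() frees the skb itself on allocation failure, so the caller only returns, and it zero-pads the tailroom without touching skb->len, which is why the length is then bumped by hand. A kernel-context sketch restating that guard (not standalone-runnable outside the kernel tree):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len < 17) {
		/* on failure skb_padto() has already freed the skb */
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;	/* padding does not update the length */
	}
	/* ... select a ring and hand off, as ixgbe_xmit_frame() does ... */
	return NETDEV_TX_OK;
}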
* @@ -7131,7 +6548,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || - (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS)) + (hw->mac.type == ixgbe_mac_82598EB && + tc < MAX_TRAFFIC_CLASS)) return -EINVAL; /* Hardware has to reinitialize queues and interrupts to @@ -7145,7 +6563,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) if (tc) { netdev_set_num_tc(dev, tc); adapter->last_lfc_mode = adapter->hw.fc.current_mode; - adapter->flags |= IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -7153,7 +6570,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) adapter->hw.fc.requested_mode = ixgbe_fc_none; } else { netdev_reset_tc(dev); - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; @@ -7171,6 +6587,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +#endif /* CONFIG_IXGBE_DCB */ void ixgbe_do_reset(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -7182,59 +6599,52 @@ void ixgbe_do_reset(struct net_device *netdev) } static netdev_features_t ixgbe_fix_features(struct net_device *netdev, - netdev_features_t data) + netdev_features_t features) { struct ixgbe_adapter *adapter = netdev_priv(netdev); #ifdef CONFIG_DCB if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - data &= ~NETIF_F_HW_VLAN_RX; + features &= ~NETIF_F_HW_VLAN_RX; #endif /* return error if RXHASH is being enabled when RSS is not supported */ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - data &= ~NETIF_F_RXHASH; + features &= ~NETIF_F_RXHASH; /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(data & NETIF_F_RXCSUM)) - data &= ~NETIF_F_LRO; + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; - /* Turn off LRO if not RSC capable or invalid ITR settings */ - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { - data &= ~NETIF_F_LRO; - } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && - (adapter->rx_itr_setting != 1 && - adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) { - data &= ~NETIF_F_LRO; - e_info(probe, "rx-usecs set too low, not enabling RSC\n"); - } + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + - return data; + return features; } static int ixgbe_set_features(struct net_device *netdev, - netdev_features_t data) + netdev_features_t features) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; bool need_reset = false; - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ - if (!(data & NETIF_F_RXCSUM)) - adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; - else - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - /* Make sure RSC matches LRO, reset if change */ - if (!!(data & NETIF_F_LRO) != - !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { - adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; - switch (adapter->hw.mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_82599EB: + if (!(features & NETIF_F_LRO)) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) need_reset = true; - break; - default: - break; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + if (adapter->rx_itr_setting == 1 || + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + need_reset 
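ixgbe_fix_features() above is the standard ndo_fix_features pattern: dependencies between offloads are enforced by masking bits out of the requested set before the core commits them. The pattern reduced to its essentials, with placeholder flag bits rather than the real NETIF_F_* values:

#include <stdint.h>
#include <stdio.h>

#define F_RXCSUM (1u << 0)	/* placeholder flags */
#define F_LRO    (1u << 1)
#define F_RXHASH (1u << 2)

static uint32_t fix_features(uint32_t f, int rss_capable, int rsc_capable)
{
	if (!rss_capable)
		f &= ~F_RXHASH;		/* RXHASH needs RSS */
	if (!(f & F_RXCSUM))
		f &= ~F_LRO;		/* LRO depends on Rx checksum */
	if (!rsc_capable)
		f &= ~F_LRO;		/* LRO is backed by RSC on this hardware */
	return f;
}

int main(void)
{
	printf("0x%x\n", fix_features(F_RXCSUM | F_LRO | F_RXHASH, 0, 1));
	return 0;
}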
= true; + } else if ((changed ^ features) & NETIF_F_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); } } @@ -7242,27 +6652,30 @@ static int ixgbe_set_features(struct net_device *netdev, * Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. */ - if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { - /* turn off ATR, enable perfect filters and reset */ - if (data & NETIF_F_NTUPLE) { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + if (!(features & NETIF_F_NTUPLE)) { + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + /* turn off Flow Director, set ATR and reset */ + if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; need_reset = true; } - } else if (!(data & NETIF_F_NTUPLE)) { - /* turn off Flow Director, set ATR and reset */ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + /* turn off ATR, enable perfect filters and reset */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; need_reset = true; } + if (changed & NETIF_F_RXALL) + need_reset = true; + + netdev->features = features; if (need_reset) ixgbe_do_reset(netdev); return 0; - } static const struct net_device_ops ixgbe_netdev_ops = { @@ -7270,7 +6683,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, .ndo_select_queue = ixgbe_select_queue, - .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, @@ -7281,10 +6694,12 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, - .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, +#ifdef CONFIG_IXGBE_DCB .ndo_setup_tc = ixgbe_setup_tc, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif @@ -7302,7 +6717,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { }; static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii) + const struct ixgbe_info *ii) { #ifdef CONFIG_PCI_IOV struct ixgbe_hw *hw = &adapter->hw; @@ -7489,6 +6904,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_crit(probe, "Fan has stopped, replace the adapter\n"); } + if (allow_unsupported_sfp) + hw->allow_unsupported_sfp = allow_unsupported_sfp; + /* reset_hw fills in the perm_addr as well */ hw->phy.reset_if_overtemp = true; err = hw->mac.ops.reset_hw(hw); @@ -7533,6 +6951,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, break; } + netdev->hw_features |= NETIF_F_RXALL; + netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; netdev->vlan_features |= NETIF_F_IP_CSUM; @@ -7540,6 +6960,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->vlan_features |= NETIF_F_SG; netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= 
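The companion ixgbe_set_features() above XORs the old and new masks so that expensive work, a full reset here, only happens for bits that actually flipped. In isolation:

#include <stdint.h>
#include <stdio.h>

#define F_RXALL (1u << 3)	/* placeholder for NETIF_F_RXALL */

int main(void)
{
	uint32_t old_features = 0x5;
	uint32_t new_features = 0x5 | F_RXALL;
	uint32_t changed = old_features ^ new_features;	/* only toggled bits survive */

	if (changed & F_RXALL)
		printf("RXALL toggled, reset required\n");
	return 0;
}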
IFF_SUPP_NOFCS; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | @@ -7577,7 +6998,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); err = -EIO; - goto err_eeprom; + goto err_sw_init; } memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); @@ -7586,11 +7007,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (ixgbe_validate_mac_addr(netdev->perm_addr)) { e_dev_err("invalid MAC address\n"); err = -EIO; - goto err_eeprom; + goto err_sw_init; } setup_timer(&adapter->service_timer, &ixgbe_service_timer, - (unsigned long) adapter); + (unsigned long) adapter); INIT_WORK(&adapter->service_task, ixgbe_service_task); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); @@ -7678,7 +7099,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* reset the hardware with the new settings */ err = hw->mac.ops.start_hw(hw); - if (err == IXGBE_ERR_EEPROM_VERSION) { /* We are running on a pre-production device, log a warning */ e_dev_warn("This device is a pre-production adapter/LOM. " @@ -7733,7 +7153,6 @@ err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); err_sw_init: -err_eeprom: if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 3f725d48336..1f3e32b576a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index b239bdac38d..310bdd96107 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 7cf1e1f56c6..bf9f82f4b1a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -834,6 +834,7 @@ out: **/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { + struct ixgbe_adapter *adapter = hw->back; s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; @@ -1068,9 +1069,16 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) if (hw->phy.type == ixgbe_phy_sfp_intel) { status = 0; } else { - hw_dbg(hw, "SFP+ module not supported\n"); - hw->phy.type = ixgbe_phy_sfp_unsupported; - status = IXGBE_ERR_SFP_NOT_SUPPORTED; + if (hw->allow_unsupported_sfp) { + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules."); + status = 0; + } else { + hw_dbg(hw, + "SFP+ module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } } } else { status = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 197bdd13106..cc18165b4c0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index cf6812dd143..88a58cb0856 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
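The SFP identification change above is the consumer end of the allow_unsupported_sfp plumbing: probe copies the module parameter into hw->allow_unsupported_sfp (see the ixgbe_probe() hunk earlier), and the PHY code then downgrades a hard failure to a loud warning. The decision in miniature, with plain booleans standing in for the hw fields and error codes:

#include <stdio.h>
#include <stdbool.h>

static bool allow_unsupported_sfp;	/* mirrors the module parameter */

static int identify_sfp(bool intel_module)
{
	if (intel_module)
		return 0;
	if (allow_unsupported_sfp) {
		printf("WARNING: untested SFP+ module, continuing\n");
		return 0;
	}
	printf("SFP+ module not supported\n");
	return -1;	/* stands in for IXGBE_ERR_SFP_NOT_SUPPORTED */
}

int main(void)
{
	allow_unsupported_sfp = true;
	return identify_sfp(false);
}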
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -67,7 +67,8 @@ static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter) vf_devfn = pdev->devfn + 0x80; pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); while (pvfdev) { - if (pvfdev->devfn == vf_devfn) + if (pvfdev->devfn == vf_devfn && + (pvfdev->bus->number >= pdev->bus->number)) vfs_found++; vf_devfn += 2; pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, @@ -257,7 +258,7 @@ static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter) list_for_each(pos, &adapter->vf_mvs.l) { entry = list_entry(pos, struct vf_macvlans, l); - if (entry->free == false) + if (!entry->free) hw->mac.ops.set_rar(hw, entry->rar_entry, entry->vf_macvlan, entry->vf, IXGBE_RAH_AV); @@ -646,6 +647,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); retval = ixgbe_set_vf_macvlan(adapter, vf, index, (unsigned char *)(&msgbuf[1])); + if (retval == -ENOSPC) + e_warn(drv, "VF %d has requested a MACVLAN filter " + "but there is no space for it\n", vf); break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index e8badab0335..2ab38d5fda9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 802bfa0f62c..8636e8344fc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -161,19 +161,19 @@ /* Receive DMA Registers */ #define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ - (0x0D000 + ((_i - 64) * 0x40))) + (0x0D000 + (((_i) - 64) * 0x40))) #define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ - (0x0D004 + ((_i - 64) * 0x40))) + (0x0D004 + (((_i) - 64) * 0x40))) #define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ - (0x0D008 + ((_i - 64) * 0x40))) + (0x0D008 + (((_i) - 64) * 0x40))) #define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ - (0x0D010 + ((_i - 64) * 0x40))) + (0x0D010 + (((_i) - 64) * 0x40))) #define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ - (0x0D018 + ((_i - 64) * 0x40))) + (0x0D018 + (((_i) - 64) * 0x40))) #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ - (0x0D028 + ((_i - 64) * 0x40))) + (0x0D028 + (((_i) - 64) * 0x40))) #define IXGBE_RSCCTL(_i) (((_i) < 64) ? 
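The ixgbe_type.h hunk above looks cosmetic but is not: without parentheses around the macro argument, an argument that is itself an expression binds to the bare '- 64' by operator precedence and yields a wrong register offset. A minimal demonstration; REG_OLD/REG_NEW are illustrative stand-ins for the IXGBE_RD*(_i) macros:

#include <stdio.h>

#define REG_OLD(_i) (0x0D000 + ((_i - 64) * 0x40))	/* pre-fix form */
#define REG_NEW(_i) (0x0D000 + (((_i) - 64) * 0x40))	/* fixed form */

int main(void)
{
	int q = 70;

	/* a plain variable expands identically either way */
	printf("old 0x%x  new 0x%x\n", REG_OLD(q), REG_NEW(q));

	/* an expression argument does not: (q | 1 - 64) parses as
	 * (q | -63), so the old form computes a bogus offset */
	printf("old 0x%x  new 0x%x\n", REG_OLD(q | 1), REG_NEW(q | 1));
	return 0;
}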
(0x0102C + ((_i) * 0x40)) : \ - (0x0D02C + ((_i - 64) * 0x40))) + (0x0D02C + (((_i) - 64) * 0x40))) #define IXGBE_RSCDBU 0x03028 #define IXGBE_RDDCC 0x02F20 #define IXGBE_RXMEMWRAP 0x03190 @@ -186,7 +186,7 @@ */ #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ - (0x0D014 + ((_i - 64) * 0x40)))) + (0x0D014 + (((_i) - 64) * 0x40)))) /* * Rx DCA Control Register: * 00-15 : 0x02200 + n*4 @@ -195,7 +195,7 @@ */ #define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ - (0x0D00C + ((_i - 64) * 0x40)))) + (0x0D00C + (((_i) - 64) * 0x40)))) #define IXGBE_RDRXCTL 0x02F00 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) /* 8 of these 0x03C00 - 0x03C1C */ @@ -344,9 +344,9 @@ #define IXGBE_WUPL 0x05900 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ -#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ -#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host - * Filter Table */ +#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host + * Filter Table */ #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 @@ -1021,14 +1021,16 @@ #define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ #define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ #define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ @@ -1485,7 +1487,7 @@ enum { #define IXGBE_LED_BLINK_BASE 0x00000080 #define IXGBE_LED_MODE_MASK_BASE 0x0000000F #define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) -#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) #define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) #define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) #define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) @@ -2068,9 +2070,9 @@ enum { /* SR-IOV specific macros */ #define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) -#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) -#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) -#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) +#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, @@ -2726,6 +2728,8 @@ struct ixgbe_mac_operations { s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*disable_rx_buff)(struct ixgbe_hw *); + s32 (*enable_rx_buff)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); void (*release_swfw_sync)(struct ixgbe_hw *, u16); @@ -2892,6 +2896,7 @@ struct ixgbe_hw { u8 revision_id; bool adapter_stopped; bool force_full_reset; + bool allow_unsupported_sfp; }; struct ixgbe_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 8cc5eccfd65..97a991403bb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2011 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -760,7 +760,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) * This will be reversed when we stop the blinking. */ hw->mac.ops.check_link(hw, &speed, &link_up, false); - if (link_up == false) { + if (!link_up) { macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); @@ -847,6 +847,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .disable_rx_buff = &ixgbe_disable_rx_buff_generic, + .enable_rx_buff = &ixgbe_enable_rx_buff_generic, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index 1f35d229e71..4ce4c97ef5a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -1,7 +1,7 @@ ################################################################################ # # Intel 82599 Virtual Function driver -# Copyright(c) 1999 - 2010 Intel Corporation. +# Copyright(c) 1999 - 2012 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 2eb89cb94a0..947b5c83073 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index dc8e6511c64..2bfe0d1d795 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2009 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -56,7 +56,8 @@ struct ixgbe_stats { offsetof(struct ixgbevf_adapter, m), \ offsetof(struct ixgbevf_adapter, b), \ offsetof(struct ixgbevf_adapter, r) -static struct ixgbe_stats ixgbe_gstrings_stats[] = { + +static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, stats.saved_reset_vfgprc)}, {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, @@ -671,7 +672,7 @@ static int ixgbevf_nway_reset(struct net_device *netdev) return 0; } -static struct ethtool_ops ixgbevf_ethtool_ops = { +static const struct ethtool_ops ixgbevf_ethtool_ops = { .get_settings = ixgbevf_get_settings, .get_drvinfo = ixgbevf_get_drvinfo, .get_regs_len = ixgbevf_get_regs_len, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index e6c9d1a927a..dfed420a1bf 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -279,12 +279,12 @@ enum ixgbevf_boards { board_X540_vf, }; -extern struct ixgbevf_info ixgbevf_82599_vf_info; -extern struct ixgbevf_info ixgbevf_X540_vf_info; -extern struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_82599_vf_info; +extern const struct ixgbevf_info ixgbevf_X540_vf_info; +extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; /* needed by ethtool.c */ -extern char ixgbevf_driver_name[]; +extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; extern int ixgbevf_up(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 891162d1610..581c65976bb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
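The const-ified ixgbevf stats table above keeps three offsetof() positions per counter: the live value, its baseline at load, and the value preserved across resets. My reading, since the reporting code is not part of this hunk, is that ethtool output combines them as (current - base) + saved. A sketch of that layout with an invented adapter struct:

#include <stddef.h>
#include <stdio.h>

struct vf_stat { unsigned long cur, base, saved; };
struct adapter { struct vf_stat rx; };

struct stat_desc {
	const char *name;
	size_t cur_off, base_off, saved_off;
};

#define STAT(n, m, b, r) { n, offsetof(struct adapter, m), \
			   offsetof(struct adapter, b), \
			   offsetof(struct adapter, r) }

static const struct stat_desc descs[] = {
	STAT("rx_packets", rx.cur, rx.base, rx.saved),
};

int main(void)
{
	struct adapter a = { { 1000, 100, 40 } };
	char *p = (char *)&a;
	const struct stat_desc *d = &descs[0];
	unsigned long v = *(unsigned long *)(p + d->cur_off)
			- *(unsigned long *)(p + d->base_off)
			+ *(unsigned long *)(p + d->saved_off);

	printf("%s = %lu\n", d->name, v);	/* 1000 - 100 + 40 = 940 */
	return 0;
}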
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -53,14 +53,14 @@ #include "ixgbevf.h" -char ixgbevf_driver_name[] = "ixgbevf"; +const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; #define DRV_VERSION "2.2.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2010 Intel Corporation."; + "Copyright (c) 2009 - 2012 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -917,32 +917,39 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) struct ixgbe_hw *hw = &adapter->hw; u32 eicr; u32 msg; + bool got_ack = false; eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); - if (!hw->mbx.ops.check_for_ack(hw)) { + if (!hw->mbx.ops.check_for_ack(hw)) + got_ack = true; + + if (!hw->mbx.ops.check_for_msg(hw)) { + hw->mbx.ops.read(hw, &msg, 1); + + if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 1)); + + if (msg & IXGBE_VT_MSGTYPE_NACK) + pr_warn("Last Request of type %2.2x to PF Nacked\n", + msg & 0xFF); /* - * checking for the ack clears the PFACK bit. Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it. Also - * avoid the read below because the code to read - * the mailbox will also clear the ack bit. This was - * causing lost acks. Just cache the bit and exit - * the IRQ handler. + * Restore the PFSTS bit in case someone is polling for a + * return message from the PF */ - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; - goto out; + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; } - /* Not an ack interrupt, go ahead and read the message */ - hw->mbx.ops.read(hw, &msg, 1); - - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 1)); + /* + * checking for the ack clears the PFACK bit. 
Place + * it back in the v2p_mailbox cache so that anyone + * polling for an ack will not miss it + */ + if (got_ack) + hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; -out: return IRQ_HANDLED; } @@ -2192,13 +2199,17 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) if (err) { dev_info(&pdev->dev, "PF still in reset state, assigning new address\n"); - dev_hw_addr_random(adapter->netdev, hw->mac.addr); + eth_hw_addr_random(adapter->netdev); + memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr, + adapter->netdev->addr_len); } else { err = hw->mac.ops.init_hw(hw); if (err) { pr_err("init_shared_code failed: %d\n", err); goto out; } + memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, + adapter->netdev->addr_len); } /* Enable dynamic interrupt throttling rates */ @@ -2217,6 +2228,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; set_bit(__IXGBEVF_DOWN, &adapter->state); + return 0; out: return err; @@ -2514,12 +2526,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; rx_ring->rx_buffer_info = vzalloc(size); - if (!rx_ring->rx_buffer_info) { - hw_dbg(&adapter->hw, - "Unable to vmalloc buffer memory for " - "the receive descriptor ring\n"); + if (!rx_ring->rx_buffer_info) goto alloc_failed; - } /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); @@ -3391,6 +3399,17 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, /* setup the private structure */ err = ixgbevf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* The HW MAC address was set and/or determined in sw_init */ + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + pr_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | @@ -3415,16 +3434,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; - /* The HW MAC address was set and/or determined in sw_init */ - memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); - - if (!is_valid_ether_addr(netdev->dev_addr)) { - pr_err("invalid MAC address\n"); - err = -EIO; - goto err_sw_init; - } - init_timer(&adapter->watchdog_timer); adapter->watchdog_timer.function = ixgbevf_watchdog; adapter->watchdog_timer.data = (unsigned long)adapter; @@ -3453,13 +3462,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, ixgbevf_init_last_counter_stats(adapter); /* print the MAC address */ - hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", - netdev->dev_addr[0], - netdev->dev_addr[1], - netdev->dev_addr[2], - netdev->dev_addr[3], - netdev->dev_addr[4], - netdev->dev_addr[5]); + hw_dbg(hw, "%pM\n", netdev->dev_addr); hw_dbg(hw, "MAC: %d\n", hw->mac.type); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 930fa83f256..9c955900fe6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -26,6 +26,7 @@ *******************************************************************************/ #include "mbx.h" +#include "ixgbevf.h" /** * ixgbevf_poll_for_msg - Wait for message notification @@ -328,7 +329,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) return 0; } -struct ixgbe_mbx_operations ixgbevf_mbx_ops = { +const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { .init_params = ixgbevf_init_mbx_params_vf, .read = ixgbevf_read_mbx_vf, .write = ixgbevf_write_mbx_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 9d38a94a348..cf9131c5c11 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 5e4d5e5cdf3..debd8c0e1f2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 21533e30036..74be7411242 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -26,6 +26,7 @@ *******************************************************************************/ #include "vf.h" +#include "ixgbevf.h" /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx @@ -282,6 +283,17 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, return ret_val; } +static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, + u32 *msg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 retmsg[IXGBE_VFMAILBOX_SIZE]; + s32 retval = mbx->ops.write_posted(hw, msg, size); + + if (!retval) + mbx->ops.read_posted(hw, retmsg, size); +} + /** * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure @@ -293,7 +305,6 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) { struct netdev_hw_addr *ha; - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; u16 *vector_list = (u16 *)&msgbuf[1]; u32 cnt, i; @@ -320,7 +331,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } - mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); return 0; } @@ -335,7 +346,6 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; msgbuf[0] = IXGBE_VF_SET_VLAN; @@ -343,7 +353,9 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - return mbx->ops.write_posted(hw, msgbuf, 2); + ixgbevf_write_msg_read_ack(hw, msgbuf, 2); + + return 0; } /** @@ -401,7 +413,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, return 0; } -static struct ixgbe_mac_operations ixgbevf_mac_ops = { +static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .init_hw = ixgbevf_init_hw_vf, .reset_hw = ixgbevf_reset_hw_vf, .start_hw = ixgbevf_start_hw_vf, @@ -415,12 +427,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = { .set_vfta = ixgbevf_set_vfta_vf, }; -struct ixgbevf_info ixgbevf_82599_vf_info = { +const struct ixgbevf_info ixgbevf_82599_vf_info = { .mac = ixgbe_mac_82599_vf, .mac_ops = &ixgbevf_mac_ops, }; -struct ixgbevf_info ixgbevf_X540_vf_info = { +const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 10306b492ee..25c951daee5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -167,7 +167,7 @@ struct ixgbevf_hw_stats { struct ixgbevf_info { enum ixgbe_mac_type mac; - struct ixgbe_mac_operations *mac_ops; + const struct ixgbe_mac_operations *mac_ops; }; #endif /* __IXGBE_VF_H__ */
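Closing note on ixgbevf_write_msg_read_ack(), introduced in the vf.c hunk above: every posted write is paired with a posted read so the PF's reply is always drained, even when the caller ignores it, leaving the mailbox clean for the next request. The helper restated with comments (this is the diff's own shape, not new driver code):

static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
				       u32 *msg, u16 size)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
	/* a posted write waits until the PF has accepted the message */
	s32 retval = mbx->ops.write_posted(hw, msg, size);

	if (!retval)	/* 0 means the write went through */
		/* drain the reply so it cannot clog the next exchange */
		mbx->ops.read_posted(hw, retmsg, size);
}

This is also why ixgbevf_set_vfta_vf() above now returns 0 unconditionally: the ack is consumed here rather than propagated to the caller.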