From e81a1ba815666ec02ef5bf0e17cf256c88d233b3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 14 Nov 2010 17:04:33 +0000 Subject: drivers/net/ixgbe: Remove unnecessary semicolons Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_sriov.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 5428153af8f..93f40bcf683 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, * addresses */ for (i = 0; i < entries; i++) { - vfinfo->vf_mc_hashes[i] = hash_list[i];; + vfinfo->vf_mc_hashes[i] = hash_list[i]; } for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { -- cgit v1.2.3-70-g09d2 From 1a51502bddca7ac1e921d918b741ffd2bec149ed Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 16 Nov 2010 19:26:42 -0800 Subject: ixgbe: delay rx_ring freeing "cat /proc/net/dev" uses RCU protection only. Its quite possible we call a driver get_stats() method while device is dismantling and freeing its data structures. So get_stats() methods must be very careful not accessing driver private data without appropriate locking. In ixgbe case, we access rx_ring pointers. These pointers are freed in ixgbe_clear_interrupt_scheme() and set to NULL, this can trigger NULL dereference in ixgbe_get_stats64() A possible fix is to use RCU locking in ixgbe_get_stats64() and defer rx_ring freeing after a grace period in ixgbe_clear_interrupt_scheme() Signed-off-by: Eric Dumazet Reported-by: Tantilov, Emil S Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 1 + drivers/net/ixgbe/ixgbe_main.c | 34 ++++++++++++++++++++++++---------- 2 files changed, 25 insertions(+), 10 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index ed8703cfffb..018e143612b 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -192,6 +192,7 @@ struct ixgbe_ring { unsigned int size; /* length in bytes */ dma_addr_t dma; /* phys. address of descriptor ring */ + struct rcu_head rcu; } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fbad4d81960..a137f9dbaac 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -4751,6 +4751,11 @@ err_set_interrupt: return err; } +static void ring_free_rcu(struct rcu_head *head) +{ + kfree(container_of(head, struct ixgbe_ring, rcu)); +} + /** * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings * @adapter: board private structure to clear interrupt scheme on @@ -4767,7 +4772,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) adapter->tx_ring[i] = NULL; } for (i = 0; i < adapter->num_rx_queues; i++) { - kfree(adapter->rx_ring[i]); + struct ixgbe_ring *ring = adapter->rx_ring[i]; + + /* ixgbe_get_stats64() might access this ring, we must wait + * a grace period before freeing it. 
+ */ + call_rcu(&ring->rcu, ring_free_rcu); adapter->rx_ring[i] = NULL; } @@ -6563,20 +6573,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, /* accurate rx/tx bytes/packets stats */ dev_txq_stats_fold(netdev, stats); + rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; + struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); u64 bytes, packets; unsigned int start; - do { - start = u64_stats_fetch_begin_bh(&ring->syncp); - packets = ring->stats.packets; - bytes = ring->stats.bytes; - } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); - stats->rx_packets += packets; - stats->rx_bytes += bytes; + if (ring) { + do { + start = u64_stats_fetch_begin_bh(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } } - + rcu_read_unlock(); /* following stats updated by ixgbe_watchdog_task() */ stats->multicast = netdev->stats.multicast; stats->rx_errors = netdev->stats.rx_errors; @@ -7282,6 +7295,7 @@ static void __exit ixgbe_exit_module(void) dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); + rcu_barrier(); /* Wait for completion of call_rcu()'s */ } #ifdef CONFIG_IXGBE_DCA -- cgit v1.2.3-70-g09d2 From 16b61beb39f2446460f93c08d4d263dc24f22dd8 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Nov 2010 19:26:44 -0800 Subject: ixgbe: DCB set PFC high and low water marks per data sheet specs Currently the high and low water marks for PFC are being set conservatively for jumbo frames. This means the RX buffers are being underutilized in the default 1500 MTU. This patch fixes this so that the water marks are set as described in the data sheet considering the MTU size. The equation used is, RTT * 1.44 + MTU * 1.44 + MTU Where RTT is the round trip time and MTU is the max frame size in KB. To avoid floating point arithmetic FC_HIGH_WATER is defined ((((RTT + MTU) * 144) + 99) / 100) + MTU This changes how the hardware field fc.low_water and fc.high_water are used. With this change they are no longer storing the actual low water and high water markers but are storing the required head room in the buffer. This simplifies the logic and we do not need to account for the size of the buffer when setting the thresholds. Testing with iperf and 16 threads showed a slight uptick in throughput over a single traffic class .1-.2Gbps and a reduction in pause frames. Without the patch a 30 second run would show ~10-15 pause frames being transmitted with the patch ~2-5 are seen. Test were run back to back with 82599. Note RXPBSIZE is in KB and low and high water marks fields are also in KB. However the FCRT* registers are 32B granularity and right shifted 5 into the register, (((rx_pbsize - water_mark) * 1024) / 32) << 5 is the most explicit conversion here we simplify (rx_pbsize - water_mark) * 32 << 5 = (rx_pbsize - water_mark) << 10 This patch updates the PFC thresholds and legacy FC thresholds. 
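To make the arithmetic concrete, the following stand-alone sketch mirrors the PAUSE_RTT/PAUSE_MTU/FC_HIGH_WATER/FC_LOW_WATER macros this patch adds to ixgbe_type.h and evaluates them for a standard and a jumbo frame. The macros themselves are taken from the patch; the wrapper program, the 14 + 4 bytes of Ethernet header/FCS overhead, and the quoted results are only illustrative.

    #include <stdio.h>

    /* mirrors the macros added to ixgbe_type.h; all values are in KB */
    #define PAUSE_RTT          8
    #define PAUSE_MTU(MTU)     (((MTU) + 1024 - 1) / 1024)
    #define FC_HIGH_WATER(MTU) (((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100) + \
                                PAUSE_MTU(MTU))
    #define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))

    int main(void)
    {
            /* max_frame = MTU + Ethernet header (14) + FCS (4), as in ixgbe_sw_init() */
            int frames[] = { 1500 + 14 + 4, 9000 + 14 + 4 };
            int i;

            for (i = 0; i < 2; i++)
                    printf("max_frame %5d: %2d KB high / %2d KB low water headroom\n",
                           frames[i], FC_HIGH_WATER(frames[i]), FC_LOW_WATER(frames[i]));
            /* prints roughly 17/24 KB for a 1500-byte MTU and 34/52 KB for a 9000-byte MTU */
            return 0;
    }

After this patch fc.high_water and fc.low_water hold this required headroom rather than absolute thresholds, and the FCRTH/FCRTL register values are derived as (rx_pba_size - headroom) << 10 as described above.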
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 2 -- drivers/net/ixgbe/ixgbe_82598.c | 21 ++++++++++-------- drivers/net/ixgbe/ixgbe_common.c | 43 +++++++++++-------------------------- drivers/net/ixgbe/ixgbe_dcb_82598.c | 12 ++++------- drivers/net/ixgbe/ixgbe_dcb_82599.c | 12 +++++------ drivers/net/ixgbe/ixgbe_main.c | 9 ++++++-- drivers/net/ixgbe/ixgbe_type.h | 8 +++++++ 7 files changed, 48 insertions(+), 59 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 018e143612b..4f98486d8c2 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -61,10 +61,8 @@ #define IXGBE_MIN_RXD 64 /* flow control */ -#define IXGBE_DEFAULT_FCRTL 0x10000 #define IXGBE_MIN_FCRTL 0x40 #define IXGBE_MAX_FCRTL 0x7FF80 -#define IXGBE_DEFAULT_FCRTH 0x20000 #define IXGBE_MIN_FCRTH 0x600 #define IXGBE_MAX_FCRTH 0x7FFF0 #define IXGBE_DEFAULT_FCPAUSE 0xFFFF diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 9c02d6014cc..25b20f93190 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -357,6 +357,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) u32 fctrl_reg; u32 rmcs_reg; u32 reg; + u32 rx_pba_size; u32 link_speed = 0; bool link_up; @@ -459,16 +460,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) /* Set up and enable Rx high/low water mark thresholds, enable XON. */ if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - if (hw->fc.send_xon) { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), - (hw->fc.low_water | IXGBE_FCRTL_XONE)); - } else { - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), - hw->fc.low_water); - } + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + + reg = (rx_pba_size - hw->fc.low_water) << 6; + if (hw->fc.send_xon) + reg |= IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); + + reg = (rx_pba_size - hw->fc.high_water) << 10; + reg |= IXGBE_FCRTH_FCEN; - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), - (hw->fc.high_water | IXGBE_FCRTH_FCEN)); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); } /* Configure pause time (2 TCs per register) */ diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index e3eca131638..62aa2be199f 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1595,6 +1595,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) u32 mflcn_reg, fccfg_reg; u32 reg; u32 rx_pba_size; + u32 fcrtl, fcrth; #ifdef CONFIG_DCB if (hw->fc.requested_mode == ixgbe_fc_pfc) @@ -1671,41 +1672,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); - reg = IXGBE_READ_REG(hw, IXGBE_MTQC); - /* Thresholds are different for link flow control when in DCB mode */ - if (reg & IXGBE_MTQC_RT_ENA) { - rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; - /* Always disable XON for LFC when in DCB mode */ - reg = (rx_pba_size >> 5) & 0xFFE0; - IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg); + fcrth = (rx_pba_size - hw->fc.high_water) << 10; + fcrtl = (rx_pba_size - hw->fc.low_water) << 10; - reg = (rx_pba_size >> 2) & 0xFFE0; - if 
(hw->fc.current_mode & ixgbe_fc_tx_pause) - reg |= IXGBE_FCRTH_FCEN; - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); - } else { - /* - * Set up and enable Rx high/low water mark thresholds, - * enable XON. - */ - if (hw->fc.current_mode & ixgbe_fc_tx_pause) { - if (hw->fc.send_xon) { - IXGBE_WRITE_REG(hw, - IXGBE_FCRTL_82599(packetbuf_num), - (hw->fc.low_water | - IXGBE_FCRTL_XONE)); - } else { - IXGBE_WRITE_REG(hw, - IXGBE_FCRTL_82599(packetbuf_num), - hw->fc.low_water); - } - - IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), - (hw->fc.high_water | IXGBE_FCRTH_FCEN)); - } + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + fcrth |= IXGBE_FCRTH_FCEN; + if (hw->fc.send_xon) + fcrtl |= IXGBE_FCRTL_XONE; } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); + /* Configure pause time (2 TCs per register) */ reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); if ((packetbuf_num & 1) == 0) diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 50288bcadc5..9a5e89c12e0 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c @@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, * for each traffic class. */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (dcb_config->rx_pba_cfg == pba_equal) { - rx_pba_size = IXGBE_RXPBSIZE_64KB; - } else { - rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB - : IXGBE_RXPBSIZE_48KB; - } + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + reg = (rx_pba_size - hw->fc.low_water) << 10; - reg = ((rx_pba_size >> 5) & 0xFFF0); if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) reg |= IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); - reg = ((rx_pba_size >> 2) & 0xFFF0); + reg = (rx_pba_size - hw->fc.high_water) << 10; if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) reg |= IXGBE_FCRTH_FCEN; diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 05f22471507..374e1f74d0f 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (dcb_config->rx_pba_cfg == pba_equal) - rx_pba_size = IXGBE_RXPBSIZE_64KB; - else - rx_pba_size = (i < 4) ? 
IXGBE_RXPBSIZE_80KB - : IXGBE_RXPBSIZE_48KB; + rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; + + reg = (rx_pba_size - hw->fc.low_water) << 10; - reg = ((rx_pba_size >> 5) & 0xFFE0); if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) reg |= IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); - reg = ((rx_pba_size >> 2) & 0xFFE0); + reg = (rx_pba_size - hw->fc.high_water) << 10; if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) reg |= IXGBE_FCRTH_FCEN; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a137f9dbaac..f374207e14b 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -4854,6 +4854,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) int j; struct tc_configuration *tc; #endif + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; /* PCI config space info */ @@ -4930,8 +4931,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_DCB adapter->last_lfc_mode = hw->fc.current_mode; #endif - hw->fc.high_water = IXGBE_DEFAULT_FCRTH; - hw->fc.low_water = IXGBE_DEFAULT_FCRTL; + hw->fc.high_water = FC_HIGH_WATER(max_frame); + hw->fc.low_water = FC_LOW_WATER(max_frame); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; hw->fc.disable_fc_autoneg = false; @@ -5193,6 +5194,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; /* MTU < 68 is an error and causes problems on some kernels */ @@ -5203,6 +5205,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; + hw->fc.high_water = FC_HIGH_WATER(max_frame); + hw->fc.low_water = FC_LOW_WATER(max_frame); + if (netif_running(netdev)) ixgbe_reinit_locked(adapter); diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index d3cc6ce7c97..96dea7731e6 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -2113,6 +2113,14 @@ typedef u32 ixgbe_physical_layer; #define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 #define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 +/* Flow Control Macros */ +#define PAUSE_RTT 8 +#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024) + +#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\ + PAUSE_MTU(MTU)) +#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) + /* Software ATR hash keys */ #define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 -- cgit v1.2.3-70-g09d2 From 80ab193dce048e7b7afa43c99e69f508167e29ab Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Nov 2010 19:26:45 -0800 Subject: ixgbe: DCB: credit max only needs to be gt TSO size for 82598 The maximum credits per traffic class only needs to be greater then the TSO size for 82598 devices. The 82599 devices do not have this requirement so only do this test for 82598 devices. 
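As a minimal sketch of the rule described above (the helper name is hypothetical; the real check sits inline in ixgbe_dcb_calculate_tc_credits(), and MINIMUM_CREDIT_FOR_TSO is the existing constant from ixgbe_dcb.h):

    /* Clamp the per-TC max credits so one full TSO packet can be sent in
     * descriptor plane arbitration -- but only on 82598, since 82599 has no
     * such requirement.
     */
    static u32 ixgbe_dcb_clamp_credit_max(struct ixgbe_hw *hw, u32 credit_max)
    {
            if (hw->mac.type == ixgbe_mac_82598EB &&
                credit_max && credit_max < MINIMUM_CREDIT_FOR_TSO)
                    credit_max = MINIMUM_CREDIT_FOR_TSO;

            return credit_max;
    }

Passing the hw pointer into ixgbe_dcb_calculate_tc_credits(), as the new signature does, is what makes this MAC-type check possible.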
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb.c | 6 ++++-- drivers/net/ixgbe/ixgbe_dcb.h | 3 ++- drivers/net/ixgbe/ixgbe_main.c | 4 ++-- 3 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index 0d44c6470ca..4f2f0ae6735 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -42,7 +42,8 @@ * It should be called only after the rules are checked by * ixgbe_dcb_check_config(). */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, int max_frame, u8 direction) { struct tc_bw_alloc *p; @@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, * credit may not be enough to send out a TSO * packet in descriptor plane arbitration. */ - if (credit_max && + if ((hw->mac.type == ixgbe_mac_82598EB) && + credit_max && (credit_max < MINIMUM_CREDIT_FOR_TSO)) credit_max = MINIMUM_CREDIT_FOR_TSO; diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 0208a87b129..1cfe38ee164 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h @@ -150,7 +150,8 @@ struct ixgbe_dcb_config { /* DCB driver APIs */ /* DCB credits calculation */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8); +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, + struct ixgbe_dcb_config *, int, u8); /* DCB hw initialization */ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index f374207e14b..45d988741fe 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3366,9 +3366,9 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif - ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, DCB_RX_CONFIG); /* reconfigure the hardware */ -- cgit v1.2.3-70-g09d2 From 4c0ec6544a0cd5e3eed08df2c14cf98185098abe Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:46 -0800 Subject: ixgbe: remove unnecessary re-init of adapter on Rx-csum change There is no need to reset the adapter when changing the Rx checksum settings. Since the only change is a software flag we can disable it without needing to reset the entire adapter. 
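The reset can be dropped because IXGBE_FLAG_RX_CSUM_ENABLED is consulted purely in software on the receive path. Roughly (a paraphrase of the start of ixgbe_rx_checksum(), not a verbatim copy; the real function also takes the Rx descriptor and inspects its status/error bits):

    static void ixgbe_rx_checksum_sketch(struct ixgbe_adapter *adapter,
                                         struct sk_buff *skb)
    {
            skb->ip_summed = CHECKSUM_NONE;

            /* Rx csum disabled via ethtool: leave the skb unverified */
            if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                    return;

            /* ... otherwise check the descriptor status bits and set
             * CHECKSUM_UNNECESSARY when hardware validated the checksum ...
             */
    }

Since toggling the flag only changes which branch is taken for the next received packet, no ring or hardware reinitialization is required.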
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 3dc731c22ff..81fa1ac1c9b 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -412,11 +412,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) else adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; - if (netif_running(netdev)) - ixgbe_reinit_locked(adapter); - else - ixgbe_reset(adapter); - return 0; } -- cgit v1.2.3-70-g09d2 From 8ad494b0e59950e2b4e587c32cb67a2452795ea0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:47 -0800 Subject: ixgbe: move GSO segments and byte count processing into ixgbe_tx_map This change simplifies the work being done by the TX interrupt handler and pushes it into the tx_map call. This allows for fewer cache misses since the TX cleanup now accesses almost none of the skb members. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 4 ++- drivers/net/ixgbe/ixgbe_main.c | 57 ++++++++++++++++++------------------------ 2 files changed, 28 insertions(+), 33 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 4f98486d8c2..93946b683ad 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -128,7 +128,9 @@ struct ixgbe_tx_buffer { unsigned long time_stamp; u16 length; u16 next_to_watch; - u16 mapped_as_page; + unsigned int bytecount; + u16 gso_segs; + u8 mapped_as_page; }; struct ixgbe_rx_buffer { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 45d988741fe..480f0b0f038 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -749,45 +749,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, bool cleaned = false; rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { - struct sk_buff *skb; tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); tx_buffer_info = &tx_ring->tx_buffer_info[i]; + + tx_desc->wb.status = 0; cleaned = (i == eop); - skb = tx_buffer_info->skb; - if (cleaned && skb) { - unsigned int segs, bytecount; - unsigned int hlen = skb_headlen(skb); + i++; + if (i == tx_ring->count) + i = 0; - /* gso_segs is currently only valid for tcp */ - segs = skb_shinfo(skb)->gso_segs ?: 1; -#ifdef IXGBE_FCOE - /* adjust for FCoE Sequence Offload */ - if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - && skb_is_gso(skb) - && vlan_get_protocol(skb) == - htons(ETH_P_FCOE)) { - hlen = skb_transport_offset(skb) + - sizeof(struct fc_frame_header) + - sizeof(struct fcoe_crc_eof); - segs = DIV_ROUND_UP(skb->len - hlen, - skb_shinfo(skb)->gso_size); - } -#endif /* IXGBE_FCOE */ - /* multiply data chunks by size of headers */ - bytecount = ((segs - 1) * hlen) + skb->len; - total_packets += segs; - total_bytes += bytecount; + if (cleaned && tx_buffer_info->skb) { + total_bytes += tx_buffer_info->bytecount; + total_packets += tx_buffer_info->gso_segs; } ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); - - tx_desc->wb.status = 0; - - i++; - if (i == tx_ring->count) - i = 0; } eop = tx_ring->tx_buffer_info[i].next_to_watch; @@ -6015,7 +5993,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, static int ixgbe_tx_map(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, struct 
sk_buff *skb, u32 tx_flags, - unsigned int first) + unsigned int first, const u8 hdr_len) { struct pci_dev *pdev = adapter->pdev; struct ixgbe_tx_buffer *tx_buffer_info; @@ -6024,6 +6002,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, unsigned int offset = 0, size, count = 0, i; unsigned int nr_frags = skb_shinfo(skb)->nr_frags; unsigned int f; + unsigned int bytecount = skb->len; + u16 gso_segs = 1; i = tx_ring->next_to_use; @@ -6093,6 +6073,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, break; } + if (tx_flags & IXGBE_TX_FLAGS_TSO) + gso_segs = skb_shinfo(skb)->gso_segs; +#ifdef IXGBE_FCOE + /* adjust for FCoE Sequence Offload */ + else if (tx_flags & IXGBE_TX_FLAGS_FSO) + gso_segs = DIV_ROUND_UP(skb->len - hdr_len, + skb_shinfo(skb)->gso_size); +#endif /* IXGBE_FCOE */ + bytecount += (gso_segs - 1) * hdr_len; + + /* multiply data chunks by size of headers */ + tx_ring->tx_buffer_info[i].bytecount = bytecount; + tx_ring->tx_buffer_info[i].gso_segs = gso_segs; tx_ring->tx_buffer_info[i].skb = skb; tx_ring->tx_buffer_info[first].next_to_watch = i; @@ -6402,7 +6395,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev tx_flags |= IXGBE_TX_FLAGS_CSUM; } - count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); + count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); if (count) { /* add the ATR filter if ATR is on */ if (tx_ring->atr_sample_rate) { -- cgit v1.2.3-70-g09d2 From d5f398ed73522b9f76861af6553775c5851de0d0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:48 -0800 Subject: ixgbe: cleanup ixgbe_alloc_rx_buffers This change re-orders alloc_rx_buffers to make better use of the packet split enabled flag. The new setup should require less branching in the code since now we are down to fewer if statements since we either are handling packet split or aren't. 
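One part of the rework worth calling out is that the allocator now verifies the DMA mappings it creates instead of assuming they succeed. A minimal stand-alone sketch of that pattern (the helper name is hypothetical; in the hunk below the same logic appears inline for both the skb buffer and the half page used by packet split):

    /* Map an Rx buffer and verify the mapping; on failure clear the handle so
     * a later refill pass can retry, and let the caller bump
     * alloc_rx_buff_failed and stop allocating.
     */
    static bool ixgbe_map_rx_buffer(struct device *dev,
                                    struct ixgbe_rx_buffer *bi,
                                    struct sk_buff *skb, unsigned int buf_len)
    {
            bi->dma = dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, bi->dma)) {
                    bi->dma = 0;
                    return false;
            }
            return true;
    }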
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 2 +- drivers/net/ixgbe/ixgbe_main.c | 81 ++++++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 40 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 93946b683ad..149cf26b254 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -472,7 +472,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *, struct ixgbe_tx_buffer *); extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, - int cleaned_count); + u16 cleaned_count); extern void ixgbe_write_eitr(struct ixgbe_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 480f0b0f038..e838479d2d9 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1010,63 +1010,70 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, **/ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, - int cleaned_count) + u16 cleaned_count) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; - unsigned int i; - unsigned int bufsz = rx_ring->rx_buf_len; - - i = rx_ring->next_to_use; - bi = &rx_ring->rx_buffer_info[i]; + struct sk_buff *skb; + u16 i = rx_ring->next_to_use; while (cleaned_count--) { rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + skb = bi->skb; - if (!bi->page_dma && - (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { - if (!bi->page) { - bi->page = netdev_alloc_page(netdev); - if (!bi->page) { - adapter->alloc_rx_page_failed++; - goto no_buffers; - } - bi->page_offset = 0; - } else { - /* use a half page if we're re-using */ - bi->page_offset ^= (PAGE_SIZE / 2); - } - - bi->page_dma = dma_map_page(&pdev->dev, bi->page, - bi->page_offset, - (PAGE_SIZE / 2), - DMA_FROM_DEVICE); - } - - if (!bi->skb) { - struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, - bufsz); - bi->skb = skb; - + if (!skb) { + skb = netdev_alloc_skb_ip_align(adapter->netdev, + rx_ring->rx_buf_len); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } /* initialize queue mapping */ skb_record_rx_queue(skb, rx_ring->queue_index); + bi->skb = skb; } if (!bi->dma) { bi->dma = dma_map_single(&pdev->dev, - bi->skb->data, + skb->data, rx_ring->rx_buf_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, bi->dma)) { + adapter->alloc_rx_buff_failed++; + bi->dma = 0; + goto no_buffers; + } } - /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. */ + if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (!bi->page) { + bi->page = netdev_alloc_page(adapter->netdev); + if (!bi->page) { + adapter->alloc_rx_page_failed++; + goto no_buffers; + } + } + + if (!bi->page_dma) { + /* use a half page if we're re-using */ + bi->page_offset ^= PAGE_SIZE / 2; + bi->page_dma = dma_map_page(&pdev->dev, + bi->page, + bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + bi->page_dma)) { + adapter->alloc_rx_page_failed++; + bi->page_dma = 0; + goto no_buffers; + } + } + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
*/ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); } else { @@ -1077,15 +1084,11 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, i++; if (i == rx_ring->count) i = 0; - bi = &rx_ring->rx_buffer_info[i]; } no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; - if (i-- == 0) - i = (rx_ring->count - 1); - ixgbe_release_rx_desc(&adapter->hw, rx_ring, i); } } -- cgit v1.2.3-70-g09d2 From 84ea2591e4a24775c2735511a1cc3cf88edd249d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:49 -0800 Subject: ixgbe: drop ring->head, make ring->tail a pointer instead of offset This change drops ring->head since it is not used in any hot-path and can easily be determined using IXGBE_[RT]DH(ring->reg_idx). It also changes ring->tail into a true pointer so we can avoid unnecessary pointer math to find the location of the tail. In addition I also dropped the setting of head and tail in ixgbe_clean_[rx|tx]_ring. The only location that should be setting the head and tail values is ixgbe_configure_[rx|tx]_ring and that is only while the queue is disabled. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 3 +-- drivers/net/ixgbe/ixgbe_main.c | 35 ++++++++++------------------------- 2 files changed, 11 insertions(+), 27 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 149cf26b254..c993fc3ab8a 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -163,8 +163,7 @@ struct ixgbe_ring { #define IXGBE_RING_RX_PS_ENABLED (u8)(1) u8 flags; /* per ring feature flags */ - u16 head; - u16 tail; + u8 __iomem *tail; unsigned int total_bytes; unsigned int total_packets; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e838479d2d9..8f2afaa35dd 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -704,8 +704,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, " time_stamp <%lx>\n" " jiffies <%lx>\n", tx_ring->queue_index, - IXGBE_READ_REG(hw, tx_ring->head), - IXGBE_READ_REG(hw, tx_ring->tail), + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), tx_ring->next_to_use, eop, tx_ring->tx_buffer_info[eop].time_stamp, jiffies); return true; @@ -991,8 +991,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, skb->ip_summed = CHECKSUM_UNNECESSARY; } -static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, - struct ixgbe_ring *rx_ring, u32 val) +static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) { /* * Force memory writes to complete before letting h/w @@ -1001,7 +1000,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, * such as IA-64). 
*/ wmb(); - IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); + writel(val, rx_ring->tail); } /** @@ -1089,7 +1088,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; - ixgbe_release_rx_desc(&adapter->hw, rx_ring, i); + ixgbe_release_rx_desc(rx_ring, i); } } @@ -2465,8 +2464,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ring->count * sizeof(union ixgbe_adv_tx_desc)); IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); - ring->head = IXGBE_TDH(reg_idx); - ring->tail = IXGBE_TDT(reg_idx); + ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx); /* configure fetching thresholds */ if (adapter->rx_itr_setting == 0) { @@ -2791,8 +2789,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ring->count * sizeof(union ixgbe_adv_rx_desc)); IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); - ring->head = IXGBE_RDH(reg_idx); - ring->tail = IXGBE_RDT(reg_idx); + ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx); ixgbe_configure_srrctl(adapter, ring); ixgbe_configure_rscctl(adapter, ring); @@ -3730,11 +3727,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - - if (rx_ring->head) - writel(0, adapter->hw.hw_addr + rx_ring->head); - if (rx_ring->tail) - writel(0, adapter->hw.hw_addr + rx_ring->tail); } /** @@ -3767,11 +3759,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - - if (tx_ring->head) - writel(0, adapter->hw.hw_addr + tx_ring->head); - if (tx_ring->tail) - writel(0, adapter->hw.hw_addr + tx_ring->tail); } /** @@ -6116,8 +6103,7 @@ dma_error: return 0; } -static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, +static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, int tx_flags, int count, u32 paylen, u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; @@ -6182,7 +6168,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, wmb(); tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); + writel(i, tx_ring->tail); } static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, @@ -6414,8 +6400,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); txq->tx_bytes += skb->len; txq->tx_packets++; - ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, - hdr_len); + ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); } else { -- cgit v1.2.3-70-g09d2 From b6ec895ecd32c0070c3b2b17918c030275cd834d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:49 -0800 Subject: ixgbe: move device pointer into the ring structure This change is meant to simplify DMA map/unmap by providing a device pointer. As a result the adapter pointer can be dropped from many of the calls. 
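With the device pointer available as ring->dev, helpers that previously needed the adapter only to reach &pdev->dev can take just the ring. Reassembled from the hunk below, ixgbe_unmap_and_free_tx_resource() ends up looking roughly like this (the skb-freeing tail of the function is unchanged and only summarized in the final comment):

    void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
                                          struct ixgbe_tx_buffer *tx_buffer_info)
    {
            if (tx_buffer_info->dma) {
                    if (tx_buffer_info->mapped_as_page)
                            dma_unmap_page(tx_ring->dev, tx_buffer_info->dma,
                                           tx_buffer_info->length, DMA_TO_DEVICE);
                    else
                            dma_unmap_single(tx_ring->dev, tx_buffer_info->dma,
                                             tx_buffer_info->length, DMA_TO_DEVICE);
                    tx_buffer_info->dma = 0;
            }
            /* ... then free tx_buffer_info->skb and reset the bookkeeping
             * fields exactly as before ...
             */
    }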
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 11 +-- drivers/net/ixgbe/ixgbe_ethtool.c | 32 ++++---- drivers/net/ixgbe/ixgbe_main.c | 157 ++++++++++++++++++-------------------- 3 files changed, 93 insertions(+), 107 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index c993fc3ab8a..70ccab07465 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -148,6 +148,7 @@ struct ixgbe_queue_stats { struct ixgbe_ring { void *desc; /* descriptor ring memory */ + struct device *dev; /* device for DMA mapping */ union { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; @@ -454,10 +455,10 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter); extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); extern void ixgbe_reset(struct ixgbe_adapter *adapter); extern void ixgbe_set_ethtool_ops(struct net_device *netdev); -extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); -extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); -extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); -extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); +extern int ixgbe_setup_rx_resources(struct ixgbe_ring *); +extern int ixgbe_setup_tx_resources(struct ixgbe_ring *); +extern void ixgbe_free_rx_resources(struct ixgbe_ring *); +extern void ixgbe_free_tx_resources(struct ixgbe_ring *); extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); @@ -467,7 +468,7 @@ extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct net_device *, struct ixgbe_adapter *, struct ixgbe_ring *); -extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *, +extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, struct ixgbe_tx_buffer *); extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 81fa1ac1c9b..cc7804962b2 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -900,13 +900,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev, memcpy(&temp_tx_ring[i], adapter->tx_ring[i], sizeof(struct ixgbe_ring)); temp_tx_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(adapter, - &temp_tx_ring[i]); + err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); if (err) { while (i) { i--; - ixgbe_free_tx_resources(adapter, - &temp_tx_ring[i]); + ixgbe_free_tx_resources(&temp_tx_ring[i]); } goto clear_reset; } @@ -925,13 +923,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev, memcpy(&temp_rx_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); temp_rx_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(adapter, - &temp_rx_ring[i]); + err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); if (err) { while (i) { i--; - ixgbe_free_rx_resources(adapter, - &temp_rx_ring[i]); + ixgbe_free_rx_resources(&temp_rx_ring[i]); } goto err_setup; } @@ -946,8 +942,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, /* tx */ if (new_tx_count != adapter->tx_ring_count) { for (i = 0; i < adapter->num_tx_queues; i++) { - ixgbe_free_tx_resources(adapter, - 
adapter->tx_ring[i]); + ixgbe_free_tx_resources(adapter->tx_ring[i]); memcpy(adapter->tx_ring[i], &temp_tx_ring[i], sizeof(struct ixgbe_ring)); } @@ -957,8 +952,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, /* rx */ if (new_rx_count != adapter->rx_ring_count) { for (i = 0; i < adapter->num_rx_queues; i++) { - ixgbe_free_rx_resources(adapter, - adapter->rx_ring[i]); + ixgbe_free_rx_resources(adapter->rx_ring[i]); memcpy(adapter->rx_ring[i], &temp_rx_ring[i], sizeof(struct ixgbe_ring)); } @@ -1463,8 +1457,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) ixgbe_reset(adapter); - ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring); - ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring); + ixgbe_free_tx_resources(&adapter->test_tx_ring); + ixgbe_free_rx_resources(&adapter->test_rx_ring); } static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) @@ -1478,10 +1472,11 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) /* Setup Tx descriptor ring and Tx buffers */ tx_ring->count = IXGBE_DEFAULT_TXD; tx_ring->queue_index = 0; + tx_ring->dev = &adapter->pdev->dev; tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; tx_ring->numa_node = adapter->node; - err = ixgbe_setup_tx_resources(adapter, tx_ring); + err = ixgbe_setup_tx_resources(tx_ring); if (err) return 1; @@ -1496,11 +1491,12 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) /* Setup Rx Descriptor ring and Rx buffers */ rx_ring->count = IXGBE_DEFAULT_RXD; rx_ring->queue_index = 0; + rx_ring->dev = &adapter->pdev->dev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; rx_ring->numa_node = adapter->node; - err = ixgbe_setup_rx_resources(adapter, rx_ring); + err = ixgbe_setup_rx_resources(rx_ring); if (err) { ret_val = 4; goto err_nomem; @@ -1622,7 +1618,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ - dma_unmap_single(&adapter->pdev->dev, + dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, bufsz, DMA_FROM_DEVICE); @@ -1634,7 +1630,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, /* unmap buffer on Tx side */ tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; - ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); /* increment Rx/Tx next to clean counters */ rx_ntc++; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 8f2afaa35dd..be76dd9b94a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -600,18 +600,17 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, } } -void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, - struct ixgbe_tx_buffer - *tx_buffer_info) +void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *tx_buffer_info) { if (tx_buffer_info->dma) { if (tx_buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, + dma_unmap_page(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); else - dma_unmap_single(&adapter->pdev->dev, + dma_unmap_single(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); @@ -764,7 +763,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer_info->gso_segs; } - ixgbe_unmap_and_free_tx_resource(adapter, + 
ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } @@ -1011,7 +1010,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, u16 cleaned_count) { - struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; struct sk_buff *skb; @@ -1035,11 +1033,11 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, } if (!bi->dma) { - bi->dma = dma_map_single(&pdev->dev, + bi->dma = dma_map_single(rx_ring->dev, skb->data, rx_ring->rx_buf_len, DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, bi->dma)) { + if (dma_mapping_error(rx_ring->dev, bi->dma)) { adapter->alloc_rx_buff_failed++; bi->dma = 0; goto no_buffers; @@ -1058,12 +1056,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, if (!bi->page_dma) { /* use a half page if we're re-using */ bi->page_offset ^= PAGE_SIZE / 2; - bi->page_dma = dma_map_page(&pdev->dev, + bi->page_dma = dma_map_page(rx_ring->dev, bi->page, bi->page_offset, PAGE_SIZE / 2, DMA_FROM_DEVICE); - if (dma_mapping_error(&pdev->dev, + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { adapter->alloc_rx_page_failed++; bi->page_dma = 0; @@ -1151,7 +1149,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, int *work_done, int work_to_do) { struct ixgbe_adapter *adapter = q_vector->adapter; - struct pci_dev *pdev = adapter->pdev; union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; struct sk_buff *skb; @@ -1208,7 +1205,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, IXGBE_RSC_CB(skb)->delay_unmap = true; IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; } else { - dma_unmap_single(&pdev->dev, + dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, rx_ring->rx_buf_len, DMA_FROM_DEVICE); @@ -1218,8 +1215,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, } if (upper_len) { - dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); + dma_unmap_page(rx_ring->dev, + rx_buffer_info->page_dma, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); rx_buffer_info->page_dma = 0; skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, rx_buffer_info->page, @@ -1262,7 +1261,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, &(rx_ring->rsc_count)); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { if (IXGBE_RSC_CB(skb)->delay_unmap) { - dma_unmap_single(&pdev->dev, + dma_unmap_single(rx_ring->dev, IXGBE_RSC_CB(skb)->dma, rx_ring->rx_buf_len, DMA_FROM_DEVICE); @@ -3665,15 +3664,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue - * @adapter: board private structure * @rx_ring: ring to free buffers from **/ -static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) +static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) { - struct pci_dev *pdev = adapter->pdev; + struct device *dev = rx_ring->dev; unsigned long size; - unsigned int i; + u16 i; /* ring already cleared, nothing to do */ if (!rx_ring->rx_buffer_info) @@ -3685,7 +3682,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, rx_buffer_info = &rx_ring->rx_buffer_info[i]; if (rx_buffer_info->dma) { - dma_unmap_single(&pdev->dev, rx_buffer_info->dma, + dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, rx_ring->rx_buf_len, DMA_FROM_DEVICE); rx_buffer_info->dma = 0; @@ -3696,7 +3693,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, do { struct sk_buff *this = skb; if 
(IXGBE_RSC_CB(this)->delay_unmap) { - dma_unmap_single(&pdev->dev, + dma_unmap_single(dev, IXGBE_RSC_CB(this)->dma, rx_ring->rx_buf_len, DMA_FROM_DEVICE); @@ -3710,7 +3707,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, if (!rx_buffer_info->page) continue; if (rx_buffer_info->page_dma) { - dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, + dma_unmap_page(dev, rx_buffer_info->page_dma, PAGE_SIZE / 2, DMA_FROM_DEVICE); rx_buffer_info->page_dma = 0; } @@ -3731,15 +3728,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, /** * ixgbe_clean_tx_ring - Free Tx Buffers - * @adapter: board private structure * @tx_ring: ring to be cleaned **/ -static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) +static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) { struct ixgbe_tx_buffer *tx_buffer_info; unsigned long size; - unsigned int i; + u16 i; /* ring already cleared, nothing to do */ if (!tx_ring->tx_buffer_info) @@ -3748,7 +3743,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, /* Free all the Tx ring sk_buffs */ for (i = 0; i < tx_ring->count; i++) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); + ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; @@ -3770,7 +3765,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) int i; for (i = 0; i < adapter->num_rx_queues; i++) - ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); + ixgbe_clean_rx_ring(adapter->rx_ring[i]); } /** @@ -3782,7 +3777,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) int i; for (i = 0; i < adapter->num_tx_queues; i++) - ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); + ixgbe_clean_tx_ring(adapter->tx_ring[i]); } void ixgbe_down(struct ixgbe_adapter *adapter) @@ -4440,6 +4435,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) { int i; + int rx_count; int orig_node = adapter->node; for (i = 0; i < adapter->num_tx_queues; i++) { @@ -4458,6 +4454,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) goto err_tx_ring_allocation; ring->count = adapter->tx_ring_count; ring->queue_index = i; + ring->dev = &adapter->pdev->dev; ring->numa_node = adapter->node; adapter->tx_ring[i] = ring; @@ -4466,6 +4463,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) /* Restore the adapter's original node */ adapter->node = orig_node; + rx_count = adapter->rx_ring_count; for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; if (orig_node == -1) { @@ -4480,8 +4478,9 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); if (!ring) goto err_rx_ring_allocation; - ring->count = adapter->rx_ring_count; + ring->count = rx_count; ring->queue_index = i; + ring->dev = &adapter->pdev->dev; ring->numa_node = adapter->node; adapter->rx_ring[i] = ring; @@ -4938,15 +4937,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /** * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) - * @adapter: board private structure * @tx_ring: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ -int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) +int ixgbe_setup_tx_resources(struct 
ixgbe_ring *tx_ring) { - struct pci_dev *pdev = adapter->pdev; + struct device *dev = tx_ring->dev; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; @@ -4961,7 +4958,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) goto err; @@ -4974,7 +4971,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, err: vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } @@ -4993,7 +4990,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) int i, err = 0; for (i = 0; i < adapter->num_tx_queues; i++) { - err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); + err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; e_err(probe, "Allocation for Tx Queue %u failed\n", i); @@ -5005,48 +5002,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) /** * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) - * @adapter: board private structure * @rx_ring: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ -int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) +int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) { - struct pci_dev *pdev = adapter->pdev; + struct device *dev = rx_ring->dev; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); + rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vmalloc(size); - if (!rx_ring->rx_buffer_info) { - e_err(probe, "vmalloc allocation failed for the Rx " - "descriptor ring\n"); - goto alloc_failed; - } + if (!rx_ring->rx_buffer_info) + goto err; memset(rx_ring->rx_buffer_info, 0, size); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->desc) { - e_err(probe, "Memory allocation failed for the Rx " - "descriptor ring\n"); - vfree(rx_ring->rx_buffer_info); - goto alloc_failed; - } + if (!rx_ring->desc) + goto err; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return 0; - -alloc_failed: +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); return -ENOMEM; } @@ -5060,13 +5050,12 @@ alloc_failed: * * Return 0 on success, negative on failure **/ - static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) { int i, err = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); if (!err) continue; e_err(probe, "Allocation for Rx Queue %u failed\n", i); @@ -5078,23 +5067,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) /** * ixgbe_free_tx_resources - Free Tx Resources per Queue - * 
@adapter: board private structure * @tx_ring: Tx descriptor ring for a specific queue * * Free all transmit software resources **/ -void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) +void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) { - struct pci_dev *pdev = adapter->pdev; - - ixgbe_clean_tx_ring(adapter, tx_ring); + ixgbe_clean_tx_ring(tx_ring); vfree(tx_ring->tx_buffer_info); tx_ring->tx_buffer_info = NULL; - dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, - tx_ring->dma); + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); tx_ring->desc = NULL; } @@ -5111,28 +5100,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i]->desc) - ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); + ixgbe_free_tx_resources(adapter->tx_ring[i]); } /** * ixgbe_free_rx_resources - Free Rx Resources - * @adapter: board private structure * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ -void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) +void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) { - struct pci_dev *pdev = adapter->pdev; - - ixgbe_clean_rx_ring(adapter, rx_ring); + ixgbe_clean_rx_ring(rx_ring); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; - dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, - rx_ring->dma); + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, + rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } @@ -5149,7 +5138,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i]->desc) - ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); + ixgbe_free_rx_resources(adapter->rx_ring[i]); } /** @@ -5985,7 +5974,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, struct sk_buff *skb, u32 tx_flags, unsigned int first, const u8 hdr_len) { - struct pci_dev *pdev = adapter->pdev; + struct device *dev = tx_ring->dev; struct ixgbe_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; @@ -6008,10 +5997,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, tx_buffer_info->length = size; tx_buffer_info->mapped_as_page = false; - tx_buffer_info->dma = dma_map_single(&pdev->dev, + tx_buffer_info->dma = dma_map_single(dev, skb->data + offset, size, DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) + if (dma_mapping_error(dev, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; @@ -6044,12 +6033,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter, size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); tx_buffer_info->length = size; - tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, + tx_buffer_info->dma = dma_map_page(dev, frag->page, offset, size, DMA_TO_DEVICE); tx_buffer_info->mapped_as_page = true; - if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) + if (dma_mapping_error(dev, tx_buffer_info->dma)) goto dma_error; tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; @@ -6097,7 +6086,7 @@ dma_error: i += tx_ring->count; i--; tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); + 
ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } return 0; -- cgit v1.2.3-70-g09d2 From 5b7da51547cc3ab5461e45a8ee0ca73051416fda Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:50 -0800 Subject: ixgbe: combine some stats into a union to allow for Tx/Rx stats overlap This change moved some of the RX and TX stats into separate structures and them placed those structures in a union in order to help reduce the size of the ring structure. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 24 +++++++++++---- drivers/net/ixgbe/ixgbe_main.c | 68 ++++++++++++++++++++++++++++-------------- 2 files changed, 63 insertions(+), 29 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 70ccab07465..3c63ee6be2e 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -146,6 +146,19 @@ struct ixgbe_queue_stats { u64 bytes; }; +struct ixgbe_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + +struct ixgbe_rx_queue_stats { + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; +}; + struct ixgbe_ring { void *desc; /* descriptor ring memory */ struct device *dev; /* device for DMA mapping */ @@ -183,13 +196,12 @@ struct ixgbe_ring { struct ixgbe_queue_stats stats; struct u64_stats_sync syncp; - int numa_node; + union { + struct ixgbe_tx_queue_stats tx_stats; + struct ixgbe_rx_queue_stats rx_stats; + }; unsigned long reinit_state; - u64 rsc_count; /* stat for coalesced packets */ - u64 rsc_flush; /* stats for flushed packets */ - u32 restart_queue; /* track tx queue restarts */ - u32 non_eop_descs; /* track hardware descriptor chaining */ - + int numa_node; unsigned int size; /* length in bytes */ dma_addr_t dma; /* phys. 
address of descriptor ring */ struct rcu_head rcu; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index be76dd9b94a..a47e0909816 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -783,7 +783,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && !test_bit(__IXGBE_DOWN, &adapter->state)) { netif_wake_subqueue(netdev, tx_ring->queue_index); - ++tx_ring->restart_queue; + ++tx_ring->tx_stats.restart_queue; } } @@ -1024,7 +1024,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, skb = netdev_alloc_skb_ip_align(adapter->netdev, rx_ring->rx_buf_len); if (!skb) { - adapter->alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_rx_buff_failed++; goto no_buffers; } /* initialize queue mapping */ @@ -1038,7 +1038,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, rx_ring->rx_buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, bi->dma)) { - adapter->alloc_rx_buff_failed++; + rx_ring->rx_stats.alloc_rx_buff_failed++; bi->dma = 0; goto no_buffers; } @@ -1048,7 +1048,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, if (!bi->page) { bi->page = netdev_alloc_page(adapter->netdev); if (!bi->page) { - adapter->alloc_rx_page_failed++; + rx_ring->rx_stats.alloc_rx_page_failed++; goto no_buffers; } } @@ -1063,7 +1063,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, DMA_FROM_DEVICE); if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - adapter->alloc_rx_page_failed++; + rx_ring->rx_stats.alloc_rx_page_failed++; bi->page_dma = 0; goto no_buffers; } @@ -1258,7 +1258,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (staterr & IXGBE_RXD_STAT_EOP) { if (skb->prev) skb = ixgbe_transform_rsc_queue(skb, - &(rx_ring->rsc_count)); + &(rx_ring->rx_stats.rsc_count)); if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { if (IXGBE_RSC_CB(skb)->delay_unmap) { dma_unmap_single(rx_ring->dev, @@ -1269,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, IXGBE_RSC_CB(skb)->delay_unmap = false; } if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) - rx_ring->rsc_count += - skb_shinfo(skb)->nr_frags; + rx_ring->rx_stats.rsc_count += + skb_shinfo(skb)->nr_frags; else - rx_ring->rsc_count++; - rx_ring->rsc_flush++; + rx_ring->rx_stats.rsc_count++; + rx_ring->rx_stats.rsc_flush++; } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets++; @@ -1289,7 +1289,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, skb->next = next_buffer->skb; skb->next->prev = skb; } - rx_ring->non_eop_descs++; + rx_ring->rx_stats.non_eop_descs++; goto next_desc; } @@ -5406,10 +5406,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; u64 total_mpc = 0; u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; - u64 non_eop_descs = 0, restart_queue = 0; - struct ixgbe_hw_stats *hwstats = &adapter->stats; + u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; + u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 bytes = 0, packets = 0; if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) @@ -5422,21 +5424,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) adapter->hw_rx_no_dma_resources += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); for (i = 0; i < adapter->num_rx_queues; i++) { - rsc_count += 
adapter->rx_ring[i]->rsc_count; - rsc_flush += adapter->rx_ring[i]->rsc_flush; + rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; + rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; } adapter->rsc_total_count = rsc_count; adapter->rsc_total_flush = rsc_flush; } + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + netdev->stats.rx_bytes = bytes; + netdev->stats.rx_packets = packets; + + bytes = 0; + packets = 0; /* gather some stats to the adapter struct that are per queue */ - for (i = 0; i < adapter->num_tx_queues; i++) - restart_queue += adapter->tx_ring[i]->restart_queue; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + bytes += tx_ring->stats.bytes; + packets += tx_ring->stats.packets; + } adapter->restart_queue = restart_queue; - - for (i = 0; i < adapter->num_rx_queues; i++) - non_eop_descs += adapter->rx_ring[i]->non_eop_descs; - adapter->non_eop_descs = non_eop_descs; + adapter->tx_busy = tx_busy; + netdev->stats.tx_bytes = bytes; + netdev->stats.tx_packets = packets; hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); for (i = 0; i < 8; i++) { @@ -6223,7 +6245,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, /* A reprieve! - use start_queue because it doesn't call schedule */ netif_start_subqueue(netdev, tx_ring->queue_index); - ++tx_ring->restart_queue; + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -6339,7 +6361,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { - adapter->tx_busy++; + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } -- cgit v1.2.3-70-g09d2 From fc77dc3cc15144bbaf18203e9ef7a3e1beedfc3f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:51 -0800 Subject: ixgbe: add a netdev pointer to the ring structure This change places a netdev pointer directly into the ring structure. This way we can avoid having to determine which netdev we are supposed to be using and can just access the one on the ring directly. As a result of this change further collapse of the code is possible by dropping the adapter from ixgbe_alloc_rx_buffers, and the netdev pointer from ixgbe_xmit_frame_ring_adv and ixgbe_maybe_stop_tx. 
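As a rough illustration of the simplification this enables (a sketch only, not code taken from the patch; example_wake_queue is a made-up helper), a Tx helper no longer needs the adapter or a separate netdev argument once the ring carries its own netdev pointer:

/* sketch: with ring->netdev set at allocation time, helpers can reach
 * the net_device without an adapter being passed in */
static void example_wake_queue(struct ixgbe_ring *tx_ring)
{
	if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index))
		netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
}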
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 6 ++--- drivers/net/ixgbe/ixgbe_ethtool.c | 11 ++++---- drivers/net/ixgbe/ixgbe_main.c | 55 +++++++++++++++++++++------------------ 3 files changed, 36 insertions(+), 36 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 3c63ee6be2e..dc4b97e5777 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -162,6 +162,7 @@ struct ixgbe_rx_queue_stats { struct ixgbe_ring { void *desc; /* descriptor ring memory */ struct device *dev; /* device for DMA mapping */ + struct net_device *netdev; /* netdev ring belongs to */ union { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; @@ -477,14 +478,11 @@ extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, - struct net_device *, struct ixgbe_adapter *, struct ixgbe_ring *); extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, struct ixgbe_tx_buffer *); -extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - u16 cleaned_count); +extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); extern void ixgbe_write_eitr(struct ixgbe_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index cc7804962b2..c19594a4e8f 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1473,6 +1473,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) tx_ring->count = IXGBE_DEFAULT_TXD; tx_ring->queue_index = 0; tx_ring->dev = &adapter->pdev->dev; + tx_ring->netdev = adapter->netdev; tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; tx_ring->numa_node = adapter->node; @@ -1492,6 +1493,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) rx_ring->count = IXGBE_DEFAULT_RXD; rx_ring->queue_index = 0; rx_ring->dev = &adapter->pdev->dev; + rx_ring->netdev = adapter->netdev; rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; rx_ring->numa_node = adapter->node; @@ -1595,8 +1597,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb, return 13; } -static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, +static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, struct ixgbe_ring *tx_ring, unsigned int size) { @@ -1646,7 +1647,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, } /* re-map buffers to ring, store next to clean values */ - ixgbe_alloc_rx_buffers(adapter, rx_ring, count); + ixgbe_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; tx_ring->next_to_clean = tx_ntc; @@ -1690,7 +1691,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) for (i = 0; i < 64; i++) { skb_get(skb); tx_ret_val = ixgbe_xmit_frame_ring(skb, - adapter->netdev, adapter, tx_ring); if (tx_ret_val == NETDEV_TX_OK) @@ -1705,8 +1705,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) /* allow 200 milliseconds for packets to go from Tx to Rx */ msleep(200); - good_cnt = ixgbe_clean_test_rings(adapter, rx_ring, - tx_ring, size); + good_cnt = 
ixgbe_clean_test_rings(rx_ring, tx_ring, size); if (good_cnt != 64) { ret_val = 13; break; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index a47e0909816..29523cec270 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -733,7 +733,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring) { struct ixgbe_adapter *adapter = q_vector->adapter; - struct net_device *netdev = adapter->netdev; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbe_tx_buffer *tx_buffer_info; unsigned int i, eop, count = 0; @@ -774,15 +773,15 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(count && netif_carrier_ok(netdev) && + if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); - if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && !test_bit(__IXGBE_DOWN, &adapter->state)) { - netif_wake_subqueue(netdev, tx_ring->queue_index); + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; } } @@ -1004,24 +1003,27 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) /** * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split - * @adapter: address of board private structure + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace **/ -void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring, - u16 cleaned_count) +void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *bi; struct sk_buff *skb; u16 i = rx_ring->next_to_use; + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev) + return; + while (cleaned_count--) { rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); bi = &rx_ring->rx_buffer_info[i]; skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(adapter->netdev, + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, rx_ring->rx_buf_len); if (!skb) { rx_ring->rx_stats.alloc_rx_buff_failed++; @@ -1046,7 +1048,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { if (!bi->page) { - bi->page = netdev_alloc_page(adapter->netdev); + bi->page = netdev_alloc_page(rx_ring->netdev); if (!bi->page) { rx_ring->rx_stats.alloc_rx_page_failed++; goto no_buffers; @@ -1304,7 +1306,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, total_rx_bytes += skb->len; total_rx_packets++; - skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); #ifdef IXGBE_FCOE /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { @@ -1320,7 +1322,7 @@ next_desc: /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { - ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } @@ -1335,14 +1337,14 @@ next_desc: cleaned_count = IXGBE_DESC_UNUSED(rx_ring); if (cleaned_count) - ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 
#ifdef IXGBE_FCOE /* include DDPed FCoE data */ if (ddp_bytes > 0) { unsigned int mss; - mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - + mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - sizeof(struct fc_frame_header) - sizeof(struct fcoe_crc_eof); if (mss > 512) @@ -2810,7 +2812,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); ixgbe_rx_desc_queue_enable(adapter, ring); - ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); + ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring)); } static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) @@ -4455,6 +4457,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) ring->count = adapter->tx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; ring->numa_node = adapter->node; adapter->tx_ring[i] = ring; @@ -4481,6 +4484,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) ring->count = rx_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; ring->numa_node = adapter->node; adapter->rx_ring[i] = ring; @@ -6229,10 +6233,9 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); } -static int __ixgbe_maybe_stop_tx(struct net_device *netdev, - struct ixgbe_ring *tx_ring, int size) +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) { - netif_stop_subqueue(netdev, tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ @@ -6244,17 +6247,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev, return -EBUSY; /* A reprieve! 
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(netdev, tx_ring->queue_index); + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; return 0; } -static int ixgbe_maybe_stop_tx(struct net_device *netdev, - struct ixgbe_ring *tx_ring, int size) +static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) { if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) return 0; - return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); + return __ixgbe_maybe_stop_tx(tx_ring, size); } static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) @@ -6299,10 +6301,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) return skb_tx_hash(dev, skb); } -netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, +netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) { + struct net_device *netdev = tx_ring->netdev; struct netdev_queue *txq; unsigned int first; unsigned int tx_flags = 0; @@ -6360,7 +6363,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { + if (ixgbe_maybe_stop_tx(tx_ring, count)) { tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; } @@ -6412,7 +6415,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev txq->tx_bytes += skb->len; txq->tx_packets++; ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len); - ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); } else { dev_kfree_skb_any(skb); @@ -6429,7 +6432,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd struct ixgbe_ring *tx_ring; tx_ring = adapter->tx_ring[skb->queue_mapping]; - return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); } /** -- cgit v1.2.3-70-g09d2 From 5f5ae6fc86083526088e2c2ca4454e0f44f1e0cb Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:52 -0800 Subject: ixgbe: move ixgbe_clear_interrupt_scheme to before pci_save_state The main reason for this change is to keep the suspend/resume logic matched up. The clear_interrupt_scheme function will disable MSI-X which will affect the PCIe configuration space. Therefore we will want to do it before we save state to avoid having the interrupt state restored by pci_restore_state, and then trying to re-enable MSI/MSI-X interrupts via ixgbe_setup_interrupt_scheme.
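The resulting suspend-path ordering can be sketched as follows (illustrative only; example_shutdown_order is not a function in the driver):

/* sketch: the interrupt scheme is torn down before the config space
 * snapshot, so pci_restore_state() later restores an MSI-X-disabled state */
static int example_shutdown_order(struct pci_dev *pdev,
				  struct ixgbe_adapter *adapter)
{
	ixgbe_clear_interrupt_scheme(adapter);	/* disables MSI-X */
	return pci_save_state(pdev);		/* snapshot taken afterwards */
}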
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 29523cec270..cbb3570f920 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5327,6 +5327,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) ixgbe_free_all_rx_resources(adapter); } + ixgbe_clear_interrupt_scheme(adapter); + #ifdef CONFIG_PM retval = pci_save_state(pdev); if (retval) @@ -5360,8 +5362,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) *enable_wake = !!wufc; - ixgbe_clear_interrupt_scheme(adapter); - ixgbe_release_hw_control(adapter); pci_disable_device(pdev); -- cgit v1.2.3-70-g09d2 From 01fa7d905fe9a5b045615fbde19e6c0f78063206 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:53 -0800 Subject: ixgbe: remove residual code left over from earlier combining of TXDCTL Missed some code that was left floating around in the DCB configuration for the TXDCTL register. As a result the register was being messed with in two different spots when we only needed to do the change once. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index cbb3570f920..dd73ebc545d 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3328,8 +3328,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - u32 txdctl; - int i, j; if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { if (hw->mac.type == ixgbe_mac_82598EB) @@ -3350,20 +3348,13 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, DCB_RX_CONFIG); - /* reconfigure the hardware */ - ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); - - for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i]->reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); - /* PThresh workaround for Tx hang with DFP enabled. */ - txdctl |= 32; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); - } /* Enable VLAN tag insert/strip */ adapter->netdev->features |= NETIF_F_HW_VLAN_RX; hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); + + /* reconfigure the hardware */ + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); } #endif -- cgit v1.2.3-70-g09d2 From c60fbb00f0400792adf873dbacd431885653b77d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:54 -0800 Subject: ixgbe: move adapter into pci_dev driver data instead of netdev This change moves an adapter pointer into the private portion of the pci_dev instead of a pointer to the netdev. The reason for this change is because in most cases we just want the adapter anyway. In addition as we start moving toward multiple netdevs per port we may want to move the adapter pointer out of the netdevs entirely. 
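The access pattern this gives the PCI callbacks can be sketched like so (example_pci_callback is hypothetical, not part of the patch):

/* sketch: the adapter comes straight from the pci_dev; the netdev is
 * only dereferenced when a callback actually needs it */
static void example_pci_callback(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev))
		netif_device_detach(netdev);
}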
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 30 ++++++++++++++---------------- drivers/net/ixgbe/ixgbe_sriov.c | 3 +-- 2 files changed, 15 insertions(+), 18 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index dd73ebc545d..75e25bc91a9 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -887,8 +887,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) static int __ixgbe_notify_dca(struct device *dev, void *data) { - struct net_device *netdev = dev_get_drvdata(dev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = dev_get_drvdata(dev); unsigned long event = *(unsigned long *)data; switch (event) { @@ -5255,8 +5254,8 @@ static int ixgbe_close(struct net_device *netdev) #ifdef CONFIG_PM static int ixgbe_resume(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; u32 err; pci_set_power_state(pdev, PCI_D0); @@ -5287,7 +5286,7 @@ static int ixgbe_resume(struct pci_dev *pdev) IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); if (netif_running(netdev)) { - err = ixgbe_open(adapter->netdev); + err = ixgbe_open(netdev); if (err) return err; } @@ -5300,8 +5299,8 @@ static int ixgbe_resume(struct pci_dev *pdev) static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; u32 ctrl, fctrl; u32 wufc = adapter->wol; @@ -6762,8 +6761,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, SET_NETDEV_DEV(netdev, &pdev->dev); - pci_set_drvdata(pdev, netdev); adapter = netdev_priv(netdev); + pci_set_drvdata(pdev, adapter); adapter->netdev = netdev; adapter->pdev = pdev; @@ -7086,8 +7085,8 @@ err_dma: **/ static void __devexit ixgbe_remove(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; set_bit(__IXGBE_DOWN, &adapter->state); /* clear the module not found bit to make sure the worker won't @@ -7157,8 +7156,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); @@ -7181,8 +7180,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, */ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); pci_ers_result_t result; int err; @@ -7220,8 +7218,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) */ static void ixgbe_io_resume(struct pci_dev *pdev) { - struct net_device *netdev = pci_get_drvdata(pdev); - struct 
ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; if (netif_running(netdev)) { if (ixgbe_up(adapter)) { diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 93f40bcf683..6e3e94b5a5f 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { unsigned char vf_mac_addr[6]; - struct net_device *netdev = pci_get_drvdata(pdev); - struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); bool enable = ((event_mask & 0x10000000U) != 0); -- cgit v1.2.3-70-g09d2 From 33cf09c9586a0dce472ecd2aac13e8140c9ed1a1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:55 -0800 Subject: ixgbe: move CPU variable from ring into q_vector, add ring->q_vector This is the start of work to sort out what belongs in the rings and what belongs in the q_vector. Items like the CPU variable used for DCA make much more sense in the q_vector since the CPU is a per-interrupt thing rather than a per-ring thing. I also added a back-pointer from the ring to the q_vector. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 9 +-- drivers/net/ixgbe/ixgbe_main.c | 174 +++++++++++++++++++++++++---------------- 2 files changed, 111 insertions(+), 72 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index dc4b97e5777..e87b0ffd583 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -183,11 +183,6 @@ struct ixgbe_ring { unsigned int total_bytes; unsigned int total_packets; -#ifdef CONFIG_IXGBE_DCA - /* cpu for tx queue */ - int cpu; -#endif - u16 work_limit; /* max work per interrupt */ u16 reg_idx; /* holds the special value that gets * the hardware register offset @@ -206,6 +201,7 @@ struct ixgbe_ring { unsigned int size; /* length in bytes */ dma_addr_t dma; /* phys.
address of descriptor ring */ struct rcu_head rcu; + struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */ } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -251,6 +247,9 @@ struct ixgbe_q_vector { unsigned int v_idx; /* index of q_vector within array, also used for * finding the bit in EICR and friends that * represents the vector for this ring */ +#ifdef CONFIG_IXGBE_DCA + int cpu; /* CPU for DCA */ +#endif struct napi_struct napi; DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 75e25bc91a9..dc78736d305 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -810,63 +810,98 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, #ifdef CONFIG_IXGBE_DCA static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *rx_ring) + struct ixgbe_ring *rx_ring, + int cpu) { + struct ixgbe_hw *hw = &adapter->hw; u32 rxctrl; - int cpu = get_cpu(); - int q = rx_ring->reg_idx; + u8 reg_idx = rx_ring->reg_idx; - if (rx_ring->cpu != cpu) { - rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; - rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; - rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << - IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); - } - rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; - rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; - rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); - rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | - IXGBE_DCA_RXCTRL_DESC_HSRO_EN); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); - rx_ring->cpu = cpu; + rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx)); + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; + rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + break; + case ixgbe_mac_82599EB: + rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; + rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << + IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); + break; + default: + break; } - put_cpu(); + rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; + rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN); + rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); } static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring, + int cpu) { + struct ixgbe_hw *hw = &adapter->hw; u32 txctrl; + u8 reg_idx = tx_ring->reg_idx; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; + txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); + break; + case ixgbe_mac_82599EB: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; + txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << + IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); + txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl); + 
break; + default: + break; + } +} + +static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector) +{ + struct ixgbe_adapter *adapter = q_vector->adapter; int cpu = get_cpu(); - int q = tx_ring->reg_idx; - struct ixgbe_hw *hw = &adapter->hw; + long r_idx; + int i; - if (tx_ring->cpu != cpu) { - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; - txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); - txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); - txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; - txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << - IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); - txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); - } - tx_ring->cpu = cpu; + if (q_vector->cpu == cpu) + goto out_no_update; + + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); + for (i = 0; i < q_vector->txr_count; i++) { + ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu); + r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, + r_idx + 1); } + + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + for (i = 0; i < q_vector->rxr_count; i++) { + ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu); + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, + r_idx + 1); + } + + q_vector->cpu = cpu; +out_no_update: put_cpu(); } static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { + int num_q_vectors; int i; if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) @@ -875,13 +910,14 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) /* always use CB2 mode, difference is masked in the CB driver */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); - for (i = 0; i < adapter->num_tx_queues; i++) { - adapter->tx_ring[i]->cpu = -1; - ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]); - } - for (i = 0; i < adapter->num_rx_queues; i++) { - adapter->rx_ring[i]->cpu = -1; - ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + else + num_q_vectors = 1; + + for (i = 0; i < num_q_vectors; i++) { + adapter->q_vector[i]->cpu = -1; + ixgbe_update_dca(adapter->q_vector[i]); } } @@ -890,6 +926,9 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) struct ixgbe_adapter *adapter = dev_get_drvdata(dev); unsigned long event = *(unsigned long *)data; + if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) + return 0; + switch (event) { case DCA_PROVIDER_ADD: /* if we're already enabled, don't do it again */ @@ -1827,8 +1866,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) int r_idx; int i; +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { + for (i = 0; i < q_vector->rxr_count; i++) { rx_ring = adapter->rx_ring[r_idx]; rx_ring->total_bytes = 0; rx_ring->total_packets = 0; @@ -1839,7 +1883,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) if (!q_vector->rxr_count) return IRQ_HANDLED; - /* disable interrupts on this vector only */ /* EIAM disabled interrupts (on this vector) for us */ napi_schedule(&q_vector->napi); @@ -1898,13 +1941,14 @@ static 
int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) int work_done = 0; long r_idx; - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = adapter->rx_ring[r_idx]; #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_rx_dca(adapter, rx_ring); + ixgbe_update_dca(q_vector); #endif + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); + rx_ring = adapter->rx_ring[r_idx]; + ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); /* If all Rx work done, exit the polling mode */ @@ -1938,13 +1982,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) long r_idx; bool tx_clean_complete = true; +#ifdef CONFIG_IXGBE_DCA + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +#endif + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { ring = adapter->tx_ring[r_idx]; -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_tx_dca(adapter, ring); -#endif tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx + 1); @@ -1957,10 +2002,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { ring = adapter->rx_ring[r_idx]; -#ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_rx_dca(adapter, ring); -#endif ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx + 1); @@ -1999,13 +2040,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget) int work_done = 0; long r_idx; - r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); - tx_ring = adapter->tx_ring[r_idx]; #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) - ixgbe_update_tx_dca(adapter, tx_ring); + ixgbe_update_dca(q_vector); #endif + r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); + tx_ring = adapter->tx_ring[r_idx]; + if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) work_done = budget; @@ -3880,10 +3922,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget) int tx_clean_complete, work_done = 0; #ifdef CONFIG_IXGBE_DCA - if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { - ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); - ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]); - } + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); #endif tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); -- cgit v1.2.3-70-g09d2 From 7d637bcc8f461f19e1d018078792ec0cd9b07b1d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:56 -0800 Subject: ixgbe: add a state flags to ring This change adds a set of state flags to the rings that allow them to independently function allowing for features like RSC, packet split, and TX hang detection to be done per ring instead of for the entire device. This is accomplished by re-purposing the flow director reinit_state member and making it a global state instead since a long for a single bit flag is a bit wasteful. 
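As a usage sketch (assuming only the accessor macros introduced in the hunk below; example_configure_ring is made up), a feature such as RSC can now be flipped and tested per ring rather than per adapter:

/* sketch: per-ring feature state via the new bit helpers */
static void example_configure_ring(struct ixgbe_ring *rx_ring, bool want_rsc)
{
	if (want_rsc)
		set_ring_rsc_enabled(rx_ring);
	else
		clear_ring_rsc_enabled(rx_ring);

	if (ring_is_rsc_enabled(rx_ring)) {
		/* RSC-specific setup for this ring only */
	}
}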
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 44 ++++++++++++++++++++------- drivers/net/ixgbe/ixgbe_main.c | 67 ++++++++++++++++++++++++------------------ 2 files changed, 72 insertions(+), 39 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index e87b0ffd583..160ce923454 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -159,6 +159,31 @@ struct ixgbe_rx_queue_stats { u64 alloc_rx_buff_failed; }; +enum ixbge_ring_state_t { + __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_DETECT_HANG, + __IXGBE_RX_PS_ENABLED, + __IXGBE_RX_RSC_ENABLED, +}; + +#define ring_is_ps_enabled(ring) \ + test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define set_ring_ps_enabled(ring) \ + set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define clear_ring_ps_enabled(ring) \ + clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) \ + test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) +#define ring_is_rsc_enabled(ring) \ + test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define set_ring_rsc_enabled(ring) \ + set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) +#define clear_ring_rsc_enabled(ring) \ + clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) struct ixgbe_ring { void *desc; /* descriptor ring memory */ struct device *dev; /* device for DMA mapping */ @@ -167,6 +192,7 @@ struct ixgbe_ring { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; }; + unsigned long state; u8 atr_sample_rate; u8 atr_count; u16 count; /* amount of descriptors */ @@ -175,28 +201,25 @@ struct ixgbe_ring { u16 next_to_clean; u8 queue_index; /* needed for multiqueue queue management */ + u8 reg_idx; /* holds the special value that gets + * the hardware register offset + * associated with this ring, which is + * different for DCB and RSS modes + */ + + u16 work_limit; /* max work per interrupt */ -#define IXGBE_RING_RX_PS_ENABLED (u8)(1) - u8 flags; /* per ring feature flags */ u8 __iomem *tail; unsigned int total_bytes; unsigned int total_packets; - u16 work_limit; /* max work per interrupt */ - u16 reg_idx; /* holds the special value that gets - * the hardware register offset - * associated with this ring, which is - * different for DCB and RSS modes - */ - struct ixgbe_queue_stats stats; struct u64_stats_sync syncp; union { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; }; - unsigned long reinit_state; int numa_node; unsigned int size; /* length in bytes */ dma_addr_t dma; /* phys. 
address of descriptor ring */ @@ -441,7 +464,6 @@ enum ixbge_state_t { __IXGBE_TESTING, __IXGBE_RESETTING, __IXGBE_DOWN, - __IXGBE_FDIR_INIT_DONE, __IXGBE_SFP_MODULE_NOT_FOUND }; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index dc78736d305..b798501500e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -687,7 +687,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of eop */ - adapter->detect_tx_hung = false; + clear_check_for_tx_hang(tx_ring); if (tx_ring->tx_buffer_info[eop].time_stamp && time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && ixgbe_tx_xon_state(adapter, tx_ring)) { @@ -786,13 +786,12 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, } } - if (adapter->detect_tx_hung) { - if (ixgbe_check_tx_hang(adapter, tx_ring, i)) { - /* schedule immediate reset if we believe we hung */ - e_info(probe, "tx hang %d detected, resetting " - "adapter\n", adapter->tx_timeout_count + 1); - ixgbe_tx_timeout(adapter->netdev); - } + if (check_for_tx_hang(tx_ring) && + ixgbe_check_tx_hang(adapter, tx_ring, i)) { + /* schedule immediate reset if we believe we hung */ + e_info(probe, "tx hang %d detected, resetting " + "adapter\n", adapter->tx_timeout_count + 1); + ixgbe_tx_timeout(adapter->netdev); } /* re-arm the interrupt */ @@ -1084,7 +1083,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) } } - if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (ring_is_ps_enabled(rx_ring)) { if (!bi->page) { bi->page = netdev_alloc_page(rx_ring->netdev); if (!bi->page) { @@ -1214,7 +1213,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, (*work_done)++; rmb(); /* read descriptor and rx_buffer_info after status DD */ - if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (ring_is_ps_enabled(rx_ring)) { hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> IXGBE_RXDADV_HDRBUFLEN_SHIFT; @@ -1284,7 +1283,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(next_rxd); cleaned_count++; - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) + if (ring_is_rsc_enabled(rx_ring)) rsc_count = ixgbe_get_rsc_count(rx_desc); if (rsc_count) { @@ -1299,7 +1298,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (skb->prev) skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rx_stats.rsc_count)); - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + if (ring_is_rsc_enabled(rx_ring)) { if (IXGBE_RSC_CB(skb)->delay_unmap) { dma_unmap_single(rx_ring->dev, IXGBE_RSC_CB(skb)->dma, @@ -1308,7 +1307,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, IXGBE_RSC_CB(skb)->dma = 0; IXGBE_RSC_CB(skb)->delay_unmap = false; } - if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) + if (ring_is_ps_enabled(rx_ring)) rx_ring->rx_stats.rsc_count += skb_shinfo(skb)->nr_frags; else @@ -1320,7 +1319,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_ring->stats.bytes += skb->len; u64_stats_update_end(&rx_ring->syncp); } else { - if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (ring_is_ps_enabled(rx_ring)) { rx_buffer_info->skb = next_buffer->skb; rx_buffer_info->dma = next_buffer->dma; next_buffer->skb = skb; @@ -1782,8 +1781,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring 
*tx_ring = adapter->tx_ring[i]; - if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, - &tx_ring->reinit_state)) + if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, + &tx_ring->state)) schedule_work(&adapter->fdir_reinit_task); } } @@ -2522,7 +2521,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } /* reinitialize flowdirector state */ - set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); /* enable queue */ txdctl |= IXGBE_TXDCTL_ENABLE; @@ -2632,7 +2631,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & IXGBE_SRRCTL_BSIZEHDR_MASK; - if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (ring_is_ps_enabled(rx_ring)) { #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #else @@ -2727,7 +2726,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int rx_buf_len; u16 reg_idx = ring->reg_idx; - if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + if (!ring_is_rsc_enabled(ring)) return; rx_buf_len = ring->rx_buf_len; @@ -2738,7 +2737,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, * total size of max desc * buf_len is not greater * than 65535 */ - if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { + if (ring_is_ps_enabled(ring)) { #if (MAX_SKB_FRAGS > 16) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; #elif (MAX_SKB_FRAGS > 8) @@ -2976,19 +2975,28 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) rx_ring->rx_buf_len = rx_buf_len; if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) - rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; + set_ring_ps_enabled(rx_ring); + else + clear_ring_ps_enabled(rx_ring); + + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); else - rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; + clear_ring_rsc_enabled(rx_ring); #ifdef IXGBE_FCOE if (netdev->features & NETIF_F_FCOE_MTU) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; if ((i >= f->mask) && (i < f->mask + f->indices)) { - rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; + clear_ring_ps_enabled(rx_ring); if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE; + } else if (!ring_is_rsc_enabled(rx_ring) && + !ring_is_ps_enabled(rx_ring)) { + rx_ring->rx_buf_len = + IXGBE_FCOE_JUMBO_FRAME_SIZE; } } #endif /* IXGBE_FCOE */ @@ -5729,8 +5737,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { for (i = 0; i < adapter->num_tx_queues; i++) - set_bit(__IXGBE_FDIR_INIT_DONE, - &(adapter->tx_ring[i]->reinit_state)); + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); } else { e_err(probe, "failed to finish FDIR re-initialization, " "ignored adding FDIR ATR filters\n"); @@ -5816,7 +5824,10 @@ static void ixgbe_watchdog_task(struct work_struct *work) netif_carrier_on(netdev); } else { /* Force detection of hung controller */ - adapter->detect_tx_hung = true; + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = adapter->tx_ring[i]; + set_check_for_tx_hang(tx_ring); + } } } else { adapter->link_up = false; @@ -6434,8 +6445,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (tx_ring->atr_sample_rate) { ++tx_ring->atr_count; if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) && - test_bit(__IXGBE_FDIR_INIT_DONE, - &tx_ring->reinit_state)) { + test_bit(__IXGBE_TX_FDIR_INIT_DONE, + &tx_ring->state)) { 
ixgbe_atr(adapter, skb, tx_ring->queue_index, tx_flags, protocol); tx_ring->atr_count = 0; -- cgit v1.2.3-70-g09d2 From 73c4b7cdd25a8a769baf6dae5bc498400a9ddd93 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:57 -0800 Subject: ixgbe: cleanup race conditions in link setup This change makes it so that we perform link setup with interrupts disabled. If the SFP has not been detected previously we will schedule the SFP detection task to run in order to detect link. By doing this we avoid the possibility of interrupts firing in the middle of our link setup during ixgbe_up_complete. In addition this change makes it so that the multi-speed fiber setup and SFP setup are not mutually exclusive. This addresses issues seen in which a link would only come up at 1G on some multi-speed fiber modules. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 47 +++++++++++++++++------------------------- 1 file changed, 19 insertions(+), 28 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b798501500e..0128fe666f0 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1712,17 +1712,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; + if (eicr & IXGBE_EICR_GPI_SDP2) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + schedule_work(&adapter->sfp_config_module_task); + } + if (eicr & IXGBE_EICR_GPI_SDP1) { /* Clear the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); - schedule_work(&adapter->multispeed_fiber_task); - } else if (eicr & IXGBE_EICR_GPI_SDP2) { - /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); - schedule_work(&adapter->sfp_config_module_task); - } else { - /* Interrupt isn't for us... */ - return; + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + schedule_work(&adapter->multispeed_fiber_task); } } @@ -3587,6 +3588,14 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); + if (ixgbe_is_sfp(hw)) { + ixgbe_sfp_link_config(adapter); + } else { + err = ixgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); + } + /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_EICR); ixgbe_irq_enable(adapter, true, true); @@ -3609,26 +3618,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) * If we're not hot-pluggable SFP+, we just need to configure link * and bring it up. */ - if (hw->phy.type == ixgbe_phy_unknown) { - err = hw->phy.ops.identify(hw); - if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { - /* - * Take the device down and schedule the sfp tasklet - * which will unregister_netdev and log it.
- */ - ixgbe_down(adapter); - schedule_work(&adapter->sfp_config_module_task); - return err; - } - } - - if (ixgbe_is_sfp(hw)) { - ixgbe_sfp_link_config(adapter); - } else { - err = ixgbe_non_sfp_link_config(hw); - if (err) - e_err(probe, "link_config FAILED %d\n", err); - } + if (hw->phy.type == ixgbe_phy_unknown) + schedule_work(&adapter->sfp_config_module_task); /* enable transmits */ netif_tx_start_all_queues(adapter->netdev); -- cgit v1.2.3-70-g09d2 From 80fba3f4341b1c98430bee620b507d3f5b7086cd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:57 -0800 Subject: ixgbe: Disable RSC when ITR setting is too high to allow RSC RSC will flush its descriptors every time the interrupt throttle timer expires. In addition there are known issues with RSC when the rx-usecs value is set too low. As such we are forced to clear the RSC_ENABLED bit and reset the adapter when the rx-usecs value is set too low. However we do not need to clear the NETIF_F_LRO flag because it is used to indicate that the user wants to leave the LRO feature enabled, and in fact with this change we will now re-enable RSC as soon as the rx-usecs value is increased and the flag is still set. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 90 +++++++++++++++++++++++---------------- 1 file changed, 53 insertions(+), 37 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index c19594a4e8f..561d47895d8 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1975,6 +1975,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev, return 0; } +/* + * this function must be called before setting the new value of + * rx_itr_setting + */ +static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter, + struct ethtool_coalesce *ec) +{ + struct net_device *netdev = adapter->netdev; + + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) + return false; + + /* if interrupt rate is too high then disable RSC */ + if (ec->rx_coalesce_usecs != 1 && + ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) { + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + return true; + } + } else { + /* check the feature flag value and enable RSC if necessary */ + if ((netdev->features & NETIF_F_LRO) && + !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + e_info(probe, "rx-usecs set to %d, " + "re-enabling RSC\n", + ec->rx_coalesce_usecs); + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + return true; + } + } + return false; +} + static int ixgbe_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { @@ -1992,17 +2027,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev, adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; if (ec->rx_coalesce_usecs > 1) { - u32 max_int; - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) - max_int = IXGBE_MAX_RSC_INT_RATE; - else - max_int = IXGBE_MAX_INT_RATE; - /* check the limits */ - if ((1000000/ec->rx_coalesce_usecs > max_int) || + if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) return -EINVAL; + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter, ec); + /* store the value in ints/second */ adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; @@ -2011,32 
+2043,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev, /* clear the lower bit as its used for dynamic state */ adapter->rx_itr_setting &= ~1; } else if (ec->rx_coalesce_usecs == 1) { + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter, ec); + /* 1 means dynamic mode */ adapter->rx_eitr_param = 20000; adapter->rx_itr_setting = 1; } else { + /* check the old value and enable RSC if necessary */ + need_reset = ixgbe_update_rsc(adapter, ec); /* * any other value means disable eitr, which is best * served by setting the interrupt rate very high */ adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; adapter->rx_itr_setting = 0; - - /* - * if hardware RSC is enabled, disable it when - * setting low latency mode, to avoid errata, assuming - * that when the user set low latency mode they want - * it at the cost of anything else - */ - if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { - adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; - if (netdev->features & NETIF_F_LRO) { - netdev->features &= ~NETIF_F_LRO; - e_info(probe, "rx-usecs set to 0, " - "disabling RSC\n"); - } - need_reset = true; - } } if (ec->tx_coalesce_usecs > 1) { @@ -2123,15 +2144,15 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) return rc; /* if state changes we need to update adapter->flags and reset */ - if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { - /* - * cast both to bool and verify if they are set the same - * but only enable RSC if itr is non-zero, as - * itr=0 and RSC are mutually exclusive - */ - if (((!!(data & ETH_FLAG_LRO)) != - (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && - adapter->rx_itr_setting) { + if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && + (!!(data & ETH_FLAG_LRO) != + !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { + if ((data & ETH_FLAG_LRO) && + (!adapter->rx_itr_setting || + (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) { + e_info(probe, "rx-usecs set too low, " + "not enabling RSC.\n"); + } else { adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: @@ -2140,11 +2161,6 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) default: break; } - } else if (!adapter->rx_itr_setting) { - netdev->features &= ~NETIF_F_LRO; - if (data & ETH_FLAG_LRO) - e_info(probe, "rx-usecs set to 0, " - "LRO/RSC cannot be enabled.\n"); } } -- cgit v1.2.3-70-g09d2 From b953799ee29075afd30afe4c0fb65f278b088f69 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:58 -0800 Subject: ixgbe: reorder Tx cleanup so that if adapter will reset we don't rearm The code as it existed could re-arm the queues when it was requesting a HW reset due to a TX hang. Instead of doing that this change makes it so that we will just exit if the hardware is believed to be hung. 
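The reordered flow can be sketched roughly as follows (simplified pseudologic, not the literal ixgbe_clean_tx_irq body; example_clean_tx_order is made up):

/* sketch: hang handling now happens before any queue re-arm */
static bool example_clean_tx_order(struct ixgbe_ring *tx_ring, bool hung)
{
	/* 1. free completed buffers and fold byte/packet counts into stats */
	if (hung) {
		ixgbe_tx_timeout(tx_ring->netdev);	/* schedules the reset */
		return true;	/* bail out; do not re-wake the queue */
	}
	/* 2. only a healthy ring gets its stopped subqueue woken again */
	netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
	return false;
}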
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 0128fe666f0..1d78b554b0e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -735,8 +735,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_adapter *adapter = q_vector->adapter; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbe_tx_buffer *tx_buffer_info; - unsigned int i, eop, count = 0; unsigned int total_bytes = 0, total_packets = 0; + u16 i, eop, count = 0; i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; @@ -771,6 +771,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, } tx_ring->next_to_clean = i; + tx_ring->total_bytes += total_bytes; + tx_ring->total_packets += total_packets; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.packets += total_packets; + tx_ring->stats.bytes += total_bytes; + u64_stats_update_end(&tx_ring->syncp); + + if (check_for_tx_hang(tx_ring) && + ixgbe_check_tx_hang(adapter, tx_ring, i)) { + /* schedule immediate reset if we believe we hung */ + e_info(probe, "tx hang %d detected, resetting " + "adapter\n", adapter->tx_timeout_count + 1); + ixgbe_tx_timeout(adapter->netdev); + + /* the adapter is about to reset, no point in enabling stuff */ + return true; + } #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && @@ -786,24 +803,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, } } - if (check_for_tx_hang(tx_ring) && - ixgbe_check_tx_hang(adapter, tx_ring, i)) { - /* schedule immediate reset if we believe we hung */ - e_info(probe, "tx hang %d detected, resetting " - "adapter\n", adapter->tx_timeout_count + 1); - ixgbe_tx_timeout(adapter->netdev); - } - - /* re-arm the interrupt */ - if (count >= tx_ring->work_limit) - ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx)); - - tx_ring->total_bytes += total_bytes; - tx_ring->total_packets += total_packets; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.packets += total_packets; - tx_ring->stats.bytes += total_bytes; - u64_stats_update_end(&tx_ring->syncp); - return count < tx_ring->work_limit; } -- cgit v1.2.3-70-g09d2 From 32aa77a4fc06bd1116f83c25bf0389a3e9b80533 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:26:59 -0800 Subject: ixgbe: change vector numbering so that queues end up on correct CPUs This changes the numbering scheme slightly. Previously the ordering was coming out like this: Rx-2 Rx-1 Rx-0 TxRx-0, which would drop two queues on CPU 0. This change makes it so that the ordering is like this: Rx-3 Rx-2 Rx-1 TxRx-0. This means that each CPU will have its own Rx queue, and only CPU 0 will have the Tx queue.
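On a hypothetical four-CPU system with four Rx queues and one Tx queue (interface name eth0 assumed, and assuming the usual one-vector-per-CPU affinity), the names now line up as:

	eth0-TxRx-0  ->  vector 0 / CPU 0  (Tx and Rx share this vector)
	eth0-Rx-1    ->  vector 1 / CPU 1
	eth0-Rx-2    ->  vector 2 / CPU 2
	eth0-Rx-3    ->  vector 3 / CPU 3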
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 1d78b554b0e..5dde7d63c3a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2182,9 +2182,11 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) } else if (handler == &ixgbe_msix_clean_tx) { sprintf(adapter->name[vector], "%s-%s-%d", netdev->name, "tx", ti++); - } else + } else { sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "TxRx", vector); + netdev->name, "TxRx", ri++); + ti++; + } err = request_irq(adapter->msix_entries[vector].vector, handler, 0, adapter->name[vector], -- cgit v1.2.3-70-g09d2 From c267fc166a3308c45c7f0ad2ddd6fc696caaeb80 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:00 -0800 Subject: ixgbe: cleanup ixgbe_clean_rx_irq The code for ixgbe_clean_rx_irq was much more tangled up than it needed to be in terms of logic statements and unused variables. This change untangles much of that and drops several unused variables such as cleaned which was being returned but never checked. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 149 +++++++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 71 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5dde7d63c3a..584608d267b 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1128,14 +1128,18 @@ no_buffers: } } -static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) +static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) { - return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; -} - -static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) -{ - return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + /* HW will not DMA in data larger than the given buffer, even if it + * parses the (NFS, of course) header to be larger. In that case, it + * fills the header buffer and spills the rest into the page. 
+ */ + u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); + u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IXGBE_RX_HDR_SIZE) + hlen = IXGBE_RX_HDR_SIZE; + return hlen; } static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) @@ -1182,7 +1186,7 @@ struct ixgbe_rsc_cb { #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) -static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, +static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, int *work_done, int work_to_do) { @@ -1190,49 +1194,40 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, union ixgbe_adv_rx_desc *rx_desc, *next_rxd; struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; struct sk_buff *skb; - unsigned int i, rsc_count = 0; - u32 len, staterr; - u16 hdr_info; - bool cleaned = false; - int cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; + const int current_node = numa_node_id(); + unsigned int rsc_count = 0; #ifdef IXGBE_FCOE int ddp_bytes = 0; #endif /* IXGBE_FCOE */ + u32 staterr; + u16 i; + u16 cleaned_count = 0; i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); - rx_buffer_info = &rx_ring->rx_buffer_info[i]; while (staterr & IXGBE_RXD_STAT_DD) { u32 upper_len = 0; - if (*work_done >= work_to_do) - break; - (*work_done)++; rmb(); /* read descriptor and rx_buffer_info after status DD */ - if (ring_is_ps_enabled(rx_ring)) { - hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); - len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; - upper_len = le16_to_cpu(rx_desc->wb.upper.length); - if ((len > IXGBE_RX_HDR_SIZE) || - (upper_len && !(hdr_info & IXGBE_RXDADV_SPH))) - len = IXGBE_RX_HDR_SIZE; - } else { - len = le16_to_cpu(rx_desc->wb.upper.length); - } - cleaned = true; + rx_buffer_info = &rx_ring->rx_buffer_info[i]; + skb = rx_buffer_info->skb; - prefetch(skb->data); rx_buffer_info->skb = NULL; + prefetch(skb->data); + if (ring_is_rsc_enabled(rx_ring)) + rsc_count = ixgbe_get_rsc_count(rx_desc); + + /* if this is a skb from previous receive DMA will be 0 */ if (rx_buffer_info->dma) { - if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && - (!(staterr & IXGBE_RXD_STAT_EOP)) && - (!(skb->prev))) { + u16 hlen; + if (rsc_count && + !(staterr & IXGBE_RXD_STAT_EOP) && + !skb->prev) { /* * When HWRSC is enabled, delay unmapping * of the first packet. 
It carries the @@ -1249,7 +1244,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, DMA_FROM_DEVICE); } rx_buffer_info->dma = 0; - skb_put(skb, len); + + if (ring_is_ps_enabled(rx_ring)) { + hlen = ixgbe_get_hlen(rx_desc); + upper_len = le16_to_cpu(rx_desc->wb.upper.length); + } else { + hlen = le16_to_cpu(rx_desc->wb.upper.length); + } + + skb_put(skb, hlen); + } else { + /* assume packet split since header is unmapped */ + upper_len = le16_to_cpu(rx_desc->wb.upper.length); } if (upper_len) { @@ -1263,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_buffer_info->page_offset, upper_len); - if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || - (page_count(rx_buffer_info->page) != 1)) - rx_buffer_info->page = NULL; - else + if ((page_count(rx_buffer_info->page) == 1) && + (page_to_nid(rx_buffer_info->page) == current_node)) get_page(rx_buffer_info->page); + else + rx_buffer_info->page = NULL; skb->len += upper_len; skb->data_len += upper_len; @@ -1282,9 +1288,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(next_rxd); cleaned_count++; - if (ring_is_rsc_enabled(rx_ring)) - rsc_count = ixgbe_get_rsc_count(rx_desc); - if (rsc_count) { u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> IXGBE_RXDADV_NEXTP_SHIFT; @@ -1293,31 +1296,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, next_buffer = &rx_ring->rx_buffer_info[i]; } - if (staterr & IXGBE_RXD_STAT_EOP) { - if (skb->prev) - skb = ixgbe_transform_rsc_queue(skb, - &(rx_ring->rx_stats.rsc_count)); - if (ring_is_rsc_enabled(rx_ring)) { - if (IXGBE_RSC_CB(skb)->delay_unmap) { - dma_unmap_single(rx_ring->dev, - IXGBE_RSC_CB(skb)->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - IXGBE_RSC_CB(skb)->dma = 0; - IXGBE_RSC_CB(skb)->delay_unmap = false; - } - if (ring_is_ps_enabled(rx_ring)) - rx_ring->rx_stats.rsc_count += - skb_shinfo(skb)->nr_frags; - else - rx_ring->rx_stats.rsc_count++; - rx_ring->rx_stats.rsc_flush++; - } - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets++; - rx_ring->stats.bytes += skb->len; - u64_stats_update_end(&rx_ring->syncp); - } else { + if (!(staterr & IXGBE_RXD_STAT_EOP)) { if (ring_is_ps_enabled(rx_ring)) { rx_buffer_info->skb = next_buffer->skb; rx_buffer_info->dma = next_buffer->dma; @@ -1331,8 +1310,32 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, goto next_desc; } + if (skb->prev) + skb = ixgbe_transform_rsc_queue(skb, + &(rx_ring->rx_stats.rsc_count)); + + if (ring_is_rsc_enabled(rx_ring)) { + if (IXGBE_RSC_CB(skb)->delay_unmap) { + dma_unmap_single(rx_ring->dev, + IXGBE_RSC_CB(skb)->dma, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + IXGBE_RSC_CB(skb)->dma = 0; + IXGBE_RSC_CB(skb)->delay_unmap = false; + } + if (ring_is_ps_enabled(rx_ring)) + rx_ring->rx_stats.rsc_count += + skb_shinfo(skb)->nr_frags; + else + rx_ring->rx_stats.rsc_count++; + rx_ring->rx_stats.rsc_flush++; + } + + /* ERR_MASK will only have valid bits if EOP set */ if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { - dev_kfree_skb_irq(skb); + /* trim packet back to size 0 and recycle it */ + __pskb_trim(skb, 0); + rx_buffer_info->skb = skb; goto next_desc; } @@ -1356,6 +1359,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, next_desc: rx_desc->wb.upper.status_error = 0; + (*work_done)++; + if (*work_done >= work_to_do) + break; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); @@ -1364,8 
+1371,6 @@ next_desc: /* use prefetched values */ rx_desc = next_rxd; - rx_buffer_info = &rx_ring->rx_buffer_info[i]; - staterr = le32_to_cpu(rx_desc->wb.upper.status_error); } @@ -1392,8 +1397,10 @@ next_desc: rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; - - return cleaned; + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); } static int ixgbe_clean_rxonly(struct napi_struct *, int); -- cgit v1.2.3-70-g09d2 From ee9e0f0b40c4fb4ad71d677c094d518db42f7076 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:01 -0800 Subject: ixgbe: cleanup ATR filter setup function This change cleans up the ixgbe_atr filter setup function so that it uses fewer items from the stack. Since the code is only applicable to IPv4 w/ TCP it makes sense to just use the pointers based on the headers themselves instead of copying them to temp variables and then writing those to the filters. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 50 +++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 28 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 584608d267b..402ab7b2706 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2530,7 +2530,14 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, } /* reinitialize flowdirector state */ - set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && + adapter->atr_sample_rate) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } /* enable queue */ txdctl |= IXGBE_TXDCTL_ENABLE; @@ -6227,47 +6234,34 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring, } static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, - int queue, u32 tx_flags, __be16 protocol) + u8 queue, u32 tx_flags, __be16 protocol) { struct ixgbe_atr_input atr_input; - struct tcphdr *th; struct iphdr *iph = ip_hdr(skb); struct ethhdr *eth = (struct ethhdr *)skb->data; - u16 vlan_id, src_port, dst_port, flex_bytes; - u32 src_ipv4_addr, dst_ipv4_addr; - u8 l4type = 0; + struct tcphdr *th; + u16 vlan_id; - /* Right now, we support IPv4 only */ - if (protocol != htons(ETH_P_IP)) + /* Right now, we support IPv4 w/ TCP only */ + if (protocol != htons(ETH_P_IP) || + iph->protocol != IPPROTO_TCP) return; - /* check if we're UDP or TCP */ - if (iph->protocol == IPPROTO_TCP) { - th = tcp_hdr(skb); - src_port = th->source; - dst_port = th->dest; - l4type |= IXGBE_ATR_L4TYPE_TCP; - /* l4type IPv4 type is 0, no need to assign */ - } else { - /* Unsupported L4 header, just bail here */ - return; - } memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> IXGBE_TX_FLAGS_VLAN_SHIFT; - src_ipv4_addr = iph->saddr; - dst_ipv4_addr = iph->daddr; - flex_bytes = eth->h_proto; + + th = tcp_hdr(skb); ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); - ixgbe_atr_set_src_port_82599(&atr_input, dst_port); - ixgbe_atr_set_dst_port_82599(&atr_input, src_port); - ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); - ixgbe_atr_set_l4type_82599(&atr_input, l4type); + ixgbe_atr_set_src_port_82599(&atr_input, th->dest); + 
ixgbe_atr_set_dst_port_82599(&atr_input, th->source); + ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); + ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); /* src and dst are inverted, think how the receiver sees them */ - ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); - ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); + ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); + ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); /* This assumes the Rx queue and Tx queue are bound to the same CPU */ ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); -- cgit v1.2.3-70-g09d2 From aa80175a539a47fd11e2fbf1696a29f7a2652930 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:02 -0800 Subject: ixgbe: cleanup use of ixgbe_rsc_count and RSC_CB This change cleans up the use of rsc_count and changes it to a boolean since the actual numerical value is used nowhere in the Rx cleanup path. I am also moving the skb count into the RSC_CB path since it is much easier to track it there than when it is passed as a parameter to various function calls. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 7 ++++++ drivers/net/ixgbe/ixgbe_main.c | 55 +++++++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 27 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 160ce923454..6d9fcb4e085 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -467,6 +467,13 @@ enum ixbge_state_t { __IXGBE_SFP_MODULE_NOT_FOUND }; +struct ixgbe_rsc_cb { + dma_addr_t dma; + u16 skb_cnt; + bool delay_unmap; +}; +#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) + enum ixgbe_boards { board_82598, board_82599, diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 402ab7b2706..9f5331bc598 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1142,33 +1142,25 @@ static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) return hlen; } -static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) -{ - return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & - IXGBE_RXDADV_RSCCNT_MASK) >> - IXGBE_RXDADV_RSCCNT_SHIFT; -} - /** * ixgbe_transform_rsc_queue - change rsc queue into a full packet * @skb: pointer to the last skb in the rsc queue - * @count: pointer to number of packets coalesced in this context * * This function changes a queue full of hw rsc buffers into a completed * packet. It uses the ->prev pointers to find the first packet and then * turns it into the frag list owner. 
**/ -static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, - u64 *count) +static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) { unsigned int frag_list_size = 0; + unsigned int skb_cnt = 1; while (skb->prev) { struct sk_buff *prev = skb->prev; frag_list_size += skb->len; skb->prev = NULL; skb = prev; - *count += 1; + skb_cnt++; } skb_shinfo(skb)->frag_list = skb->next; @@ -1176,15 +1168,16 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, skb->len += frag_list_size; skb->data_len += frag_list_size; skb->truesize += frag_list_size; + IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt; + return skb; } -struct ixgbe_rsc_cb { - dma_addr_t dma; - bool delay_unmap; -}; - -#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) +static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc) +{ + return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK); +} static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, @@ -1196,13 +1189,13 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct sk_buff *skb; unsigned int total_rx_bytes = 0, total_rx_packets = 0; const int current_node = numa_node_id(); - unsigned int rsc_count = 0; #ifdef IXGBE_FCOE int ddp_bytes = 0; #endif /* IXGBE_FCOE */ u32 staterr; u16 i; u16 cleaned_count = 0; + bool pkt_is_rsc = false; i = rx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); @@ -1220,12 +1213,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(skb->data); if (ring_is_rsc_enabled(rx_ring)) - rsc_count = ixgbe_get_rsc_count(rx_desc); + pkt_is_rsc = ixgbe_get_rsc_state(rx_desc); /* if this is a skb from previous receive DMA will be 0 */ if (rx_buffer_info->dma) { u16 hlen; - if (rsc_count && + if (pkt_is_rsc && !(staterr & IXGBE_RXD_STAT_EOP) && !skb->prev) { /* @@ -1288,7 +1281,7 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, prefetch(next_rxd); cleaned_count++; - if (rsc_count) { + if (pkt_is_rsc) { u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> IXGBE_RXDADV_NEXTP_SHIFT; next_buffer = &rx_ring->rx_buffer_info[nextp]; @@ -1310,9 +1303,15 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, goto next_desc; } - if (skb->prev) - skb = ixgbe_transform_rsc_queue(skb, - &(rx_ring->rx_stats.rsc_count)); + if (skb->prev) { + skb = ixgbe_transform_rsc_queue(skb); + /* if we got here without RSC the packet is invalid */ + if (!pkt_is_rsc) { + __pskb_trim(skb, 0); + rx_buffer_info->skb = skb; + goto next_desc; + } + } if (ring_is_rsc_enabled(rx_ring)) { if (IXGBE_RSC_CB(skb)->delay_unmap) { @@ -1323,11 +1322,14 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, IXGBE_RSC_CB(skb)->dma = 0; IXGBE_RSC_CB(skb)->delay_unmap = false; } + } + if (pkt_is_rsc) { if (ring_is_ps_enabled(rx_ring)) rx_ring->rx_stats.rsc_count += - skb_shinfo(skb)->nr_frags; + skb_shinfo(skb)->nr_frags; else - rx_ring->rx_stats.rsc_count++; + rx_ring->rx_stats.rsc_count += + IXGBE_RSC_CB(skb)->skb_cnt; rx_ring->rx_stats.rsc_flush++; } @@ -3017,7 +3019,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) } #endif /* IXGBE_FCOE */ } - } static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) -- cgit v1.2.3-70-g09d2 From bd50817859e7e82ba6e4adc75ebd8ac19459d8a4 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:03 -0800 Subject: ixgbe: change mac_type if statements to switch statements This change replaces a 
number of if/elseif/else statements with switch statements to support the addition of future devices to the ixgbe driver. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 45 +++-- drivers/net/ixgbe/ixgbe_main.c | 351 +++++++++++++++++++++++--------------- 2 files changed, 252 insertions(+), 144 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 561d47895d8..9483faf91ea 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -525,10 +525,20 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); - for (i = 0; i < 8; i++) - regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); - for (i = 0; i < 8; i++) - regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + for (i = 0; i < 8; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); + break; + case ixgbe_mac_82599EB: + regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); + regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); + break; + default: + break; + } + } regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); @@ -1226,12 +1236,19 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) u32 value, before, after; u32 i, toggle; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - toggle = 0x7FFFF30F; - test = reg_test_82599; - } else { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: toggle = 0x7FFFF3FF; test = reg_test_82598; + break; + case ixgbe_mac_82599EB: + toggle = 0x7FFFF30F; + test = reg_test_82599; + break; + default: + *data = 1; + return 1; + break; } /* @@ -1449,10 +1466,14 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) reg_ctl &= ~IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); - if (hw->mac.type == ixgbe_mac_82599EB) { + switch (hw->mac.type) { + case ixgbe_mac_82599EB: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); + break; + default: + break; } ixgbe_reset(adapter); @@ -1481,10 +1502,14 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) if (err) return 1; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); + break; + default: + break; } ixgbe_configure_tx_ring(adapter, tx_ring); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9f5331bc598..10fff68088e 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -589,14 +589,19 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, { u32 mask; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: mask = (IXGBE_EIMS_RTX_QUEUE & qmask); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); - } else { + break; + case ixgbe_mac_82599EB: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + break; + default: + break; } } @@ -672,6 +677,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, break; default: tc = 0; + break; } txoff <<= tc; } @@ -1474,11 +1480,18 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) } } - if (adapter->hw.mac.type == ixgbe_mac_82598EB) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); - else if (adapter->hw.mac.type == ixgbe_mac_82599EB) + break; + case ixgbe_mac_82599EB: ixgbe_set_ivar(adapter, -1, 1, v_idx); + break; + + default: + break; + } IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); /* set up to autoclear timer, and the vectors */ @@ -1574,10 +1587,12 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: /* must write high and low 16 bits to reset counter */ itr_reg |= (itr_reg << 16); - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + break; + case ixgbe_mac_82599EB: /* * 82599 can support a value of zero, so allow it for * max interrupt rate, but there is an errata where it can @@ -1592,6 +1607,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) * immediate assertion of the interrupt */ itr_reg |= IXGBE_EITR_CNT_WDIS; + break; + default: + break; } IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); } @@ -1771,16 +1789,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) if (eicr & IXGBE_EICR_MAILBOX) ixgbe_msg_task(adapter); - if (hw->mac.type == ixgbe_mac_82598EB) - ixgbe_check_fan_failure(adapter, eicr); - - if (hw->mac.type == ixgbe_mac_82599EB) { - ixgbe_check_sfp_event(adapter, eicr); - adapter->interrupt_event = eicr; - if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) - schedule_work(&adapter->check_overtemp_task); - + switch (hw->mac.type) { + case ixgbe_mac_82599EB: /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { int i; @@ -1795,7 +1805,19 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) schedule_work(&adapter->fdir_reinit_task); } } + ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + adapter->interrupt_event = eicr; + schedule_work(&adapter->check_overtemp_task); + } + break; + default: + break; } + + ixgbe_check_fan_failure(adapter, eicr); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); @@ -1806,15 +1828,23 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { u32 mask; + struct ixgbe_hw *hw = &adapter->hw; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); - } else { + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + break; + case ixgbe_mac_82599EB: mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); mask = (qmask >> 32); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; } /* skip the flush */ } @@ -1823,15 
+1853,23 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, u64 qmask) { u32 mask; + struct ixgbe_hw *hw = &adapter->hw; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: mask = (IXGBE_EIMS_RTX_QUEUE & qmask); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); - } else { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); mask = (qmask >> 32); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); + break; + default: + break; } /* skip the flush */ } @@ -2288,12 +2326,16 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, mask |= IXGBE_EIMS_GPI_SDP0; if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) mask |= IXGBE_EIMS_GPI_SDP1; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; if (adapter->num_vfs) mask |= IXGBE_EIMS_MAILBOX; + break; + default: + break; } if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) @@ -2349,13 +2391,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data) if (eicr & IXGBE_EICR_LSC) ixgbe_check_lsc(adapter); - if (hw->mac.type == ixgbe_mac_82599EB) + switch (hw->mac.type) { + case ixgbe_mac_82599EB: ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + adapter->interrupt_event = eicr; + schedule_work(&adapter->check_overtemp_task); + } + break; + default: + break; + } ixgbe_check_fan_failure(adapter, eicr); - if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) - schedule_work(&adapter->check_overtemp_task); if (napi_schedule_prep(&(q_vector->napi))) { adapter->tx_ring[0]->total_packets = 0; @@ -2448,14 +2497,19 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) **/ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) { - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); - } else { + break; + case ixgbe_mac_82599EB: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); if (adapter->num_vfs > 32) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); + break; + default: + break; } IXGBE_WRITE_FLUSH(&adapter->hw); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { @@ -2630,15 +2684,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { u32 srrctl; - int index; - struct ixgbe_ring_feature *feature = adapter->ring_feature; + int index = rx_ring->reg_idx; - index = rx_ring->reg_idx; - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - unsigned long mask; - mask = (unsigned long) feature[RING_F_RSS].mask; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: { + struct ixgbe_ring_feature *feature = adapter->ring_feature; + const int mask = feature[RING_F_RSS].mask; index = index & mask; } + break; + case ixgbe_mac_82599EB: + default: + break; + } + srrctl = IXGBE_READ_REG(&adapter->hw, 
IXGBE_SRRCTL(index)); srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; @@ -3899,10 +3958,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter) (txdctl & ~IXGBE_TXDCTL_ENABLE)); } /* Disable the Tx DMA engine on 82599 */ - if (hw->mac.type == ixgbe_mac_82599EB) + switch (hw->mac.type) { + case ixgbe_mac_82599EB: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); + break; + default: + break; + } /* power down the optics */ if (hw->phy.multispeed_fiber) @@ -4260,71 +4324,66 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) bool ret = false; int dcb_i = adapter->ring_feature[RING_F_DCB].indices; - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - /* the number of queues is assumed to be symmetric */ - for (i = 0; i < dcb_i; i++) { - adapter->rx_ring[i]->reg_idx = i << 3; - adapter->tx_ring[i]->reg_idx = i << 2; - } - ret = true; - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { - if (dcb_i == 8) { - /* - * Tx TC0 starts at: descriptor queue 0 - * Tx TC1 starts at: descriptor queue 32 - * Tx TC2 starts at: descriptor queue 64 - * Tx TC3 starts at: descriptor queue 80 - * Tx TC4 starts at: descriptor queue 96 - * Tx TC5 starts at: descriptor queue 104 - * Tx TC6 starts at: descriptor queue 112 - * Tx TC7 starts at: descriptor queue 120 - * - * Rx TC0-TC7 are offset by 16 queues each - */ - for (i = 0; i < 3; i++) { - adapter->tx_ring[i]->reg_idx = i << 5; - adapter->rx_ring[i]->reg_idx = i << 4; - } - for ( ; i < 5; i++) { - adapter->tx_ring[i]->reg_idx = - ((i + 2) << 4); - adapter->rx_ring[i]->reg_idx = i << 4; - } - for ( ; i < dcb_i; i++) { - adapter->tx_ring[i]->reg_idx = - ((i + 8) << 3); - adapter->rx_ring[i]->reg_idx = i << 4; - } + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return false; - ret = true; - } else if (dcb_i == 4) { - /* - * Tx TC0 starts at: descriptor queue 0 - * Tx TC1 starts at: descriptor queue 64 - * Tx TC2 starts at: descriptor queue 96 - * Tx TC3 starts at: descriptor queue 112 - * - * Rx TC0-TC3 are offset by 32 queues each - */ - adapter->tx_ring[0]->reg_idx = 0; - adapter->tx_ring[1]->reg_idx = 64; - adapter->tx_ring[2]->reg_idx = 96; - adapter->tx_ring[3]->reg_idx = 112; - for (i = 0 ; i < dcb_i; i++) - adapter->rx_ring[i]->reg_idx = i << 5; - - ret = true; - } else { - ret = false; + /* the number of queues is assumed to be symmetric */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + for (i = 0; i < dcb_i; i++) { + adapter->rx_ring[i]->reg_idx = i << 3; + adapter->tx_ring[i]->reg_idx = i << 2; + } + ret = true; + break; + case ixgbe_mac_82599EB: + if (dcb_i == 8) { + /* + * Tx TC0 starts at: descriptor queue 0 + * Tx TC1 starts at: descriptor queue 32 + * Tx TC2 starts at: descriptor queue 64 + * Tx TC3 starts at: descriptor queue 80 + * Tx TC4 starts at: descriptor queue 96 + * Tx TC5 starts at: descriptor queue 104 + * Tx TC6 starts at: descriptor queue 112 + * Tx TC7 starts at: descriptor queue 120 + * + * Rx TC0-TC7 are offset by 16 queues each + */ + for (i = 0; i < 3; i++) { + adapter->tx_ring[i]->reg_idx = i << 5; + adapter->rx_ring[i]->reg_idx = i << 4; } - } else { - ret = false; + for ( ; i < 5; i++) { + adapter->tx_ring[i]->reg_idx = ((i + 2) << 4); + adapter->rx_ring[i]->reg_idx = i << 4; + } + for ( ; i < dcb_i; i++) { + adapter->tx_ring[i]->reg_idx = ((i + 8) << 3); + adapter->rx_ring[i]->reg_idx = i << 4; + } + ret = true; + } else if (dcb_i == 4) { + /* + * Tx TC0 starts at: descriptor queue 0 + * Tx TC1 starts 
at: descriptor queue 64 + * Tx TC2 starts at: descriptor queue 96 + * Tx TC3 starts at: descriptor queue 112 + * + * Rx TC0-TC3 are offset by 32 queues each + */ + adapter->tx_ring[0]->reg_idx = 0; + adapter->tx_ring[1]->reg_idx = 64; + adapter->tx_ring[2]->reg_idx = 96; + adapter->tx_ring[3]->reg_idx = 112; + for (i = 0 ; i < dcb_i; i++) + adapter->rx_ring[i]->reg_idx = i << 5; + ret = true; } - } else { - ret = false; + break; + default: + break; } - return ret; } #endif @@ -4885,11 +4944,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = rss; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; - if (hw->mac.type == ixgbe_mac_82598EB) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; - } else if (hw->mac.type == ixgbe_mac_82599EB) { + break; + case ixgbe_mac_82599EB: adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; @@ -4918,6 +4979,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->fcoe.up = IXGBE_FCOE_DEFTC; #endif #endif /* IXGBE_FCOE */ + break; + default: + break; } #ifdef CONFIG_IXGBE_DCB @@ -5400,10 +5464,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); } - if (wufc && hw->mac.type == ixgbe_mac_82599EB) - pci_wake_from_d3(pdev, true); - else + switch (hw->mac.type) { + case ixgbe_mac_82598EB: pci_wake_from_d3(pdev, false); + break; + case ixgbe_mac_82599EB: + pci_wake_from_d3(pdev, !!wufc); + break; + default: + break; + } *enable_wake = !!wufc; @@ -5522,17 +5592,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); - if (hw->mac.type == ixgbe_mac_82599EB) { - hwstats->pxonrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - hwstats->pxoffrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); - hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); - } else { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); hwstats->pxoffrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + break; + case ixgbe_mac_82599EB: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + hwstats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + break; + default: + break; } hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); @@ -5542,18 +5616,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->gprc -= missed_rx; /* 82598 hardware only has a 32 bit counter in the high register */ - if (hw->mac.type == ixgbe_mac_82599EB) { - u64 tmp; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + case ixgbe_mac_82599EB: hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); - tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; - /* 4 high bits of GORC */ - 
hwstats->gorc += (tmp << 32); + IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); - tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; - /* 4 high bits of GOTC */ - hwstats->gotc += (tmp << 32); + IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */ hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); - IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); @@ -5566,12 +5643,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); #endif /* IXGBE_FCOE */ - } else { - hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); - hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); - hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + default: + break; } bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); hwstats->bprc += bprc; @@ -5807,17 +5881,26 @@ static void ixgbe_watchdog_task(struct work_struct *work) if (!netif_carrier_ok(netdev)) { bool flow_rx, flow_tx; - if (hw->mac.type == ixgbe_mac_82599EB) { - u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); - u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); - flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); - flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); - } else { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: { u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); } + break; + case ixgbe_mac_82599EB: { + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); + flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X); + } + break; + default: + flow_tx = false; + flow_rx = false; + break; + } e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? -- cgit v1.2.3-70-g09d2 From e2b4e216b7e9da09175c76887c754489681533b9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:04 -0800 Subject: ixgbe: cleanup ixgbe_set_tx_csum ethtool flags configuration This change makes it so that we always disable SCTP regardless of mac type since we shouldn't need to check mac type before disabling a feature that isn't supported on a given piece of hardware. 
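The reasoning above can be illustrated with a small standalone sketch (hypothetical flag values, not the real NETIF_F_* constants): clearing a feature bit that was never set is a no-op, so the disable path does not need to know whether the MAC supports SCTP offload.

#include <stdio.h>

#define F_IP_CSUM	(1u << 0)
#define F_IPV6_CSUM	(1u << 1)
#define F_SCTP_CSUM	(1u << 2)	/* only ever set on hardware that supports it */

int main(void)
{
	unsigned int feat_82598 = F_IP_CSUM | F_IPV6_CSUM;
	unsigned int feat_82599 = F_IP_CSUM | F_IPV6_CSUM | F_SCTP_CSUM;

	/* disable path: clear all three bits unconditionally */
	feat_82598 &= ~(F_IP_CSUM | F_IPV6_CSUM | F_SCTP_CSUM);
	feat_82599 &= ~(F_IP_CSUM | F_IPV6_CSUM | F_SCTP_CSUM);

	printf("82598: %#x  82599: %#x\n", feat_82598, feat_82599);	/* both 0 */
	return 0;
}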
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 9483faf91ea..f61a8ce908e 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -425,13 +425,12 @@ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) struct ixgbe_adapter *adapter = netdev_priv(netdev); if (data) { - netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; if (adapter->hw.mac.type == ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CSUM; } else { - netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - if (adapter->hw.mac.type == ixgbe_mac_82599EB) - netdev->features &= ~NETIF_F_SCTP_CSUM; + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SCTP_CSUM); } return 0; -- cgit v1.2.3-70-g09d2 From 50d6c681d0c38208e494f0c6302ef13d21dababa Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:05 -0800 Subject: ixgbe: add WOL support for backplane adapters This change adds support for certain 82599 based Mezzanine adapters. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 17 +++++++++++++++++ drivers/net/ixgbe/ixgbe_main.c | 7 +++++++ drivers/net/ixgbe/ixgbe_type.h | 1 + 3 files changed, 25 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index f61a8ce908e..0a4b322fab6 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev, ADVERTISED_FIBRE); ecmd->port = PORT_FIBRE; ecmd->autoneg = AUTONEG_DISABLE; + } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || + (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; } else { ecmd->supported |= (SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE); @@ -1862,6 +1872,13 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, int retval = 1; switch(hw->device_id) { + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (hw->subsystem_device_id == + IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { + wol->supported = 0; + break; + } case IXGBE_DEV_ID_82599_KX4: retval = 0; break; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 10fff68088e..af4ef29cae1 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -7088,6 +7088,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; switch (pdev->device) { + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (pdev->subsystem_device == + IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { + adapter->wol = 0; + break; + } case IXGBE_DEV_ID_82599_KX4: adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | IXGBE_WUFC_MC | IXGBE_WUFC_BC); diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 96dea7731e6..9e6908dff9b 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ 
b/drivers/net/ixgbe/ixgbe_type.h @@ -57,6 +57,7 @@ #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C /* General Registers */ #define IXGBE_CTRL 0x00000 -- cgit v1.2.3-70-g09d2 From 673ac60461082e07be58b23f237d651c2605ce60 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:05 -0800 Subject: ixgbe: Cleanup DCB logic, whitespace, and comments in ixgbe_ethtool.c This change addresses a few whitespace issues in DCB #ifdefs, adds a comment calling out the DCB-specific registers, and nests an if statement inline with a number of if statements related to flow control. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 0a4b322fab6..6871b2be64f 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -342,13 +342,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, else pause->autoneg = 1; -#ifdef CONFIG_DCB - if (hw->fc.current_mode == ixgbe_fc_pfc) { - pause->rx_pause = 0; - pause->tx_pause = 0; - } - -#endif if (hw->fc.current_mode == ixgbe_fc_rx_pause) { pause->rx_pause = 1; } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { @@ -356,6 +349,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev, } else if (hw->fc.current_mode == ixgbe_fc_full) { pause->rx_pause = 1; pause->tx_pause = 1; +#ifdef CONFIG_DCB + } else if (hw->fc.current_mode == ixgbe_fc_pfc) { + pause->rx_pause = 0; + pause->tx_pause = 0; +#endif } } @@ -373,7 +371,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, return -EINVAL; #endif - fc = hw->fc; if (pause->autoneg != AUTONEG_ENABLE) @@ -629,6 +626,7 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); + /* DCB */ regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); -- cgit v1.2.3-70-g09d2 From 9d6b758f428d2ad9ca4208d5c4d4cdbd4261b0d8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:06 -0800 Subject: ixgbe: cleanup unnecessary return value in ixgbe_cache_ring_rss This change is just to clean up some confusing logic in ixgbe_cache_ring_rss, which can be simplified by adding a conditional return to the start of the function.
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index af4ef29cae1..5f7929f52fe 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -4295,19 +4295,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) { int i; - bool ret = false; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - ret = true; - } else { - ret = false; - } + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + return false; - return ret; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; } #ifdef CONFIG_IXGBE_DCB -- cgit v1.2.3-70-g09d2 From bf29ee6c4819a86ba0209281550b230889b8ebe6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:07 -0800 Subject: ixgbe: cleanup unclear references to reg_idx There are a number of places where we use the variable j to contain the register index of the ring. Instead of using such a non-descriptive variable name it is better that we name it reg_idx so that it is clear what the variable contains. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 120 ++++++++++++++++++++--------------------- 1 file changed, 60 insertions(+), 60 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5f7929f52fe..f2e81a21186 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -647,8 +647,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, #ifdef CONFIG_IXGBE_DCB if (adapter->dcb_cfg.pfc_mode_enable) { int tc; - int reg_idx = tx_ring->reg_idx; int dcb_i = adapter->ring_feature[RING_F_DCB].indices; + u8 reg_idx = tx_ring->reg_idx; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: @@ -1422,7 +1422,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int); static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector; - int i, j, q_vectors, v_idx, r_idx; + int i, q_vectors, v_idx, r_idx; u32 mask; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; @@ -1438,8 +1438,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { - j = adapter->rx_ring[r_idx]->reg_idx; - ixgbe_set_ivar(adapter, 0, j, v_idx); + u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx; + ixgbe_set_ivar(adapter, 0, reg_idx, v_idx); r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, r_idx + 1); @@ -1448,8 +1448,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { - j = adapter->tx_ring[r_idx]->reg_idx; - ixgbe_set_ivar(adapter, 1, j, v_idx); + u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx; + ixgbe_set_ivar(adapter, 1, reg_idx, v_idx); r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, r_idx + 1); @@ -2555,7 +2555,7 @@ void 
ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, u64 tdba = ring->dma; int wait_loop = 10; u32 txdctl; - u16 reg_idx = ring->reg_idx; + u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); @@ -2684,13 +2684,13 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { u32 srrctl; - int index = rx_ring->reg_idx; + u8 reg_idx = rx_ring->reg_idx; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: { struct ixgbe_ring_feature *feature = adapter->ring_feature; const int mask = feature[RING_F_RSS].mask; - index = index & mask; + reg_idx = reg_idx & mask; } break; case ixgbe_mac_82599EB: @@ -2698,7 +2698,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, break; } - srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); + srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; @@ -2721,7 +2721,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; } - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) @@ -2801,7 +2801,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; u32 rscctrl; int rx_buf_len; - u16 reg_idx = ring->reg_idx; + u8 reg_idx = ring->reg_idx; if (!ring_is_rsc_enabled(ring)) return; @@ -2867,9 +2867,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; - int reg_idx = ring->reg_idx; int wait_loop = IXGBE_MAX_RX_DESC_POLL; u32 rxdctl; + u8 reg_idx = ring->reg_idx; /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && @@ -2893,7 +2893,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; u64 rdba = ring->dma; u32 rxdctl; - u16 reg_idx = ring->reg_idx; + u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); @@ -3894,7 +3894,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; u32 rxctrl; u32 txdctl; - int i, j; + int i; int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; /* signal that we are down to the interrupt handler */ @@ -3952,9 +3952,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* disable transmits in the hardware now that interrupts are off */ for (i = 0; i < adapter->num_tx_queues; i++) { - j = adapter->tx_ring[i]->reg_idx; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), + u8 reg_idx = adapter->tx_ring[i]->reg_idx; + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), (txdctl & ~IXGBE_TXDCTL_ENABLE)); } /* Disable the Tx DMA engine on 82599 */ @@ -4420,55 +4420,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) */ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) { - int i, fcoe_rx_i = 0, fcoe_tx_i = 0; - bool ret = false; struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + int i; + u8 fcoe_rx_i = 0, fcoe_tx_i = 0; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 
{ #ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - struct ixgbe_fcoe *fcoe = &adapter->fcoe; + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; - ixgbe_cache_ring_dcb(adapter); - /* find out queues in TC for FCoE */ - fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; - fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; - /* - * In 82599, the number of Tx queues for each traffic - * class for both 8-TC and 4-TC modes are: - * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 - * 8 TCs: 32 32 16 16 8 8 8 8 - * 4 TCs: 64 64 32 32 - * We have max 8 queues for FCoE, where 8 the is - * FCoE redirection table size. If TC for FCoE is - * less than or equal to TC3, we have enough queues - * to add max of 8 queues for FCoE, so we start FCoE - * tx descriptor from the next one, i.e., reg_idx + 1. - * If TC for FCoE is above TC3, implying 8 TC mode, - * and we need 8 for FCoE, we have to take all queues - * in that traffic class for FCoE. - */ - if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) - fcoe_tx_i--; - } + ixgbe_cache_ring_dcb(adapter); + /* find out queues in TC for FCoE */ + fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; + fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; + /* + * In 82599, the number of Tx queues for each traffic + * class for both 8-TC and 4-TC modes are: + * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 + * 8 TCs: 32 32 16 16 8 8 8 8 + * 4 TCs: 64 64 32 32 + * We have max 8 queues for FCoE, where 8 the is + * FCoE redirection table size. If TC for FCoE is + * less than or equal to TC3, we have enough queues + * to add max of 8 queues for FCoE, so we start FCoE + * Tx queue from the next one, i.e., reg_idx + 1. + * If TC for FCoE is above TC3, implying 8 TC mode, + * and we need 8 for FCoE, we have to take all queues + * in that traffic class for FCoE. + */ + if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) + fcoe_tx_i--; + } #endif /* CONFIG_IXGBE_DCB */ - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || - (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) - ixgbe_cache_ring_fdir(adapter); - else - ixgbe_cache_ring_rss(adapter); + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || + (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + ixgbe_cache_ring_fdir(adapter); + else + ixgbe_cache_ring_rss(adapter); - fcoe_rx_i = f->mask; - fcoe_tx_i = f->mask; - } - for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { - adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; - adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; - } - ret = true; + fcoe_rx_i = f->mask; + fcoe_tx_i = f->mask; } - return ret; + for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { + adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; + adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; + } + return true; } #endif /* IXGBE_FCOE */ -- cgit v1.2.3-70-g09d2 From 125601bf03a13e24d3785ccbc3a25ad401c92772 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:08 -0800 Subject: ixgbe: simplify math and improve stack use of ixgbe_set_itr functions This change is meant to improve the stack utilization and simplify the math used in ixgbe_set_itr_msix. 
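The arithmetic change can be checked with a short standalone sketch (example ITR values assumed): the single-divide form gives the same 90/10 exponential smoothing as the old two-divide form, to within integer truncation.

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 956, new_itr = 8000;	/* assumed example rates */

	unsigned int old_way = ((eitr * 90) / 100) + ((new_itr * 10) / 100);
	unsigned int new_way = ((eitr * 9) + new_itr) / 10;

	printf("old: %u  new: %u\n", old_way, new_way);	/* 1660 and 1660 */
	return 0;
}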
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index f2e81a21186..3ad754824ff 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1617,14 +1617,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) { struct ixgbe_adapter *adapter = q_vector->adapter; + int i, r_idx; u32 new_itr; u8 current_itr, ret_itr; - int i, r_idx; - struct ixgbe_ring *rx_ring, *tx_ring; r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); for (i = 0; i < q_vector->txr_count; i++) { - tx_ring = adapter->tx_ring[r_idx]; + struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx]; ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, q_vector->tx_itr, tx_ring->total_packets, @@ -1639,7 +1638,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = adapter->rx_ring[r_idx]; + struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx]; ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, q_vector->rx_itr, rx_ring->total_packets, @@ -1670,7 +1669,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) if (new_itr != q_vector->eitr) { /* do an exponential smoothing */ - new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); + new_itr = ((q_vector->eitr * 9) + new_itr)/10; /* save the algorithm value here, not the smoothed one */ q_vector->eitr = new_itr; @@ -2270,10 +2269,10 @@ out: static void ixgbe_set_itr(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; - u8 current_itr; - u32 new_itr = q_vector->eitr; struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; + u32 new_itr = q_vector->eitr; + u8 current_itr; q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, q_vector->tx_itr, @@ -2303,9 +2302,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter) if (new_itr != q_vector->eitr) { /* do an exponential smoothing */ - new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); + new_itr = ((q_vector->eitr * 9) + new_itr)/10; - /* save the algorithm value here, not the smoothed one */ + /* save the algorithm value here */ q_vector->eitr = new_itr; ixgbe_write_eitr(q_vector); -- cgit v1.2.3-70-g09d2 From d0759ebb051972f8557a19aa13cf02fc314856e9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:09 -0800 Subject: ixgbe: cleanup ixgbe_map_rings_to_vectors This change cleans up some of the items in ixgbe_map_rings_to_vectors. Specifically it merges the two for loops and drops the unnecessary vectors parameter. It also moves the vector names into the q_vectors themselves. 
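For the merged loop referenced above, the distribution logic can be sketched on its own (queue and vector counts assumed): DIV_ROUND_UP spreads the remaining rings so the earlier vectors absorb the remainder, e.g. 10 Rx rings over 4 vectors come out as 3, 3, 2, 2.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int rxr_remaining = 10;		/* assumed Rx ring count */
	int q_vectors = 4;		/* assumed vector count */

	for (int i = 0; i < q_vectors; i++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		printf("vector %d gets %d rx rings\n", i, rqpv);
		rxr_remaining -= rqpv;
	}
	return 0;
}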
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 3 ++- drivers/net/ixgbe/ixgbe_main.c | 55 +++++++++++++++++++++--------------------- 2 files changed, 30 insertions(+), 28 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 6d9fcb4e085..ce43c935268 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -282,6 +282,7 @@ struct ixgbe_q_vector { u8 rx_itr; u32 eitr; cpumask_var_t affinity_mask; + char name[IFNAMSIZ + 9]; }; /* Helper macros to switch between ints/sec and what the register uses. @@ -330,7 +331,6 @@ struct ixgbe_adapter { u16 bd_number; struct work_struct reset_task; struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; u8 dcb_set_bitmap; @@ -453,6 +453,7 @@ struct ixgbe_adapter { int node; struct work_struct check_overtemp_task; u32 interrupt_event; + char lsc_int_name[IFNAMSIZ + 9]; /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 3ad754824ff..0d6422c5972 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2130,7 +2130,6 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, /** * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors * @adapter: board private structure to initialize - * @vectors: allotted vector count for descriptor rings * * This function maps descriptor rings to the queue-specific vectors * we were allotted through the MSI-X enabling code. Ideally, we'd have @@ -2138,9 +2137,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, * group the rings as "efficiently" as possible. You would add new * mapping configurations in here. **/ -static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, - int vectors) +static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter) { + int q_vectors; int v_start = 0; int rxr_idx = 0, txr_idx = 0; int rxr_remaining = adapter->num_rx_queues; @@ -2153,11 +2152,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) goto out; + q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + /* * The ideal configuration... * We have enough vectors to map one per queue. */ - if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { + if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) map_vector_to_rxq(adapter, v_start, rxr_idx); @@ -2173,23 +2174,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, * multiple queues per vector. */ /* Re-adjusting *qpv takes care of the remainder. 
*/ - for (i = v_start; i < vectors; i++) { - rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); + for (i = v_start; i < q_vectors; i++) { + rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); for (j = 0; j < rqpv; j++) { map_vector_to_rxq(adapter, i, rxr_idx); rxr_idx++; rxr_remaining--; } - } - for (i = v_start; i < vectors; i++) { - tqpv = DIV_ROUND_UP(txr_remaining, vectors - i); + tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); for (j = 0; j < tqpv; j++) { map_vector_to_txq(adapter, i, txr_idx); txr_idx++; txr_remaining--; } } - out: return err; } @@ -2211,32 +2209,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* Map the Tx/Rx rings to the vectors we were allotted. */ - err = ixgbe_map_rings_to_vectors(adapter, q_vectors); + err = ixgbe_map_rings_to_vectors(adapter); if (err) - goto out; + return err; -#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ - (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ - &ixgbe_msix_clean_many) +#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ + ? &ixgbe_msix_clean_many : \ + (_v)->rxr_count ? &ixgbe_msix_clean_rx : \ + (_v)->txr_count ? &ixgbe_msix_clean_tx : \ + NULL) for (vector = 0; vector < q_vectors; vector++) { - handler = SET_HANDLER(adapter->q_vector[vector]); + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + handler = SET_HANDLER(q_vector); if (handler == &ixgbe_msix_clean_rx) { - sprintf(adapter->name[vector], "%s-%s-%d", + sprintf(q_vector->name, "%s-%s-%d", netdev->name, "rx", ri++); } else if (handler == &ixgbe_msix_clean_tx) { - sprintf(adapter->name[vector], "%s-%s-%d", + sprintf(q_vector->name, "%s-%s-%d", netdev->name, "tx", ti++); - } else { - sprintf(adapter->name[vector], "%s-%s-%d", + } else if (handler == &ixgbe_msix_clean_many) { + sprintf(q_vector->name, "%s-%s-%d", netdev->name, "TxRx", ri++); ti++; + } else { + /* skip this unused q_vector */ + continue; } - err = request_irq(adapter->msix_entries[vector].vector, - handler, 0, adapter->name[vector], - adapter->q_vector[vector]); + handler, 0, q_vector->name, + q_vector); if (err) { e_err(probe, "request_irq failed for MSIX interrupt " "Error: %d\n", err); @@ -2244,9 +2246,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) } } - sprintf(adapter->name[vector], "%s:lsc", netdev->name); + sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, - ixgbe_msix_lsc, 0, adapter->name[vector], netdev); + ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev); if (err) { e_err(probe, "request_irq for msix_lsc failed: %d\n", err); goto free_queue_irqs; @@ -2262,7 +2264,6 @@ free_queue_irqs: pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; -out: return err; } -- cgit v1.2.3-70-g09d2 From 2274543f15133165b855b9a4a1503b2c1268c6cf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:10 -0800 Subject: ixgbe: populate the ring->q_vector pointer during ring mapping The q_vector back pointer was not being set in the rings so it would not have been possible to determine the parent q_vector of the ring. 
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 0d6422c5972..38dd8534035 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2113,18 +2113,22 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, int r_idx) { struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + struct ixgbe_ring *rx_ring = a->rx_ring[r_idx]; set_bit(r_idx, q_vector->rxr_idx); q_vector->rxr_count++; + rx_ring->q_vector = q_vector; } static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, int t_idx) { struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; + struct ixgbe_ring *tx_ring = a->tx_ring[t_idx]; set_bit(t_idx, q_vector->txr_idx); q_vector->txr_count++; + tx_ring->q_vector = q_vector; } /** -- cgit v1.2.3-70-g09d2 From e3de4b7bdfd2c06884c95cfb4ad4d64be046595e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 16 Nov 2010 19:27:11 -0800 Subject: ixgbe: Resolve null function pointer accesses on 82598 w/ multi-speed fiber This change resolves some null function pointer accesses on 82598 when a multi-speed fiber module is inserted into the adapter. Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 38dd8534035..def5c6e047c 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3661,7 +3661,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) ixgbe_configure_msi_and_legacy(adapter); /* enable the optics */ - if (hw->phy.multispeed_fiber) + if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); clear_bit(__IXGBE_DOWN, &adapter->state); @@ -3973,7 +3973,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) } /* power down the optics */ - if (hw->phy.multispeed_fiber) + if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); /* clear n-tuple filters that are cached */ @@ -7074,7 +7074,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, } /* power down the optics */ - if (hw->phy.multispeed_fiber) + if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); init_timer(&adapter->watchdog_timer); -- cgit v1.2.3-70-g09d2 From c84d324c770dc81acebc1042163da33c8ded2364 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Nov 2010 19:27:12 -0800 Subject: ixgbe: rework Tx hang detection to fix reoccurring false Tx hangs The Tx hang logic has been known to detect false hangs when the device is receiving pause frames or has delayed processing for some other reason. This patch makes the logic more robust and resolves these known issues. The old logic checked to see if the device was paused by querying the HW then the hang logic was aborted if the device was currently paused. This check was racy because the device could have been in the pause state any time up to this check. The other operation of the hang logic is to verify the Tx ring is still advancing the old logic checked the EOP timestamp. This is not sufficient to determine the ring is not advancing but only infers that it may be moving slowly. 
Here we add logic to track the number of completed Tx descriptors and use the adapter stats to check if any pause frames have been received since the previous Tx hang check. This way we avoid racing with the HW register and do not detect false hangs if the ring is advancing slowly. This patch is primarily the work of Jesse Brandeburg. I clean it up some and fixed the PFC checking. Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 4 + drivers/net/ixgbe/ixgbe_main.c | 250 ++++++++++++++++++++++++++++------------- 2 files changed, 175 insertions(+), 79 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index ce43c935268..2b8cbb3a81f 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -149,6 +149,8 @@ struct ixgbe_queue_stats { struct ixgbe_tx_queue_stats { u64 restart_queue; u64 tx_busy; + u64 completed; + u64 tx_done_old; }; struct ixgbe_rx_queue_stats { @@ -162,6 +164,7 @@ struct ixgbe_rx_queue_stats { enum ixbge_ring_state_t { __IXGBE_TX_FDIR_INIT_DONE, __IXGBE_TX_DETECT_HANG, + __IXGBE_HANG_CHECK_ARMED, __IXGBE_RX_PS_ENABLED, __IXGBE_RX_RSC_ENABLED, }; @@ -514,6 +517,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); extern void ixgbe_write_eitr(struct ixgbe_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); +extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index); extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index def5c6e047c..6e56f7b7c8f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -630,93 +630,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, } /** - * ixgbe_tx_xon_state - check the tx ring xon state - * @adapter: the ixgbe adapter - * @tx_ring: the corresponding tx_ring + * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class + * @adapter: driver private struct + * @index: reg idx of queue to query (0-127) * - * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the - * corresponding TC of this tx_ring when checking TFCS. + * Helper function to determine the traffic index for a paticular + * register index. 
* - * Returns : true if in xon state (currently not paused) + * Returns : a tc index for use in range 0-7, or 0-3 */ -static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring) +u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) { - u32 txoff = IXGBE_TFCS_TXOFF; + int tc = -1; + int dcb_i = adapter->ring_feature[RING_F_DCB].indices; -#ifdef CONFIG_IXGBE_DCB - if (adapter->dcb_cfg.pfc_mode_enable) { - int tc; - int dcb_i = adapter->ring_feature[RING_F_DCB].indices; - u8 reg_idx = tx_ring->reg_idx; + /* if DCB is not enabled the queues have no TC */ + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return tc; - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - tc = reg_idx >> 2; - txoff = IXGBE_TFCS_TXOFF0; + /* check valid range */ + if (reg_idx >= adapter->hw.mac.max_tx_queues) + return tc; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + tc = reg_idx >> 2; + break; + default: + if (dcb_i != 4 && dcb_i != 8) break; - case ixgbe_mac_82599EB: - tc = 0; - txoff = IXGBE_TFCS_TXOFF; - if (dcb_i == 8) { - /* TC0, TC1 */ - tc = reg_idx >> 5; - if (tc == 2) /* TC2, TC3 */ - tc += (reg_idx - 64) >> 4; - else if (tc == 3) /* TC4, TC5, TC6, TC7 */ - tc += 1 + ((reg_idx - 96) >> 3); - } else if (dcb_i == 4) { - /* TC0, TC1 */ - tc = reg_idx >> 6; - if (tc == 1) { - tc += (reg_idx - 64) >> 5; - if (tc == 2) /* TC2, TC3 */ - tc += (reg_idx - 96) >> 4; - } - } + + /* if VMDq is enabled the lowest order bits determine TC */ + if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | + IXGBE_FLAG_VMDQ_ENABLED)) { + tc = reg_idx & (dcb_i - 1); + break; + } + + /* + * Convert the reg_idx into the correct TC. This bitmask + * targets the last full 32 ring traffic class and assigns + * it a value of 1. From there the rest of the rings are + * based on shifting the mask further up to include the + * reg_idx / 16 and then reg_idx / 8. It assumes dcB_i + * will only ever be 8 or 4 and that reg_idx will never + * be greater then 128. 
The code without the power of 2 + * optimizations would be: + * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32) + */ + tc = ((reg_idx & 0X1F) + 0x20) * dcb_i; + tc >>= 9 - (reg_idx >> 5); + } + + return tc; +} + +static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 data = 0; + u32 xoff[8] = {0}; + int i; + + if ((hw->fc.current_mode == ixgbe_fc_full) || + (hw->fc.current_mode == ixgbe_fc_rx_pause)) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); break; default: - tc = 0; + data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } + hwstats->lxoffrxc += data; + + /* refill credits (no tx hang) if we received xoff */ + if (!data) + return; + + for (i = 0; i < adapter->num_tx_queues; i++) + clear_bit(__IXGBE_HANG_CHECK_ARMED, + &adapter->tx_ring[i]->state); + return; + } else if (!(adapter->dcb_cfg.pfc_mode_enable)) + return; + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); break; + default: + xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } - txoff <<= tc; + hwstats->pxoffrxc[i] += xoff[i]; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx); + + if (xoff[tc]) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } -#endif - return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; } -static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, - struct ixgbe_ring *tx_ring, - unsigned int eop) +static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) { + return ring->tx_stats.completed; +} + +static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) +{ + struct ixgbe_adapter *adapter = netdev_priv(ring->netdev); struct ixgbe_hw *hw = &adapter->hw; - /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of eop */ + u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); + u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; +} + +static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) +{ + u32 tx_done = ixgbe_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = ixgbe_get_tx_pending(tx_ring); + bool ret = false; + clear_check_for_tx_hang(tx_ring); - if (tx_ring->tx_buffer_info[eop].time_stamp && - time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && - ixgbe_tx_xon_state(adapter, tx_ring)) { - /* detected Tx unit hang */ - union ixgbe_adv_tx_desc *tx_desc; - tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); - e_err(drv, "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH, TDT <%x>, <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "tx_buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->queue_index, - IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), - IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), - tx_ring->next_to_use, eop, - tx_ring->tx_buffer_info[eop].time_stamp, jiffies); - return true; + + /* + * Check for a hung queue, but be thorough. 
This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED, + &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } - return false; + return ret; } #define IXGBE_MAX_TXD_PWR 14 @@ -772,6 +845,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, tx_buffer_info); } + tx_ring->tx_stats.completed++; eop = tx_ring->tx_buffer_info[i].next_to_watch; eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); } @@ -784,11 +858,31 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, tx_ring->stats.bytes += total_bytes; u64_stats_update_end(&tx_ring->syncp); - if (check_for_tx_hang(tx_ring) && - ixgbe_check_tx_hang(adapter, tx_ring, i)) { + if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { + /* schedule immediate reset if we believe we hung */ + struct ixgbe_hw *hw = &adapter->hw; + tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "tx_buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " jiffies <%lx>\n", + tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), + tx_ring->next_to_use, eop, + tx_ring->tx_buffer_info[eop].time_stamp, jiffies); + + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", + adapter->tx_timeout_count + 1, tx_ring->queue_index); + /* schedule immediate reset if we believe we hung */ - e_info(probe, "tx hang %d detected, resetting " - "adapter\n", adapter->tx_timeout_count + 1); ixgbe_tx_timeout(adapter->netdev); /* the adapter is about to reset, no point in enabling stuff */ @@ -2599,6 +2693,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, ring->atr_sample_rate = 0; } + clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state); + /* enable queue */ txdctl |= IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); @@ -4034,6 +4130,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + adapter->tx_timeout_count++; + /* Do the reset outside of interrupt context */ schedule_work(&adapter->reset_task); } @@ -4048,8 +4146,6 @@ static void ixgbe_reset_task(struct work_struct *work) test_bit(__IXGBE_RESETTING, &adapter->state)) return; - adapter->tx_timeout_count++; - ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); ixgbe_reinit_locked(adapter); @@ -5597,14 +5693,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_82598EB: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); - hwstats->pxoffrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); break; case ixgbe_mac_82599EB: 
hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); - hwstats->pxoffrxc[i] += - IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); break; default: break; @@ -5616,11 +5708,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) /* work around hardware counting issue */ hwstats->gprc -= missed_rx; + ixgbe_update_xoff_received(adapter); + /* 82598 hardware only has a 32 bit counter in the high register */ switch (hw->mac.type) { case ixgbe_mac_82598EB: hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); @@ -5633,7 +5726,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); - hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); #ifdef IXGBE_FCOE -- cgit v1.2.3-70-g09d2 From a41c059741570779c0254a3a1aa4da3baa463d7c Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 16 Nov 2010 19:27:13 -0800 Subject: ixgbe: avoid doing FCoE DDP when adapter is DOWN or RESETTING There is no point to allow incoming DDP requests from the upper layer stack if the adapter is going down or being reset. Signed-off-by: Yi Zou Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_fcoe.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 05efa6a8ce8..07346b8ebb0 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -168,6 +168,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, return 0; } + /* no DDP if we are already down or resetting */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return 0; + fcoe = &adapter->fcoe; if (!fcoe->pool) { e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); -- cgit v1.2.3-70-g09d2 From 8ca371e484e2e5ceb9b90fdb83d8d251017d852b Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 16 Nov 2010 19:27:13 -0800 Subject: ixgbe: invalidate FCoE DDP context when no error status is available The hw automatically invalidates the context if DDP is successful or there is error detected. In case there is no error status available from the hw, initializing the per context error status to be 1 allows the DDP context to be still invalidated via the upper layer call to ddp_put(). 
Signed-off-by: Yi Zou Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_fcoe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 07346b8ebb0..26dd27479e2 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc) static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) { ddp->len = 0; - ddp->err = 0; + ddp->err = 1; ddp->udl = NULL; ddp->udp = 0UL; ddp->sgl = NULL; -- cgit v1.2.3-70-g09d2 From 9b55bb038468a7b504ccdc1d956952598ae1b85b Mon Sep 17 00:00:00 2001 From: Yi Zou Date: Tue, 16 Nov 2010 19:27:14 -0800 Subject: ixgbe: make sure FCoE DDP user buffers are really released by the HW When invalidating the DDP context is invalidated, the HW may not be done with the user buffer right away. In which case, we poll the FCBUFF register to check if the buffer valid bit is cleared or not, if not, we wait for max 100us that is guaranteed by the HW. Signed-off-by: Yi Zou Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_fcoe.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 26dd27479e2..6342d485979 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; struct ixgbe_fcoe_ddp *ddp; + u32 fcbuff; if (!netdev) goto out_ddp_put; @@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_WE)); + + /* guaranteed to be invalidated after 100us */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); spin_unlock_bh(&fcoe->lock); + if (fcbuff & IXGBE_FCBUFF_VALID) + udelay(100); } if (ddp->sgl) pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, -- cgit v1.2.3-70-g09d2 From a391f1d51244b8274920a33c5d11aeebec3aa68f Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 16 Nov 2010 19:27:15 -0800 Subject: ixgbe: make silicon specific functions generic The new MAC type X540 shares much of the same functionality of some silicon specific functions. To reduce duplicate code, made these functions generic. 
Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_82598.c | 37 +---------------- drivers/net/ixgbe/ixgbe_82599.c | 86 ++-------------------------------------- drivers/net/ixgbe/ixgbe_common.c | 55 ++++++++++++++++++++++--- drivers/net/ixgbe/ixgbe_common.h | 5 ++- drivers/net/ixgbe/ixgbe_mbx.c | 2 +- drivers/net/ixgbe/ixgbe_mbx.h | 2 +- drivers/net/ixgbe/ixgbe_phy.c | 33 +++++++++++++++ drivers/net/ixgbe/ixgbe_phy.h | 3 ++ drivers/net/ixgbe/ixgbe_type.h | 1 + 9 files changed, 98 insertions(+), 126 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index 25b20f93190..d0f1d9d2c41 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -38,9 +38,6 @@ #define IXGBE_82598_MC_TBL_SIZE 128 #define IXGBE_82598_VFT_TBL_SIZE 128 -static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, @@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = &ixgbe_setup_copper_link_82598; mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_82598; + &ixgbe_get_copper_link_capabilities_generic; } switch (hw->phy.type) { @@ -273,37 +270,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, return status; } -/** - * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - * - * Determines the link capabilities by reading the AUTOC register. 
- **/ -static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = IXGBE_ERR_LINK_SETUP; - u16 speed_ability; - - *speed = 0; - *autoneg = true; - - status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, - &speed_ability); - - if (status == 0) { - if (speed_ability & MDIO_SPEED_10G) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - } - - return status; -} - /** * ixgbe_get_media_type_82598 - Determines media type * @hw: pointer to hardware structure @@ -1225,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = { static struct ixgbe_eeprom_operations eeprom_ops_82598 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eerd_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 0bd8fbb5bfd..144c44a6b21 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); -static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, @@ -174,7 +171,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = &ixgbe_setup_copper_link_82599; mac->ops.get_link_capabilities = - &ixgbe_get_copper_link_capabilities_82599; + &ixgbe_get_copper_link_capabilities_generic; } /* Set necessary function pointers based on phy type */ @@ -289,37 +286,6 @@ out: return status; } -/** - * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: boolean auto-negotiation value - * - * Determines the link capabilities by reading the AUTOC register. - **/ -static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - s32 status = IXGBE_ERR_LINK_SETUP; - u16 speed_ability; - - *speed = 0; - *autoneg = true; - - status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, - &speed_ability); - - if (status == 0) { - if (speed_ability & MDIO_SPEED_10G) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; - if (speed_ability & MDIO_PMA_SPEED_1000) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; - } - - return status; -} - /** * ixgbe_get_media_type_82599 - Get media type * @hw: pointer to hardware structure @@ -2125,51 +2091,6 @@ fw_version_out: return status; } -/** - * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from - * the EEPROM - * @hw: pointer to hardware structure - * @wwnn_prefix: the alternative WWNN prefix - * @wwpn_prefix: the alternative WWPN prefix - * - * This function will read the EEPROM from the alternative SAN MAC address - * block to check the support for the alternative WWNN/WWPN prefix support. 
- **/ -static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) -{ - u16 offset, caps; - u16 alt_san_mac_blk_offset; - - /* clear output first */ - *wwnn_prefix = 0xFFFF; - *wwpn_prefix = 0xFFFF; - - /* check if alternative SAN MAC is supported */ - hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, - &alt_san_mac_blk_offset); - - if ((alt_san_mac_blk_offset == 0) || - (alt_san_mac_blk_offset == 0xFFFF)) - goto wwn_prefix_out; - - /* check capability in alternative san mac address block */ - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; - hw->eeprom.ops.read(hw, offset, &caps); - if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) - goto wwn_prefix_out; - - /* get the corresponding prefix for WWNN/WWPN */ - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; - hw->eeprom.ops.read(hw, offset, wwnn_prefix); - - offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; - hw->eeprom.ops.read(hw, offset, wwpn_prefix); - -wwn_prefix_out: - return 0; -} - static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, @@ -2181,7 +2102,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .get_mac_addr = &ixgbe_get_mac_addr_generic, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, .get_device_caps = &ixgbe_get_device_caps_82599, - .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .stop_adapter = &ixgbe_stop_adapter_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, @@ -2214,6 +2135,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eerd_generic, .write = &ixgbe_write_eeprom_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; @@ -2240,5 +2162,5 @@ struct ixgbe_info ixgbe_82599_info = { .mac_ops = &mac_ops_82599, .eeprom_ops = &eeprom_ops_82599, .phy_ops = &phy_ops_82599, - .mbx_ops = &mbx_ops_82599, + .mbx_ops = &mbx_ops_generic, }; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 62aa2be199f..56052570cac 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_release_eeprom(struct ixgbe_hw *hw); -static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); /** * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx @@ -638,7 +636,7 @@ out: * Polls the status bit (bit 1) of the EERD or EEWR to determine when the * read or write is done respectively. 
**/ -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) { u32 i; u32 reg; @@ -1009,7 +1007,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum * @hw: pointer to hardware structure **/ -static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -1072,7 +1070,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, status = hw->eeprom.ops.read(hw, 0, &checksum); if (status == 0) { - checksum = ixgbe_calc_eeprom_checksum(hw); + checksum = hw->eeprom.ops.calc_checksum(hw); hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); @@ -1110,7 +1108,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) status = hw->eeprom.ops.read(hw, 0, &checksum); if (status == 0) { - checksum = ixgbe_calc_eeprom_checksum(hw); + checksum = hw->eeprom.ops.calc_checksum(hw); status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); } else { @@ -2686,3 +2684,48 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, return 0; } + +/** + * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. + **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, + &alt_san_mac_blk_offset); + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + hw->eeprom.ops.read(hw, offset, &caps); + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + hw->eeprom.ops.read(hw, offset, wwnn_prefix); + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + hw->eeprom.ops.read(hw, offset, wwpn_prefix); + +wwn_prefix_out: + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 424c223437d..341ca514a28 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -49,9 +49,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); @@ -81,7 +83,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); s32 
ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete); - +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index 471f0f2cdb9..aede6eb0e67 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c @@ -454,7 +454,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) mbx->stats.rsts = 0; } -struct ixgbe_mbx_operations mbx_ops_82599 = { +struct ixgbe_mbx_operations mbx_ops_generic = { .read = ixgbe_read_mbx_pf, .write = ixgbe_write_mbx_pf, .read_posted = ixgbe_read_posted_mbx, diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h index 7e0d08ff5b5..3df9b159021 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ixgbe/ixgbe_mbx.h @@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); -extern struct ixgbe_mbx_operations mbx_ops_82599; +extern struct ixgbe_mbx_operations mbx_ops_generic; #endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 6c0d42e33f2..bc255ec4842 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -424,6 +424,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, return 0; } +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. 
+ */ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_ERR_LINK_SETUP; + u16 speed_ability; + + *speed = 0; + *autoneg = true; + + status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, + &speed_ability); + + if (status == 0) { + if (speed_ability & MDIO_SPEED_10G) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_1000) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & MDIO_PMA_SPEED_100) + *speed |= IXGBE_LINK_SPEED_100_FULL; + } + + return status; +} + /** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index fb3898f12fc..86f83bd130c 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); /* PHY specific */ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 9e6908dff9b..86c0049ba19 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -2414,6 +2414,7 @@ struct ixgbe_eeprom_operations { s32 (*write)(struct ixgbe_hw *, u16, u16); s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); s32 (*update_checksum)(struct ixgbe_hw *); + u16 (*calc_checksum)(struct ixgbe_hw *); }; struct ixgbe_mac_operations { -- cgit v1.2.3-70-g09d2 From fe15e8e1c78521e0b4e375d6ed415b82265419c9 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 16 Nov 2010 19:27:16 -0800 Subject: ixgbe: add MAC and PHY support for x540 Adds the new x540.c file and Aquantia 1202 PHY for X540 support. 
Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/Makefile | 2 +- drivers/net/ixgbe/ixgbe.h | 2 + drivers/net/ixgbe/ixgbe_82599.c | 8 +- drivers/net/ixgbe/ixgbe_ethtool.c | 1 + drivers/net/ixgbe/ixgbe_main.c | 1 + drivers/net/ixgbe/ixgbe_phy.c | 23 +- drivers/net/ixgbe/ixgbe_phy.h | 2 + drivers/net/ixgbe/ixgbe_type.h | 9 + drivers/net/ixgbe/ixgbe_x540.c | 722 ++++++++++++++++++++++++++++++++++++++ 9 files changed, 766 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ixgbe/ixgbe_x540.c (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index 8f81efb4916..7d7387fbdec 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o + ixgbe_mbx.o ixgbe_x540.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 2b8cbb3a81f..4806736785a 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -481,10 +481,12 @@ struct ixgbe_rsc_cb { enum ixgbe_boards { board_82598, board_82599, + board_X540, }; extern struct ixgbe_info ixgbe_82598_info; extern struct ixgbe_info ixgbe_82599_info; +extern struct ixgbe_info ixgbe_X540_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 144c44a6b21..e34643eef16 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -181,6 +181,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) phy->ops.get_firmware_version = &ixgbe_get_phy_firmware_version_tnx; break; + case ixgbe_phy_aq: + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_generic; + break; default: break; } @@ -298,7 +302,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) /* Detect if there is a copper PHY attached. 
*/ if (hw->phy.type == ixgbe_phy_cu_unknown || - hw->phy.type == ixgbe_phy_tn) { + hw->phy.type == ixgbe_phy_tn || + hw->phy.type == ixgbe_phy_aq) { media_type = ixgbe_media_type_copper; goto out; } @@ -1890,6 +1895,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) hw->phy.ops.identify(hw); if (hw->phy.type == ixgbe_phy_tn || + hw->phy.type == ixgbe_phy_aq || hw->phy.type == ixgbe_phy_cu_unknown) { hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, &ext_ability); diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 6871b2be64f..b884f90b580 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -214,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev, /* Get PHY type */ switch (adapter->hw.phy.type) { case ixgbe_phy_tn: + case ixgbe_phy_aq: case ixgbe_phy_cu_unknown: /* Copper 10G-BASET */ ecmd->port = PORT_TP; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 6e56f7b7c8f..94c30b4f489 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -59,6 +59,7 @@ static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, [board_82599] = &ixgbe_82599_info, + [board_X540] = &ixgbe_X540_info, }; /* ixgbe_pci_tbl - PCI Device ID Table diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index bc255ec4842..c445fbce56e 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; + case AQ1202_PHY_ID: + phy_type = ixgbe_phy_aq; + break; case QT2022_PHY_ID: phy_type = ixgbe_phy_qt; break; @@ -433,8 +436,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, * Determines the link capabilities by reading the AUTOC register. */ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) + ixgbe_link_speed *speed, + bool *autoneg) { s32 status = IXGBE_ERR_LINK_SETUP; u16 speed_ability; @@ -1410,6 +1413,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, return status; } +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version +**/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1, + firmware_version); + + return status; +} + /** * ixgbe_tn_check_overtemp - Checks if an overtemp occured. 
* @hw: pointer to hardware structure diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index 86f83bd130c..e2c6b7eac64 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -106,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, bool *link_up); s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 86c0049ba19..cbcb15277b4 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -995,8 +995,10 @@ /* PHY IDs*/ #define TN1010_PHY_ID 0x00A19410 #define TNX_FW_REV 0xB +#define AQ1202_PHY_ID 0x03A1B440 #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 +#define AQ_FW_REV 0x20 /* PHY Types */ #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 @@ -1492,6 +1494,7 @@ #define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ #define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ #define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ #define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ /* EEPROM Addressing bits based on type (0-small, 1-large) */ #define IXGBE_EEC_ADDR_SIZE 0x00000400 @@ -1506,7 +1509,9 @@ #define IXGBE_EEPROM_SUM 0xBABA #define IXGBE_PCIE_ANALOG_PTR 0x03 #define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 #define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 #define IXGBE_PCIE_GENERAL_PTR 0x06 #define IXGBE_PCIE_CONFIG0_PTR 0x07 #define IXGBE_PCIE_CONFIG1_PTR 0x08 @@ -2173,6 +2178,7 @@ struct ixgbe_atr_input_masks { enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi, + ixgbe_flash, ixgbe_eeprom_none /* No NVM support */ }; @@ -2180,12 +2186,14 @@ enum ixgbe_mac_type { ixgbe_mac_unknown = 0, ixgbe_mac_82598EB, ixgbe_mac_82599EB, + ixgbe_mac_X540, ixgbe_num_macs }; enum ixgbe_phy_type { ixgbe_phy_unknown = 0, ixgbe_phy_tn, + ixgbe_phy_aq, ixgbe_phy_cu_unknown, ixgbe_phy_qt, ixgbe_phy_xaui, @@ -2584,6 +2592,7 @@ struct ixgbe_hw { u16 subsystem_vendor_id; u8 revision_id; bool adapter_stopped; + bool force_full_reset; }; struct ixgbe_info { diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c new file mode 100644 index 00000000000..7de5f7ea710 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -0,0 +1,722 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2010 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Contact Information: + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include +#include +#include + +#include "ixgbe.h" +#include "ixgbe_phy.h" +//#include "ixgbe_mbx.h" + +#define IXGBE_X540_MAX_TX_QUEUES 128 +#define IXGBE_X540_MAX_RX_QUEUES 128 +#define IXGBE_X540_RAR_ENTRIES 128 +#define IXGBE_X540_MC_TBL_SIZE 128 +#define IXGBE_X540_VFT_TBL_SIZE 128 + +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + return ixgbe_media_type_copper; +} + +static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + /* Call PHY identify routine to get the phy type */ + ixgbe_identify_phy_generic(hw); + + mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; + mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; + mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; + mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; + mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + return 0; +} + +/** + * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: true if autonegotiation enabled + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + return hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status = 0; + u32 ctrl; + u32 ctrl_ext; + u32 reset_bit; + u32 i; + u32 autoc; + u32 autoc2; + bool link_up = false; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests before reset + */ + status = ixgbe_disable_pcie_master(hw); + if (status != 0) { + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + } + + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
+ */ + if (hw->force_full_reset) { + reset_bit = IXGBE_CTRL_LNK_RST; + } else { + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); + if (!link_up) + reset_bit = IXGBE_CTRL_LNK_RST; + else + reset_bit = IXGBE_CTRL_RST; + } + + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + udelay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "Reset polling failed to complete.\n"); + } + + /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + msleep(50); + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (hw->mac.orig_link_settings_stored == false) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = true; + } else { + if (autoc != hw->mac.orig_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | + IXGBE_AUTOC_AN_RESTART)); + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + + return status; +} + +/** + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + hw->phy.ops.identify(hw); + + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & MDIO_PMA_EXTABLE_10GBT) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_1000BT) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & MDIO_PMA_EXTABLE_100BTX) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return 0; +} + +/** + * ixgbe_read_eerd_X540 - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EERPOM + **/ +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status; + + if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM)) + status = ixgbe_read_eerd_generic(hw, offset, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + u32 eewr; + s32 status; + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM)) { + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != 0) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + goto out; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != 0) { + hw_dbg(hw, "Eeprom write EEWR timed out\n"); + goto out; + } + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + +out: + ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (hw->eeprom.ops.read(hw, i, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
+ */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (hw->eeprom.ops.read(hw, i, &pointer) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (hw->eeprom.ops.read(hw, pointer, &length) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer+1; j <= pointer+length; j++) { + if (hw->eeprom.ops.read(hw, j, &word) != 0) { + hw_dbg(hw, "EEPROM read failed\n"); + break; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_update_eeprom_checksum_generic(hw); + + if (status) + status = ixgbe_update_flash_X540(hw); + + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. + **/ +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status = IXGBE_ERR_EEPROM; + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + hw_dbg(hw, "Flash update time out\n"); + goto out; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + + if (hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status) + hw_dbg(hw, "Flash update complete\n"); + else + hw_dbg(hw, "Flash update time out\n"); + + } +out: + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. 
+ **/ +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC); + if (reg & IXGBE_EEC_FLUDONE) { + status = 0; + break; + } + udelay(5); + } + return status; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 hwmask = 0; + u32 timeout = 200; + u32 i; + + if (swmask == IXGBE_GSSR_EEP_SM) + hwmask = IXGBE_GSSR_FLASH_SM; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + break; + } else { + /* + * Firmware currently using resource (fwmask), + * hardware currently using resource (hwmask), + * or other software thread currently using + * resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + msleep(5); + } + } + + /* + * If the resource is not released by the FW/HW the SW can assume that + * the FW/HW malfunctions. In that case the SW should set the + * SW bit(s) of the requested resource(s) while ignoring the + * corresponding FW/HW bits in the SW_FW_SYNC register. 
+ */ + if (i >= timeout) { + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (swfw_sync & (fwmask | hwmask)) { + if (ixgbe_get_swfw_sync_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + } + } + + msleep(5); + return 0; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + msleep(5); +} + +/** + * ixgbe_get_nvm_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = 0; + break; + } + udelay(50); + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ + if (status) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + + udelay(50); + } + } else { + hw_dbg(hw, "Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_nvm_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
+ **/ +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +static struct ixgbe_mac_operations mac_ops_X540 = { + .init_hw = &ixgbe_init_hw_generic, + .reset_hw = &ixgbe_reset_hw_X540, + .start_hw = &ixgbe_start_hw_generic, + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, + .get_media_type = &ixgbe_get_media_type_X540, + .get_supported_physical_layer = + &ixgbe_get_supported_physical_layer_X540, + .enable_rx_dma = &ixgbe_enable_rx_dma_generic, + .get_mac_addr = &ixgbe_get_mac_addr_generic, + .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, + .get_device_caps = NULL, + .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, + .stop_adapter = &ixgbe_stop_adapter_generic, + .get_bus_info = &ixgbe_get_bus_info_generic, + .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, + .read_analog_reg8 = NULL, + .write_analog_reg8 = NULL, + .setup_link = &ixgbe_setup_mac_link_X540, + .check_link = &ixgbe_check_mac_link_generic, + .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, + .led_on = &ixgbe_led_on_generic, + .led_off = &ixgbe_led_off_generic, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, + .set_rar = &ixgbe_set_rar_generic, + .clear_rar = &ixgbe_clear_rar_generic, + .set_vmdq = &ixgbe_set_vmdq_generic, + .clear_vmdq = &ixgbe_clear_vmdq_generic, + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, + .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, + .enable_mc = &ixgbe_enable_mc_generic, + .disable_mc = &ixgbe_disable_mc_generic, + .clear_vfta = &ixgbe_clear_vfta_generic, + .set_vfta = &ixgbe_set_vfta_generic, + .fc_enable = &ixgbe_fc_enable_generic, + .init_uta_tables = &ixgbe_init_uta_tables_generic, + .setup_sfp = NULL, +}; + +static struct ixgbe_eeprom_operations eeprom_ops_X540 = { + .init_params = &ixgbe_init_eeprom_params_X540, + .read = &ixgbe_read_eerd_X540, + .write = &ixgbe_write_eewr_X540, + .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_X540, +}; + +static struct ixgbe_phy_operations phy_ops_X540 = { + .identify = &ixgbe_identify_phy_generic, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = NULL, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, +}; + +struct ixgbe_info ixgbe_X540_info = { + .mac = ixgbe_mac_X540, + .get_invariants = &ixgbe_get_invariants_X540, + .mac_ops = &mac_ops_X540, + .eeprom_ops = &eeprom_ops_X540, + .phy_ops = &phy_ops_X540, + .mbx_ops = &mbx_ops_generic, +}; -- cgit v1.2.3-70-g09d2 From b93a22260f6f4bcf6c92c54de8530a97d3e921f0 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Tue, 16 Nov 2010 
19:27:17 -0800 Subject: ixgbe: add support for x540 MAC This patch adds support for the x540 MAC which is the next MAC in the 82598/82599 line. Signed-off-by: Don Skidmore Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 4 +++ drivers/net/ixgbe/ixgbe_dcb.c | 11 ++++++-- drivers/net/ixgbe/ixgbe_dcb_nl.c | 55 ++++++++++++++++++++++++++++-------- drivers/net/ixgbe/ixgbe_ethtool.c | 39 +++++++++++++++++++++----- drivers/net/ixgbe/ixgbe_main.c | 59 +++++++++++++++++++++++++++++++++++---- drivers/net/ixgbe/ixgbe_mbx.c | 38 +++++++++++++++---------- drivers/net/ixgbe/ixgbe_type.h | 1 + drivers/net/ixgbe/ixgbe_x540.c | 20 ++++++------- 8 files changed, 177 insertions(+), 50 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 4806736785a..3ae30b8cb7d 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -544,6 +544,10 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte); extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type); +extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring); +extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring); extern void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef IXGBE_FCOE extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index 4f2f0ae6735..d16c260c1f5 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -152,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = 0; - if (hw->mac.type == ixgbe_mac_82598EB) + switch (hw->mac.type) { + case ixgbe_mac_82598EB: ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); - else if (hw->mac.type == ixgbe_mac_82599EB) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); + break; + default: + break; + } return ret; } diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index b53b465e24a..bf566e8a455 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: adapter->last_lfc_mode = adapter->hw.fc.current_mode; adapter->hw.fc.requested_mode = ixgbe_fc_none; - } - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + break; } + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) @@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; + default: + break; + } ixgbe_init_interrupt_scheme(adapter); 
if (netif_running(netdev)) @@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; + break; + default: + break; } } @@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } if (adapter->dcb_cfg.pfc_mode_enable) { - if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && - (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) - adapter->last_lfc_mode = adapter->hw.fc.current_mode; + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) + adapter->last_lfc_mode = + adapter->hw.fc.current_mode; + break; + default: + break; + } adapter->hw.fc.requested_mode = ixgbe_fc_pfc; } else { - if (adapter->hw.mac.type != ixgbe_mac_82598EB) - adapter->hw.fc.requested_mode = adapter->last_lfc_mode; - else + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: adapter->hw.fc.requested_mode = ixgbe_fc_none; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + break; + default: + break; + } } if (adapter->dcb_set_bitmap & BIT_RESETLINK) { diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index b884f90b580..f9b58394fbb 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -431,15 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev) static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + u32 feature_list; - if (data) { - netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - if (adapter->hw.mac.type == ixgbe_mac_82599EB) - netdev->features |= NETIF_F_SCTP_CSUM; - } else { - netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_SCTP_CSUM); + feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + feature_list |= NETIF_F_SCTP_CSUM; + break; + default: + break; } + if (data) + netdev->features |= feature_list; + else + netdev->features &= ~feature_list; return 0; } @@ -1250,6 +1256,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) test = reg_test_82598; break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1476,6 +1483,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) switch (hw->mac.type) { case ixgbe_mac_82599EB: + case ixgbe_mac_X540: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1512,6 +1520,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: + case ixgbe_mac_X540: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -2198,6 +2207,22 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) case ixgbe_mac_82599EB: need_reset = true; break; + case ixgbe_mac_X540: { + int i; + for (i = 0; i < adapter->num_rx_queues; i++) { + struct ixgbe_ring *ring = + adapter->rx_ring[i]; + if (adapter->flags2 & + IXGBE_FLAG2_RSC_ENABLED) 
{ + ixgbe_configure_rscctl(adapter, + ring); + } else { + ixgbe_clear_rscctl(adapter, + ring); + } + } + } + break; default: break; } diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 94c30b4f489..b859a298cd2 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -113,6 +113,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), + board_82599 }, /* required last entry */ {0, } @@ -561,6 +563,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -596,6 +599,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -923,6 +927,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599; rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599); @@ -956,6 +961,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx)); txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << @@ -1581,6 +1587,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) v_idx); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; @@ -1688,8 +1695,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) itr_reg |= (itr_reg << 16); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: /* - * 82599 can support a value of zero, so allow it for + * 82599 and X540 can support a value of zero, so allow it for * max interrupt rate, but there is an errata where it can * not be zero with RSC */ @@ -1885,6 +1893,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: + case ixgbe_mac_X540: /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { int i; @@ -1930,6 +1939,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -1955,6 +1965,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2427,6 +2438,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, mask |= IXGBE_EIMS_GPI_SDP1; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: + case ixgbe_mac_X540: mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; @@ -2492,6 +2504,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) switch 
(hw->mac.type) { case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ixgbe_check_sfp_event(adapter, eicr); if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { @@ -2601,6 +2614,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -2795,6 +2809,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, } break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: default: break; } @@ -2891,12 +2906,29 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } +/** + * ixgbe_clear_rscctl - disable RSC for the indicated ring + * @adapter: address of board private structure + * @ring: structure containing ring specific data + **/ +void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; + u8 reg_idx = ring->reg_idx; + + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); + rscctrl &= ~IXGBE_RSCCTL_RSCEN; + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); +} + /** * ixgbe_configure_rscctl - enable RSC for the indicated ring * @adapter: address of board private structure * @index: index of ring to set **/ -static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, +void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring) { struct ixgbe_hw *hw = &adapter->hw; @@ -3201,6 +3233,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) rdrxctl |= IXGBE_RDRXCTL_MVMEN; break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: /* Disable RSC for ACK packets */ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); @@ -3328,6 +3361,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: for (i = 0; i < adapter->num_rx_queues; i++) { j = adapter->rx_ring[i]->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); @@ -3357,6 +3391,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: for (i = 0; i < adapter->num_rx_queues; i++) { j = adapter->rx_ring[i]->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); @@ -3712,8 +3747,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_82598EB: IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); break; - default: case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); break; @@ -4061,6 +4097,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* Disable the Tx DMA engine on 82599 */ switch (hw->mac.type) { case ixgbe_mac_82599EB: + case ixgbe_mac_X540: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -4435,6 +4472,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) ret = true; break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: if (dcb_i == 8) { /* * Tx TC0 starts at: descriptor queue 0 @@ -5049,6 +5087,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) 
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; @@ -5567,6 +5606,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) pci_wake_from_d3(pdev, false); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -5696,6 +5736,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -5720,6 +5761,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); break; case ixgbe_mac_82599EB: + case ixgbe_mac_X540: hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); @@ -5983,7 +6025,8 @@ static void ixgbe_watchdog_task(struct work_struct *work) flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); } break; - case ixgbe_mac_82599EB: { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); @@ -7057,8 +7100,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; /* Make it possible the adapter to be woken up via WOL */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + break; + default: + break; + } /* * If there is a fan on this device and it has failed log the diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index aede6eb0e67..027c628c3aa 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c @@ -319,8 +319,14 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) u32 vflre = 0; s32 ret_val = IXGBE_ERR_MBX; - if (hw->mac.type == ixgbe_mac_82599EB) + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + default: + break; + } if (vflre & (1 << vf_shift)) { ret_val = 0; @@ -439,19 +445,23 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (hw->mac.type != ixgbe_mac_82599EB) - return; - - mbx->timeout = 0; - mbx->usec_delay = 0; - - mbx->size = IXGBE_VFMAILBOX_SIZE; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + break; + default: + break; + } } struct ixgbe_mbx_operations mbx_ops_generic = { diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index cbcb15277b4..42c607339a6 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -58,6 +58,7 @@ #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_X540T 0x1528 /* General Registers */ #define IXGBE_CTRL 0x00000 diff 
--git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 7de5f7ea710..9649fa727e3 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -46,7 +46,7 @@ static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); -enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) { return ixgbe_media_type_copper; } @@ -75,9 +75,9 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed **/ -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) +static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) { return hw->phy.ops.setup_link_speed(hw, speed, autoneg, autoneg_wait_to_complete); @@ -91,7 +91,7 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status = 0; @@ -222,7 +222,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) * * Determines physical layer capabilities of the current configuration. **/ -u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) { u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; u16 ext_ability = 0; @@ -245,7 +245,7 @@ u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params * @hw: pointer to hardware structure **/ -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; @@ -274,7 +274,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) * @offset: offset of word in the EEPROM to read * @data: word read from the EERPOM **/ -s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { s32 status; @@ -295,7 +295,7 @@ s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) * * Write a 16 bit word to the EEPROM using the EEWR register. **/ -s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) { u32 eewr; s32 status; @@ -406,7 +406,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ -s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) { s32 status; -- cgit v1.2.3-70-g09d2 From e2ddeba95c09d0d44719ff005e915dc06ff46571 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 16 Nov 2010 19:27:18 -0800 Subject: ixgbe: refactor ixgbe_alloc_queues() I noticed ring variable was initialized before allocations, and that memory node management was a bit ugly. We also leak memory in case of ring allocations error. 
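A minimal userspace sketch of the allocate-then-unwind shape this refactor moves to, using illustrative names (alloc_rings, struct ring) rather than the driver's own API; the real code additionally tries kzalloc_node() on a pre-selected NUMA node and falls back to kzalloc() before giving up:

#include <stdlib.h>

struct ring {
	int queue_index;
};

/*
 * Allocate 'num' rings up front; on any failure, free the rings that were
 * already allocated before returning, so a partial failure cannot leak.
 */
static struct ring **alloc_rings(int num)
{
	struct ring **rings = calloc(num, sizeof(*rings));
	int i;

	if (!rings)
		return NULL;

	for (i = 0; i < num; i++) {
		rings[i] = calloc(1, sizeof(**rings));
		if (!rings[i])
			goto err;
		rings[i]->queue_index = i;
	}
	return rings;

err:
	while (i)
		free(rings[--i]);
	free(rings);
	return NULL;
}

int main(void)
{
	struct ring **rings = alloc_rings(4);
	int i;

	if (rings) {
		for (i = 0; i < 4; i++)
			free(rings[i]);
		free(rings);
	}
	return 0;
}

Keeping separate tx and rx counters, as the patch below does, is what lets a single error label free exactly the rings allocated so far.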
Signed-off-by: Eric Dumazet Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 72 ++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 44 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index b859a298cd2..5409af3da06 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -4676,71 +4676,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) **/ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) { - int i; - int rx_count; - int orig_node = adapter->node; + int rx = 0, tx = 0, nid = adapter->node; - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = adapter->tx_ring[i]; - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, - adapter->node); + if (nid < 0 || !node_online(nid)) + nid = first_online_node; + + for (; tx < adapter->num_tx_queues; tx++) { + struct ixgbe_ring *ring; + + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); if (!ring) - ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); + ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) - goto err_tx_ring_allocation; + goto err_allocation; ring->count = adapter->tx_ring_count; - ring->queue_index = i; + ring->queue_index = tx; + ring->numa_node = nid; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; - ring->numa_node = adapter->node; - adapter->tx_ring[i] = ring; + adapter->tx_ring[tx] = ring; } - /* Restore the adapter's original node */ - adapter->node = orig_node; + for (; rx < adapter->num_rx_queues; rx++) { + struct ixgbe_ring *ring; - rx_count = adapter->rx_ring_count; - for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = adapter->rx_ring[i]; - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL, - adapter->node); + ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid); if (!ring) - ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); + ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) - goto err_rx_ring_allocation; - ring->count = rx_count; - ring->queue_index = i; + goto err_allocation; + ring->count = adapter->rx_ring_count; + ring->queue_index = rx; + ring->numa_node = nid; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; - ring->numa_node = adapter->node; - adapter->rx_ring[i] = ring; + adapter->rx_ring[rx] = ring; } - /* Restore the adapter's original node */ - adapter->node = orig_node; - ixgbe_cache_ring_register(adapter); return 0; -err_rx_ring_allocation: - for (i = 0; i < adapter->num_tx_queues; i++) - kfree(adapter->tx_ring[i]); -err_tx_ring_allocation: +err_allocation: + while (tx) + kfree(adapter->tx_ring[--tx]); + + while (rx) + kfree(adapter->rx_ring[--rx]); return -ENOMEM; } -- cgit v1.2.3-70-g09d2 From 9a2d09cf61c9d9f1b31998bec5363a583e4564a4 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Sun, 21 Nov 2010 09:55:10 -0800 Subject: ixgbe: update version number for ixgbe This will reflect addition of new X540 hardware Signed-off-by: Don Skidmore Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5409af3da06..02541956744 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -52,7 +52,7 @@ char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = "Intel(R) 10 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "2.0.84-k2" +#define DRV_VERSION "3.0.12-k2" const char ixgbe_driver_version[] = DRV_VERSION; static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; -- cgit v1.2.3-70-g09d2 From 89bf67f1f080c947c92f8773482d9e57767ca292 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 22 Nov 2010 00:15:06 +0000 Subject: drivers/net: use vzalloc() Use vzalloc() and vzalloc_node() in net drivers Signed-off-by: Eric Dumazet Acked-by: Jon Mason Signed-off-by: David S. Miller --- drivers/net/bnx2.c | 9 ++------- drivers/net/cxgb3/cxgb3_offload.c | 6 ++---- drivers/net/cxgb4/cxgb4_main.c | 6 ++---- drivers/net/e1000/e1000_main.c | 6 ++---- drivers/net/e1000e/netdev.c | 6 ++---- drivers/net/ehea/ehea_main.c | 4 +--- drivers/net/igb/igb_main.c | 6 ++---- drivers/net/igbvf/netdev.c | 6 ++---- drivers/net/ixgb/ixgb_main.c | 6 ++---- drivers/net/ixgbe/ixgbe_main.c | 10 ++++------ drivers/net/ixgbevf/ixgbevf_main.c | 6 ++---- drivers/net/netxen/netxen_nic_init.c | 6 ++---- drivers/net/pch_gbe/pch_gbe_main.c | 6 ++---- drivers/net/pptp.c | 3 +-- drivers/net/qlcnic/qlcnic_init.c | 6 ++---- drivers/net/sfc/filter.c | 3 +-- drivers/net/vxge/vxge-config.c | 31 ++++++++----------------------- 17 files changed, 39 insertions(+), 87 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 062600be073..0de196da4d4 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) int j; rxr->rx_buf_ring = - vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); + vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); if (rxr->rx_buf_ring == NULL) return -ENOMEM; - memset(rxr->rx_buf_ring, 0, - SW_RXBD_RING_SIZE * bp->rx_max_ring); - for (j = 0; j < bp->rx_max_ring; j++) { rxr->rx_desc_ring[j] = dma_alloc_coherent(&bp->pdev->dev, @@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) } if (bp->rx_pg_ring_size) { - rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE * + rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * bp->rx_max_pg_ring); if (rxr->rx_pg_ring == NULL) return -ENOMEM; - memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE * - bp->rx_max_pg_ring); } for (j = 0; j < bp->rx_max_pg_ring; j++) { diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index bcf07532953..ef02aa68c92 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) */ void *cxgb_alloc_mem(unsigned long size) { - void *p = kmalloc(size, GFP_KERNEL); + void *p = kzalloc(size, GFP_KERNEL); if (!p) - p = vmalloc(size); - if (p) - memset(p, 0, size); + p = vzalloc(size); return p; } diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index f50bc98310f..848f89d19fb 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -868,12 +868,10 @@ out: release_firmware(fw); */ void *t4_alloc_mem(size_t size) { - void *p = kmalloc(size, GFP_KERNEL); + void *p = kzalloc(size, GFP_KERNEL); if (!p) - p 
= vmalloc(size); - if (p) - memset(p, 0, size); + p = vzalloc(size); return p; } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4686c3983fc..dcb7f82c270 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -1425,13 +1425,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, int size; size = sizeof(struct e1000_buffer) * txdr->count; - txdr->buffer_info = vmalloc(size); + txdr->buffer_info = vzalloc(size); if (!txdr->buffer_info) { e_err(probe, "Unable to allocate memory for the Tx descriptor " "ring\n"); return -ENOMEM; } - memset(txdr->buffer_info, 0, size); /* round up to nearest 4K */ @@ -1621,13 +1620,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, int size, desc_len; size = sizeof(struct e1000_buffer) * rxdr->count; - rxdr->buffer_info = vmalloc(size); + rxdr->buffer_info = vzalloc(size); if (!rxdr->buffer_info) { e_err(probe, "Unable to allocate memory for the Rx descriptor " "ring\n"); return -ENOMEM; } - memset(rxdr->buffer_info, 0, size); desc_len = sizeof(struct e1000_rx_desc); diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 9b3f0a996b0..0adcb79e638 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -2059,10 +2059,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter) int err = -ENOMEM, size; size = sizeof(struct e1000_buffer) * tx_ring->count; - tx_ring->buffer_info = vmalloc(size); + tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) goto err; - memset(tx_ring->buffer_info, 0, size); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); @@ -2095,10 +2094,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter) int i, size, desc_len, err = -ENOMEM; size = sizeof(struct e1000_buffer) * rx_ring->count; - rx_ring->buffer_info = vmalloc(size); + rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) goto err; - memset(rx_ring->buffer_info, 0, size); for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 182b2a7be8d..a84c389d3db 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1496,12 +1496,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) { int arr_size = sizeof(void *) * max_q_entries; - q_skba->arr = vmalloc(arr_size); + q_skba->arr = vzalloc(arr_size); if (!q_skba->arr) return -ENOMEM; - memset(q_skba->arr, 0, arr_size); - q_skba->len = max_q_entries; q_skba->index = 0; q_skba->os_skbs = 0; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 892d196f17a..67ea262e482 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -2436,10 +2436,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) int size; size = sizeof(struct igb_buffer) * tx_ring->count; - tx_ring->buffer_info = vmalloc(size); + tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) goto err; - memset(tx_ring->buffer_info, 0, size); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); @@ -2587,10 +2586,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) int size, desc_len; size = sizeof(struct igb_buffer) * rx_ring->count; - rx_ring->buffer_info = vmalloc(size); + rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) goto err; - memset(rx_ring->buffer_info, 0, size); desc_len = sizeof(union 
e1000_adv_rx_desc); diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 4c998b7726d..8dbde2397c1 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -430,10 +430,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, int size; size = sizeof(struct igbvf_buffer) * tx_ring->count; - tx_ring->buffer_info = vmalloc(size); + tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) goto err; - memset(tx_ring->buffer_info, 0, size); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); @@ -470,10 +469,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, int size, desc_len; size = sizeof(struct igbvf_buffer) * rx_ring->count; - rx_ring->buffer_info = vmalloc(size); + rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) goto err; - memset(rx_ring->buffer_info, 0, size); desc_len = sizeof(union e1000_adv_rx_desc); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index caa8192fff2..211a1694667 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -669,13 +669,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter) int size; size = sizeof(struct ixgb_buffer) * txdr->count; - txdr->buffer_info = vmalloc(size); + txdr->buffer_info = vzalloc(size); if (!txdr->buffer_info) { netif_err(adapter, probe, adapter->netdev, "Unable to allocate transmit descriptor ring memory\n"); return -ENOMEM; } - memset(txdr->buffer_info, 0, size); /* round up to nearest 4K */ @@ -759,13 +758,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter) int size; size = sizeof(struct ixgb_buffer) * rxdr->count; - rxdr->buffer_info = vmalloc(size); + rxdr->buffer_info = vzalloc(size); if (!rxdr->buffer_info) { netif_err(adapter, probe, adapter->netdev, "Unable to allocate receive descriptor ring\n"); return -ENOMEM; } - memset(rxdr->buffer_info, 0, size); /* Round up to nearest 4K */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 02541956744..494cb57b700 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5181,12 +5181,11 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); + tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vmalloc(size); + tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err; - memset(tx_ring->tx_buffer_info, 0, size); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); @@ -5246,12 +5245,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node); + rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vmalloc(size); + rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) goto err; - memset(rx_ring->rx_buffer_info, 0, size); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index 5b8063cb4e6..2216a3c8b12 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -2489,10 +2489,9 @@ int 
ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, int size; size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vmalloc(size); + tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err; - memset(tx_ring->tx_buffer_info, 0, size); /* round up to nearest 4K */ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); @@ -2556,14 +2555,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, int size; size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vmalloc(size); + rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) { hw_dbg(&adapter->hw, "Unable to vmalloc buffer memory for " "the receive descriptor ring\n"); goto alloc_failed; } - memset(rx_ring->rx_buffer_info, 0, size); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 95fe552aa27..f946de23fe7 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, 0); - cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) { dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", netdev->name); goto err_out; } - memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; recv_ctx = &adapter->recv_ctx; @@ -280,7 +279,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) } rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) - vmalloc(RCV_BUFF_RINGSIZE(rds_ring)); + vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); if (rds_ring->rx_buf_arr == NULL) { printk(KERN_ERR "%s: Failed to allocate " "rx buffer ring %d\n", @@ -288,7 +287,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) /* free whatever was already allocated */ goto err_out; } - memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring)); INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 472056b4744..afb75066b14 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, int desNo; size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; - tx_ring->buffer_info = vmalloc(size); + tx_ring->buffer_info = vzalloc(size); if (!tx_ring->buffer_info) { pr_err("Unable to allocate memory for the buffer infomation\n"); return -ENOMEM; } - memset(tx_ring->buffer_info, 0, size); tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); @@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter, int desNo; size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; - rx_ring->buffer_info = vmalloc(size); + rx_ring->buffer_info = vzalloc(size); if (!rx_ring->buffer_info) { pr_err("Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } - memset(rx_ring->buffer_info, 0, size); rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index 
ccbc91326bf..7556a9224f7 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -673,8 +673,7 @@ static int __init pptp_init_module(void) int err = 0; pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n"); - callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *), - GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); + callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *)); if (!callid_sock) { pr_err("PPTP: cann't allocate memory\n"); return -ENOMEM; diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 0d180c6e41f..3f970187cfc 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -236,12 +236,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) tx_ring->num_desc = adapter->num_txd; tx_ring->txq = netdev_get_tx_queue(netdev, 0); - cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); + cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring)); if (cmd_buf_arr == NULL) { dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); goto err_out; } - memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); tx_ring->cmd_buf_arr = cmd_buf_arr; recv_ctx = &adapter->recv_ctx; @@ -276,13 +275,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter) break; } rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *) - vmalloc(RCV_BUFF_RINGSIZE(rds_ring)); + vzalloc(RCV_BUFF_RINGSIZE(rds_ring)); if (rds_ring->rx_buf_arr == NULL) { dev_err(&netdev->dev, "Failed to allocate " "rx buffer ring %d\n", ring); goto err_out; } - memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring)); INIT_LIST_HEAD(&rds_ring->free_list); /* * Now go through all of them, set reference handles diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c index 52cb6082b91..44500b54fd5 100644 --- a/drivers/net/sfc/filter.c +++ b/drivers/net/sfc/filter.c @@ -428,10 +428,9 @@ int efx_probe_filters(struct efx_nic *efx) GFP_KERNEL); if (!table->used_bitmap) goto fail; - table->spec = vmalloc(table->size * sizeof(*table->spec)); + table->spec = vzalloc(table->size * sizeof(*table->spec)); if (!table->spec) goto fail; - memset(table->spec, 0, table->size * sizeof(*table->spec)); } return 0; diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 409c2e6053d..44d3ddd37b3 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -1220,13 +1220,12 @@ vxge_hw_device_initialize( goto exit; hldev = (struct __vxge_hw_device *) - vmalloc(sizeof(struct __vxge_hw_device)); + vzalloc(sizeof(struct __vxge_hw_device)); if (hldev == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - memset(hldev, 0, sizeof(struct __vxge_hw_device)); hldev->magic = VXGE_HW_DEVICE_MAGIC; vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); @@ -2064,15 +2063,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate, * allocate new memblock and its private part at once. * This helps to minimize memory usage a lot. 
*/ mempool->memblocks_priv_arr[i] = - vmalloc(mempool->items_priv_size * n_items); + vzalloc(mempool->items_priv_size * n_items); if (mempool->memblocks_priv_arr[i] == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - memset(mempool->memblocks_priv_arr[i], 0, - mempool->items_priv_size * n_items); - /* allocate DMA-capable memblock */ mempool->memblocks_arr[i] = __vxge_hw_blockpool_malloc(mempool->devh, @@ -2145,12 +2141,11 @@ __vxge_hw_mempool_create( } mempool = (struct vxge_hw_mempool *) - vmalloc(sizeof(struct vxge_hw_mempool)); + vzalloc(sizeof(struct vxge_hw_mempool)); if (mempool == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto exit; } - memset(mempool, 0, sizeof(struct vxge_hw_mempool)); mempool->devh = devh; mempool->memblock_size = memblock_size; @@ -2170,31 +2165,27 @@ __vxge_hw_mempool_create( /* allocate array of memblocks */ mempool->memblocks_arr = - (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); + (void **) vzalloc(sizeof(void *) * mempool->memblocks_max); if (mempool->memblocks_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->memblocks_arr, 0, - sizeof(void *) * mempool->memblocks_max); /* allocate array of private parts of items per memblocks */ mempool->memblocks_priv_arr = - (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); + (void **) vzalloc(sizeof(void *) * mempool->memblocks_max); if (mempool->memblocks_priv_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->memblocks_priv_arr, 0, - sizeof(void *) * mempool->memblocks_max); /* allocate array of memblocks DMA objects */ mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) - vmalloc(sizeof(struct vxge_hw_mempool_dma) * + vzalloc(sizeof(struct vxge_hw_mempool_dma) * mempool->memblocks_max); if (mempool->memblocks_dma_arr == NULL) { @@ -2203,20 +2194,16 @@ __vxge_hw_mempool_create( mempool = NULL; goto exit; } - memset(mempool->memblocks_dma_arr, 0, - sizeof(struct vxge_hw_mempool_dma) * - mempool->memblocks_max); /* allocate hash array of items */ mempool->items_arr = - (void **) vmalloc(sizeof(void *) * mempool->items_max); + (void **) vzalloc(sizeof(void *) * mempool->items_max); if (mempool->items_arr == NULL) { __vxge_hw_mempool_destroy(mempool); status = VXGE_HW_ERR_OUT_OF_MEMORY; mempool = NULL; goto exit; } - memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max); /* calculate initial number of memblocks */ memblocks_to_allocate = (mempool->items_initial + @@ -4272,14 +4259,12 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev, goto vpath_open_exit1; vp = (struct __vxge_hw_vpath_handle *) - vmalloc(sizeof(struct __vxge_hw_vpath_handle)); + vzalloc(sizeof(struct __vxge_hw_vpath_handle)); if (vp == NULL) { status = VXGE_HW_ERR_OUT_OF_MEMORY; goto vpath_open_exit2; } - memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle)); - vp->vpath = vpath; if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { -- cgit v1.2.3-70-g09d2 From c6ecf39a10ceec3e97096e2a8d3eadcecd593422 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 03:31:51 +0000 Subject: ixgbe: fix link behavior for SFP+ when driver is brought down We have had several requests to have ifconfig down command disable the SFP+ laser and thus make link go down. Likewise on ifconfig up the laser would be enabled and link would come up. This patch enables that behavior. 
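The up/down paths below repeat the same eligibility test before touching the optics; a self-contained sketch of that test, with stand-in type and field names (hw_caps, has_laser_ops — not the driver's structures), makes the rule explicit: drive the laser only when the MAC provides enable/disable hooks and the part is either a multispeed-fiber module or 82599 SFP+ fiber.

#include <stdbool.h>

/* Stand-in types for illustration; the real definitions live in ixgbe_type.h. */
enum mac_type   { MAC_82598EB, MAC_82599EB, MAC_X540 };
enum media_type { MEDIA_COPPER, MEDIA_FIBER, MEDIA_BACKPLANE };

struct hw_caps {
	bool has_laser_ops;	/* enable_tx_laser/disable_tx_laser implemented */
	bool multispeed_fiber;
	enum mac_type mac;
	enum media_type media;
};

/* Mirrors the check the patch open-codes before every laser on/off call. */
static bool laser_control_supported(const struct hw_caps *hw)
{
	return hw->has_laser_ops &&
	       (hw->multispeed_fiber ||
	        (hw->media == MEDIA_FIBER && hw->mac == MAC_82599EB));
}

int main(void)
{
	struct hw_caps sfp_plus = { true, false, MAC_82599EB, MEDIA_FIBER };

	return laser_control_supported(&sfp_plus) ? 0 : 1;
}

When the check passes, ixgbe_down() and ixgbe_probe() power the optics off and ixgbe_up_complete() powers them back on, which is what makes link state follow the interface state.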
Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_82599.c | 12 +++++++++--- drivers/net/ixgbe/ixgbe_main.c | 26 ++++++++++++++++++-------- 2 files changed, 27 insertions(+), 11 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index e34643eef16..8fa76785b45 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -65,9 +65,9 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; - if (hw->phy.multispeed_fiber) { - /* Set up dual speed SFP+ support */ - mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + + /* enable the laser control functions for SFP+ fiber */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { mac->ops.disable_tx_laser = &ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = @@ -77,6 +77,12 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + } else { if ((mac->ops.get_media_type(hw) == ixgbe_media_type_backplane) && (hw->phy.smart_speed == ixgbe_smart_speed_auto || diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 494cb57b700..5861ece7020 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3793,8 +3793,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) else ixgbe_configure_msi_and_legacy(adapter); - /* enable the optics */ - if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser) + /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->phy.type == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) hw->mac.ops.enable_tx_laser(hw); clear_bit(__IXGBE_DOWN, &adapter->state); @@ -4106,15 +4109,19 @@ void ixgbe_down(struct ixgbe_adapter *adapter) break; } - /* power down the optics */ - if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser) - hw->mac.ops.disable_tx_laser(hw); - /* clear n-tuple filters that are cached */ ethtool_ntuple_flush(netdev); if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); + + /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->phy.type == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) + hw->mac.ops.disable_tx_laser(hw); + ixgbe_clean_all_tx_rings(adapter); ixgbe_clean_all_rx_rings(adapter); @@ -7197,8 +7204,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_eeprom; } - /* power down the optics */ - if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser) + /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser && + ((hw->phy.multispeed_fiber) || + ((hw->phy.type == ixgbe_media_type_fiber) && + (hw->mac.type == ixgbe_mac_82599EB)))) hw->mac.ops.disable_tx_laser(hw); init_timer(&adapter->watchdog_timer); -- cgit v1.2.3-70-g09d2 From 0b077feac00a8b7b0afbab3274b2e74b749bc917 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 03:32:13 +0000 Subject: 
ixgbe: add WOL support for SFP+ subdevice This patch will add wake on LAN support to the dev/sub_dev 10FB 11A9. This will also include ixgbe ethtool support for this device. Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_ethtool.c | 11 +++++++++++ drivers/net/ixgbe/ixgbe_main.c | 15 ++++++++++----- drivers/net/ixgbe/ixgbe_type.h | 1 + 3 files changed, 22 insertions(+), 5 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index f9b58394fbb..ef3f9105a05 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1879,7 +1879,16 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; int retval = 1; + /* WOL not supported except for the following */ switch(hw->device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only this subdevice supports WOL */ + if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) { + wol->supported = 0; + break; + } + retval = 0; + break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ if (hw->subsystem_device_id == @@ -1887,6 +1896,8 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, wol->supported = 0; break; } + retval = 0; + break; case IXGBE_DEV_ID_82599_KX4: retval = 0; break; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5861ece7020..5f4c93d40a4 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -7223,13 +7223,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; switch (pdev->device) { + case IXGBE_DEV_ID_82599_SFP: + /* Only this subdevice supports WOL */ + if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP) + adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | + IXGBE_WUFC_MC | IXGBE_WUFC_BC); + break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ - if (pdev->subsystem_device == - IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) { - adapter->wol = 0; - break; - } + if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) + adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | + IXGBE_WUFC_MC | IXGBE_WUFC_BC); + break; case IXGBE_DEV_ID_82599_KX4: adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | IXGBE_WUFC_MC | IXGBE_WUFC_BC); diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 42c607339a6..9557ae64f95 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -54,6 +54,7 @@ #define IXGBE_DEV_ID_82599_T3_LOM 0x151C #define IXGBE_DEV_ID_82599_CX4 0x10F9 #define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 -- cgit v1.2.3-70-g09d2 From dbffcb210f45239ea530e0a71470e48abefe4210 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 03:32:34 +0000 Subject: ixgbe: add support for 82599 FCoE SKU Add both NIC and backplane support for FCoE enabled devices IDs. Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ixgbe/ixgbe_82599.c | 2 ++ drivers/net/ixgbe/ixgbe_main.c | 4 ++++ drivers/net/ixgbe/ixgbe_type.h | 2 ++ 3 files changed, 8 insertions(+) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 8fa76785b45..385ccebb826 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -319,11 +319,13 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: media_type = ixgbe_media_type_fiber; break; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5f4c93d40a4..c5c93408212 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -109,6 +109,10 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), + board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), + board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 9557ae64f95..35b60db5e77 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -54,6 +54,8 @@ #define IXGBE_DEV_ID_82599_T3_LOM 0x151C #define IXGBE_DEV_ID_82599_CX4 0x10F9 #define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC -- cgit v1.2.3-70-g09d2 From 289700dbc40c78741f17e2304ed4ac0db3c3afd3 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 03:32:58 +0000 Subject: ixgbe: add support for new format of PBA numbers The new PBA format is stored as a string. This patch allows the driver to support both the old and new format. Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ixgbe/ixgbe_common.c | 94 +++++++++++++++++++++++++++++++++++++--- drivers/net/ixgbe/ixgbe_common.h | 3 +- drivers/net/ixgbe/ixgbe_main.c | 18 ++++---- drivers/net/ixgbe/ixgbe_type.h | 6 +++ 4 files changed, 105 insertions(+), 16 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 56052570cac..cc11e422ce9 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -196,30 +196,110 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) } /** - * ixgbe_read_pba_num_generic - Reads part number from EEPROM + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM * @hw: pointer to hardware structure - * @pba_num: stores the part number from the EEPROM + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length * - * Reads the part number from the EEPROM. + * Reads the part number string from the EEPROM. 
**/ -s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) { s32 ret_val; u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + hw_dbg(hw, "PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); if (ret_val) { hw_dbg(hw, "NVM Read Error\n"); return ret_val; } - *pba_num = (u32)(data << 16); - ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); if (ret_val) { hw_dbg(hw, "NVM Read Error\n"); return ret_val; } - *pba_num |= data; + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + hw_dbg(hw, "NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + hw_dbg(hw, "NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + hw_dbg(hw, "PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + hw_dbg(hw, "NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; return 0; } diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 341ca514a28..e1f980a8a09 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -35,7 +35,8 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index c5c93408212..f97353cdb60 100644 
--- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -6952,11 +6952,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; static int cards_found; int i, err, pci_using_dac; + u8 part_str[IXGBE_PBANUM_LENGTH]; unsigned int indices = num_possible_cpus(); #ifdef IXGBE_FCOE u16 device_caps; #endif - u32 part_num, eec; + u32 eec; /* Catch broken hardware that put the wrong VF device ID in * the PCIe SR-IOV capability. @@ -7262,16 +7263,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : "Unknown"), netdev->dev_addr); - ixgbe_read_pba_num_generic(hw, &part_num); + + err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); + if (err) + strcpy(part_str, "Unknown"); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) - e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " - "PBA No: %06x-%03x\n", + e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, hw->phy.sfp_type, - (part_num >> 8), (part_num & 0xff)); + part_str); else - e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", - hw->mac.type, hw->phy.type, - (part_num >> 8), (part_num & 0xff)); + e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, part_str); if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { e_dev_warn("PCI-Express bandwidth available for this card is " diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 35b60db5e77..ef816dd5a8f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1508,7 +1508,11 @@ #define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 #define IXGBE_EEPROM_OPCODE_BITS 8 +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + /* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA #define IXGBE_EEPROM_CHECKSUM 0x3F #define IXGBE_EEPROM_SUM 0xBABA #define IXGBE_PCIE_ANALOG_PTR 0x03 @@ -2637,6 +2641,8 @@ struct ixgbe_info { #define IXGBE_ERR_NO_SPACE -25 #define IXGBE_ERR_OVERTEMP -26 #define IXGBE_ERR_RAR_INDEX -27 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF #endif /* _IXGBE_TYPE_H_ */ -- cgit v1.2.3-70-g09d2 From a7f5a5fcd9f13afd3471a0de8c1fdaa8f989497c Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 13:23:30 +0000 Subject: ixgbe: fix for link failure on SFP+ DA cables This patch helps prevent FW/SW semaphore collision from leading to link establishment failure. The collision might mess up the PHY registers so we reset the PHY. However there are SFI/KR areas in the PHY that are not reset with a Reset_AN so we need to change LMS to reset it. Also wait until AN state machine is AN_GOOD Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ixgbe/ixgbe_82599.c | 28 +++++++++++++++++++++++++--- drivers/net/ixgbe/ixgbe_type.h | 3 +++ 2 files changed, 28 insertions(+), 3 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 385ccebb826..6827dddc383 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -96,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = 0; + u32 reg_anlp1 = 0; + u32 i = 0; u16 list_offset, data_offset, data_value; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { @@ -122,14 +124,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); hw->eeprom.ops.read(hw, ++data_offset, &data_value); } - /* Now restart DSP by setting Restart_AN */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, - (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART)); /* Release the semaphore */ ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Delay obtaining semaphore again to allow FW access */ msleep(hw->eeprom.semaphore_delay); + + /* Now restart DSP by setting Restart_AN and clearing LMS */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, + IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | + IXGBE_AUTOC_AN_RESTART)); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + msleep(4); + reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { + hw_dbg(hw, "sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; + } + + /* Restart DSP by setting Restart_AN and return to SFI mode */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, + IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | + IXGBE_AUTOC_AN_RESTART)); } setup_sfp_out: diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index ef816dd5a8f..0f80893edab 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1470,6 +1470,8 @@ #define IXGBE_ANLP1_PAUSE 0x0C00 #define IXGBE_ANLP1_SYM_PAUSE 0x0400 #define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + /* SW Semaphore Register bitmasks */ #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ @@ -2641,6 +2643,7 @@ struct ixgbe_info { #define IXGBE_ERR_NO_SPACE -25 #define IXGBE_ERR_OVERTEMP -26 #define IXGBE_ERR_RAR_INDEX -27 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 #define IXGBE_ERR_PBA_SECTION -31 #define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF -- cgit v1.2.3-70-g09d2 From 9f91170773d852e65e4fc36e1f8173ce614f62e1 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 13:24:05 +0000 Subject: ixgbe: fix enum type mismatch on disable laser Fixes a recent bug on the patch (c6ecf39a10ceec3e97096e2a8d3eadcecd593422) that disabled the laser on ifconfig down. Compilers were seeing a enum mismatch. Signed-off-by Don Skidmore Signed-off-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ixgbe/ixgbe_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index f97353cdb60..a12e86fccb0 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3800,7 +3800,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ if (hw->mac.ops.enable_tx_laser && ((hw->phy.multispeed_fiber) || - ((hw->phy.type == ixgbe_media_type_fiber) && + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && (hw->mac.type == ixgbe_mac_82599EB)))) hw->mac.ops.enable_tx_laser(hw); @@ -4122,7 +4122,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser && ((hw->phy.multispeed_fiber) || - ((hw->phy.type == ixgbe_media_type_fiber) && + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && (hw->mac.type == ixgbe_mac_82599EB)))) hw->mac.ops.disable_tx_laser(hw); @@ -7212,7 +7212,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ if (hw->mac.ops.disable_tx_laser && ((hw->phy.multispeed_fiber) || - ((hw->phy.type == ixgbe_media_type_fiber) && + ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && (hw->mac.type == ixgbe_mac_82599EB)))) hw->mac.ops.disable_tx_laser(hw); -- cgit v1.2.3-70-g09d2 From 667445008db3f45a760c235d771be0c9671e59e5 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Wed, 1 Dec 2010 19:59:50 +0000 Subject: Intel Wired LAN drivers: Use static const Based on work by Joe Perches Using static const to decrease data and overall object size. 
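A minimal illustration of why the conversion shrinks the objects (placeholder values, not taken from any of the drivers): a plain local array with an initializer has to be rebuilt on the stack every time the enclosing function runs, whereas a static const table is emitted once into read-only data and shared by all callers.

	/* before: initializer copied onto the stack on every call */
	u16 agc_reg_array[4] = { 0x10, 0x11, 0x12, 0x13 };

	/* after: one shared, read-only copy produced at compile time */
	static const u16 agc_reg_array[4] = { 0x10, 0x11, 0x12, 0x13 };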
CC: Joe Perches Signed-off-by: Jeff Kirsher Tested-by: Emil Tantilov --- drivers/net/e1000/e1000_hw.c | 20 ++++++++++---------- drivers/net/e1000/e1000_param.c | 13 +++++++------ drivers/net/e1000e/phy.c | 11 ++++++----- drivers/net/igb/e1000_phy.c | 11 ++++++----- drivers/net/ixgb/ixgb_param.c | 21 +++++++++++---------- drivers/net/ixgbe/ixgbe_ethtool.c | 24 ++++++++++++++---------- drivers/net/ixgbevf/ethtool.c | 18 +++++++++++------- 7 files changed, 65 insertions(+), 53 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index c7e242b69a1..77d08e697b7 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -4892,11 +4892,11 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ u16 cur_agc_value; u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; - u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = - { IGP01E1000_PHY_AGC_A, - IGP01E1000_PHY_AGC_B, - IGP01E1000_PHY_AGC_C, - IGP01E1000_PHY_AGC_D + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { @@ -5071,11 +5071,11 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) { s32 ret_val; u16 phy_data, phy_saved_data, speed, duplex, i; - u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = - { IGP01E1000_PHY_AGC_PARAM_A, - IGP01E1000_PHY_AGC_PARAM_B, - IGP01E1000_PHY_AGC_PARAM_C, - IGP01E1000_PHY_AGC_PARAM_D + static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D }; u16 min_length, max_length; diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index 10d8d98bb79..1301eba8b57 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c @@ -352,12 +352,13 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter) } { /* Flow Control */ - struct e1000_opt_list fc_list[] = - {{ E1000_FC_NONE, "Flow Control Disabled" }, - { E1000_FC_RX_PAUSE,"Flow Control Receive Only" }, - { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" }, - { E1000_FC_FULL, "Flow Control Enabled" }, - { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; opt = (struct e1000_option) { .type = list_option, diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 3d3dc0c8235..6ad90ccb4ba 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -1840,11 +1840,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; - u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = - {IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D}; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; /* Read 
the AGC registers for all channels */ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c index ddd036a7899..6694bf3e5ad 100644 --- a/drivers/net/igb/e1000_phy.c +++ b/drivers/net/igb/e1000_phy.c @@ -1757,11 +1757,12 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw) u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; - u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = - {IGP02E1000_PHY_AGC_A, - IGP02E1000_PHY_AGC_B, - IGP02E1000_PHY_AGC_C, - IGP02E1000_PHY_AGC_D}; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; /* Read the AGC registers for all channels */ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c index 88a08f05624..dd7fbeb1f7d 100644 --- a/drivers/net/ixgb/ixgb_param.c +++ b/drivers/net/ixgb/ixgb_param.c @@ -191,9 +191,9 @@ struct ixgb_option { } r; struct { /* list_option info */ int nr; - struct ixgb_opt_list { + const struct ixgb_opt_list { int i; - char *str; + const char *str; } *p; } l; } arg; @@ -226,7 +226,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) break; case list_option: { int i; - struct ixgb_opt_list *ent; + const struct ixgb_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; @@ -322,14 +322,15 @@ ixgb_check_options(struct ixgb_adapter *adapter) } { /* Flow Control */ - struct ixgb_opt_list fc_list[] = - {{ ixgb_fc_none, "Flow Control Disabled" }, - { ixgb_fc_rx_pause,"Flow Control Receive Only" }, - { ixgb_fc_tx_pause,"Flow Control Transmit Only" }, - { ixgb_fc_full, "Flow Control Enabled" }, - { ixgb_fc_default, "Flow Control Hardware Default" }}; + static const struct ixgb_opt_list fc_list[] = { + { ixgb_fc_none, "Flow Control Disabled" }, + { ixgb_fc_rx_pause, "Flow Control Receive Only" }, + { ixgb_fc_tx_pause, "Flow Control Transmit Only" }, + { ixgb_fc_full, "Flow Control Enabled" }, + { ixgb_fc_default, "Flow Control Hardware Default" } + }; - const struct ixgb_option opt = { + static const struct ixgb_option opt = { .type = list_option, .name = "Flow Control", .err = "reading default settings from EEPROM", diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index ef3f9105a05..90a740d77e5 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1157,7 +1157,7 @@ struct ixgbe_reg_test { #define TABLE64_TEST_HI 6 /* default 82599 register test */ -static struct ixgbe_reg_test reg_test_82599[] = { +static const struct ixgbe_reg_test reg_test_82599[] = { { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1181,7 +1181,7 @@ static struct ixgbe_reg_test reg_test_82599[] = { }; /* default 82598 register test */ -static struct ixgbe_reg_test reg_test_82598[] = { +static const struct ixgbe_reg_test reg_test_82598[] = { { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, @@ -1208,18 +1208,22 @@ static struct ixgbe_reg_test reg_test_82598[] = { { 0, 0, 0, 0 } }; +static const u32 register_test_patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 
0x00000000, 0xFFFFFFFF +}; + #define REG_PATTERN_TEST(R, M, W) \ { \ u32 pat, val, before; \ - const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ - for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \ + for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ before = readl(adapter->hw.hw_addr + R); \ - writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ + writel((register_test_patterns[pat] & W), \ + (adapter->hw.hw_addr + R)); \ val = readl(adapter->hw.hw_addr + R); \ - if (val != (_test[pat] & W & M)) { \ - e_err(drv, "pattern test reg %04X failed: got " \ - "0x%08X expected 0x%08X\n", \ - R, val, (_test[pat] & W & M)); \ + if (val != (register_test_patterns[pat] & W & M)) { \ + e_err(drv, "pattern test reg %04X failed: got " \ + "0x%08X expected 0x%08X\n", \ + R, val, (register_test_patterns[pat] & W & M)); \ *data = R; \ writel(before, adapter->hw.hw_addr + R); \ return 1; \ @@ -1246,7 +1250,7 @@ static struct ixgbe_reg_test reg_test_82598[] = { static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) { - struct ixgbe_reg_test *test; + const struct ixgbe_reg_test *test; u32 value, before, after; u32 i, toggle; diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c index 4cc817acfb6..fa29b3c8c46 100644 --- a/drivers/net/ixgbevf/ethtool.c +++ b/drivers/net/ixgbevf/ethtool.c @@ -544,7 +544,7 @@ struct ixgbevf_reg_test { #define TABLE64_TEST_HI 6 /* default VF register test */ -static struct ixgbevf_reg_test reg_test_vf[] = { +static const struct ixgbevf_reg_test reg_test_vf[] = { { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, @@ -557,19 +557,23 @@ static struct ixgbevf_reg_test reg_test_vf[] = { { 0, 0, 0, 0 } }; +static const u32 register_test_patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF +}; + #define REG_PATTERN_TEST(R, M, W) \ { \ u32 pat, val, before; \ - const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ - for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \ + for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ before = readl(adapter->hw.hw_addr + R); \ - writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ + writel((register_test_patterns[pat] & W), \ + (adapter->hw.hw_addr + R)); \ val = readl(adapter->hw.hw_addr + R); \ - if (val != (_test[pat] & W & M)) { \ + if (val != (register_test_patterns[pat] & W & M)) { \ hw_dbg(&adapter->hw, \ "pattern test reg %04X failed: got " \ "0x%08X expected 0x%08X\n", \ - R, val, (_test[pat] & W & M)); \ + R, val, (register_test_patterns[pat] & W & M)); \ *data = R; \ writel(before, adapter->hw.hw_addr + R); \ return 1; \ @@ -596,7 +600,7 @@ static struct ixgbevf_reg_test reg_test_vf[] = { static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) { - struct ixgbevf_reg_test *test; + const struct ixgbevf_reg_test *test; u32 i; test = reg_test_vf; -- cgit v1.2.3-70-g09d2 From 2b264909c660717a67da997a181a4a4f551ef9b6 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Thu, 9 Dec 2010 06:55:14 +0000 Subject: ixgbe: fix X540 phy id to correct value The existing PHY ID for X540 was from early production hardware and is no longer correct. This patch corrects that. 
Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_phy.c | 2 +- drivers/net/ixgbe/ixgbe_type.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index c445fbce56e..8f7123e8fc0 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -115,7 +115,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; - case AQ1202_PHY_ID: + case X540_PHY_ID: phy_type = ixgbe_phy_aq; break; case QT2022_PHY_ID: diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 0f80893edab..59f6d0afe0f 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -999,7 +999,7 @@ /* PHY IDs*/ #define TN1010_PHY_ID 0x00A19410 #define TNX_FW_REV 0xB -#define AQ1202_PHY_ID 0x03A1B440 +#define X540_PHY_ID 0x01540200 #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 #define AQ_FW_REV 0x20 -- cgit v1.2.3-70-g09d2 From d994653db465616a7bf27703e733170c47488cdf Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Thu, 9 Dec 2010 06:55:19 +0000 Subject: ixgbe: fix X540 to use it's own info struct This patch enables X540 hardware to use it's own set of support functions. This is useful as it has no need of SFP+ support. A couple minor bugs with the eeprom semaphore were also cleaned up. Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 16 ++++++++-------- drivers/net/ixgbe/ixgbe_x540.c | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fdb35d040d2..f2694f2b127 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -118,7 +118,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), - board_82599 }, + board_X540 }, /* required last entry */ {0, } @@ -1897,6 +1897,13 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: + ixgbe_check_sfp_event(adapter, eicr); + if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && + ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { + adapter->interrupt_event = eicr; + schedule_work(&adapter->check_overtemp_task); + } + /* now fallthrough to handle Flow Director */ case ixgbe_mac_X540: /* Handle Flow Director Full threshold interrupt */ if (eicr & IXGBE_EICR_FLOW_DIR) { @@ -1912,12 +1919,6 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data) schedule_work(&adapter->fdir_reinit_task); } } - ixgbe_check_sfp_event(adapter, eicr); - if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && - ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { - adapter->interrupt_event = eicr; - schedule_work(&adapter->check_overtemp_task); - } break; default: break; @@ -2508,7 +2509,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: - case ixgbe_mac_X540: ixgbe_check_sfp_event(adapter, eicr); if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) { diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index 9649fa727e3..cf88515c0ef 100644 --- 
a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -278,7 +278,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { s32 status; - if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM)) + if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) status = ixgbe_read_eerd_generic(hw, offset, data); else status = IXGBE_ERR_SWFW_SYNC; @@ -311,7 +311,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START; - if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM)) { + if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); if (status != 0) { hw_dbg(hw, "Eeprom write EEWR timed out\n"); -- cgit v1.2.3-70-g09d2 From 5136cad37b276e3e11c4f8ad0bcf9cb2eec0e5af Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 1 Dec 2010 05:47:05 +0000 Subject: ixgbe: fix ntuple support commit f62bbb5e62c6e4a91fb222d22bc46e8d4d7e59ef ixgbe: Update ixgbe to use new vlan accleration. removed ETH_FLAG_NTUPLE from the supported flags. This patch puts it back on to allow for setting ntuple via ethtool. CC: Jesse Gross Signed-off-by: Emil Tantilov Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 90a740d77e5..f2245ac7503 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -2202,7 +2202,7 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) need_reset = (data & ETH_FLAG_RXVLAN) != (netdev->features & NETIF_F_HW_VLAN_RX); - rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); if (rc) return rc; -- cgit v1.2.3-70-g09d2 From 9fe93afdd07aba52a018eb52784124579a80470e Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Fri, 3 Dec 2010 09:33:54 +0000 Subject: ixgbe: cleanup string function calls to use bound checking versions. Some minor cleanup to use string calls that use bound checks just to be extra safe. 
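A note on the sizeof() - 1 idiom used throughout the hunks below (illustrative snippet, not part of the patch): strncpy() does not NUL-terminate the destination when the source string is at least as long as the limit, so copying at most one byte less than the buffer size into a zero-initialized buffer keeps the result terminated.

	char dst[16] = "";	/* the whole array is zero-initialized */
	strncpy(dst, "a-source-longer-than-the-destination", sizeof(dst) - 1);
	/* dst[15] is still '\0', so dst remains a valid C string */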
Signed-off-by: Don Skidmore Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_ethtool.c | 5 +++-- drivers/net/ixgbe/ixgbe_main.c | 16 ++++++++-------- 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index f2245ac7503..23ff23e8b39 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -839,9 +839,10 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); char firmware_version[32]; - strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); + strncpy(drvinfo->driver, ixgbe_driver_name, + sizeof(drvinfo->driver) - 1); strncpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version)); + sizeof(drvinfo->version) - 1); snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", (adapter->eeprom_version & 0xF000) >> 12, diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index f2694f2b127..8af0fc05169 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2338,14 +2338,14 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) handler = SET_HANDLER(q_vector); if (handler == &ixgbe_msix_clean_rx) { - sprintf(q_vector->name, "%s-%s-%d", - netdev->name, "rx", ri++); + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); } else if (handler == &ixgbe_msix_clean_tx) { - sprintf(q_vector->name, "%s-%s-%d", - netdev->name, "tx", ti++); + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); } else if (handler == &ixgbe_msix_clean_many) { - sprintf(q_vector->name, "%s-%s-%d", - netdev->name, "TxRx", ri++); + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); ti++; } else { /* skip this unused q_vector */ @@ -7047,7 +7047,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->netdev_ops = &ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - strcpy(netdev->name, pci_name(pdev)); + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); adapter->bd_number = cards_found; @@ -7269,7 +7269,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); if (err) - strcpy(part_str, "Unknown"); + strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, hw->phy.sfp_type, -- cgit v1.2.3-70-g09d2 From 760141a53e5d72d4cc1d8c6e2a0232a24bedb36b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sun, 12 Dec 2010 16:45:14 +0100 Subject: igb[v],ixgbe: don't use flush_scheduled_work() All three drivers use flush_scheduled_work() similarly during driver detach. Replace it with explicit cancels. Signed-off-by: Tejun Heo Cc: "David S. 
Miller" Cc: e1000-devel@lists.sourceforge.net Cc: netdev@vger.kernel.org --- drivers/net/igb/igb_main.c | 9 ++++++--- drivers/net/igbvf/netdev.c | 7 ++++--- drivers/net/ixgbe/ixgbe_main.c | 11 +++++++---- 3 files changed, 17 insertions(+), 10 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 041f8e6f74f..62348fc60e5 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -2050,13 +2050,16 @@ static void __devexit igb_remove(struct pci_dev *pdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - /* flush_scheduled work may reschedule our watchdog task, so - * explicitly disable watchdog tasks from being rescheduled */ + /* + * The watchdog timer may be rescheduled, so explicitly + * disable watchdog from being rescheduled. + */ set_bit(__IGB_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); - flush_scheduled_work(); + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); #ifdef CONFIG_IGB_DCA if (adapter->flags & IGB_FLAG_DCA_ENABLED) { diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 8dbde2397c1..4fb023bce78 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -2825,13 +2825,14 @@ static void __devexit igbvf_remove(struct pci_dev *pdev) struct e1000_hw *hw = &adapter->hw; /* - * flush_scheduled work may reschedule our watchdog task, so - * explicitly disable watchdog tasks from being rescheduled + * The watchdog timer may be rescheduled, so explicitly + * disable it from being rescheduled. */ set_bit(__IGBVF_DOWN, &adapter->state); del_timer_sync(&adapter->watchdog_timer); - flush_scheduled_work(); + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); unregister_netdev(netdev); diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 8af0fc05169..ca9036de49f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -7373,13 +7373,15 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) struct net_device *netdev = adapter->netdev; set_bit(__IXGBE_DOWN, &adapter->state); - /* clear the module not found bit to make sure the worker won't - * reschedule + + /* + * The timers may be rescheduled, so explicitly disable them + * from being rescheduled. */ clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); del_timer_sync(&adapter->watchdog_timer); - del_timer_sync(&adapter->sfp_timer); + cancel_work_sync(&adapter->watchdog_task); cancel_work_sync(&adapter->sfp_task); cancel_work_sync(&adapter->multispeed_fiber_task); @@ -7387,7 +7389,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) cancel_work_sync(&adapter->fdir_reinit_task); - flush_scheduled_work(); + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + cancel_work_sync(&adapter->check_overtemp_task); #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { -- cgit v1.2.3-70-g09d2 From d3306c2974481ff9c539de22a37bb667e8694be2 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Thu, 18 Nov 2010 03:03:23 +0000 Subject: ixgbe: Warn on VF attempt to override Administratively set MAC/VLAN Print warnings to the system log when the VF attempts to override MAC/VLAN settings that were configured by the VMM Host administrator using the ip link set commands. 
Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_sriov.c | 40 +++++++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 11 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 6e3e94b5a5f..e01d0db8b51 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -227,6 +227,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) int entries; u16 *hash_list; int add, vid; + u8 *new_mac; retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); @@ -244,15 +245,22 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) if (msgbuf[0] == IXGBE_VF_RESET) { unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; - u8 *addr = (u8 *)(&msgbuf[1]); + new_mac = (u8 *)(&msgbuf[1]); e_info(probe, "VF Reset msg received from vf %d\n", vf); adapter->vfinfo[vf].clear_to_send = false; ixgbe_vf_reset_msg(adapter, vf); adapter->vfinfo[vf].clear_to_send = true; + if (is_valid_ether_addr(new_mac) && + !adapter->vfinfo[vf].pf_set_mac) + ixgbe_set_vf_mac(adapter, vf, vf_mac); + else + ixgbe_set_vf_mac(adapter, + vf, adapter->vfinfo[vf].vf_mac_addresses); + /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); + memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); /* * Piggyback the multicast filter type so VF can compute the * correct vectors @@ -271,14 +279,16 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) switch ((msgbuf[0] & 0xFFFF)) { case IXGBE_VF_SET_MAC_ADDR: - { - u8 *new_mac = ((u8 *)(&msgbuf[1])); - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) - ixgbe_set_vf_mac(adapter, vf, new_mac); - else - ixgbe_set_vf_mac(adapter, - vf, adapter->vfinfo[vf].vf_mac_addresses); + new_mac = ((u8 *)(&msgbuf[1])); + if (is_valid_ether_addr(new_mac) && + !adapter->vfinfo[vf].pf_set_mac) { + ixgbe_set_vf_mac(adapter, vf, new_mac); + } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, + new_mac, ETH_ALEN)) { + e_warn(drv, "VF %d attempted to override " + "administratively set MAC address\nReload " + "the VF driver to resume operations\n", vf); + retval = -1; } break; case IXGBE_VF_SET_MULTICAST: @@ -295,7 +305,15 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); + if (adapter->vfinfo[vf].pf_vlan) { + e_warn(drv, "VF %d attempted to override " + "administratively set VLAN configuration\n" + "Reload the VF driver to resume operations\n", + vf); + retval = -1; + } else { + retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); + } break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); -- cgit v1.2.3-70-g09d2 From 3377eba79e15671799876f82d30446e656aac5ad Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Tue, 7 Dec 2010 08:16:45 +0000 Subject: ixgbe: Add SR-IOV feature support to X540 Add X540 specific feature support to X540 Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 2 +- drivers/net/ixgbe/ixgbe_mbx.c | 4 +++- drivers/net/ixgbe/ixgbe_x540.c | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index ca9036de49f..c9056253020 100644 --- 
a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -6889,7 +6889,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; int err; - if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) + if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs) return; /* The 82599 supports up to 64 VFs per physical function diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index 027c628c3aa..ea82c5a1cd3 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c @@ -321,9 +321,11 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) switch (hw->mac.type) { case ixgbe_mac_82599EB: - case ixgbe_mac_X540: vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; default: break; } diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index cf88515c0ef..3a8923993ce 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -685,6 +685,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .fc_enable = &ixgbe_fc_enable_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = NULL, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { -- cgit v1.2.3-70-g09d2 From a985b6c31ff230a1246d921afbfc0f6a1386be83 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Thu, 18 Nov 2010 03:02:52 +0000 Subject: ixgbe: Add anti-spoofing feature support Add support for the anti-spoofing feature in the HW. Packets from VF devices with spoofed MAC addresses or VLAN tags will be blocked and a counter incremented. During the watchdog timer the spoofed packet dropped counter is read and if it is non-zero then a warning message is displayed on the host VMM's console. 
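The register layout behind the helpers added below: PFVFSPOOF is an array of 8 registers, each carrying 8 MAC anti-spoof enable bits (7:0) and 8 VLAN anti-spoof enable bits (15:8), so pool n maps to register n >> 3 and bit n % 8 (offset by IXGBE_SPOOF_VLANAS_SHIFT, i.e. 8, for the VLAN bits). A small illustrative helper, not part of the driver, showing that mapping:

	static void mark_vlan_antispoof(u32 pfvfspoof[8], int pool)
	{
		int reg = pool >> 3;		/* which PFVFSPOOF register */
		int bit = (pool % 8) + 8;	/* VLAN-AS bits start at bit 8 */

		pfvfspoof[reg] |= 1u << bit;
	}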
Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_82599.c | 2 ++ drivers/net/ixgbe/ixgbe_common.c | 64 ++++++++++++++++++++++++++++++++++++++++ drivers/net/ixgbe/ixgbe_common.h | 2 ++ drivers/net/ixgbe/ixgbe_main.c | 24 +++++++++++++++ drivers/net/ixgbe/ixgbe_sriov.c | 12 ++++++-- drivers/net/ixgbe/ixgbe_type.h | 13 +++++++- 6 files changed, 114 insertions(+), 3 deletions(-) (limited to 'drivers/net/ixgbe') diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 6827dddc383..bfd3c227cd4 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -2165,6 +2165,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .fc_enable = &ixgbe_fc_enable_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = &ixgbe_setup_sfp_modules_82599, + .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, + .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index cc11e422ce9..d5ede2df3e4 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -2809,3 +2809,67 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, wwn_prefix_out: return 0; } + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +{ + int j; + int pf_target_reg = pf >> 3; + int pf_target_shift = pf % 8; + u32 pfvfspoof = 0; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (enable) + pfvfspoof = IXGBE_SPOOF_MACAS_MASK; + + /* + * PFVFSPOOF register array is size 8 with 8 bits assigned to + * MAC anti-spoof enables in each register array element. + */ + for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* If not enabling anti-spoofing then done */ + if (!enable) + return; + + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. 
Reset the bit assigned to the PF + */ + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); + pfvfspoof ^= (1 << pf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); +} + +/** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index e1f980a8a09..66ed045a8cf 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -88,6 +88,8 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index c9056253020..38ab4f3f819 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3132,6 +3132,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* enable Tx loopback for VF/PF communication */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + /* Enable MAC Anti-Spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), + adapter->num_vfs); } static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) @@ -5960,6 +5963,26 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work) netif_tx_start_all_queues(adapter->netdev); } +static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) +{ + u32 ssvpc; + + /* Do not perform spoof check for 82598 */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + return; + + ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); + + /* + * ssvpc register is cleared on read, if zero then no + * spoofed packets in the last interval. 
+ */ + if (!ssvpc) + return; + + e_warn(drv, "%d Spoofed packets detected\n", ssvpc); +} + static DEFINE_MUTEX(ixgbe_watchdog_lock); /** @@ -6080,6 +6103,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) } } + ixgbe_spoof_check(adapter); ixgbe_update_stats(adapter); mutex_unlock(&ixgbe_watchdog_lock); } diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index e01d0db8b51..47b15738b00 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -215,6 +215,11 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) reg |= (reg | (1 << vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= (1 << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); + ixgbe_vf_reset_event(adapter, vf); } @@ -412,6 +417,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) return -EINVAL; @@ -420,7 +426,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) if (err) goto out; ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); - ixgbe_set_vmolr(&adapter->hw, vf, false); + ixgbe_set_vmolr(hw, vf, false); + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); adapter->vfinfo[vf].pf_vlan = vlan; adapter->vfinfo[vf].pf_qos = qos; dev_info(&adapter->pdev->dev, @@ -437,7 +444,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); ixgbe_set_vmvir(adapter, vlan, vf); - ixgbe_set_vmolr(&adapter->hw, vf, true); + ixgbe_set_vmolr(hw, vf, true); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); adapter->vfinfo[vf].pf_vlan = 0; adapter->vfinfo[vf].pf_qos = 0; } diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 59f6d0afe0f..446f3467d3c 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -230,6 +230,7 @@ #define IXGBE_VT_CTL 0x051B0 #define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) #define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) #define IXGBE_QDE 0x2F04 #define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ #define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) @@ -284,7 +285,8 @@ #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) #define IXGBE_DTXCTL 0x07E00 -#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ #define IXGBE_PFDTXGSWC 0x08220 #define IXGBE_DTXMXSZRQ 0x08100 #define IXGBE_DTXTCPFLGL 0x04A88 @@ -298,6 +300,13 @@ #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 + #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ /* Tx DCA Control register : 128 of these (0-127) */ #define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) @@ -2482,6 +2491,8 @@ struct ixgbe_mac_operations { s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); s32 
(*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *, s32); -- cgit v1.2.3-70-g09d2