author     Linus Torvalds <torvalds@linux-foundation.org>   2012-12-12 18:07:07 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-12-12 18:07:07 -0800
commit     6be35c700f742e911ecedd07fcc43d4439922334
tree       ca9f37214d204465fcc2d79c82efd291e357c53c /drivers/net/ethernet/intel/ixgbe
parent     e37aa63e87bd581f9be5555ed0ba83f5295c92fc
parent     520dfe3a3645257bf83660f672c47f8558f3d4c4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:
1) Allow the bridge multicast database to be dumped, monitored, and
changed via netlink. From Cong Wang.
2) RFC 5961 TCP blind data injection attack mitigation, from Eric
Dumazet.
3) Networking user namespace support from Eric W. Biederman.
4) tuntap/virtio-net multiqueue support by Jason Wang (a user-space
usage sketch follows this list).
5) Support for checksum offload of encapsulated packets (basically,
tunneled traffic can still be checksummed by HW). From Joseph
Gasparakis.
6) Allow BPF filter access to VLAN tags, from Eric Dumazet and
Daniel Borkmann (a socket-filter sketch follows this list).
7) Bridge port parameters over netlink and BPDU blocking support
from Stephen Hemminger.
8) Improve data access patterns during inet socket demux by rearranging
socket layout, from Eric Dumazet.
9) TIPC protocol updates and cleanups from Ying Xue, Paul Gortmaker, and
Jon Maloy.
10) Update TCP socket hash sizing to be more in line with current-day
realities. The existing heuristics were chosen a decade ago.
From Eric Dumazet.
11) Fix races, queue bloat, and excessive wakeups in ATM and
associated drivers, from Krzysztof Mazur and David Woodhouse.
12) Support DOVE (Distributed Overlay Virtual Ethernet) extensions
in VXLAN driver, from David Stevens.
13) Add "oops_only" mode to netconsole, from Amerigo Wang.
14) Support set and query of VEB/VEPA bridge mode via PF_BRIDGE, also
allow DCB netlink to work on namespaces other than the initial
namespace. From John Fastabend.
15) Support PTP in the Tigon3 driver, from Matt Carlson.
16) tun/vhost zero copy fixes and improvements, plus turn it on
by default, from Michael S. Tsirkin.
17) Support per-association statistics in SCTP, from Michele
Baldessari.
And many, many driver updates, cleanups, and improvements, too
numerous to mention individually.
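
A short user-space sketch of item 4's multiqueue tun/tap interface: each
queue is a separate open of /dev/net/tun with IFF_MULTI_QUEUE set in the
TUNSETIFF request. This is an illustration written for this summary, not
code from the merge; the tap_open_queue() helper name and its error
handling are mine.

```c
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open one queue of a multiqueue tap device; call once per queue with
 * the same interface name.  Returns the queue's file descriptor. */
static int tap_open_queue(const char *name)
{
    struct ifreq ifr;
    int fd = open("/dev/net/tun", O_RDWR);

    if (fd < 0)
        return -1;

    memset(&ifr, 0, sizeof(ifr));
    ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
    strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

    if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
        close(fd);
        return -1;
    }
    return fd;  /* read()/write() Ethernet frames on this queue */
}
```

Opening the same interface name repeatedly yields one descriptor per
queue, so a multi-threaded user (or vhost-net) can service the queues in
parallel.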
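Item 6 exposes the VLAN TCI to classic BPF through new ancillary loads.
A minimal socket-filter sketch follows, assuming the offsets are exported
to user space as SKF_AD_VLAN_TAG and SKF_AD_VLAN_TAG_PRESENT in
<linux/filter.h>; the VLAN ID 100, the instruction array, and the helper
name are illustrative only, not part of the merge.

```c
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

/* Classic BPF program: accept only frames tagged with VLAN ID 100. */
static struct sock_filter vlan100_insns[] = {
    /* A = 1 if the frame carried a VLAN tag, else 0 */
    BPF_STMT(BPF_LD  | BPF_W   | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 4, 0),   /* untagged -> drop */
    /* A = VLAN TCI (PCP/DEI/VID) */
    BPF_STMT(BPF_LD  | BPF_W   | BPF_ABS, SKF_AD_OFF + SKF_AD_VLAN_TAG),
    BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0x0fff),    /* keep only the VID */
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 100, 0, 1), /* VID != 100 -> drop */
    BPF_STMT(BPF_RET | BPF_K, 0xffff),              /* accept */
    BPF_STMT(BPF_RET | BPF_K, 0),                   /* drop */
};

/* Attach the filter to a raw packet socket and return the socket. */
static int open_vlan100_socket(void)
{
    struct sock_fprog prog = {
        .len    = sizeof(vlan100_insns) / sizeof(vlan100_insns[0]),
        .filter = vlan100_insns,
    };
    int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

    if (fd < 0)
        return -1;

    if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}
```

With hardware VLAN acceleration the tag is stripped from the packet bytes
and kept alongside the skb, so a filter reading packet data alone could
not see it; the ancillary loads read that stored TCI directly.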
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits)
net/mlx4_en: Add support for destination MAC in steering rules
net/mlx4_en: Use generic etherdevice.h functions.
net: ethtool: Add destination MAC address to flow steering API
bridge: add support of adding and deleting mdb entries
bridge: notify mdb changes via netlink
ndisc: Unexport ndisc_{build,send}_skb().
uapi: add missing netconf.h to export list
pkt_sched: avoid requeues if possible
solos-pci: fix double-free of TX skb in DMA mode
bnx2: Fix accidental reversions.
bna: Driver Version Updated to 3.1.2.1
bna: Firmware update
bna: Add RX State
bna: Rx Page Based Allocation
bna: TX Intr Coalescing Fix
bna: Tx and Rx Optimizations
bna: Code Cleanup and Enhancements
ath9k: check pdata variable before dereferencing it
ath5k: RX timestamp is reported at end of frame
ath9k_htc: RX timestamp is reported at end of frame
...
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile         |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h          |  15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c    | 160
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c   | 100
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h   |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c  | 115
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  | 109
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c     |   4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c      |  22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c     | 418
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h      |  31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c      | 164
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c    | 470
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h     |  12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c     |   2
15 files changed, 1019 insertions(+), 609 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 89f40e51fc1..f3a632bf8d9 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,11 +34,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o -ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 30efc9f0f47..8e786764c60 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -36,11 +36,9 @@ #include <linux/aer.h> #include <linux/if_vlan.h> -#ifdef CONFIG_IXGBE_PTP #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> -#endif /* CONFIG_IXGBE_PTP */ #include "ixgbe_type.h" #include "ixgbe_common.h" @@ -135,6 +133,7 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; + unsigned int vf_api; }; struct vf_macvlans { @@ -482,8 +481,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) +#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12) /* Tx fast path data */ int num_tx_queues; @@ -571,7 +571,6 @@ struct ixgbe_adapter { u32 interrupt_event; u32 led_reg; -#ifdef CONFIG_IXGBE_PTP struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; unsigned long last_overflow_check; @@ -580,8 +579,6 @@ struct ixgbe_adapter { struct timecounter tc; int rx_hwtstamp_filter; u32 base_incval; - u32 cycle_speed; -#endif /* CONFIG_IXGBE_PTP */ /* SR-IOV */ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); @@ -600,6 +597,8 @@ struct ixgbe_adapter { #ifdef CONFIG_DEBUG_FS struct dentry *ixgbe_dbg_adapter; #endif /*CONFIG_DEBUG_FS*/ + + u8 default_up; }; struct ixgbe_fdir_filter { @@ -691,6 +690,7 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id); extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); +extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); extern void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); @@ -739,7 +739,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) return netdev_get_tx_queue(ring->netdev, ring->queue_index); } -#ifdef CONFIG_IXGBE_PTP extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); @@ -751,7 +750,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, int cmd); extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); +extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); extern void 
ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); -#endif /* CONFIG_IXGBE_PTP */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 1077cb2b38d..1073aea5da4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -62,7 +62,6 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { @@ -99,9 +98,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = 0; - u32 reg_anlp1 = 0; - u32 i = 0; u16 list_offset, data_offset, data_value; + bool got_lock = false; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); @@ -137,28 +135,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); - /* Now restart DSP by setting Restart_AN and clearing LMS */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, - IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | - IXGBE_AUTOC_AN_RESTART)); - - /* Wait for AN to leave state 0 */ - for (i = 0; i < 10; i++) { - usleep_range(4000, 8000); - reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) - break; + /* Need SW/FW semaphore around AUTOC writes if LESM on, + * likewise reset_pipeline requires lock as it also writes + * AUTOC. + */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto setup_sfp_out; + + got_lock = true; + } + + /* Restart DSP and set SFI mode */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, + IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL)); + + ret_val = ixgbe_reset_pipeline_82599(hw); + + if (got_lock) { + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + got_lock = false; } - if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { - hw_dbg(hw, "sfp module setup not complete\n"); + + if (ret_val) { + hw_dbg(hw, " sfp module setup not complete\n"); ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; goto setup_sfp_out; } - - /* Restart DSP by setting Restart_AN and return to SFI mode */ - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, - IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | - IXGBE_AUTOC_AN_RESTART)); } setup_sfp_out: @@ -394,14 +400,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, u32 links_reg; u32 i; s32 status = 0; + bool got_lock = false; + + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status) + goto out; + + got_lock = true; + } /* Restart link */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == @@ -425,6 +443,7 @@ static s32 ixgbe_start_mac_link_82599(struct 
ixgbe_hw *hw, /* Add delay to filter out noises during initial link setup */ msleep(50); +out: return status; } @@ -779,6 +798,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, u32 links_reg; u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + bool got_lock = false; /* Check to see if speed passed in is supported. */ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, @@ -836,9 +856,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, } if (autoc != start_autoc) { + /* Need SW/FW semaphore around AUTOC writes if LESM is on, + * likewise reset_pipeline requires us to hold this lock as + * it also writes to AUTOC. + */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status != 0) + goto out; + + got_lock = true; + } + /* Restart link */ - autoc |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { @@ -994,9 +1031,28 @@ mac_reset_top: hw->mac.orig_autoc2 = autoc2; hw->mac.orig_link_settings_stored = true; } else { - if (autoc != hw->mac.orig_autoc) - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | - IXGBE_AUTOC_AN_RESTART)); + if (autoc != hw->mac.orig_autoc) { + /* Need SW/FW semaphore around AUTOC writes if LESM is + * on, likewise reset_pipeline requires us to hold + * this lock as it also writes to AUTOC. + */ + bool got_lock = false; + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (status) + goto reset_hw_out; + + got_lock = true; + } + + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { @@ -1022,7 +1078,7 @@ mac_reset_top: hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ - if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + if (is_valid_ether_addr(hw->mac.san_addr)) { hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, hw->mac.san_addr, 0, IXGBE_RAH_AV); @@ -1983,7 +2039,7 @@ fw_version_out: * Returns true if the LESM FW module is present and enabled. Otherwise * returns false. Smart Speed must be disabled if LESM FW module is enabled. **/ -static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; @@ -2080,6 +2136,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, return ret_val; } +/** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * + * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure + * full pipeline reset. Note - We must hold the SW/FW semaphore before writing + * to AUTOC, so this function assumes the semaphore is held. 
+ **/ +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +{ + s32 i, autoc_reg, ret_val; + s32 anlp1_reg = 0; + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + usleep_range(4000, 8000); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { + hw_dbg(hw, "auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + + ret_val = 0; + +reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + + return ret_val; +} + static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index dbf37e4a45f..5e68afdd502 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -65,13 +65,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); * function check the device id to see if the associated phy supports * autoneg flow control. **/ -static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: - return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; default: @@ -90,6 +89,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) s32 ret_val = 0; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; + bool got_lock = false; /* * Validate the requested mode. Strict IEEE mode does not allow @@ -210,8 +210,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) * */ if (hw->phy.media_type == ixgbe_media_type_backplane) { - reg_bp |= IXGBE_AUTOC_AN_RESTART; + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on, likewise reset_pipeline requries the lock as + * it also writes AUTOC. + */ + if ((hw->mac.type == ixgbe_mac_82599EB) && + ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto out; + + got_lock = true; + } + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); + + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && (ixgbe_device_supports_autoneg_fc(hw) == 0)) { hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, @@ -1762,30 +1783,6 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) } /** - * ixgbe_validate_mac_addr - Validate MAC address - * @mac_addr: pointer to MAC address. 
- * - * Tests a MAC address to ensure it is a valid Individual Address - **/ -s32 ixgbe_validate_mac_addr(u8 *mac_addr) -{ - s32 status = 0; - - /* Make sure it is not a multicast address */ - if (IXGBE_IS_MULTICAST(mac_addr)) - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Not a broadcast address */ - else if (IXGBE_IS_BROADCAST(mac_addr)) - status = IXGBE_ERR_INVALID_MAC_ADDR; - /* Reject the zero address */ - else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) - status = IXGBE_ERR_INVALID_MAC_ADDR; - - return status; -} - -/** * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write @@ -1889,8 +1886,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ - if (ixgbe_validate_mac_addr(hw->mac.addr) == - IXGBE_ERR_INVALID_MAC_ADDR) { + if (!is_valid_ether_addr(hw->mac.addr)) { /* Get the MAC address from the RAR0 for later reference */ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); @@ -2617,6 +2613,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = 0; /* * Link must be up to auto-blink the LEDs; @@ -2625,10 +2622,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on. + */ + bool got_lock = false; + + if ((hw->mac.type == ixgbe_mac_82599EB) && + ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto out; + + got_lock = true; + } autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); IXGBE_WRITE_FLUSH(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); usleep_range(10000, 20000); } @@ -2637,7 +2652,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** @@ -2649,18 +2665,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + s32 ret_val = 0; + bool got_lock = false; + + /* Need the SW/FW semaphore around AUTOC writes if 82599 and + * LESM is on. 
+ */ + if ((hw->mac.type == ixgbe_mac_82599EB) && + ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val) + goto out; + + got_lock = true; + } autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + if (hw->mac.type == ixgbe_mac_82599EB) + ixgbe_reset_pipeline_82599(hw); + + if (got_lock) + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - return 0; +out: + return ret_val; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index d813d1188c3..f7a0970a251 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -78,9 +78,9 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); @@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 #define IXGBE_EMC_INTERNAL_DATA 0x00 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index 8d3a2188909..50aa546b8c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -37,20 +37,6 @@ static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; /** - * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened - * @inode: inode that was opened - * @filp: file info - * - * Stash the adapter pointer hiding in the inode into the file pointer where - * we can find it later in the read and write calls - **/ -static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp) -{ - filp->private_data = inode->i_private; - return 0; -} - -/** * ixgbe_dbg_reg_ops_read - read for reg_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read @@ -61,23 +47,27 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - char buf[256]; - int bytes_not_copied; + char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; - len = snprintf(buf, sizeof(buf), "%s: %s\n", - adapter->netdev->name, ixgbe_dbg_reg_ops_buf); - if (count < len) + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); return -ENOSPC; - bytes_not_copied = copy_to_user(buffer, buf, len); - if (bytes_not_copied < 0) - return bytes_not_copied; + } + + len = simple_read_from_buffer(buffer, count, ppos, 
buf, strlen(buf)); - *ppos = len; + kfree(buf); return len; } @@ -93,7 +83,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - int bytes_not_copied; + int len; /* don't allow partial writes */ if (*ppos != 0) @@ -101,14 +91,15 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) return -ENOSPC; - bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied < count) - count -= bytes_not_copied; - else - return -ENOSPC; - ixgbe_dbg_reg_ops_buf[count] = '\0'; + len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf, + sizeof(ixgbe_dbg_reg_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + return len; + + ixgbe_dbg_reg_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { u32 reg, value; @@ -142,7 +133,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, static const struct file_operations ixgbe_dbg_reg_ops_fops = { .owner = THIS_MODULE, - .open = ixgbe_dbg_reg_ops_open, + .open = simple_open, .read = ixgbe_dbg_reg_ops_read, .write = ixgbe_dbg_reg_ops_write, }; @@ -150,20 +141,6 @@ static const struct file_operations ixgbe_dbg_reg_ops_fops = { static char ixgbe_dbg_netdev_ops_buf[256] = ""; /** - * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item - * @inode: inode that was opened - * @filp: file info - * - * Stash the adapter pointer hiding in the inode into the file pointer - * where we can find it later in the read and write calls - **/ -static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp) -{ - filp->private_data = inode->i_private; - return 0; -} - -/** * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read @@ -175,23 +152,27 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - char buf[256]; - int bytes_not_copied; + char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; - len = snprintf(buf, sizeof(buf), "%s: %s\n", - adapter->netdev->name, ixgbe_dbg_netdev_ops_buf); - if (count < len) + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + adapter->netdev->name, + ixgbe_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); return -ENOSPC; - bytes_not_copied = copy_to_user(buffer, buf, len); - if (bytes_not_copied < 0) - return bytes_not_copied; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - *ppos = len; + kfree(buf); return len; } @@ -207,7 +188,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, size_t count, loff_t *ppos) { struct ixgbe_adapter *adapter = filp->private_data; - int bytes_not_copied; + int len; /* don't allow partial writes */ if (*ppos != 0) @@ -215,15 +196,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) return -ENOSPC; - bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf, - buffer, count); - if (bytes_not_copied < 0) - return bytes_not_copied; - else if (bytes_not_copied < count) - count -= bytes_not_copied; - else - return -ENOSPC; - ixgbe_dbg_netdev_ops_buf[count] = '\0'; + len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf, + sizeof(ixgbe_dbg_netdev_ops_buf)-1, + ppos, + buffer, + count); + if (len < 0) + 
return len; + + ixgbe_dbg_netdev_ops_buf[len] = '\0'; if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); @@ -238,7 +219,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, static const struct file_operations ixgbe_dbg_netdev_ops_fops = { .owner = THIS_MODULE, - .open = ixgbe_dbg_netdev_ops_open, + .open = simple_open, .read = ixgbe_dbg_netdev_ops_read, .write = ixgbe_dbg_netdev_ops_write, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 116f0e901be..32685842434 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -383,6 +383,11 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return -EINVAL; + /* some devices do not support autoneg of link flow control */ + if ((pause->autoneg == AUTONEG_ENABLE) && + (ixgbe_device_supports_autoneg_fc(hw) != 0)) + return -EINVAL; + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) @@ -887,24 +892,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; + struct ixgbe_ring *temp_ring; int i, err = 0; u32 new_rx_count, new_tx_count; - bool need_update = false; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); - new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD); - new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD); + new_tx_count = clamp_t(u32, ring->tx_pending, + IXGBE_MIN_TXD, IXGBE_MAX_TXD); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - if ((new_tx_count == adapter->tx_ring[0]->count) && - (new_rx_count == adapter->rx_ring[0]->count)) { + new_rx_count = clamp_t(u32, ring->rx_pending, + IXGBE_MIN_RXD, IXGBE_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) { /* nothing to do */ return 0; } @@ -922,81 +926,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev, goto clear_reset; } - temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); - if (!temp_tx_ring) { + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); + + if (!temp_ring) { err = -ENOMEM; goto clear_reset; } + ixgbe_down(adapter); + + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ if (new_tx_count != adapter->tx_ring_count) { for (i = 0; i < adapter->num_tx_queues; i++) { - memcpy(&temp_tx_ring[i], adapter->tx_ring[i], + memcpy(&temp_ring[i], adapter->tx_ring[i], sizeof(struct ixgbe_ring)); - temp_tx_ring[i].count = new_tx_count; - err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); + + temp_ring[i].count = new_tx_count; + err = ixgbe_setup_tx_resources(&temp_ring[i]); if (err) { while (i) { i--; - ixgbe_free_tx_resources(&temp_tx_ring[i]); + ixgbe_free_tx_resources(&temp_ring[i]); } - goto clear_reset; + goto err_setup; } } - need_update = true; - } - temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); - if (!temp_rx_ring) { - err = -ENOMEM; - goto err_setup; + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbe_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); + } + + adapter->tx_ring_count = new_tx_count; } + /* Repeat the process for the Rx rings if needed */ if (new_rx_count != adapter->rx_ring_count) { for (i = 0; i < adapter->num_rx_queues; i++) { - memcpy(&temp_rx_ring[i], adapter->rx_ring[i], + memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); - temp_rx_ring[i].count = new_rx_count; - err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); + + temp_ring[i].count = new_rx_count; + err = ixgbe_setup_rx_resources(&temp_ring[i]); if (err) { while (i) { i--; - ixgbe_free_rx_resources(&temp_rx_ring[i]); + ixgbe_free_rx_resources(&temp_ring[i]); } goto err_setup; } + } - need_update = true; - } - /* if rings need to be updated, here's the place to do it in one shot */ - if (need_update) { - ixgbe_down(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_free_rx_resources(adapter->rx_ring[i]); - /* tx */ - if (new_tx_count != adapter->tx_ring_count) { - for (i = 0; i < adapter->num_tx_queues; i++) { - ixgbe_free_tx_resources(adapter->tx_ring[i]); - memcpy(adapter->tx_ring[i], &temp_tx_ring[i], - sizeof(struct ixgbe_ring)); - } - adapter->tx_ring_count = new_tx_count; + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct ixgbe_ring)); } - /* rx */ - if (new_rx_count != adapter->rx_ring_count) { - for (i = 0; i < adapter->num_rx_queues; i++) { - ixgbe_free_rx_resources(adapter->rx_ring[i]); - memcpy(adapter->rx_ring[i], &temp_rx_ring[i], - sizeof(struct ixgbe_ring)); - } - adapter->rx_ring_count = new_rx_count; - } - ixgbe_up(adapter); + adapter->rx_ring_count = new_rx_count; } - vfree(temp_rx_ring); err_setup: - vfree(temp_tx_ring); + ixgbe_up(adapter); + vfree(temp_ring); clear_reset: clear_bit(__IXGBE_RESETTING, &adapter->state); return err; @@ -2669,7 +2672,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, struct ixgbe_adapter *adapter = netdev_priv(dev); switch (adapter->hw.mac.type) { -#ifdef CONFIG_IXGBE_PTP case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -2695,7 +2697,6 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; -#endif /* CONFIG_IXGBE_PTP */ default: return ethtool_op_get_ts_info(dev, info); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ae73ef14fdf..252850d9a3e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev) return -EINVAL; e_info(drv, "Enabling FCoE offload features.\n"); + + if 
(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n"); + if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 17ecbcedd54..8c74f739011 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* setup affinity mask and node */ if (cpu != -1) cpumask_set_cpu(cpu, &q_vector->affinity_mask); - else - cpumask_copy(&q_vector->affinity_mask, cpu_online_mask); q_vector->numa_node = node; +#ifdef CONFIG_IXGBE_DCA + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +#endif /* initialize NAPI */ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll, 64); @@ -821,6 +824,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize pointer to rings */ ring = q_vector->ring; + /* intialize ITR */ + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_10K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + while (txr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fa3d552e1f4..20a5af6d87d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -44,6 +44,7 @@ #include <linux/ethtool.h> #include <linux/if.h> #include <linux/if_vlan.h> +#include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> @@ -62,11 +63,7 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define MAJ 3 -#define MIN 9 -#define BUILD 15 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ - __stringify(BUILD) "-k" +#define DRV_VERSION "3.11.33-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2012 Intel Corporation."; @@ -335,11 +332,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) goto exit; dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); - pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + pr_info(" %s %s %s %s\n", + "Queue [NTU] [NTC] [bi(ntc)->dma ]", + "leng", "ntw", "timestamp"); for (n = 0; n < adapter->num_tx_queues; n++) { tx_ring = adapter->tx_ring[n]; tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; - pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", + pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), @@ -355,13 +354,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) /* Transmit Descriptor Formats * - * Advanced Transmit Descriptor + * 82598 Advanced Transmit Descriptor * +--------------------------------------------------------------+ * 0 | Buffer Address [63:0] | * +--------------------------------------------------------------+ - * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | + * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | * +--------------------------------------------------------------+ * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 + * + * 82598 Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | NXTSEQ | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 + * + * 82599+ Advanced Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +--------------------------------------------------------------+ + * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | + * +--------------------------------------------------------------+ + * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 + * + * 82599+ Advanced Transmit Descriptor (Write-Back Format) + * +--------------------------------------------------------------+ + * 0 | RSV [63:0] | + * +--------------------------------------------------------------+ + * 8 | RSV | STA | RSV | + * +--------------------------------------------------------------+ + * 63 36 35 32 31 0 */ for (n = 0; n < adapter->num_tx_queues; n++) { @@ -369,40 +392,43 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("------------------------------------\n"); pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("T [desc] [address 63:0 ] " - "[PlPOIdStDDt Ln] [bi->dma ] " - "leng ntw timestamp bi->skb\n"); + pr_info("%s%s %s %s %s %s\n", + "T [desc] [address 63:0 ] ", + "[PlPOIdStDDt Ln] [bi->dma ] ", + "leng", "ntw", "timestamp", "bi->skb"); for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { tx_desc = IXGBE_TX_DESC(tx_ring, i); tx_buffer = &tx_ring->tx_buffer_info[i]; u0 = (struct my_u0 *)tx_desc; - pr_info("T [0x%03X] %016llX %016llX %016llX" - " %04X %p %016llX %p", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), - (u64)dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - tx_buffer->next_to_watch, - (u64)tx_buffer->time_stamp, - tx_buffer->skb); - if (i == tx_ring->next_to_use && - i == 
tx_ring->next_to_clean) - pr_cont(" NTC/U\n"); - else if (i == tx_ring->next_to_use) - pr_cont(" NTU\n"); - else if (i == tx_ring->next_to_clean) - pr_cont(" NTC\n"); - else - pr_cont("\n"); - - if (netif_msg_pktdata(adapter) && - tx_buffer->skb) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - tx_buffer->skb->data, + if (dma_unmap_len(tx_buffer, len) > 0) { + pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", + i, + le64_to_cpu(u0->a), + le64_to_cpu(u0->b), + (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), - true); + tx_buffer->next_to_watch, + (u64)tx_buffer->time_stamp, + tx_buffer->skb); + if (i == tx_ring->next_to_use && + i == tx_ring->next_to_clean) + pr_cont(" NTC/U\n"); + else if (i == tx_ring->next_to_use) + pr_cont(" NTU\n"); + else if (i == tx_ring->next_to_clean) + pr_cont(" NTC\n"); + else + pr_cont("\n"); + + if (netif_msg_pktdata(adapter) && + tx_buffer->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, 1, + tx_buffer->skb->data, + dma_unmap_len(tx_buffer, len), + true); + } } } @@ -422,7 +448,9 @@ rx_ring_summary: dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); - /* Advanced Receive Descriptor (Read) Format + /* Receive Descriptor Formats + * + * 82598 Advanced Receive Descriptor (Read) Format * 63 1 0 * +-----------------------------------------------------+ * 0 | Packet Buffer Address [63:1] |A0/NSE| @@ -431,27 +459,52 @@ rx_ring_summary: * +-----------------------------------------------------+ * * - * Advanced Receive Descriptor (Write-Back) Format + * 82598 Advanced Receive Descriptor (Write-Back) Format * * 63 48 47 32 31 30 21 20 16 15 4 3 0 * +------------------------------------------------------+ - * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | - * | Checksum Ident | | | | Type | Type | + * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | + * | Packet | IP | | | | Type | Type | + * | Checksum | Ident | | | | | | * +------------------------------------------------------+ * 8 | VLAN Tag | Length | Extended Error | Extended Status | * +------------------------------------------------------+ * 63 48 47 32 31 20 19 0 + * + * 82599+ Advanced Receive Descriptor (Read) Format + * 63 1 0 + * +-----------------------------------------------------+ + * 0 | Packet Buffer Address [63:1] |A0/NSE| + * +----------------------------------------------+------+ + * 8 | Header Buffer Address [63:1] | DD | + * +-----------------------------------------------------+ + * + * + * 82599+ Advanced Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 30 21 20 17 16 4 3 0 + * +------------------------------------------------------+ + * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | + * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | + * |/ Flow Dir Flt ID | | | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 */ + for (n = 0; n < adapter->num_rx_queues; n++) { rx_ring = adapter->rx_ring[n]; pr_info("------------------------------------\n"); pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); pr_info("------------------------------------\n"); - pr_info("R [desc] [ PktBuf A0] " - "[ HeadBuf DD] [bi->dma ] [bi->skb] " + pr_info("%s%s%s", + "R [desc] [ PktBuf A0] ", + "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", "<-- Adv Rx Read format\n"); - pr_info("RWB[desc] [PcsmIpSHl PtRs] " - "[vl er S cks ln] ---------------- [bi->skb] " + 
pr_info("%s%s%s", + "RWB[desc] [PcsmIpSHl PtRs] ", + "[vl er S cks ln] ---------------- [bi->skb ] ", "<-- Adv Rx Write-Back format\n"); for (i = 0; i < rx_ring->count; i++) { @@ -646,6 +699,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *hwstats = &adapter->stats; u32 xoff[8] = {0}; + u8 tc; int i; bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; @@ -659,21 +713,26 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) /* update stats for each tc, only valid with PFC enabled */ for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { + u32 pxoffrxc; + switch (hw->mac.type) { case ixgbe_mac_82598EB: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); break; default: - xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } - hwstats->pxoffrxc[i] += xoff[i]; + hwstats->pxoffrxc[i] += pxoffrxc; + /* Get the TC for given UP */ + tc = netdev_get_prio_tc_map(adapter->netdev, i); + xoff[tc] += pxoffrxc; } /* disarm tx queues that have received xoff frames */ for (i = 0; i < adapter->num_tx_queues; i++) { struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - u8 tc = tx_ring->dcb_tc; + tc = tx_ring->dcb_tc; if (xoff[tc]) clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); } @@ -791,10 +850,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; -#ifdef CONFIG_IXGBE_PTP if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); -#endif /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); @@ -967,7 +1024,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, * which will cause the DCA tag to be cleared. 
*/ rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | - IXGBE_DCA_RXCTRL_DATA_DCA_EN | IXGBE_DCA_RXCTRL_DESC_DCA_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); @@ -1244,6 +1300,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, struct vlan_hdr *vlan; /* l3 headers */ struct iphdr *ipv4; + struct ipv6hdr *ipv6; } hdr; __be16 protocol; u8 nexthdr = 0; /* default to not TCP */ @@ -1281,20 +1338,30 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, if (hlen < sizeof(struct iphdr)) return hdr.network - data; + /* record next protocol if header is present */ + if (!hdr.ipv4->frag_off) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == __constant_htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + /* record next protocol */ - nexthdr = hdr.ipv4->protocol; - hdr.network += hlen; + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); #ifdef IXGBE_FCOE } else if (protocol == __constant_htons(ETH_P_FCOE)) { if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) return max_len; - hdr.network += FCOE_HEADER_LEN; + hlen = FCOE_HEADER_LEN; #endif } else { return hdr.network - data; } - /* finally sort out TCP */ + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP/UDP */ if (nexthdr == IPPROTO_TCP) { if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) return max_len; @@ -1307,6 +1374,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return hdr.network - data; hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); } /* @@ -1369,9 +1441,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); -#ifdef CONFIG_IXGBE_PTP ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); -#endif if ((dev->features & NETIF_F_HW_VLAN_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@ -1781,7 +1851,7 @@ dma_sync: **/ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *rx_ring, - int budget) + const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE @@ -1832,7 +1902,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; /* populate checksum, timestamp, VLAN, and protocol */ ixgbe_process_skb_fields(rx_ring, rx_desc, skb); @@ -1865,8 +1934,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ixgbe_rx_skb(q_vector, skb); /* update budget accounting */ - budget--; - } while (likely(budget)); + total_rx_packets++; + } while (likely(total_rx_packets < budget)); u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1878,7 +1947,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (cleaned_count) ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - return !!budget; + return (total_rx_packets < budget); } /** @@ -1914,20 +1983,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) ixgbe_for_each_ring(ring, q_vector->tx) ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); - if (q_vector->tx.ring && !q_vector->rx.ring) { - /* tx only vector */ - if (adapter->tx_itr_setting == 1) - q_vector->itr = IXGBE_10K_ITR; - else - q_vector->itr = adapter->tx_itr_setting; - } else { - /* rx or rx/tx vector */ - if (adapter->rx_itr_setting 
== 1) - q_vector->itr = IXGBE_20K_ITR; - else - q_vector->itr = adapter->rx_itr_setting; - } - ixgbe_write_eitr(q_vector); } @@ -2324,10 +2379,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } -#ifdef CONFIG_IXGBE_PTP if (adapter->hw.mac.type == ixgbe_mac_X540) mask |= IXGBE_EIMS_TIMESYNC; -#endif if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) @@ -2393,10 +2446,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); -#ifdef CONFIG_IXGBE_PTP if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) ixgbe_ptp_check_pps_event(adapter, eicr); -#endif /* re-enable the original interrupt state, no lsc, no queues */ if (!test_bit(__IXGBE_DOWN, &adapter->state)) @@ -2588,10 +2639,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); -#ifdef CONFIG_IXGBE_PTP if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) ixgbe_ptp_check_pps_event(adapter, eicr); -#endif /* would disable interrupts here but EIAM disabled it */ napi_schedule(&q_vector->napi); @@ -2699,12 +2748,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; - /* rx/tx vector */ - if (adapter->rx_itr_setting == 1) - q_vector->itr = IXGBE_20K_ITR; - else - q_vector->itr = adapter->rx_itr_setting; - ixgbe_write_eitr(q_vector); ixgbe_set_ivar(adapter, 0, 0, 0); @@ -3132,14 +3175,6 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ixgbe_configure_srrctl(adapter, ring); ixgbe_configure_rscctl(adapter, ring); - /* If operating in IOV mode set RLPML for X540 */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && - hw->mac.type == ixgbe_mac_X540) { - rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; - rxdctl |= ((ring->netdev->mtu + ETH_HLEN + - ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); - } - if (hw->mac.type == ixgbe_mac_82598EB) { /* * enable cache line friendly hardware writes: @@ -3211,7 +3246,8 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); @@ -3234,8 +3270,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - /* enable Tx loopback for VF/PF communication */ - IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Enable MAC Anti-Spoofing */ hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), @@ -3263,6 +3297,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif /* IXGBE_FCOE */ + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) + max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { mhadd &= ~IXGBE_MHADD_MFS_MASK; @@ -3271,9 +3310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } - /* MHADD will allow an 
extra 4 bytes past for vlan tagged frames */ - max_frame += VLAN_HLEN; - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ hlreg0 |= IXGBE_HLREG0_JUMBOEN; @@ -4072,11 +4108,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) else ixgbe_configure_msi_and_legacy(adapter); - /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.enable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) + /* enable the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); clear_bit(__IXGBE_DOWN, &adapter->state); @@ -4192,6 +4225,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) /* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + ixgbe_ptp_reset(adapter); } /** @@ -4393,11 +4429,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) ixgbe_reset(adapter); - /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); ixgbe_clean_all_tx_rings(adapter); @@ -4429,11 +4462,12 @@ static void ixgbe_tx_timeout(struct net_device *netdev) * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ -static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) +static int ixgbe_sw_init(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; unsigned int rss; + u32 fwsm; #ifdef CONFIG_IXGBE_DCB int j; struct tc_configuration *tc; @@ -4457,7 +4491,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->max_q_vectors = MAX_Q_VECTORS_82598; break; case ixgbe_mac_X540: - adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + if (fwsm & IXGBE_FWSM_TS_ENABLED) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; case ixgbe_mac_82599EB: adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; @@ -4533,7 +4569,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) ixgbe_pbthresh_setup(adapter); hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; hw->fc.send_xon = true; - hw->fc.disable_fc_autoneg = false; + hw->fc.disable_fc_autoneg = + (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true; #ifdef CONFIG_PCI_IOV /* assign number of SR-IOV VFs */ @@ -4828,14 +4865,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; /* - * For 82599EB we cannot allow PF to change MTU greater than 1500 - * in SR-IOV mode as it may cause buffer overruns in guest VFs that - * don't allocate and chain buffers correctly. + * For 82599EB we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a + * warning that legacy VFs will be disabled. 
*/ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (adapter->hw.mac.type == ixgbe_mac_82599EB) && (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) - return -EINVAL; + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); @@ -4901,6 +4938,8 @@ static int ixgbe_open(struct net_device *netdev) if (err) goto err_set_queues; + ixgbe_ptp_init(adapter); + ixgbe_up_complete(adapter); return 0; @@ -4932,6 +4971,8 @@ static int ixgbe_close(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + ixgbe_ptp_stop(adapter); + ixgbe_down(adapter); ixgbe_free_irq(adapter); @@ -5022,14 +5063,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) if (wufc) { ixgbe_set_rx_mode(netdev); - /* - * enable the optics for both mult-speed fiber and - * 82599 SFP+ fiber as we can WoL. - */ - if (hw->mac.ops.enable_tx_laser && - (hw->phy.multispeed_fiber || - (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber && - hw->mac.type == ixgbe_mac_82599EB))) + /* enable the optics for 82599 SFP+ fiber as we can WoL */ + if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); /* turn on all-multi mode if wake on multicast is enabled */ @@ -5442,6 +5477,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) adapter->link_speed = link_speed; } +static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { + .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .protocol = 0, + }; + u8 up = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) + up = dcb_ieee_getapp_mask(netdev, &app); + + adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; +#endif +} + /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message @@ -5482,9 +5534,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) break; } -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_start_cyclecounter(adapter); -#endif + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 
@@ -5501,6 +5552,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) netif_carrier_on(netdev); ixgbe_check_vf_rate_limit(adapter); + /* update the default user priority for VFs */ + ixgbe_update_default_up(adapter); + /* ping all the active vfs to let them know link has changed */ ixgbe_ping_all_vfs(adapter); } @@ -5526,9 +5580,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_start_cyclecounter(adapter); -#endif + if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) + ixgbe_ptp_start_cyclecounter(adapter); e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); @@ -5833,9 +5886,7 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_watchdog_subtask(adapter); ixgbe_fdir_reinit_subtask(adapter); ixgbe_check_hang_subtask(adapter); -#ifdef CONFIG_IXGBE_PTP ixgbe_ptp_overflow_check(adapter); -#endif ixgbe_service_event_complete(adapter); } @@ -5988,10 +6039,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); -#ifdef CONFIG_IXGBE_PTP if (tx_flags & IXGBE_TX_FLAGS_TSTAMP) cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP); -#endif /* set segmentation enable bits for TSO/FSO */ #ifdef IXGBE_FCOE @@ -6393,12 +6442,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, skb_tx_timestamp(skb); -#ifdef CONFIG_IXGBE_PTP if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; } -#endif #ifdef CONFIG_PCI_IOV /* @@ -6485,6 +6532,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, if (skb_pad(skb, 17 - skb->len)) return NETDEV_TX_OK; skb->len = 17; + skb_set_tail_pointer(skb, 17); } tx_ring = adapter->tx_ring[skb->queue_mapping]; @@ -6547,10 +6595,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) struct ixgbe_adapter *adapter = netdev_priv(netdev); switch (cmd) { -#ifdef CONFIG_IXGBE_PTP case SIOCSHWTSTAMP: return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd); -#endif default: return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); } @@ -6910,13 +6956,16 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return -EOPNOTSUPP; - if (ndm->ndm_state & NUD_PERMANENT) { + /* Hardware does not support aging addresses so if a + * ndm_state is given only allow permanent addresses + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { pr_info("%s: FDB only supports static addresses\n", ixgbe_driver_name); return -EINVAL; } - if (is_unicast_ether_addr(addr)) { + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; if (netdev_uc_count(dev) < rar_uc_entries) @@ -6974,6 +7023,61 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb, return idx; } +static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + u32 reg = 0; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == 
BRIDGE_MODE_VEPA) { + reg = 0; + adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; + } else if (mode == BRIDGE_MODE_VEB) { + reg = IXGBE_PFDTXGSWC_VT_LBEN; + adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + } else + return -EINVAL; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + } + + return 0; +} + +static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + + if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + mode = BRIDGE_MODE_VEB; + else + mode = BRIDGE_MODE_VEPA; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -7013,6 +7117,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_fdb_add = ixgbe_ndo_fdb_add, .ndo_fdb_del = ixgbe_ndo_fdb_del, .ndo_fdb_dump = ixgbe_ndo_fdb_dump, + .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, }; /** @@ -7042,6 +7148,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, break; case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: + case IXGBE_SUBDEV_ID_82599_ECNA_DP: is_wol_supported = 1; break; } @@ -7079,8 +7186,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ -static int __devinit ixgbe_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) +static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct ixgbe_adapter *adapter = NULL; @@ -7340,7 +7446,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); - if (ixgbe_validate_mac_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->perm_addr)) { e_dev_err("invalid MAC address\n"); err = -EIO; goto err_sw_init; @@ -7364,10 +7470,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); -#ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_init(adapter); -#endif /* CONFIG_IXGBE_PTP*/ - /* save off EEPROM version number */ hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); @@ -7420,11 +7522,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_register; - /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ - if (hw->mac.ops.disable_tx_laser && - ((hw->phy.multispeed_fiber) || - ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) && - (hw->mac.type == ixgbe_mac_82599EB)))) + /* power down the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); /* carrier off reporting is important to ethtool even BEFORE open */ @@ -7493,7 +7592,7 @@ err_dma: * Hot-Plug event, or because the driver is going to be removed from * memory. 
**/
-static void __devexit ixgbe_remove(struct pci_dev *pdev)
+static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
@@ -7505,9 +7604,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);
-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_stop(adapter);
-#endif
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7736,7 +7832,7 @@ static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
-	.remove = __devexit_p(ixgbe_remove),
+	.remove = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd96107..42dd65e6ac9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
/* bits 23:16 are used for exra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK	(0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET		0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN	4
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d9291316ee9..1a751c9d09c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
	struct ixgbe_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
+	event.type = PTP_CLOCK_PPS;
+
+	/* this check is necessary in case the interrupt was enabled via some
+	 * alternative means (ex. debug_fs). Better to check here than
+	 * everywhere that calls this function.
+ */ + if (!adapter->ptp_clock) + return; + switch (hw->mac.type) { case ixgbe_mac_X540: ptp_clock_event(adapter->ptp_clock, &event); @@ -411,7 +420,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies; struct timespec ts; - if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) && + if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) && (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) { ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); adapter->last_overflow_check = jiffies; @@ -554,12 +563,14 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, adapter = q_vector->adapter; hw = &adapter->hw; + if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) + return; + tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); /* Check if we have a valid timestamp and make sure the skb should * have been timestamped */ - if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) || - !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; /* @@ -622,8 +633,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct hwtstamp_config config; u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; - u32 tsync_rx_mtrl = 0; - bool is_l4 = false; + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; bool is_l2 = false; u32 regval; @@ -646,16 +656,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; - is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; - is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: @@ -668,7 +677,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; is_l2 = true; - is_l4 = true; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: @@ -693,42 +701,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, /* Store filter value for later use */ adapter->rx_hwtstamp_filter = config.rx_filter; - /* define ethertype filter for timestamped packets */ + /* define ethertype filter for timestamping L2 packets */ if (is_l2) - IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), (IXGBE_ETQF_FILTER_EN | /* enable filter */ IXGBE_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else - IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0); - -#define PTP_PORT 319 - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */ - | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */ - | IXGBE_FTQF_QUEUE_ENABLE); - - ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */ - & IXGBE_FTQF_DEST_PORT_MASK /* dest check */ - & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */ - << IXGBE_FTQF_5TUPLE_MASK_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0); - IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3), - (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 | - IXGBE_IMIR_SIZE_BP_82599)); - - /* enable port check */ - IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3), - (htons(PTP_PORT) | - htons(PTP_PORT) << 16)); - - 
IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf); - - tsync_rx_mtrl |= PTP_PORT << 16; - } else { - IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0); - } /* enable/disable TX */ regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); @@ -759,58 +740,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw * @adapter: pointer to the adapter structure * - * this function initializes the timecounter and cyclecounter - * structures for use in generated a ns counter from the arbitrary - * fixed point cycles registers in the hardware. - * - * A change in link speed impacts the frequency of the DMA clock on - * the device, which is used to generate the cycle counter - * registers. Therefor this function is called whenever the link speed - * changes. - * - * This function also turns on the SDP pin for clock out feature (X540 - * only), because this is where the shift is first calculated. + * This function should be called to set the proper values for the TIMINCA + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TIMINCA value is necessary, + * such as during initialization or when the link speed changes. */ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 incval = 0; - u32 timinca = 0; u32 shift = 0; - u32 cycle_speed; unsigned long flags; /** - * Determine what speed we need to set the cyclecounter - * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat - * unknown speeds as 10Gb. (Hence why we can't just copy the - * link_speed. - */ - switch (adapter->link_speed) { - case IXGBE_LINK_SPEED_100_FULL: - case IXGBE_LINK_SPEED_1GB_FULL: - case IXGBE_LINK_SPEED_10GB_FULL: - cycle_speed = adapter->link_speed; - break; - default: - /* cycle speed should be 10Gb when there is no link */ - cycle_speed = IXGBE_LINK_SPEED_10GB_FULL; - break; - } - - /* - * grab the current TIMINCA value from the register so that it can be - * double checked. If the register value has been cleared, it must be - * reset to the correct value for generating a cyclecounter. If - * TIMINCA is zero, the SYSTIME registers do not increment at all. - */ - timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA); - - /* Bail if the cycle speed didn't change and TIMINCA is non-zero */ - if (adapter->cycle_speed == cycle_speed && timinca) - return; - - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added * or subtracted. The drawbacks of a large factor include @@ -819,8 +762,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) * to nanoseconds using only a multiplier and a right-shift, * and (c) the value must fit within the timinca register space * => math based on internal DMA clock rate and available bits + * + * Note that when there is no link, internal DMA clock is same as when + * link speed is 10Gb. 
Set the registers correctly even when link is + * down to preserve the clock setting */ - switch (cycle_speed) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: incval = IXGBE_INCVAL_100; shift = IXGBE_INCVAL_SHIFT_100; @@ -830,6 +777,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) shift = IXGBE_INCVAL_SHIFT_1GB; break; case IXGBE_LINK_SPEED_10GB_FULL: + default: incval = IXGBE_INCVAL_10GB; shift = IXGBE_INCVAL_SHIFT_10GB; break; @@ -857,18 +805,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) return; } - /* reset the system time registers */ - IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); - IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); - IXGBE_WRITE_FLUSH(hw); - - /* store the new cycle speed */ - adapter->cycle_speed = cycle_speed; - + /* update the base incval used to calculate frequency adjustment */ ACCESS_ONCE(adapter->base_incval) = incval; smp_mb(); - /* grab the ptp lock */ + /* need lock to prevent incorrect read while modifying cyclecounter */ spin_lock_irqsave(&adapter->tmreg_lock, flags); memset(&adapter->cc, 0, sizeof(adapter->cc)); @@ -877,6 +818,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) adapter->cc.shift = shift; adapter->cc.mult = 1; + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +} + +/** + * ixgbe_ptp_reset + * @adapter: the ixgbe private board structure + * + * When the MAC resets, all timesync features are reset. This function should be + * called to re-enable the PTP clock structure. It will re-init the timecounter + * structure based on the kernel time as well as setup the cycle counter data. + */ +void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; + + /* set SYSTIME registers to 0 just in case */ + IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); + IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); + IXGBE_WRITE_FLUSH(hw); + + ixgbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); + /* reset the ns time counter */ timecounter_init(&adapter->tc, &adapter->cc, ktime_to_ns(ktime_get_real())); @@ -904,7 +870,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_X540: - snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; @@ -918,7 +884,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) adapter->ptp_caps.enable = ixgbe_ptp_enable; break; case ixgbe_mac_82599EB: - snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 250000000; adapter->ptp_caps.n_alarm = 0; @@ -942,11 +908,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) spin_lock_init(&adapter->tmreg_lock); - ixgbe_ptp_start_cyclecounter(adapter); - - /* (Re)start the overflow check */ - adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -955,6 +916,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) } else e_dev_info("registered PHC device on %s\n", netdev->name); + ixgbe_ptp_reset(adapter); + + /* set the flag that PTP has been enabled */ + adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED; + return; } @@ -967,7 +933,7 @@ void 
ixgbe_ptp_init(struct ixgbe_adapter *adapter) void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { /* stop the overflow check task */ - adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED | + adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED | IXGBE_FLAG2_PTP_PPS_ENABLED); ixgbe_ptp_setup_sdp(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index dce48bf64d9..85cddac673e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -117,6 +117,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, } } + /* Initialize default switching mode VEB */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + /* If call to enable VFs succeeded then allocate memory * for per VF control structures. */ @@ -150,16 +154,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); -#ifdef IXGBE_FCOE - /* - * When SR-IOV is enabled 82599 cannot support jumbo frames - * so we must disable FCoE because we cannot support FCoE MTU. - */ - if (adapter->hw.mac.type == ixgbe_mac_82599EB) - adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED | - IXGBE_FLAG_FCOE_CAPABLE); -#endif - /* enable spoof checking for all VFs */ for (i = 0; i < adapter->num_vfs; i++) adapter->vfinfo[i].spoofchk_enabled = true; @@ -265,8 +259,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) } static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, - int entries, u16 *hash_list, u32 vf) + u32 *msgbuf, u32 vf) { + int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) + >> IXGBE_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; struct ixgbe_hw *hw = &adapter->hw; int i; @@ -353,31 +350,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } -static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) +static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - int new_mtu = msgbuf[1]; + int max_frame = msgbuf[1]; u32 max_frs; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; - /* Only X540 supports jumbo frames in IOV mode */ - if (adapter->hw.mac.type != ixgbe_mac_X540) - return; + /* + * For 82599EB we have to keep all PFs and VFs operating with + * the same max_frame value in order to avoid sending an oversize + * frame to a VF. 
In order to guarantee this is handled correctly
+	 * for all cases we have several special exceptions to take into
+	 * account before we can enable the VF for receive
+	 */
+	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		struct net_device *dev = adapter->netdev;
+		int pf_max_frame = dev->mtu + ETH_HLEN;
+		u32 reg_offset, vf_shift, vfre;
+		s32 err = 0;
+
+#ifdef CONFIG_FCOE
+		if (dev->features & NETIF_F_FCOE_MTU)
+			pf_max_frame = max_t(int, pf_max_frame,
+					     IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* CONFIG_FCOE */
+		switch (adapter->vfinfo[vf].vf_api) {
+		case ixgbe_mbox_api_11:
+			/*
+			 * Version 1.1 supports jumbo frames on VFs if PF has
+			 * jumbo frames enabled which means legacy VFs are
+			 * disabled
+			 */
+			if (pf_max_frame > ETH_FRAME_LEN)
+				break;
+		default:
+			/*
+			 * If the PF or VF are running w/ jumbo frames enabled
+			 * we need to shut down the VF Rx path as we cannot
+			 * support jumbo frames on legacy VFs
+			 */
+			if ((pf_max_frame > ETH_FRAME_LEN) ||
+			    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+				err = -EINVAL;
+			break;
+		}
+
+		/* determine VF receive enable location */
+		vf_shift = vf % 32;
+		reg_offset = vf / 32;
+
+		/* enable or disable receive depending on error */
+		vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+		if (err)
+			vfre &= ~(1 << vf_shift);
+		else
+			vfre |= 1 << vf_shift;
+		IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+		if (err) {
+			e_err(drv, "VF max_frame %d out of range\n", max_frame);
+			return err;
+		}
+	}
	/* MTU < 68 is an error and causes problems on some kernels */
-	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
-		e_err(drv, "VF mtu %d out of range\n", new_mtu);
-		return;
+	if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+		e_err(drv, "VF max_frame %d out of range\n", max_frame);
+		return -EINVAL;
	}
-	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
-		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
-	if (max_frs < new_mtu) {
-		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+	/* pull current max frame size from hardware */
+	max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+	max_frs &= IXGBE_MHADD_MFS_MASK;
+	max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+	if (max_frs < max_frame) {
+		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}
-	e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+	e_info(hw, "VF requests change max MTU to %d\n", max_frame);
+
+	return 0;
}
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -392,35 +447,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+			    u16 vid, u16 qos, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
-	if (vid)
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
-				(vid | IXGBE_VMVIR_VLANA_DEFAULT));
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
}
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+	/* add PF assigned VLAN or VLAN 0 */
+
ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); /* reset offloads to defaults */ - if (adapter->vfinfo[vf].pf_vlan) { - ixgbe_set_vf_vlan(adapter, true, - adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_set_vmvir(adapter, - (adapter->vfinfo[vf].pf_vlan | - (adapter->vfinfo[vf].pf_qos << - VLAN_PRIO_SHIFT)), vf); - ixgbe_set_vmolr(hw, vf, false); + ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + ixgbe_clear_vmvir(adapter, vf); } else { - ixgbe_set_vf_vlan(adapter, true, 0, vf); - ixgbe_set_vmvir(adapter, 0, vf); - ixgbe_set_vmolr(hw, vf, true); + if (vfinfo->pf_qos || !num_tcs) + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, + adapter->default_up, vf); + + if (vfinfo->spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); } /* reset multicast table array for vf */ @@ -430,6 +497,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ixgbe_set_rx_mode(adapter->netdev); hw->mac.ops.clear_rar(hw, rar_entry); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; } static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, @@ -521,30 +591,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) return 0; } -static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) +static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - u32 reg; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, msgbuf[4]; u32 reg_offset, vf_shift; + u8 *addr = (u8 *)(&msgbuf[1]); + + e_info(probe, "VF Reset msg received from vf %d\n", vf); + + /* reset the filters for the device */ + ixgbe_vf_reset_event(adapter, vf); + + /* set vf mac address */ + ixgbe_set_vf_mac(adapter, vf, vf_mac); vf_shift = vf % 32; reg_offset = vf / 32; - /* enable transmit and receive for vf */ + /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= (reg | (1 << vf_shift)); + reg |= 1 << vf_shift; IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= (reg | (1 << vf_shift)); + reg |= 1 << vf_shift; + /* + * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. 
+ * For more info take a look at ixgbe_set_vf_lpe + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +#ifdef CONFIG_FCOE + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); + +#endif /* CONFIG_FCOE */ + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); reg |= (1 << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); - ixgbe_vf_reset_event(adapter, vf); + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[3] = hw->mac.mc_filter_type; + ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, + ETH_ALEN)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; +} + +static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + int err; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + + if (add) + adapter->vfinfo[vf].vlan_count++; + else if (adapter->vfinfo[vf].vlan_count) + adapter->vfinfo[vf].vlan_count--; + + err = ixgbe_set_vf_vlan(adapter, add, vid, vf); + if (!err && adapter->vfinfo[vf].spoofchk_enabled) + hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); + + return err; +} + +static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> + IXGBE_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + /* + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
+		 */
+		if (adapter->vfinfo[vf].spoofchk_enabled)
+			ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+	}
+
+	err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+	if (err == -ENOSPC)
+		e_warn(drv,
+		       "VF %d has requested a MACVLAN filter but there is no space for it\n",
+		       vf);
+
+	return err < 0;
+}
+
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+				  u32 *msgbuf, u32 vf)
+{
+	int api = msgbuf[1];
+
+	switch (api) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		adapter->vfinfo[vf].vf_api = api;
+		return 0;
+	default:
+		break;
+	}
+
+	e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+	return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+			       u32 *msgbuf, u32 vf)
+{
+	struct net_device *dev = adapter->netdev;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+	unsigned int default_tc = 0;
+	u8 num_tcs = netdev_get_num_tc(dev);
+
+	/* verify the PF is supporting the correct APIs */
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_20:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	/* only allow 1 Tx queue for bandwidth limiting */
+	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+	/* if TCs > 1 determine which TC belongs to default user priority */
+	if (num_tcs > 1)
+		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+	/* notify VF of need for VLAN tag stripping, and correct queue */
+	if (num_tcs)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+	else
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+	/* notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+	return 0;
}
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +814,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	struct ixgbe_hw *hw = &adapter->hw;
	s32 retval;
-	int entries;
-	u16 *hash_list;
-	int add, vid, index;
-	u8 *new_mac;
	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -572,39 +829,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
	/* flush the ack before we write any messages back */
	IXGBE_WRITE_FLUSH(hw);
+	if (msgbuf[0] == IXGBE_VF_RESET)
+		return ixgbe_vf_reset_msg(adapter, vf);
+
	/*
	 * until the vf completes a virtual function reset it should not be
	 * allowed to start any configuration.
*/ - - if (msgbuf[0] == IXGBE_VF_RESET) { - unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; - new_mac = (u8 *)(&msgbuf[1]); - e_info(probe, "VF Reset msg received from vf %d\n", vf); - adapter->vfinfo[vf].clear_to_send = false; - ixgbe_vf_reset_msg(adapter, vf); - adapter->vfinfo[vf].clear_to_send = true; - - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) - ixgbe_set_vf_mac(adapter, vf, vf_mac); - else - ixgbe_set_vf_mac(adapter, - vf, adapter->vfinfo[vf].vf_mac_addresses); - - /* reply to reset with ack and vf mac address */ - msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; - memcpy(new_mac, vf_mac, ETH_ALEN); - /* - * Piggyback the multicast filter type so VF can compute the - * correct vectors - */ - msgbuf[3] = hw->mac.mc_filter_type; - ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); - - return retval; - } - if (!adapter->vfinfo[vf].clear_to_send) { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; ixgbe_write_mbx(hw, msgbuf, 1, vf); @@ -613,70 +844,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) switch ((msgbuf[0] & 0xFFFF)) { case IXGBE_VF_SET_MAC_ADDR: - new_mac = ((u8 *)(&msgbuf[1])); - if (is_valid_ether_addr(new_mac) && - !adapter->vfinfo[vf].pf_set_mac) { - ixgbe_set_vf_mac(adapter, vf, new_mac); - } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses, - new_mac, ETH_ALEN)) { - e_warn(drv, "VF %d attempted to override " - "administratively set MAC address\nReload " - "the VF driver to resume operations\n", vf); - retval = -1; - } + retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); break; case IXGBE_VF_SET_MULTICAST: - entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - hash_list = (u16 *)&msgbuf[1]; - retval = ixgbe_set_vf_multicasts(adapter, entries, - hash_list, vf); - break; - case IXGBE_VF_SET_LPE: - ixgbe_set_vf_lpe(adapter, msgbuf); + retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); break; case IXGBE_VF_SET_VLAN: - add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) - >> IXGBE_VT_MSGINFO_SHIFT; - vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); - if (adapter->vfinfo[vf].pf_vlan) { - e_warn(drv, "VF %d attempted to override " - "administratively set VLAN configuration\n" - "Reload the VF driver to resume operations\n", - vf); - retval = -1; - } else { - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (!retval && adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - } + retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_LPE: + retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); break; case IXGBE_VF_SET_MACVLAN: - index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> - IXGBE_VT_MSGINFO_SHIFT; - if (adapter->vfinfo[vf].pf_set_mac && index > 0) { - e_warn(drv, "VF %d requested MACVLAN filter but is " - "administratively denied\n", vf); - retval = -1; - break; - } - /* - * If the VF is allowed to set MAC filters then turn off - * anti-spoofing to avoid false positives. An index - * greater than 0 will indicate the VF is setting a - * macvlan MAC filter. 
- */ - if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); - retval = ixgbe_set_vf_macvlan(adapter, vf, index, - (unsigned char *)(&msgbuf[1])); - if (retval == -ENOSPC) - e_warn(drv, "VF %d has requested a MACVLAN filter " - "but there is no space for it\n", vf); + retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_API_NEGOTIATE: + retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); @@ -692,7 +878,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; - ixgbe_write_mbx(hw, msgbuf, 1, vf); + ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); return retval; } @@ -783,7 +969,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); if (err) goto out; - ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); if (adapter->vfinfo[vf].spoofchk_enabled) hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); @@ -803,7 +989,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) } else { err = ixgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); - ixgbe_set_vmvir(adapter, vlan, vf); + ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); if (adapter->vfinfo[vf].vlan_count) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 0722f336809..9cd8a13711d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -56,6 +56,7 @@ #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 @@ -1833,15 +1834,6 @@ enum { /* Number of 100 microseconds we wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 -/* Check whether address is multicast. This is little-endian specific check.*/ -#define IXGBE_IS_MULTICAST(Address) \ - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) - -/* Check whether an address is broadcast. 
*/
-#define IXGBE_IS_BROADCAST(Address) \
-	((((u8 *)(Address))[0] == ((u8)0xff)) && \
-	(((u8 *)(Address))[1] == ((u8)0xff)))
-
/* RAH */
#define IXGBE_RAH_VIND_MASK	0x003C0000
#define IXGBE_RAH_VIND_SHIFT	18
@@ -1962,6 +1954,8 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP	0x01000000
#define IXGBE_MRQC_L3L4TXSWEN			0x00008000
+#define IXGBE_FWSM_TS_ENABLED	0x1
+
/* Queue Drop Enable */
#define IXGBE_QDE_ENABLE	0x00000001
#define IXGBE_QDE_IDX_MASK	0x00007F00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index de4da5219b7..c73b9299339 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -152,7 +152,7 @@ mac_reset_top:
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
	/* Add the SAN MAC address to the RAR only if it's a valid address */
-	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+	if (is_valid_ether_addr(hw->mac.san_addr)) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);
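
The sketches below are editorial illustrations of a few techniques used in the hunks above; they are standalone userspace C, not code from this commit, and every constant or helper name that is not in the diff is made up for the example.

First, the reworked ixgbe_set_vf_lpe() ties a VF's receive enable to frame-size compatibility: a mailbox API 1.1 VF may keep its Rx path when the PF runs jumbo frames, while legacy (1.0/2.0) VFs stay enabled only if both PF and VF remain at standard frame sizes. A minimal sketch of that decision, with illustrative constants and function name:

#include <stdbool.h>
#include <stdio.h>

#define ETH_FRAME_LEN	1514	/* standard max frame, without FCS */
#define ETH_FCS_LEN	4

enum vf_api { API_1_0, API_2_0, API_1_1 };	/* mirrors ixgbe_pfvf_api_rev */

/* Sketch of the 82599 policy in the ixgbe_set_vf_lpe() hunk: may this
 * VF's receive path stay enabled for the given PF/VF frame sizes? */
static bool vf_rx_allowed(enum vf_api api, int pf_max_frame, int vf_max_frame)
{
	/* API 1.1 VFs understand jumbo frames once the PF runs jumbo */
	if (api == API_1_1 && pf_max_frame > ETH_FRAME_LEN)
		return true;

	/* legacy VFs: neither side may exceed a standard sized frame */
	return pf_max_frame <= ETH_FRAME_LEN &&
	       vf_max_frame <= ETH_FRAME_LEN + ETH_FCS_LEN;
}

int main(void)
{
	printf("legacy VF, jumbo PF:   %d\n", vf_rx_allowed(API_1_0, 9014, 1518));
	printf("v1.1 VF, jumbo PF:     %d\n", vf_rx_allowed(API_1_1, 9014, 9018));
	printf("legacy VF, std frames: %d\n", vf_rx_allowed(API_1_0, 1514, 1518));
	return 0;
}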
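
Both ixgbe_vf_reset_msg() and ixgbe_set_vf_lpe() locate a VF's enable bit with vf_shift = vf % 32 and reg_offset = vf / 32, because VFRE/VFTE pack one bit per VF into consecutive 32-bit registers. A standalone model of that indexing (the helper name and array are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Model of the per-VF enable bitmaps (VFRE/VFTE): one bit per VF,
 * packed 32 VFs per 32-bit register. */
static void vf_bitmap_set(uint32_t *regs, unsigned int vf, int enable)
{
	unsigned int reg_offset = vf / 32;	/* which 32-bit register */
	unsigned int vf_shift = vf % 32;	/* which bit inside it   */

	if (enable)
		regs[reg_offset] |= 1u << vf_shift;
	else
		regs[reg_offset] &= ~(1u << vf_shift);
}

int main(void)
{
	uint32_t vfre[2] = { 0, 0 };	/* enough for 64 VFs */

	vf_bitmap_set(vfre, 5, 1);	/* enable RX for VF 5  */
	vf_bitmap_set(vfre, 40, 1);	/* enable RX for VF 40 */
	vf_bitmap_set(vfre, 5, 0);	/* disable VF 5 again  */

	printf("VFRE[0]=0x%08x VFRE[1]=0x%08x\n", vfre[0], vfre[1]);
	return 0;
}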
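
The ixgbe_ndo_fdb_add() change accepts an FDB entry when no neighbour state is supplied, or when the supplied state includes NUD_PERMANENT, since the hardware cannot age addresses. The predicate spelled out, using the NUD_* values from linux/neighbour.h:

#include <stdbool.h>
#include <stdio.h>

#define NUD_REACHABLE	0x02	/* values from linux/neighbour.h */
#define NUD_PERMANENT	0x80

/* Only reject entries that carry a neighbour state which is not
 * NUD_PERMANENT; an unspecified (zero) state is accepted. */
static bool fdb_state_ok(unsigned int ndm_state)
{
	return !(ndm_state && !(ndm_state & NUD_PERMANENT));
}

int main(void)
{
	printf("state 0 (unspecified): %s\n", fdb_state_ok(0) ? "accept" : "reject");
	printf("state NUD_PERMANENT:   %s\n", fdb_state_ok(NUD_PERMANENT) ? "accept" : "reject");
	printf("state NUD_REACHABLE:   %s\n", fdb_state_ok(NUD_REACHABLE) ? "accept" : "reject");
	return 0;
}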
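
ixgbe_update_default_up() reduces the DCB application priority bitmask to a single default user priority by taking the lowest set bit, with an empty mask falling back to priority 0. A small sketch of that mapping (helper name is illustrative):

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int default_up_from_mask(unsigned int up_mask)
{
	/* lowest set bit wins; no bits set means user priority 0 */
	return (up_mask > 1) ? (unsigned int)(ffs(up_mask) - 1) : 0;
}

int main(void)
{
	printf("mask 0x00 -> UP %u\n", default_up_from_mask(0x00)); /* 0 */
	printf("mask 0x08 -> UP %u\n", default_up_from_mask(0x08)); /* 3 */
	printf("mask 0x30 -> UP %u\n", default_up_from_mask(0x30)); /* 4 */
	return 0;
}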
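
Finally, ixgbe_ptp_start_cyclecounter() now only picks the increment value and shift for the current link speed, leaving the SYSTIME and timecounter resets to the new ixgbe_ptp_reset(). The underlying idea is the usual mult/shift conversion from raw cycles to nanoseconds; the numbers below are invented for the demo, not the driver's TIMINCA constants, and a real timecounter also handles counter wrap, which this sketch ignores:

#include <stdint.h>
#include <stdio.h>

/* Generic cyclecounter-style conversion: ns = (cycles * mult) >> shift,
 * which is what the mult/shift fields of struct cyclecounter express. */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t mult = 6710886;	/* 6710886 / 2^20 ~= 6.4 ns per cycle */
	uint32_t shift = 20;

	printf("1000 cycles ~= %llu ns\n",
	       (unsigned long long)cycles_to_ns(1000, mult, shift));
	return 0;
}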