Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
 drivers/net/ethernet/intel/ixgbe/Makefile        |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe.h         |  37
 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c   |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  |   1
 drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 300
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |   5
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 592
 drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c     | 205
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c   | 105
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h    |   1
 10 files changed, 820 insertions(+), 430 deletions(-)
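Note (not part of the patch): a rough userspace sketch of how the new debugfs interface added by ixgbe_debugfs.c below could be exercised. The directory layout (/sys/kernel/debug/ixgbe/<pci-address>/) follows from the debugfs_create_dir() and pci_name() calls in the patch; the debugfs mount point, the PCI address 0000:01:00.0 and the register offset used here are illustrative assumptions. Command results are reported through e_dev_info(), i.e. they land in the kernel log, while reading a file back only returns the netdev name plus the last command written.

/* Hypothetical exerciser for the new ixgbe debugfs files (run as root).
 * Assumes debugfs is mounted at /sys/kernel/debug and the adapter sits at
 * PCI address 0000:01:00.0 -- both are examples, not taken from the patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void dbg_cmd(const char *path, const char *cmd)
{
	char buf[256];
	ssize_t n;
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return;
	}
	/* the write handler parses e.g. "read <reg>" or "write <reg> <value>";
	 * the result is printed via e_dev_info(), so check dmesg for it */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);

	/* reading only returns "<netdev name>: <last command written>" */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}
	close(fd);
}

int main(void)
{
	const char *dir = "/sys/kernel/debug/ixgbe/0000:01:00.0";
	char path[128];

	snprintf(path, sizeof(path), "%s/reg_ops", dir);
	dbg_cmd(path, "read 0x08100");		/* made-up register offset */

	snprintf(path, sizeof(path), "%s/netdev_ops", dir);
	dbg_cmd(path, "tx_timeout");		/* invokes ndo_tx_timeout */
	return 0;
}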
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 5fd5d04c26c..89f40e51fc1 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -32,7 +32,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index b9623e9ea89..30efc9f0f47 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -78,6 +78,9 @@ /* Supported Rx Buffer Sizes */ #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ /* @@ -104,6 +107,7 @@ #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) #define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) +#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -293,16 +297,25 @@ struct ixgbe_ring_feature { * this is twice the size of a half page we need to double the page order * for FCoE enabled Rx queues. */ -#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) -static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { - return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0; +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K : + IXGBE_RXBUFFER_3K; +#endif + return IXGBE_RXBUFFER_2K; } -#else -#define ixgbe_rx_pg_order(_ring) 0 + +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? 
1 : 0; #endif + return 0; +} #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) -#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring)) struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ @@ -397,7 +410,7 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) #define IXGBE_TX_CTXTDESC(R, i) \ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) -#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 +#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ #ifdef IXGBE_FCOE /* Use 3K as the baby jumbo frame size for FCoE */ #define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 @@ -584,6 +597,9 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_HWMON struct hwmon_buff ixgbe_hwmon_buff; #endif /* CONFIG_IXGBE_HWMON */ +#ifdef CONFIG_DEBUG_FS + struct dentry *ixgbe_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ }; struct ixgbe_fdir_filter { @@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info); extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); #endif /* IXGBE_FCOE */ - +#ifdef CONFIG_DEBUG_FS +extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); +extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); +extern void ixgbe_dbg_init(void); +extern void ixgbe_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 18bf08c9d7a..1077cb2b38d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1099,7 +1099,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; - udelay(10); + usleep_range(1000, 2000); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 90e41db3cb6..dbf37e4a45f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -70,6 +70,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: return 0; case IXGBE_DEV_ID_82599_T3_LOM: return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c new file mode 100644 index 00000000000..8d3a2188909 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -0,0 +1,300 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifdef CONFIG_DEBUG_FS + +#include <linux/debugfs.h> +#include <linux/module.h> + +#include "ixgbe.h" + +static struct dentry *ixgbe_dbg_root; + +static char ixgbe_dbg_reg_ops_buf[256] = ""; + +/** + * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened + * @inode: inode that was opened + * @filp: file info + * + * Stash the adapter pointer hiding in the inode into the file pointer where + * we can find it later in the read and write calls + **/ +static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +/** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char buf[256]; + int bytes_not_copied; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + len = snprintf(buf, sizeof(buf), "%s: %s\n", + adapter->netdev->name, ixgbe_dbg_reg_ops_buf); + if (count < len) + return -ENOSPC; + bytes_not_copied = copy_to_user(buffer, buf, len); + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * ixgbe_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int bytes_not_copied; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) + return -ENOSPC; + + bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied < count) + count -= bytes_not_copied; + else + return -ENOSPC; + ixgbe_dbg_reg_ops_buf[count] = '\0'; + + if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value); + if (cnt == 2) { + IXGBE_WRITE_REG(&adapter->hw, reg, value); + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write <reg> <value>\n"); + } + } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", &reg); + if (cnt == 1) { + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read <reg>\n"); + } + } else { + e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" 
read <reg>\n"); + e_dev_info(" write <reg> <value>\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = ixgbe_dbg_reg_ops_open, + .read = ixgbe_dbg_reg_ops_read, + .write = ixgbe_dbg_reg_ops_write, +}; + +static char ixgbe_dbg_netdev_ops_buf[256] = ""; + +/** + * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item + * @inode: inode that was opened + * @filp: file info + * + * Stash the adapter pointer hiding in the inode into the file pointer + * where we can find it later in the read and write calls + **/ +static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +/** + * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char buf[256]; + int bytes_not_copied; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + len = snprintf(buf, sizeof(buf), "%s: %s\n", + adapter->netdev->name, ixgbe_dbg_netdev_ops_buf); + if (count < len) + return -ENOSPC; + bytes_not_copied = copy_to_user(buffer, buf, len); + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int bytes_not_copied; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf, + buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied < count) + count -= bytes_not_copied; + else + return -ENOSPC; + ixgbe_dbg_netdev_ops_buf[count] = '\0'; + + if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = ixgbe_dbg_netdev_ops_open, + .read = ixgbe_dbg_netdev_ops_read, + .write = ixgbe_dbg_netdev_ops_write, +}; + +/** + * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); + if (adapter->ixgbe_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, 
+ &ixgbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) +{ + if (adapter->ixgbe_dbg_adapter) + debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + adapter->ixgbe_dbg_adapter = NULL; +} + +/** + * ixgbe_dbg_init - start up debugfs for the driver + **/ +void ixgbe_dbg_init(void) +{ + ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); + if (ixgbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ixgbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ixgbe_dbg_exit(void) +{ + debugfs_remove_recursive(ixgbe_dbg_root); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4104ea25d81..56b20d17d0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2690,10 +2690,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_SOME); + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); break; #endif /* CONFIG_IXGBE_PTP */ default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4326f74f713..fa3d552e1f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -114,6 +114,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, /* required last entry */ {0, } }; @@ -1167,7 +1168,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, } bi->dma = dma; - bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); + bi->page_offset = 0; return true; } @@ -1320,29 +1321,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return max_len; } -static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - __le32 rsc_enabled; - u32 rsc_cnt; - - if (!ring_is_rsc_enabled(rx_ring)) - return; - - rsc_enabled = rx_desc->wb.lower.lo_dword.data & - cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); - - /* If this is an RSC frame rsc_cnt should be non-zero */ - if (!rsc_enabled) - return; - - rsc_cnt = le32_to_cpu(rsc_enabled); - rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; - - IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; -} - static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { @@ -1440,16 +1418,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, prefetch(IXGBE_RX_DESC(rx_ring, ntc)); - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - return false; + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); - /* append_cnt indicates 
packet is RSC, if so fetch nextp */ - if (IXGBE_CB(skb)->append_cnt) { - ntc = le32_to_cpu(rx_desc->wb.upper.status_error); - ntc &= IXGBE_RXDADV_NEXTP_MASK; - ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; + + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } } + /* if we are the last buffer then there is nothing else to do */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ rx_ring->rx_buffer_info[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; @@ -1458,6 +1448,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, } /** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. 
+ */ +static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; +} + +/** * ixgbe_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -1479,24 +1541,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; struct net_device *netdev = rx_ring->netdev; - unsigned char *va; - unsigned int pull_len; - - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } else { - dma_sync_single_range_for_cpu(rx_ring->dev, - IXGBE_CB(skb)->dma, - frag->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - IXGBE_CB(skb)->dma = 0; /* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, @@ -1506,40 +1551,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, return true; } - /* - * it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* - * we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
- */ - pull_len = skb_frag_size(frag); - if (pull_len > IXGBE_RX_HDR_SIZE) - pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - /* - * if we sucked the frag empty then we should free it, - * if there are other frags here something is screwed up in hardware - */ - if (skb_frag_size(frag) == 0) { - BUG_ON(skb_shinfo(skb)->nr_frags != 1); - skb_shinfo(skb)->nr_frags = 0; - __skb_frag_unref(frag); - skb->truesize -= ixgbe_rx_bufsz(rx_ring); - } + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE /* do not attempt to pad FCoE Frames as this will disrupt DDP */ @@ -1560,33 +1574,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, } /** - * ixgbe_can_reuse_page - determine if we can reuse a page - * @rx_buffer: pointer to rx_buffer containing the page we want to reuse - * - * Returns true if page can be reused in another Rx buffer - **/ -static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer) -{ - struct page *page = rx_buffer->page; - - /* if we are only owner of page and it is local we can reuse it */ - return likely(page_count(page) == 1) && - likely(page_to_nid(page) == numa_node_id()); -} - -/** * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * - * Syncronizes page for reuse by the adapter + * Synchronizes page for reuse by the adapter **/ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *old_buff) { struct ixgbe_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; - u16 bufsz = ixgbe_rx_bufsz(rx_ring); new_buff = &rx_ring->rx_buffer_info[nta]; @@ -1597,17 +1595,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, /* transfer page from old buffer to new buffer */ new_buff->page = old_buff->page; new_buff->dma = old_buff->dma; - - /* flip page offset to other buffer and store to new_buff */ - new_buff->page_offset = old_buff->page_offset ^ bufsz; + new_buff->page_offset = old_buff->page_offset; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, bufsz, + new_buff->page_offset, + ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - - /* bump ref count on page before it is given to the stack */ - get_page(new_buff->page); } /** @@ -1617,20 +1611,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * - * This function is based on skb_add_rx_frag. I would have used that - * function however it doesn't handle the truesize case correctly since we - * are allocating more memory than might be used for a single receive. + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
**/ -static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb, int size) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer->page, rx_buffer->page_offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += ixgbe_rx_bufsz(rx_ring); + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); +#endif + + if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; + + /* + * since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unecessary locked operation + */ + atomic_set(&page->_count, 2); +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); +#endif + + return true; +} + +static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
Only unmap it when EOP is + * reached + */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + goto dma_sync; + + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + ixgbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + + /* pull page into skb */ + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } /** @@ -1653,16 +1786,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE struct ixgbe_adapter *adapter = q_vector->adapter; - int ddp_bytes = 0; + int ddp_bytes; + unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); do { - struct ixgbe_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; - struct page *page; - u16 ntc; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -1670,9 +1801,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, cleaned_count = 0; } - ntc = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC(rx_ring, ntc); - rx_buffer = &rx_ring->rx_buffer_info[ntc]; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) break; @@ -1684,75 +1813,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ rmb(); - page = rx_buffer->page; - prefetchw(page); - - skb = rx_buffer->skb; - - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif - - /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - break; - } - - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); + /* retrieve a buffer from the ring */ + skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - - /* pull page into skb */ - ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, - le16_to_cpu(rx_desc->wb.upper.length)); - - if (ixgbe_can_reuse_page(rx_buffer)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - } - - /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; - rx_buffer->page = NULL; - - ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; cleaned_count++; @@ -1775,6 +1841,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } if (!ddp_bytes) { dev_kfree_skb_any(skb); continue; @@ -1788,21 +1868,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, budget--; } while (likely(budget)); -#ifdef IXGBE_FCOE - /* include DDPed FCoE data */ - if (ddp_bytes > 0) { - unsigned int mss; - - mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - - sizeof(struct fc_frame_header) - - sizeof(struct fcoe_crc_eof); - if (mss > 512) - mss &= ~511; - total_rx_bytes += ddp_bytes; - total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); - } - -#endif /* IXGBE_FCOE */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -2258,6 +2323,12 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, default: break; } + +#ifdef CONFIG_IXGBE_PTP + if (adapter->hw.mac.type == ixgbe_mac_X540) + mask |= IXGBE_EIMS_TIMESYNC; +#endif + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) mask |= IXGBE_EIMS_FLOW_DIR; @@ -2321,8 +2392,10 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) } ixgbe_check_fan_failure(adapter, eicr); + #ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_check_pps_event(adapter, eicr); + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter, eicr); #endif /* re-enable the original interrupt state, no lsc, no queues */ @@ -2516,7 +2589,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data) ixgbe_check_fan_failure(adapter, eicr); #ifdef CONFIG_IXGBE_PTP - ixgbe_ptp_check_pps_event(adapter, eicr); + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) + ixgbe_ptp_check_pps_event(adapter, eicr); #endif /* would disable interrupts here but EIAM disabled it */ @@ -2868,11 +2942,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer 
length */ -#if PAGE_SIZE > IXGBE_MAX_RXBUFFER - srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#else srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#endif /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -2980,13 +3050,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, * total size of max desc * buf_len is not greater * than 65536 */ -#if (PAGE_SIZE <= 8192) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; -#elif (PAGE_SIZE <= 16384) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; -#else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; -#endif IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } @@ -3606,8 +3670,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768); - hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); - #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); @@ -3807,6 +3869,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCB ixgbe_configure_dcb(adapter); #endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); @@ -3838,8 +3905,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) break; } - ixgbe_configure_virtualization(adapter); - #ifdef IXGBE_FCOE /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); @@ -4130,27 +4195,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } /** - * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers - * @rx_ring: ring to setup - * - * On many IA platforms the L1 cache has a critical stride of 4K, this - * results in each receive buffer starting in the same cache set. To help - * reduce the pressure on this cache set we can interleave the offsets so - * that only every other buffer will be in the same cache set. 
- **/ -static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring) -{ - struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info; - u16 i; - - for (i = 0; i < rx_ring->count; i += 2) { - rx_buffer[0].page_offset = 0; - rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring); - rx_buffer = &rx_buffer[2]; - } -} - -/** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ @@ -4195,8 +4239,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size); - ixgbe_init_rx_page_offset(rx_ring); - /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); @@ -4646,8 +4688,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - ixgbe_init_rx_page_offset(rx_ring); - return 0; err: vfree(rx_ring->rx_buffer_info); @@ -5530,8 +5570,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) { u32 ssvpc; - /* Do not perform spoof check for 82598 */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) + /* Do not perform spoof check for 82598 or if not in IOV mode */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) return; ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); @@ -5543,7 +5584,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) if (!ssvpc) return; - e_warn(drv, "%d Spoofed packets detected\n", ssvpc); + e_warn(drv, "%u Spoofed packets detected\n", ssvpc); } /** @@ -5874,9 +5915,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) - return; + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { + if (unlikely(skb->no_fcs)) + first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; + if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) + return; + } } else { u8 l4_hdr = 0; switch (first->protocol) { @@ -5938,7 +5982,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); /* set HW vlan bit if vlan is present */ @@ -5958,6 +6001,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) #endif cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); + /* insert frame checksum */ + if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS)) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); + return cmd_type; } @@ -6063,8 +6110,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, if (likely(!data_len)) break; - if (unlikely(skb->no_fcs)) - cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS)); tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); i++; @@ -6854,9 +6899,9 @@ static int ixgbe_set_features(struct net_device *netdev, return 0; } -static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, +static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -6893,7 +6938,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr) + const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err = -EOPNOTSUPP; @@ -7010,6 +7055,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, 
is_wol_supported = 1; break; case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: /* check eeprom to see if enabled wol */ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && @@ -7136,11 +7182,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_ioremap; } - for (i = 1; i <= 5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - } - netdev->netdev_ops = &ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; @@ -7419,6 +7460,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_err(probe, "failed to allocate sysfs resources\n"); #endif /* CONFIG_IXGBE_HWMON */ +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_adapter_init(adapter); +#endif /* CONFIG_DEBUG_FS */ + return 0; err_register: @@ -7453,6 +7498,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_adapter_exit(adapter); +#endif /*CONFIG_DEBUG_FS */ + set_bit(__IXGBE_DOWN, &adapter->state); cancel_work_sync(&adapter->service_task); @@ -7527,7 +7576,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, goto skip_bad_vf_detection; bdev = pdev->bus->self; - while (bdev && (bdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) + while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) bdev = bdev->bus->self; if (!bdev) @@ -7677,7 +7726,7 @@ static void ixgbe_io_resume(struct pci_dev *pdev) netif_device_attach(netdev); } -static struct pci_error_handlers ixgbe_err_handler = { +static const struct pci_error_handlers ixgbe_err_handler = { .error_detected = ixgbe_io_error_detected, .slot_reset = ixgbe_io_slot_reset, .resume = ixgbe_io_resume, @@ -7708,6 +7757,10 @@ static int __init ixgbe_init_module(void) pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright); +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_init(); +#endif /* CONFIG_DEBUG_FS */ + #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif @@ -7730,6 +7783,11 @@ static void __exit ixgbe_exit_module(void) dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); + +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_exit(); +#endif /* CONFIG_DEBUG_FS */ + rcu_barrier(); /* Wait for completion of call_rcu()'s */ } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 3456d561714..d9291316ee9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -106,6 +106,83 @@ static struct sock_filter ptp_filter[] = { }; /** + * ixgbe_ptp_setup_sdp + * @hw: the hardware private structure + * + * this function enables or disables the clock out feature on SDP0 for + * the X540 device. It will create a 1second periodic output that can + * be used as the PPS (via an interrupt). + * + * It calculates when the systime will be on an exact second, and then + * aligns the start of the PPS signal to that value. The shift is + * necessary because it can change based on the link speed. 
+ */ +static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int shift = adapter->cc.shift; + u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; + u64 ns = 0, clock_edge = 0; + + if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && + (hw->mac.type == ixgbe_mac_X540)) { + + /* disable the pin first */ + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + IXGBE_WRITE_FLUSH(hw); + + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* + * enable the SDP0 pin as output, and connected to the + * native function for Timesync (ClockOut) + */ + esdp |= (IXGBE_ESDP_SDP0_DIR | + IXGBE_ESDP_SDP0_NATIVE); + + /* + * enable the Clock Out feature on SDP0, and allow + * interrupts to occur when the pin changes + */ + tsauxc = (IXGBE_TSAUXC_EN_CLK | + IXGBE_TSAUXC_SYNCLK | + IXGBE_TSAUXC_SDP0_INT); + + /* clock period (or pulse length) */ + clktiml = (u32)(NSECS_PER_SEC << shift); + clktimh = (u32)((NSECS_PER_SEC << shift) >> 32); + + /* + * Account for the cyclecounter wrap-around value by + * using the converted ns value of the current time to + * check for when the next aligned second would occur. + */ + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); + clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + ns = timecounter_cyc2time(&adapter->tc, clock_edge); + + div_u64_rem(ns, NSECS_PER_SEC, &rem); + clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift); + + /* specify the initial clock start time */ + trgttiml = (u32)clock_edge; + trgttimh = (u32)(clock_edge >> 32); + + IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); + IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); + IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); + + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + } else { + IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); + } + + IXGBE_WRITE_FLUSH(hw); +} + +/** * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) * @cc: the cyclecounter structure * @@ -198,6 +275,9 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) now); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + ixgbe_ptp_setup_sdp(adapter); + return 0; } @@ -251,6 +331,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, timecounter_init(&adapter->tc, &adapter->cc, ns); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ixgbe_ptp_setup_sdp(adapter); return 0; } @@ -281,8 +362,9 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp, if (on) adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; else - adapter->flags2 &= - ~IXGBE_FLAG2_PTP_PPS_ENABLED; + adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; + + ixgbe_ptp_setup_sdp(adapter); return 0; default: break; @@ -305,109 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; struct ptp_clock_event event; - event.type = PTP_CLOCK_PPS; - - /* Make sure ptp clock is valid, and PPS event enabled */ - if (!adapter->ptp_clock || - !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) - return; - - if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) { - switch (hw->mac.type) { - case ixgbe_mac_X540: - ptp_clock_event(adapter->ptp_clock, &event); - break; - default: - break; - } - } -} - -/** - * ixgbe_ptp_enable_sdp - * @hw: the hardware private structure - * @shift: the clock shift for calculating nanoseconds - * - * this function enables the clock out feature on the sdp0 for the - * X540 device. 
It will create a 1second periodic output that can be - * used as the PPS (via an interrupt). - * - * It calculates when the systime will be on an exact second, and then - * aligns the start of the PPS signal to that value. The shift is - * necessary because it can change based on the link speed. - */ -static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift) -{ - u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh; - u64 clock_edge = 0; - u32 rem; - switch (hw->mac.type) { case ixgbe_mac_X540: - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - - /* - * enable the SDP0 pin as output, and connected to the native - * function for Timesync (ClockOut) - */ - esdp |= (IXGBE_ESDP_SDP0_DIR | - IXGBE_ESDP_SDP0_NATIVE); - - /* - * enable the Clock Out feature on SDP0, and allow interrupts - * to occur when the pin changes - */ - tsauxc = (IXGBE_TSAUXC_EN_CLK | - IXGBE_TSAUXC_SYNCLK | - IXGBE_TSAUXC_SDP0_INT); - - /* clock period (or pulse length) */ - clktiml = (u32)(NSECS_PER_SEC << shift); - clktimh = (u32)((NSECS_PER_SEC << shift) >> 32); - - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); - clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; - - /* - * account for the fact that we can't do u64 division - * with remainder, by converting the clock values into - * nanoseconds first - */ - clock_edge >>= shift; - div_u64_rem(clock_edge, NSECS_PER_SEC, &rem); - clock_edge += (NSECS_PER_SEC - rem); - clock_edge <<= shift; - - /* specify the initial clock start time */ - trgttiml = (u32)clock_edge; - trgttimh = (u32)(clock_edge >> 32); - - IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); - IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); - IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); - - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); - - IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_TIMESYNC); + ptp_clock_event(adapter->ptp_clock, &event); break; default: break; } } -/** - * ixgbe_ptp_disable_sdp - * @hw: the private hardware structure - * - * this function disables the auxiliary SDP clock out feature - */ -static void ixgbe_ptp_disable_sdp(struct ixgbe_hw *hw) -{ - IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_TIMESYNC); - IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0); -} /** * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow @@ -822,9 +810,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) if (adapter->cycle_speed == cycle_speed && timinca) return; - /* disable the SDP clock out */ - ixgbe_ptp_disable_sdp(hw); - /** * Scale the NIC cycle counter by a large factor so that * relatively small corrections to the frequency can be added @@ -877,10 +862,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); IXGBE_WRITE_FLUSH(hw); - /* now that the shift has been calculated and the systime - * registers reset, (re-)enable the Clock out feature*/ - ixgbe_ptp_enable_sdp(hw, shift); - /* store the new cycle speed */ adapter->cycle_speed = cycle_speed; @@ -901,6 +882,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) ktime_to_ns(ktime_get_real())); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + /* + * Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + ixgbe_ptp_setup_sdp(adapter); } /** @@ -960,7 +947,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) /* (Re)start the overflow check */ adapter->flags2 |= 
IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps); + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; e_dev_err("ptp_clock_register failed\n"); @@ -978,10 +966,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) */ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) { - ixgbe_ptp_disable_sdp(&adapter->hw); - /* stop the overflow check task */ - adapter->flags2 &= ~IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; + adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED | + IXGBE_FLAG2_PTP_PPS_ENABLED); + + ixgbe_ptp_setup_sdp(adapter); if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 4fea8716ab6..dce48bf64d9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } @@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) VLAN_PRIO_SHIFT)), vf); ixgbe_set_vmolr(hw, vf, false); } else { + ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_set_vmvir(adapter, 0, vf); ixgbe_set_vmolr(hw, vf, true); } @@ -810,9 +815,9 @@ out: return err; } -static int ixgbe_link_mbps(int internal_link_speed) +static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { - switch (internal_link_speed) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: return 100; case IXGBE_LINK_SPEED_1GB_FULL: @@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed) } } -static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, - int link_speed) +static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) { - int rf_dec, rf_int; - u32 bcnrc_val; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = 0; + u16 queue, queues_per_pool; + u16 tx_rate = adapter->vfinfo[vf].tx_rate; + + if (tx_rate) { + /* start with base link speed value */ + bcnrc_val = adapter->vf_rate_link_speed; - if (tx_rate != 0) { /* Calculate the rate factor values to set */ - rf_int = link_speed / tx_rate; - rf_dec = (link_speed - (rf_int * tx_rate)); - rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; - - bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) & - IXGBE_RTTBCNRC_RF_INT_MASK); - bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); - } else { - bcnrc_val = 0; + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= tx_rate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; } - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */ /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM * register. 
Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported @@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, break; } - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + } } void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) { - int actual_link_speed, i; - bool reset_rate = false; + int i; /* VF Tx rate limit was not set */ - if (adapter->vf_rate_link_speed == 0) + if (!adapter->vf_rate_link_speed) return; - actual_link_speed = ixgbe_link_mbps(adapter->link_speed); - if (actual_link_speed != adapter->vf_rate_link_speed) { - reset_rate = true; + if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, - "Link speed has been changed. VF Transmit rate " - "is disabled\n"); + "Link speed has been changed. VF Transmit rate is disabled\n"); } for (i = 0; i < adapter->num_vfs; i++) { - if (reset_rate) + if (!adapter->vf_rate_link_speed) adapter->vfinfo[i].tx_rate = 0; - ixgbe_set_vf_rate_limit(&adapter->hw, i, - adapter->vfinfo[i].tx_rate, - actual_link_speed); + ixgbe_set_vf_rate_limit(adapter, i); } } int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int actual_link_speed; + int link_speed; + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; - actual_link_speed = ixgbe_link_mbps(adapter->link_speed); - if ((vf >= adapter->num_vfs) || (!adapter->link_up) || - (tx_rate > actual_link_speed) || (actual_link_speed != 10000) || - ((tx_rate != 0) && (tx_rate <= 10))) - /* rate limit cannot be set to 10Mb or less in 10Gb adapters */ + /* verify link is up */ + if (!adapter->link_up) return -EINVAL; - adapter->vf_rate_link_speed = actual_link_speed; - adapter->vfinfo[vf].tx_rate = (u16)tx_rate; - ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + /* verify we are linked at 10Gbps */ + link_speed = ixgbe_link_mbps(adapter); + if (link_speed != 10000) + return -EINVAL; + + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) + return -EINVAL; + + /* store values */ + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].tx_rate = tx_rate; + + /* update hardware configuration */ + ixgbe_set_vf_rate_limit(adapter, vf); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 400f86a3117..0722f336809 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -65,6 +65,7 @@ #define IXGBE_DEV_ID_82599_LS 0x154F #define IXGBE_DEV_ID_X540T 0x1528 #define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +#define IXGBE_DEV_ID_X540T1 0x1560 /* VF Device IDs */ #define IXGBE_DEV_ID_82599_VF 0x10ED |