Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe.h')
 drivers/net/ethernet/intel/ixgbe/ixgbe.h | 225
 1 file changed, 128 insertions(+), 97 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e6aeb64105a..80e26ff30eb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -72,12 +72,6 @@
/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
-#define IXGBE_RXBUFFER_2K 2048
-#define IXGBE_RXBUFFER_3K 3072
-#define IXGBE_RXBUFFER_4K 4096
-#define IXGBE_RXBUFFER_7K 7168
-#define IXGBE_RXBUFFER_8K 8192
-#define IXGBE_RXBUFFER_15K 15360
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
@@ -102,14 +96,11 @@
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
-#define IXGBE_MAX_RSC_INT_RATE 162760
-
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
@@ -156,19 +147,19 @@ struct vf_macvlans {
struct ixgbe_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- dma_addr_t dma;
- u32 length;
- u32 tx_flags;
struct sk_buff *skb;
- u32 bytecount;
- u16 gso_segs;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ __be16 protocol;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
};
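
The open-coded dma/length pair becomes DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN(), so the unmap bookkeeping occupies no space on configurations that do not need explicit DMA unmap state. A minimal sketch of the matching accessors on the completion side, assuming the standard dma-mapping API; the helper name is hypothetical:

	static void example_unmap_tx_buffer(struct device *dev,
					    struct ixgbe_tx_buffer *tx_buffer)
	{
		/* dma_unmap_addr()/dma_unmap_len() pair with the
		 * DEFINE_DMA_UNMAP_*() fields declared above */
		if (dma_unmap_len(tx_buffer, len)) {
			dma_unmap_single(dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
			dma_unmap_len_set(tx_buffer, len, 0);
		}
	}
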
struct ixgbe_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -180,7 +171,6 @@ struct ixgbe_queue_stats {
struct ixgbe_tx_queue_stats {
u64 restart_queue;
u64 tx_busy;
- u64 completed;
u64 tx_done_old;
};
@@ -190,22 +180,18 @@ struct ixgbe_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
+ u64 csum_err;
};
-enum ixbge_ring_state_t {
+enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
- __IXGBE_RX_PS_ENABLED,
__IXGBE_RX_RSC_ENABLED,
+ __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+ __IXGBE_RX_FCOE_BUFSZ,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
#define check_for_tx_hang(ring) \
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
@@ -220,18 +206,20 @@ enum ixbge_ring_state_t {
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
+ struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct device *dev; /* device for DMA mapping */
void *desc; /* descriptor ring memory */
- struct device *dev; /* device for DMA mapping */
- struct net_device *netdev; /* netdev ring belongs to */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
unsigned long state;
u8 __iomem *tail;
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
- u16 rx_buf_len;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -239,12 +227,17 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
- u8 atr_sample_rate;
- u8 atr_count;
-
u16 next_to_use;
u16 next_to_clean;
+ union {
+ u16 next_to_alloc;
+ struct {
+ u8 atr_sample_rate;
+ u8 atr_count;
+ };
+ };
+
u8 dcb_tc;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
@@ -252,11 +245,6 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
- int numa_node;
- unsigned int size; /* length in bytes */
- dma_addr_t dma; /* phys. address of descriptor ring */
- struct rcu_head rcu;
- struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
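
The new union overlays Rx-only and Tx-only per-ring state: an Rx ring tracks where buffer allocation should resume, while a Tx ring reuses the same bytes for its ATR sampling counters, and a ring is never both at once. A sketch under that assumption (both helper names are illustrative):

	static inline void example_rx_note_alloc(struct ixgbe_ring *rx_ring)
	{
		/* Rx side of the union: remember where allocation resumes */
		rx_ring->next_to_alloc = rx_ring->next_to_use;
	}

	static inline bool example_tx_atr_due(struct ixgbe_ring *tx_ring)
	{
		/* Tx side of the union: count packets toward an ATR sample */
		if (!tx_ring->atr_sample_rate)
			return false;
		if (++tx_ring->atr_count < tx_ring->atr_sample_rate)
			return false;
		tx_ring->atr_count = 0;
		return true;
	}
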
enum ixgbe_ring_f_enum {
@@ -287,6 +275,22 @@ struct ixgbe_ring_feature {
int mask;
} ____cacheline_internodealigned_in_smp;
+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+ return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+}
+#else
+#define ixgbe_rx_pg_order(_ring) 0
+#endif
+#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
+
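
For the common 4 KiB page case the macros work out as follows (a worked example, not driver code):

	/* PAGE_SIZE == 4096:
	 *   order 0: page = 4096, buffer = 4096 / 2 = 2048  -> under the
	 *            2200-byte FCoE minimum
	 *   order 1: page = 8192, buffer = 8192 / 2 = 4096  -> satisfies FCoE
	 * Splitting each page in half gives the driver two buffers it can
	 * hand out from a single page allocation.
	 */
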
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -296,6 +300,10 @@ struct ixgbe_ring_container {
u8 itr; /* current ITR setting for ring */
};
+/* iterator for handling rings in ring container */
+#define ixgbe_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
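
A usage sketch of the iterator, walking every ring linked into one container; the wrapper function is hypothetical:

	static void example_walk_tx_rings(struct ixgbe_q_vector *q_vector)
	{
		struct ixgbe_ring *ring;

		/* visit each ring hung off this vector's Tx container */
		ixgbe_for_each_ring(ring, q_vector->tx)
			pr_debug("queue %u: %u descriptors unused\n",
				 ring->queue_index, ixgbe_desc_unused(ring));
	}
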
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
@@ -315,8 +323,13 @@ struct ixgbe_q_vector {
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
- cpumask_var_t affinity_mask;
+ cpumask_t affinity_mask;
+ int numa_node;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
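
The trailing ring[0] is a zero-length array: the rings belonging to a vector can be allocated in one block directly behind it, sized on the vector's NUMA node. A minimal allocation sketch, assuming ring_count rings per vector (the helper is hypothetical):

	static struct ixgbe_q_vector *example_alloc_q_vector(int node,
							     int ring_count)
	{
		/* rings live in the same allocation, right behind the vector */
		size_t size = sizeof(struct ixgbe_q_vector) +
			      ring_count * sizeof(struct ixgbe_ring);

		return kzalloc_node(size, GFP_KERNEL, node);
	}
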
/*
@@ -329,6 +342,13 @@ struct ixgbe_q_vector {
#define IXGBE_10K_ITR 400
#define IXGBE_8K_ITR 500
+/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
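
The helper tests write-back bits without byteswapping every descriptor: the CPU-order mask is converted once with cpu_to_le32() and compared against the little-endian field in place. A usage sketch with IXGBE_RXD_STAT_DD from ixgbe_type.h (the wrapper is illustrative):

	static bool example_desc_done(union ixgbe_adv_rx_desc *rx_desc)
	{
		/* DD (descriptor done) is set by hardware on write-back */
		return !!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD);
	}
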
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
u16 ntc = ring->next_to_clean;
@@ -337,11 +357,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
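
A worked example of the arithmetic above (count = 512):

	/*   ntc = 500, ntu = 10  ->   0 + 500 -  10 - 1 = 489 unused
	 *   ntc = 10,  ntu = 500 -> 512 +  10 - 500 - 1 =  21 unused
	 * The -1 keeps one slot permanently empty so a completely full
	 * ring can be told apart from a completely empty one.
	 */
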
-#define IXGBE_RX_DESC_ADV(R, i) \
+#define IXGBE_RX_DESC(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i) \
+#define IXGBE_TX_DESC(R, i) \
(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+#define IXGBE_TX_CTXTDESC(R, i) \
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
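
The _ADV suffix is dropped from the accessor names; each macro still just indexes the raw descriptor array. For example (a hypothetical wrapper):

	static union ixgbe_adv_rx_desc *example_next_rx_desc(struct ixgbe_ring *ring)
	{
		/* desc is one contiguous DMA array; the macro indexes into it */
		return IXGBE_RX_DESC(ring, ring->next_to_clean);
	}
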
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
@@ -361,18 +381,25 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+/* default to trying for four seconds */
+#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+
/* board specific private data structure */
struct ixgbe_adapter {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
unsigned long state;
/* Some features need tri-state capability,
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
@@ -409,60 +436,52 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u16 bd_number;
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-
- /* DCB parameters */
- struct ieee_pfc *ixgbe_ieee_pfc;
- struct ieee_ets *ixgbe_ieee_ets;
- struct ixgbe_dcb_config dcb_cfg;
- struct ixgbe_dcb_config temp_dcb_cfg;
- u8 dcb_set_bitmap;
- u8 dcbx_cap;
- enum ixgbe_fc_mode last_lfc_mode;
-
- /* Interrupt Throttle Rate */
- u32 rx_itr_setting;
- u32 tx_itr_setting;
- u16 eitr_low;
- u16 eitr_high;
-
- /* Work limits */
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
u16 tx_work_limit;
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
- int num_tx_queues;
- u32 tx_timeout_count;
- bool detect_tx_hung;
u64 restart_queue;
u64 lsc_int;
+ u32 tx_timeout_count;
/* RX */
- struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
- int num_rx_queues;
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
int num_rx_pools; /* == num_rx_queues in 82598 */
int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
+ u64 rsc_total_count;
+ u64 rsc_total_flush;
u64 non_eop_descs;
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
- struct msix_entry *msix_entries;
-
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
-/* default to trying for four seconds */
-#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
+ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
+ /* DCB parameters */
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
+ struct ixgbe_dcb_config dcb_cfg;
+ struct ixgbe_dcb_config temp_dcb_cfg;
+ u8 dcb_set_bitmap;
+ u8 dcbx_cap;
+ enum ixgbe_fc_mode last_lfc_mode;
+
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
u32 test_icr;
struct ixgbe_ring test_tx_ring;
@@ -473,10 +492,6 @@ struct ixgbe_adapter {
u16 msg_enable;
struct ixgbe_hw_stats stats;
- /* Interrupt Throttle Rate */
- u32 rx_eitr_param;
- u32 tx_eitr_param;
-
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
@@ -485,25 +500,30 @@ struct ixgbe_adapter {
bool link_up;
unsigned long link_check_timeout;
- struct work_struct service_task;
struct timer_list service_timer;
+ struct work_struct service_task;
+
+ struct hlist_head fdir_filter_list;
+ unsigned long fdir_overflow; /* number of times ATR was backed off */
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
u32 fdir_pballoc;
u32 atr_sample_rate;
- unsigned long fdir_overflow; /* number of times ATR was backed off */
spinlock_t fdir_perfect_lock;
+
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
- u64 rsc_total_count;
- u64 rsc_total_flush;
u32 wol;
+
+ u16 bd_number;
+
u16 eeprom_verh;
u16 eeprom_verl;
u16 eeprom_cap;
- int node;
- u32 led_reg;
u32 interrupt_event;
+ u32 led_reg;
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -513,9 +533,6 @@ struct ixgbe_adapter {
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
- struct hlist_head fdir_filter_list;
- union ixgbe_atr_input fdir_mask;
- int fdir_filter_count;
u32 timer_event_accumulator;
u32 vferr_refcount;
};
@@ -535,12 +552,16 @@ enum ixbge_state_t {
__IXGBE_IN_SFP_INIT,
};
-struct ixgbe_rsc_cb {
+struct ixgbe_cb {
+ union { /* Union defining head/tail partner */
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ };
dma_addr_t dma;
- u16 skb_cnt;
- bool delay_unmap;
+ u16 append_cnt;
+ bool page_released;
};
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
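
The renamed control block generalizes the old RSC-only version: the head/tail union lets partner skbs find each other while a frame is assembled across several descriptors, and the whole struct must fit in the 48-byte skb->cb. A sketch of how an Rx path might link buffers, with an illustrative helper name:

	static void example_chain_rx_skbs(struct sk_buff *head,
					  struct sk_buff *skb)
	{
		/* link a continuation buffer back to the first skb of the
		 * frame and count how many fragments were appended */
		IXGBE_CB(skb)->head = head;
		IXGBE_CB(head)->append_cnt++;
	}
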
enum ixgbe_boards {
board_82598,
@@ -560,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
+#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
+#endif /* IXGBE_FCOE */
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -585,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ixgbe_poll(struct napi_struct *napi, int budget);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -604,18 +628,20 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+#endif
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len);
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
+ struct ixgbe_tx_buffer *first,
+ u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb,
- u32 staterr);
+ struct sk_buff *skb);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
@@ -632,4 +658,9 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
#endif /* IXGBE_FCOE */
+static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
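
txring_txq() maps a Tx ring to its backing netdev queue, so ring code can use the stock netif_tx_* flow-control helpers directly. For instance (a hedged sketch):

	static void example_stop_ring(struct ixgbe_ring *tx_ring)
	{
		/* pause the stack's queue that feeds this Tx ring */
		netif_tx_stop_queue(txring_txq(tx_ring));
	}
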
#endif /* _IXGBE_H_ */