Diffstat (limited to 'drivers/net/bnx2x')
-rw-r--r--  drivers/net/bnx2x/Makefile          |    2
-rw-r--r--  drivers/net/bnx2x/bnx2x.h           | 1105
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c       | 1497
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h       |  675
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.c       |  880
-rw-r--r--  drivers/net/bnx2x/bnx2x_dcb.h       |   30
-rw-r--r--  drivers/net/bnx2x/bnx2x_dump.h      | 1721
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c   |  698
-rw-r--r--  drivers/net/bnx2x/bnx2x_fw_defs.h   |  519
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h       | 5399
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h      |  409
-rw-r--r--  drivers/net/bnx2x/bnx2x_init_ops.h  |  194
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c      | 6355
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h      |  183
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c      | 6231
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h       |  957
-rw-r--r--  drivers/net/bnx2x/bnx2x_sp.c        | 5692
-rw-r--r--  drivers/net/bnx2x/bnx2x_sp.h        | 1297
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c     |  908
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h     |  228
20 files changed, 26026 insertions, 8954 deletions
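Most of this commit wires the driver for multiple traffic classes per function: each fastpath now carries an array of per-CoS TX rings (struct bnx2x_fp_txdata), and a netdev TX queue index encodes both a fastpath index and a class-of-service index. As a rough illustration — editor's sketch, not part of the patch — the queue-index decoding added to bnx2x.h below behaves like this, assuming the E1x value MAX_TXQS_PER_COS == FP_SB_MAX_E1x == 16:

#include <stdio.h>

#define MAX_TXQS_PER_COS	16	/* FP_SB_MAX_E1x on E1x parts */
#define TXQ_TO_FP(txq_index)	((txq_index) % MAX_TXQS_PER_COS)
#define TXQ_TO_COS(txq_index)	((txq_index) / MAX_TXQS_PER_COS)

int main(void)
{
	/* queues 0..15 are eth cos0, 16..31 cos1, 32..47 cos2 */
	int txq_index = 35;

	printf("txq %d -> fp %d, cos %d\n",
	       txq_index, TXQ_TO_FP(txq_index), TXQ_TO_COS(txq_index));
	return 0;	/* prints: txq 35 -> fp 3, cos 2 */
}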
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index bb83a296127..48fbdd48f88 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
 obj-$(CONFIG_BNX2X) += bnx2x.o
 
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 668a578c49e..c423504a755 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -14,6 +14,7 @@
 #ifndef BNX2X_H
 #define BNX2X_H
 #include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
 #include <linux/types.h>
 
 /* compilation time flags */
@@ -22,14 +23,10 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION	"1.62.12-0"
-#define DRV_MODULE_RELDATE	"2011/03/20"
+#define DRV_MODULE_VERSION	"1.70.00-0"
+#define DRV_MODULE_RELDATE	"2011/06/13"
 #define BNX2X_BC_VER		0x040200
 
-#define BNX2X_MULTI_QUEUE
-
-#define BNX2X_NEW_NAPI
-
 #if defined(CONFIG_DCB)
 #define BCM_DCBNL
 #endif
@@ -47,11 +44,12 @@
 #endif
 
 #include <linux/mdio.h>
-#include <linux/pci.h>
+
 #include "bnx2x_reg.h"
 #include "bnx2x_fw_defs.h"
 #include "bnx2x_hsi.h"
 #include "bnx2x_link.h"
+#include "bnx2x_sp.h"
 #include "bnx2x_dcb.h"
 #include "bnx2x_stats.h"
 
@@ -80,6 +78,12 @@ do { \
 		       ##__args); \
 } while (0)
 
+#define DP_CONT(__mask, __fmt, __args...) \
+do { \
+	if (bp->msg_enable & (__mask)) \
+		pr_cont(__fmt, ##__args); \
+} while (0)
+
 /* errors debug print */
 #define BNX2X_DBG_ERR(__fmt, __args...) \
 do { \
@@ -111,9 +115,12 @@ do { \
 		dev_info(&bp->pdev->dev, __fmt, ##__args); \
 } while (0)
 
-void bnx2x_panic_dump(struct bnx2x *bp);
+#define BNX2X_MAC_FMT		"%pM"
+#define BNX2X_MAC_PRN_LIST(mac)	(mac)
+
 
 #ifdef BNX2X_STOP_ON_ERROR
+void bnx2x_int_disable(struct bnx2x *bp);
 #define bnx2x_panic() do { \
 		bp->panic = 1; \
 		BNX2X_ERR("driver assert\n"); \
@@ -233,22 +240,22 @@ void bnx2x_panic_dump(struct bnx2x *bp);
  *
  */
 /* iSCSI L2 */
-#define BNX2X_ISCSI_ETH_CL_ID		17
-#define BNX2X_ISCSI_ETH_CID		17
+#define BNX2X_ISCSI_ETH_CL_ID_IDX	1
+#define BNX2X_ISCSI_ETH_CID		49
 
 /* FCoE L2 */
-#define BNX2X_FCOE_ETH_CL_ID		18
-#define BNX2X_FCOE_ETH_CID		18
+#define BNX2X_FCOE_ETH_CL_ID_IDX	2
+#define BNX2X_FCOE_ETH_CID		50
 
 /** Additional rings budgeting */
 #ifdef BCM_CNIC
-#define CNIC_CONTEXT_USE		1
-#define FCOE_CONTEXT_USE		1
+#define CNIC_PRESENT			1
+#define FCOE_PRESENT			1
 #else
-#define CNIC_CONTEXT_USE		0
-#define FCOE_CONTEXT_USE		0
+#define CNIC_PRESENT			0
+#define FCOE_PRESENT			0
 #endif /* BCM_CNIC */
-#define NONE_ETH_CONTEXT_USE	(FCOE_CONTEXT_USE)
+#define NON_ETH_CONTEXT_USE	(FCOE_PRESENT)
 
 #define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
 	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -256,8 +263,35 @@ void bnx2x_panic_dump(struct bnx2x *bp);
 #define SM_RX_ID			0
 #define SM_TX_ID			1
 
-/* fast path */
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX		1
+#define FIRST_TX_COS_INDEX		0
+
+/* defines for decoding the fastpath index and the cos index out of the
+ * transmission queue index
+ */
+#define MAX_TXQS_PER_COS	FP_SB_MAX_E1x
+
+#define TXQ_TO_FP(txq_index)	((txq_index) % MAX_TXQS_PER_COS)
+#define TXQ_TO_COS(txq_index)	((txq_index) / MAX_TXQS_PER_COS)
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid)		((cid) % MAX_TXQS_PER_COS)
+#define CID_COS_TO_TX_ONLY_CID(cid, cos)	(cid + cos * MAX_TXQS_PER_COS)
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos)	((fp)->index + cos * MAX_TXQS_PER_COS)
+
+/*
+ * 0..15 eth cos0
+ * 16..31 eth cos1 if applicable
+ * 32..47 eth cos2 if applicable
+ * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+ */
+#define MAX_ETH_TXQ_IDX(bp)	(MAX_TXQS_PER_COS * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp))
+/* fast path */
 struct sw_rx_bd {
 	struct sk_buff	*skb;
 	DEFINE_DMA_UNMAP_ADDR(mapping);
@@ -283,44 +317,73 @@ union db_prod {
 
 /* MC hsi */
-#define BCM_PAGE_SHIFT			12
-#define BCM_PAGE_SIZE			(1 << BCM_PAGE_SHIFT)
-#define BCM_PAGE_MASK			(~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_SHIFT		12
+#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK		(~(BCM_PAGE_SIZE - 1))
 #define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
 
-#define PAGES_PER_SGE_SHIFT		0
-#define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE			PAGE_SIZE
-#define SGE_PAGE_SHIFT			PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr)		PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define PAGES_PER_SGE_SHIFT	0
+#define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE		PAGE_SIZE
+#define SGE_PAGE_SHIFT		PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
 
 /* SGE ring related macros */
-#define NUM_RX_SGE_PAGES		2
+#define NUM_RX_SGE_PAGES	2
 #define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
-#define MAX_RX_SGE_CNT			(RX_SGE_CNT - 2)
+#define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2)
 /* RX_SGE_CNT is promised to be a power of 2 */
-#define RX_SGE_MASK			(RX_SGE_CNT - 1)
-#define NUM_RX_SGE			(RX_SGE_CNT * NUM_RX_SGE_PAGES)
-#define MAX_RX_SGE			(NUM_RX_SGE - 1)
+#define RX_SGE_MASK		(RX_SGE_CNT - 1)
+#define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE		(NUM_RX_SGE - 1)
 #define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
 				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_SGE(x)			((x) & MAX_RX_SGE)
+#define RX_SGE(x)		((x) & MAX_RX_SGE)
+
+/* Manipulate a bit vector defined as an array of u64 */
 
-/* SGE producer mask related macros */
 /* Number of bits in one sge_mask array element */
-#define RX_SGE_MASK_ELEM_SZ		64
-#define RX_SGE_MASK_ELEM_SHIFT		6
-#define RX_SGE_MASK_ELEM_MASK		((u64)RX_SGE_MASK_ELEM_SZ - 1)
+#define BIT_VEC64_ELEM_SZ		64
+#define BIT_VEC64_ELEM_SHIFT		6
+#define BIT_VEC64_ELEM_MASK		((u64)BIT_VEC64_ELEM_SZ - 1)
+
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+	do { \
+		el = ((el) | ((u64)0x1 << (bit))); \
+	} while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+	do { \
+		el = ((el) & (~((u64)0x1 << (bit)))); \
+	} while (0)
+
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			    (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+	__BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			      (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+	  ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
 
 /* Creates a bitmask of all ones in less significant bits.
    idx - index of the most significant bit in the created mask */
-#define RX_SGE_ONES_MASK(idx) \
-		(((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
-#define RX_SGE_MASK_ELEM_ONE_MASK	((u64)(~0))
+#define BIT_VEC64_ONES_MASK(idx) \
+		(((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK	((u64)(~0))
+
+/*******************************************************/
+
+
 
 /* Number of u64 elements in SGE mask array */
 #define RX_SGE_MASK_LEN		((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
-					 RX_SGE_MASK_ELEM_SZ)
+					 BIT_VEC64_ELEM_SZ)
 #define RX_SGE_MASK_LEN_MASK	(RX_SGE_MASK_LEN - 1)
 #define NEXT_SGE_MASK_ELEM(el)	(((el) + 1) & RX_SGE_MASK_LEN_MASK)
 
@@ -331,7 +394,53 @@ union host_hc_status_block {
 	struct host_hc_status_block_e2  *e2_sb;
 };
 
+struct bnx2x_agg_info {
+	/*
+	 * First aggregation buffer is an skb, the following - are pages.
+	 * We will preallocate the skbs for each aggregation when
+	 * we open the interface and will replace the BD at the consumer
+	 * with this one when we receive the TPA_START CQE in order to
+	 * keep the Rx BD ring consistent.
+	 */
+	struct sw_rx_bd		first_buf;
+	u8			tpa_state;
+#define BNX2X_TPA_START			1
+#define BNX2X_TPA_STOP			2
+#define BNX2X_TPA_ERROR			3
+	u8			placement_offset;
+	u16			parsing_flags;
+	u16			vlan_tag;
+	u16			len_on_bd;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+	struct sw_tx_bd		*tx_buf_ring;
+
+	union eth_tx_bd_types	*tx_desc_ring;
+	dma_addr_t		tx_desc_mapping;
+
+	u32			cid;
+
+	union db_prod		tx_db;
+
+	u16			tx_pkt_prod;
+	u16			tx_pkt_cons;
+	u16			tx_bd_prod;
+	u16			tx_bd_cons;
+
+	unsigned long		tx_pkt;
+
+	__le16			*tx_cons_sb;
+
+	int			txq_index;
+};
+
 struct bnx2x_fastpath {
+	struct bnx2x		*bp; /* parent */
 
 #define BNX2X_NAPI_WEIGHT	128
 	struct napi_struct	napi;
@@ -346,10 +455,8 @@ struct bnx2x_fastpath {
 
 	dma_addr_t		status_blk_mapping;
 
-	struct sw_tx_bd		*tx_buf_ring;
-
-	union eth_tx_bd_types	*tx_desc_ring;
-	dma_addr_t		tx_desc_mapping;
+	u8			max_cos; /* actual number of active tx coses */
+	struct bnx2x_fp_txdata	txdata[BNX2X_MULTI_TX_COS];
 
 	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
 	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
@@ -366,32 +473,15 @@ struct bnx2x_fastpath {
 
 	u64			sge_mask[RX_SGE_MASK_LEN];
 
-	int			state;
-#define BNX2X_FP_STATE_CLOSED		0
-#define BNX2X_FP_STATE_IRQ		0x80000
-#define BNX2X_FP_STATE_OPENING		0x90000
-#define BNX2X_FP_STATE_OPEN		0xa0000
-#define BNX2X_FP_STATE_HALTING		0xb0000
-#define BNX2X_FP_STATE_HALTED		0xc0000
-#define BNX2X_FP_STATE_TERMINATING	0xd0000
-#define BNX2X_FP_STATE_TERMINATED	0xe0000
+	u32			cid;
+
+	__le16			fp_hc_idx;
 
 	u8			index;		/* number in fp array */
 	u8			cl_id;		/* eth client id */
 	u8			cl_qzone_id;
 	u8			fw_sb_id;	/* status block number in FW */
 	u8			igu_sb_id;	/* status block number in HW */
-	u32			cid;
-
-	union db_prod		tx_db;
-
-	u16			tx_pkt_prod;
-	u16			tx_pkt_cons;
-	u16			tx_bd_prod;
-	u16			tx_bd_cons;
-	__le16			*tx_cons_sb;
-
-	__le16			fp_hc_idx;
 
 	u16			rx_bd_prod;
 	u16			rx_bd_cons;
@@ -401,24 +491,19 @@ struct bnx2x_fastpath {
 	/* The last maximal completed SGE */
 	u16			last_max_sge;
 	__le16			*rx_cons_sb;
-
-	unsigned long		tx_pkt,
-				rx_pkt,
+	unsigned long		rx_pkt,
 				rx_calls;
 
 	/* TPA related */
-	struct sw_rx_bd		tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
-	u8			tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
-#define BNX2X_TPA_START			1
-#define BNX2X_TPA_STOP			2
+	struct bnx2x_agg_info	tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
 	u8			disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
 	u64			tpa_queue_used;
 #endif
 
-	struct tstorm_per_client_stats old_tclient;
-	struct ustorm_per_client_stats old_uclient;
-	struct xstorm_per_client_stats old_xclient;
+	struct tstorm_per_queue_stats old_tclient;
+	struct ustorm_per_queue_stats old_uclient;
+	struct xstorm_per_queue_stats old_xclient;
 	struct bnx2x_eth_q_stats eth_q_stats;
 
 	/* The size is calculated using the following:
@@ -427,7 +512,13 @@ struct bnx2x_fastpath {
 	     4 (for the digits and to make it DWORD aligned) */
 #define FP_NAME_SIZE	(sizeof(((struct net_device *)0)->name) + 8)
 	char			name[FP_NAME_SIZE];
-	struct bnx2x		*bp; /* parent */
+
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj	mac_obj;
+
+	/* Queue State object */
+	struct bnx2x_queue_sp_obj	q_obj;
+
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
@@ -435,11 +526,17 @@ struct bnx2x_fastpath {
 /* Use 2500 as a mini-jumbo MTU for FCoE */
 #define BNX2X_FCOE_MINI_JUMBO_MTU	2500
 
-#ifdef BCM_CNIC
-/* FCoE L2 `fastpath' is right after the eth entries */
+/* FCoE L2 `fastpath' entry is right after the eth entries */
 #define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
 #define bnx2x_fcoe_fp(bp)		(&bp->fp[FCOE_IDX])
 #define bnx2x_fcoe(bp, var)		(bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_tx(bp, var)		(bnx2x_fcoe_fp(bp)-> \
+						txdata[FIRST_TX_COS_INDEX].var)
+
+
+#define IS_ETH_FP(fp)			(fp->index < \
+					 BNX2X_NUM_ETH_QUEUES(fp->bp))
+#ifdef BCM_CNIC
 #define IS_FCOE_FP(fp)			(fp->index == FCOE_IDX)
 #define IS_FCOE_IDX(idx)		((idx) == FCOE_IDX)
 #else
@@ -449,77 +546,68 @@ struct bnx2x_fastpath {
 
 /* MC hsi */
-#define MAX_FETCH_BD			13	/* HW max BDs per packet */
-#define RX_COPY_THRESH			92
+#define MAX_FETCH_BD		13	/* HW max BDs per packet */
+#define RX_COPY_THRESH		92
 
-#define NUM_TX_RINGS			16
+#define NUM_TX_RINGS		16
 #define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
-#define MAX_TX_DESC_CNT			(TX_DESC_CNT - 1)
-#define NUM_TX_BD			(TX_DESC_CNT * NUM_TX_RINGS)
-#define MAX_TX_BD			(NUM_TX_BD - 1)
-#define MAX_TX_AVAIL			(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
-#define INIT_JUMBO_TX_RING_SIZE		MAX_TX_AVAIL
-#define INIT_TX_RING_SIZE		MAX_TX_AVAIL
+#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)
+#define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD		(NUM_TX_BD - 1)
+#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 #define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
 				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define TX_BD(x)			((x) & MAX_TX_BD)
-#define TX_BD_POFF(x)			((x) & MAX_TX_DESC_CNT)
+#define TX_BD(x)		((x) & MAX_TX_BD)
+#define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
 
 /* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
-#define NUM_RX_RINGS			8
+#define NUM_RX_RINGS		8
 #define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
-#define MAX_RX_DESC_CNT			(RX_DESC_CNT - 2)
-#define RX_DESC_MASK			(RX_DESC_CNT - 1)
-#define NUM_RX_BD			(RX_DESC_CNT * NUM_RX_RINGS)
-#define MAX_RX_BD			(NUM_RX_BD - 1)
-#define MAX_RX_AVAIL			(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_SIZE_TPA			72
-#define MIN_RX_SIZE_NONTPA		10
-#define INIT_JUMBO_RX_RING_SIZE		MAX_RX_AVAIL
-#define INIT_RX_RING_SIZE		MAX_RX_AVAIL
+#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 2)
+#define RX_DESC_MASK		(RX_DESC_CNT - 1)
+#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD		(NUM_RX_BD - 1)
+#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define MIN_RX_AVAIL		128
+
+#define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
+					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+					ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW	ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA		(max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA	(max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+					MIN_RX_AVAIL))
+
 #define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
 				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
-#define RX_BD(x)			((x) & MAX_RX_BD)
+#define RX_BD(x)		((x) & MAX_RX_BD)
 
-/* As long as CQE is 4 times bigger than BD entry we have to allocate
-   4 times more pages for CQ ring in order to keep it balanced with
-   BD ring */
-#define NUM_RCQ_RINGS		(NUM_RX_RINGS * 4)
+/*
+ * As long as CQE is X times bigger than BD entry we have to allocate X times
+ * more pages for CQ ring in order to keep it balanced with BD ring
+ */
+#define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
 #define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
-#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)
-#define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
-#define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
-#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)
+#define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 #define NEXT_RCQ_IDX(x)	((((x) & MAX_RCQ_DESC_CNT) == \
 				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
-#define RCQ_BD(x)		((x) & MAX_RCQ_BD)
+#define RCQ_BD(x)		((x) & MAX_RCQ_BD)
 
 /* This is needed for determining of last_max */
-#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
+#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b)		(s32)((s32)(a) - (s32)(b))
 
-#define __SGE_MASK_SET_BIT(el, bit) \
-	do { \
-		el = ((el) | ((u64)0x1 << (bit))); \
-	} while (0)
-
-#define __SGE_MASK_CLEAR_BIT(el, bit) \
-	do { \
-		el = ((el) & (~((u64)0x1 << (bit)))); \
-	} while (0)
-
-#define SGE_MASK_SET_BIT(fp, idx) \
-	__SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
-			   ((idx) & RX_SGE_MASK_ELEM_MASK))
-
-#define SGE_MASK_CLEAR_BIT(fp, idx) \
-	__SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
-			     ((idx) & RX_SGE_MASK_ELEM_MASK))
+#define BNX2X_SWCID_SHIFT	17
+#define BNX2X_SWCID_MASK	((0x1 << BNX2X_SWCID_SHIFT) - 1)
 
 /* used on a CID received from the HW */
-#define SW_CID(x)			(le32_to_cpu(x) & \
-					 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
+#define SW_CID(x)			(le32_to_cpu(x) & BNX2X_SWCID_MASK)
 #define CQE_CMD(x)			(le32_to_cpu(x) >> \
 					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
 
@@ -529,6 +617,9 @@ struct bnx2x_fastpath {
 
 #define BNX2X_DB_MIN_SHIFT		3	/* 8 bytes */
 #define BNX2X_DB_SHIFT			7	/* 128 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
 #define DPM_TRIGER_TYPE			0x40
 #define DOORBELL(bp, cid, val) \
 	do { \
@@ -557,13 +648,11 @@ struct bnx2x_fastpath {
 
 
 /* stuff added to make the code fit 80Col */
-
-#define CQE_TYPE(cqe_fp_flags)	((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
-
-#define TPA_TYPE_START			ETH_FAST_PATH_RX_CQE_START_FLG
-#define TPA_TYPE_END			ETH_FAST_PATH_RX_CQE_END_FLG
-#define TPA_TYPE(cqe_fp_flags)		((cqe_fp_flags) & \
-					 (TPA_TYPE_START | TPA_TYPE_END))
+#define CQE_TYPE(cqe_fp_flags)	 ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type)	 ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type)	 ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
 
 #define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 
@@ -590,15 +679,38 @@ struct bnx2x_fastpath {
 #define BNX2X_RX_SUM_FIX(cqe) \
 	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
 
-#define U_SB_ETH_RX_CQ_INDEX		1
-#define U_SB_ETH_RX_BD_INDEX		2
-#define C_SB_ETH_TX_CQ_INDEX		5
+
+#define FP_USB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */
+					  /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
+#define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */
+					  /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
+#define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */
+					  /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
+
+#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index */
+					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)  */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)  */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)  */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)  */
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
+
 
 #define BNX2X_RX_SB_INDEX \
-	(&fp->sb_index_values[U_SB_ETH_RX_CQ_INDEX])
+	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
 
-#define BNX2X_TX_SB_INDEX \
-	(&fp->sb_index_values[C_SB_ETH_TX_CQ_INDEX])
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+	(&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
 
 /* end of fast path */
 
@@ -615,41 +727,74 @@ struct bnx2x_common {
 
 #define CHIP_NUM_57711			0x164f
 #define CHIP_NUM_57711E			0x1650
 #define CHIP_NUM_57712			0x1662
-#define CHIP_NUM_57712E			0x1663
+#define CHIP_NUM_57712_MF		0x1663
+#define CHIP_NUM_57713			0x1651
+#define CHIP_NUM_57713E			0x1652
+#define CHIP_NUM_57800			0x168a
+#define CHIP_NUM_57800_MF		0x16a5
+#define CHIP_NUM_57810			0x168e
+#define CHIP_NUM_57810_MF		0x16ae
+#define CHIP_NUM_57840			0x168d
+#define CHIP_NUM_57840_MF		0x16ab
 #define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
 #define CHIP_IS_57711(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711)
 #define CHIP_IS_57711E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711E)
 #define CHIP_IS_57712(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712)
-#define CHIP_IS_57712E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712E)
+#define CHIP_IS_57712_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57840(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840)
+#define CHIP_IS_57840_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 #define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
 					 CHIP_IS_57711E(bp))
 #define CHIP_IS_E2(bp)			(CHIP_IS_57712(bp) || \
-					 CHIP_IS_57712E(bp))
+					 CHIP_IS_57712_MF(bp))
+#define CHIP_IS_E3(bp)			(CHIP_IS_57800(bp) || \
+					 CHIP_IS_57800_MF(bp) || \
+					 CHIP_IS_57810(bp) || \
+					 CHIP_IS_57810_MF(bp) || \
+					 CHIP_IS_57840(bp) || \
+					 CHIP_IS_57840_MF(bp))
 #define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
-#define IS_E1H_OFFSET			(CHIP_IS_E1H(bp) || CHIP_IS_E2(bp))
-
-#define CHIP_REV(bp)			(bp->common.chip_id & 0x0000f000)
-#define CHIP_REV_Ax			0x00000000
+#define USES_WARPCORE(bp)		(CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET			(!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT			12
+#define CHIP_REV_MASK			(0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp)		(bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax			(0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx			(0x1 << CHIP_REV_SHIFT)
 /* assume maximum 5 revisions */
-#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV(bp) > 0x00005000)
+#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV_VAL(bp) > 0x00005000)
 /* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
 #define CHIP_REV_IS_EMUL(bp)		((CHIP_REV_IS_SLOW(bp)) && \
-					 !(CHIP_REV(bp) & 0x00001000))
+					 !(CHIP_REV_VAL(bp) & 0x00001000))
 /* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
 #define CHIP_REV_IS_FPGA(bp)		((CHIP_REV_IS_SLOW(bp)) && \
-					 (CHIP_REV(bp) & 0x00001000))
+					 (CHIP_REV_VAL(bp) & 0x00001000))
 
 #define CHIP_TIME(bp)			((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
 					((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
 
 #define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
 #define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
-#define CHIP_PARITY_ENABLED(bp)	(CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+#define CHIP_REV_SIM(bp)		(((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+					 (CHIP_REV_SHIFT + 1)) \
+					 << CHIP_REV_SHIFT)
+#define CHIP_REV(bp)			(CHIP_REV_IS_SLOW(bp) ? \
+					 CHIP_REV_SIM(bp) :\
+					 CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Ax))
 
 	int			flash_size;
-#define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
-#define NVRAM_TIMEOUT_COUNT		30000
-#define NVRAM_PAGE_SIZE			256
+#define BNX2X_NVRAM_1MB_SIZE		0x20000	/* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT	30000
+#define BNX2X_NVRAM_PAGE_SIZE		256
 
 	u32			shmem_base;
 	u32			shmem2_base;
@@ -666,7 +811,7 @@ struct bnx2x_common {
 #define INT_BLOCK_MODE_NORMAL		0
 #define INT_BLOCK_MODE_BW_COMP		2
 #define CHIP_INT_MODE_IS_NBC(bp)	\
-			(CHIP_IS_E2(bp) && \
+			(!CHIP_IS_E1x(bp) && \
 			 !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
 #define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
 
@@ -712,19 +857,15 @@ struct bnx2x_port {
 
 /* end of port */
 
-/* e1h Classification CAM line allocations */
-enum {
-	CAM_ETH_LINE = 0,
-	CAM_ISCSI_ETH_LINE,
-	CAM_FIP_ETH_LINE,
-	CAM_FIP_MCAST_LINE,
-	CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
-};
-/* number of MACs per function in NIG memory - used for SI mode */
-#define NIG_LLH_FUNC_MEM_SIZE		16
-/* number of entries in NIG_REG_LLHX_FUNC_MEM */
-#define NIG_LLH_FUNC_MEM_MAX_OFFSET	8
+#define STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
+/* slow path */
+
+/* slow path work-queue */
+extern struct workqueue_struct *bnx2x_wq;
+
+#define BNX2X_MAX_NUM_OF_VFS	64
 #define BNX2X_VF_ID_INVALID	0xFF
 
 /*
@@ -749,27 +890,10 @@ enum {
  * L2 queue is supported. the cid for the FCoE L2 queue is always X.
  */
 
-#define FP_SB_MAX_E1x		16	/* fast-path interrupt contexts E1x */
-#define FP_SB_MAX_E2		16	/* fast-path interrupt contexts E2 */
-
-/*
- * cid_cnt paramter below refers to the value returned by
- * 'bnx2x_get_l2_cid_count()' routine
- */
-
-/*
- * The number of FP context allocated by the driver == max number of regular
- * L2 queues + 1 for the FCoE L2 queue
- */
-#define L2_FP_COUNT(cid_cnt)	((cid_cnt) - CNIC_CONTEXT_USE)
-
-/*
- * The number of FP-SB allocated by the driver == max number of regular L2
- * queues + 1 for the CNIC which also consumes an FP-SB
- */
-#define FP_SB_COUNT(cid_cnt)	((cid_cnt) - FCOE_CONTEXT_USE)
-#define NUM_IGU_SB_REQUIRED(cid_cnt) \
-	(FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x		16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2		HC_SB_MAX_SB_E2
 
 union cdu_context {
 	struct eth_context eth;
@@ -778,7 +902,7 @@ union cdu_context {
 
 /* CDU host DB constants */
 #define CDU_ILT_PAGE_SZ_HW	3
-#define CDU_ILT_PAGE_SZ		(4096 << CDU_ILT_PAGE_SZ_HW) /* 32K */
+#define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
 #define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
 
 #ifdef BCM_CNIC
@@ -788,38 +912,63 @@ union cdu_context {
 #define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
 #endif
 
-#define QM_ILT_PAGE_SZ_HW	3
-#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 32K */
+#define QM_ILT_PAGE_SZ_HW	0
+#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
 #define QM_CID_ROUND		1024
 
 #ifdef BCM_CNIC
 /* TM (timers) host DB constants */
-#define TM_ILT_PAGE_SZ_HW	2
-#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 16K */
+#define TM_ILT_PAGE_SZ_HW	0
+#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
 /* #define TM_CONN_NUM		(CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
 #define TM_CONN_NUM		1024
 #define TM_ILT_SZ		(8 * TM_CONN_NUM)
 #define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 
 /* SRC (Searcher) host DB constants */
-#define SRC_ILT_PAGE_SZ_HW	3
-#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 32K */
+#define SRC_ILT_PAGE_SZ_HW	0
+#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
 #define SRC_HASH_BITS		10
 #define SRC_CONN_NUM		(1 << SRC_HASH_BITS) /* 1024 */
 #define SRC_ILT_SZ		(sizeof(struct src_ent) * SRC_CONN_NUM)
 #define SRC_T2_SZ		SRC_ILT_SZ
 #define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
 #endif
 
-#define MAX_DMAE_C		8
+#define MAX_DMAE_C		8
 
 /* DMA memory not used in fastpath */
 struct bnx2x_slowpath {
-	struct eth_stats_query		fw_stats;
-	struct mac_configuration_cmd	mac_config;
-	struct mac_configuration_cmd	mcast_config;
-	struct mac_configuration_cmd	uc_mac_config;
-	struct client_init_ramrod_data	client_init_data;
+	union {
+		struct mac_configuration_cmd		e1x;
+		struct eth_classify_rules_ramrod_data	e2;
+	} mac_rdata;
+
+
+	union {
+		struct tstorm_eth_mac_filter_config	e1x;
+		struct eth_filter_rules_ramrod_data	e2;
+	} rx_mode_rdata;
+
+	union {
+		struct mac_configuration_cmd		e1;
+		struct eth_multicast_rules_ramrod_data	e2;
+	} mcast_rdata;
+
+	struct eth_rss_update_ramrod_data	rss_rdata;
+
+	/* Queue State related ramrods are always sent under rtnl_lock */
+	union {
+		struct client_init_ramrod_data	init_data;
+		struct client_update_ramrod_data update_data;
+	} q_rdata;
+
+	union {
+		struct function_start_data	func_start;
+		/* pfc configuration for DCBX ramrod */
+		struct flow_control_configuration pfc_config;
+	} func_rdata;
 
 	/* used by dmae command executer */
 	struct dmae_command		dmae[MAX_DMAE_C];
@@ -833,8 +982,6 @@ struct bnx2x_slowpath {
 	u32				wb_comp;
 	u32				wb_data[4];
 
-	/* pfc configuration for DCBX ramrod */
-	struct flow_control_configuration pfc_config;
 };
 
 #define bnx2x_sp(bp, var)		(&bp->slowpath->var)
@@ -846,7 +993,7 @@ struct bnx2x_slowpath {
 #define MAX_DYNAMIC_ATTN_GRPS		8
 
 struct attn_route {
-	u32 sig[5];
+	u32	sig[5];
 };
 
 struct iro {
@@ -866,13 +1013,15 @@ struct hw_context {
 /* forward */
 struct bnx2x_ilt;
 
-typedef enum {
+
+enum bnx2x_recovery_state {
 	BNX2X_RECOVERY_DONE,
 	BNX2X_RECOVERY_INIT,
 	BNX2X_RECOVERY_WAIT,
-} bnx2x_recovery_state_t;
+	BNX2X_RECOVERY_FAILED
+};
 
-/**
+/*
  * Event queue (EQ or event ring) MC hsi
  * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
  */
@@ -910,6 +1059,31 @@ enum {
 	BNX2X_LINK_REPORT_TX_FC_ON,
 };
 
+enum {
+	BNX2X_PORT_QUERY_IDX,
+	BNX2X_PF_QUERY_IDX,
+	BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+	struct stats_query_header hdr;
+	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+struct bnx2x_fw_stats_data {
+	struct stats_counter	storm_counters;
+	struct per_port_stats	port;
+	struct per_pf_stats	pf;
+	struct per_queue_stats  queue_stats[1];
+};
+
+/* Public slow path states */
+enum {
+	BNX2X_SP_RTNL_SETUP_TC,
+	BNX2X_SP_RTNL_TX_TIMEOUT,
+};
+
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
@@ -919,19 +1093,28 @@ struct bnx2x {
 	void __iomem		*doorbells;
 	u16			db_size;
 
+	u8			pf_num;	/* absolute PF number */
+	u8			pfid;	/* per-path PF number */
+	int			base_fw_ndsb; /**/
+#define BP_PATH(bp)		(CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp)		(bp->pfid & 1)
+#define BP_FUNC(bp)		(bp->pfid)
+#define BP_ABS_FUNC(bp)		(bp->pf_num)
+#define BP_E1HVN(bp)		(bp->pfid >> 1)
+#define BP_VN(bp)		(BP_E1HVN(bp)) /*remove when approved*/
+#define BP_L_ID(bp)		(BP_E1HVN(bp) << 2)
+#define BP_FW_MB_IDX(bp)	(BP_PORT(bp) +\
+	  BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
+
 	struct net_device	*dev;
 	struct pci_dev		*pdev;
 
-	struct iro		*iro_arr;
+	const struct iro	*iro_arr;
 #define IRO (bp->iro_arr)
 
-	atomic_t		intr_sem;
-
-	bnx2x_recovery_state_t	recovery_state;
+	enum bnx2x_recovery_state recovery_state;
 	int			is_leader;
 	struct msix_entry	*msix_table;
-#define INT_MODE_INTx			1
-#define INT_MODE_MSI			2
 
 	int			tx_ring_size;
 
@@ -944,7 +1127,8 @@ struct bnx2x {
 	/* Max supported alignment is 256 (8 shift) */
 #define BNX2X_RX_ALIGN_SHIFT		((L1_CACHE_SHIFT < 8) ? \
 					 L1_CACHE_SHIFT : 8)
-#define BNX2X_RX_ALIGN			(1 << BNX2X_RX_ALIGN_SHIFT)
+	/* FW use 2 Cache lines Alignment for start packet and size */
+#define BNX2X_FW_RX_ALIGN		(2 << BNX2X_RX_ALIGN_SHIFT)
 #define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
 
 	struct host_sp_status_block *def_status_blk;
@@ -974,10 +1158,12 @@ struct bnx2x {
 	__le16			*eq_cons_sb;
 	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */
 
-	/* Flags for marking that there is a STAT_QUERY or
-	   SET_MAC ramrod pending */
-	int			stats_pending;
-	int			set_mac_pending;
+
+
+	/* Counter for marking that there is a STAT_QUERY ramrod pending */
+	u16			stats_pending;
+	/* Counter for completed statistics ramrods */
+	u16			stats_comp;
 
 	/* End of fields used in the performance code paths */
 
@@ -985,54 +1171,35 @@ struct bnx2x {
 	int			msg_enable;
 
 	u32			flags;
-#define PCIX_FLAG			1
-#define PCI_32BIT_FLAG			2
-#define ONE_PORT_FLAG			4
-#define NO_WOL_FLAG			8
-#define USING_DAC_FLAG			0x10
-#define USING_MSIX_FLAG			0x20
-#define USING_MSI_FLAG			0x40
-
-#define TPA_ENABLE_FLAG			0x80
-#define NO_MCP_FLAG			0x100
-#define DISABLE_MSI_FLAG		0x200
+#define PCIX_FLAG			(1 << 0)
+#define PCI_32BIT_FLAG			(1 << 1)
+#define ONE_PORT_FLAG			(1 << 2)
+#define NO_WOL_FLAG			(1 << 3)
+#define USING_DAC_FLAG			(1 << 4)
+#define USING_MSIX_FLAG			(1 << 5)
+#define USING_MSI_FLAG			(1 << 6)
+#define DISABLE_MSI_FLAG		(1 << 7)
+#define TPA_ENABLE_FLAG			(1 << 8)
+#define NO_MCP_FLAG			(1 << 9)
+
 #define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
-#define MF_FUNC_DIS			0x1000
-#define FCOE_MACS_SET			0x2000
-#define NO_FCOE_FLAG			0x4000
-#define NO_ISCSI_OOO_FLAG		0x8000
-#define NO_ISCSI_FLAG			0x10000
+#define MF_FUNC_DIS			(1 << 11)
+#define OWN_CNIC_IRQ			(1 << 12)
+#define NO_ISCSI_OOO_FLAG		(1 << 13)
+#define NO_ISCSI_FLAG			(1 << 14)
+#define NO_FCOE_FLAG			(1 << 15)
 
-#define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
 #define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
 #define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
-
-	int			pf_num;	/* absolute PF number */
-	int			pfid;	/* per-path PF number */
-	int			base_fw_ndsb;
-#define BP_PATH(bp)		(!CHIP_IS_E2(bp) ? \
-				 0 : (bp->pf_num & 1))
-#define BP_PORT(bp)		(bp->pfid & 1)
-#define BP_FUNC(bp)		(bp->pfid)
-#define BP_ABS_FUNC(bp)		(bp->pf_num)
-#define BP_E1HVN(bp)		(bp->pfid >> 1)
-#define BP_VN(bp)		(CHIP_MODE_IS_4_PORT(bp) ? \
-				 0 : BP_E1HVN(bp))
-#define BP_L_ID(bp)		(BP_E1HVN(bp) << 2)
-#define BP_FW_MB_IDX(bp)	(BP_PORT(bp) +\
-				 BP_VN(bp) * (CHIP_IS_E1x(bp) ? 2 : 1))
-
-#ifdef BCM_CNIC
-#define BCM_CNIC_CID_START	16
-#define BCM_ISCSI_ETH_CL_ID	17
-#endif
+#define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
 
 	int			pm_cap;
-	int			pcie_cap;
 	int			mrrs;
 
 	struct delayed_work	sp_task;
-	struct delayed_work	reset_task;
+	struct delayed_work	sp_rtnl_task;
+
+	struct delayed_work	period_task;
 
 	struct timer_list	timer;
 	int			current_interval;
@@ -1052,9 +1219,9 @@ struct bnx2x {
 
 	struct cmng_struct_per_port cmng;
 	u32			vn_weight_sum;
-
 	u32			mf_config[E1HVN_MAX];
 	u32			mf2_config[E2_FUNC_MAX];
+	u32			path_has_ovlan; /* E3 */
 	u16			mf_ov;
 	u8			mf_mode;
 #define IS_MF(bp)		(bp->mf_mode != 0)
@@ -1079,33 +1246,24 @@ struct bnx2x {
 
 	u32			lin_cnt;
 
-	int			state;
+	u16			state;
 #define BNX2X_STATE_CLOSED		0
 #define BNX2X_STATE_OPENING_WAIT4_LOAD	0x1000
 #define BNX2X_STATE_OPENING_WAIT4_PORT	0x2000
 #define BNX2X_STATE_OPEN		0x3000
 #define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
 #define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
-#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
-#define BNX2X_STATE_FUNC_STARTED	0x7000
+
 #define BNX2X_STATE_DIAG		0xe000
 #define BNX2X_STATE_ERROR		0xf000
 
 	int			multi_mode;
+#define BNX2X_MAX_PRIORITY		8
+#define BNX2X_MAX_ENTRIES_PER_PRI	16
+#define BNX2X_MAX_COS			3
+#define BNX2X_MAX_TX_COS		2
 	int			num_queues;
 	int			disable_tpa;
-	int			int_mode;
-	u32			*rx_indir_table;
-
-	struct tstorm_eth_mac_filter_config	mac_filters;
-#define BNX2X_ACCEPT_NONE		0x0000
-#define BNX2X_ACCEPT_UNICAST		0x0001
-#define BNX2X_ACCEPT_MULTICAST		0x0002
-#define BNX2X_ACCEPT_ALL_UNICAST	0x0004
-#define BNX2X_ACCEPT_ALL_MULTICAST	0x0008
-#define BNX2X_ACCEPT_BROADCAST		0x0010
-#define BNX2X_ACCEPT_UNMATCHED_UCAST	0x0020
-#define BNX2X_PROMISCUOUS_MODE		0x10000
 
 	u32			rx_mode;
 #define BNX2X_RX_MODE_NONE		0
@@ -1113,7 +1271,6 @@ struct bnx2x {
 #define BNX2X_RX_MODE_ALLMULTI		2
 #define BNX2X_RX_MODE_PROMISC		3
 #define BNX2X_MAX_MULTICAST		64
-#define BNX2X_MAX_EMUL_MULTI		16
 
 	u8			igu_dsb_id;
 	u8			igu_base_sb;
@@ -1122,16 +1279,53 @@ struct bnx2x {
 	struct bnx2x_slowpath	*slowpath;
 	dma_addr_t		slowpath_mapping;
 
+
+	/* Total number of FW statistics requests */
+	u8			fw_stats_num;
+
+	/*
+	 * This is a memory buffer that will contain both statistics
+	 * ramrod request and data.
+	 */
+	void			*fw_stats;
+	dma_addr_t		fw_stats_mapping;
+
+	/*
+	 * FW statistics request shortcut (points at the
+	 * beginning of fw_stats buffer).
+	 */
+	struct bnx2x_fw_stats_req	*fw_stats_req;
+	dma_addr_t			fw_stats_req_mapping;
+	int				fw_stats_req_sz;
+
+	/*
+	 * FW statistics data shortcut (points at the beginning of
+	 * fw_stats buffer + fw_stats_req_sz).
+	 */
+	struct bnx2x_fw_stats_data	*fw_stats_data;
+	dma_addr_t			fw_stats_data_mapping;
+	int				fw_stats_data_sz;
+
 	struct hw_context	context;
 
 	struct bnx2x_ilt	*ilt;
 #define BP_ILT(bp)		((bp)->ilt)
-#define ILT_MAX_LINES		128
+#define ILT_MAX_LINES		256
+/*
+ * Maximum supported number of RSS queues: number of IGU SBs minus one that
+ * goes to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_PRESENT)
 
-	int			l2_cid_count;
-#define L2_ILT_LINES(bp)	(DIV_ROUND_UP((bp)->l2_cid_count, \
-				 ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp)	((bp)->l2_cid_count * (1 << BNX2X_DB_SHIFT))
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ */
+#define BNX2X_L2_CID_COUNT(bp)	(MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
+					NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+					ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp)	(BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
 
 	int			qm_cid_count;
 
@@ -1148,16 +1342,18 @@ struct bnx2x {
 	struct cnic_eth_dev	cnic_eth_dev;
 	union host_hc_status_block cnic_sb;
 	dma_addr_t		cnic_sb_mapping;
-#define CNIC_SB_ID(bp)		((bp)->base_fw_ndsb + BP_L_ID(bp))
-#define CNIC_IGU_SB_ID(bp)	((bp)->igu_base_sb)
 	struct eth_spe		*cnic_kwq;
 	struct eth_spe		*cnic_kwq_prod;
 	struct eth_spe		*cnic_kwq_cons;
 	struct eth_spe		*cnic_kwq_last;
 	u16			cnic_kwq_pending;
 	u16			cnic_spq_pending;
-	struct mutex		cnic_mutex;
 	u8			fip_mac[ETH_ALEN];
+	struct mutex		cnic_mutex;
+	struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+	/* Start index of the "special" (CNIC related) L2 clients */
+	u8			cnic_base_cl_id;
 #endif
 
 	int			dmae_ready;
@@ -1194,6 +1390,8 @@ struct bnx2x {
 	u16			*init_ops_offsets;
 	/* Data blob - has 32 bit granularity */
 	u32			*init_data;
+	u32			init_mode_flags;
+#define INIT_MODE_FLAGS(bp)	(bp->init_mode_flags)
 	/* Zipped PRAM blobs - raw data */
 	const u8		*tsem_int_table_data;
 	const u8		*tsem_pram_data;
@@ -1215,10 +1413,9 @@ struct bnx2x {
 #define INIT_CSEM_INT_TABLE_DATA(bp)	(bp->csem_int_table_data)
 #define INIT_CSEM_PRAM_DATA(bp)		(bp->csem_pram_data)
 
+#define PHY_FW_VER_LEN			20
 	char			fw_ver[32];
 	const struct firmware	*firmware;
-	/* LLDP params */
-	struct bnx2x_config_lldp_params		lldp_config_params;
 
 	/* DCB support on/off */
 	u16 dcb_state;
@@ -1235,59 +1432,56 @@ struct bnx2x {
 	bool dcbx_mode_uset;
 
 	struct bnx2x_config_dcbx_params		dcbx_config_params;
-
 	struct bnx2x_dcbx_port_params		dcbx_port_params;
 	int					dcb_version;
 
-	/* DCBX Negotiation results */
+	/* CAM credit pools */
+	struct bnx2x_credit_pool_obj		macs_pool;
+
+	/* RX_MODE object */
+	struct bnx2x_rx_mode_obj		rx_mode_obj;
+
+	/* MCAST object */
+	struct bnx2x_mcast_obj			mcast_obj;
+
+	/* RSS configuration object */
+	struct bnx2x_rss_config_obj		rss_conf_obj;
+
+	/* Function State controlling object */
+	struct bnx2x_func_sp_obj		func_obj;
+
+	unsigned long				sp_state;
+
+	/* operation indication for the sp_rtnl task */
+	unsigned long				sp_rtnl_state;
+
+	/* DCBX Negotiation results */
 	struct dcbx_features			dcbx_local_feat;
 	u32					dcbx_error;
+
 #ifdef BCM_DCBNL
 	struct dcbx_features			dcbx_remote_feat;
 	u32					dcbx_remote_flags;
 #endif
 	u32					pending_max;
-};
 
-/**
- * Init queue/func interface
- */
-/* queue init flags */
-#define QUEUE_FLG_TPA		0x0001
-#define QUEUE_FLG_CACHE_ALIGN	0x0002
-#define QUEUE_FLG_STATS		0x0004
-#define QUEUE_FLG_OV		0x0008
-#define QUEUE_FLG_VLAN		0x0010
-#define QUEUE_FLG_COS		0x0020
-#define QUEUE_FLG_HC		0x0040
-#define QUEUE_FLG_DHC		0x0080
-#define QUEUE_FLG_OOO		0x0100
-
-#define QUEUE_DROP_IP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR
-#define QUEUE_DROP_TCP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR
-#define QUEUE_DROP_TTL0		TSTORM_ETH_CLIENT_CONFIG_DROP_TTL0
-#define QUEUE_DROP_UDP_CS_ERR	TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR
-
-
-
-/* rss capabilities */
-#define RSS_IPV4_CAP		0x0001
-#define RSS_IPV4_TCP_CAP	0x0002
-#define RSS_IPV6_CAP		0x0004
-#define RSS_IPV6_TCP_CAP	0x0008
+	/* multiple tx classes of service */
+	u8					max_cos;
 
-#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
-#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
+	/* priority to cos mapping */
+	u8					prio_to_cos[8];
+};
 
-/* ethtool statistics are displayed for all regular ethernet queues and the
- * fcoe L2 queue if not disabled
- */
-#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
-			   (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
+/* Tx queues may be less or equal to Rx queues */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)
 
 #define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
 
-#define BNX2X_MAX_QUEUES(bp)	(bp->igu_sb_cnt - CNIC_CONTEXT_USE)
+#define BNX2X_MAX_QUEUES(bp)	BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp)	(BNX2X_NUM_ETH_QUEUES(bp) > 1) */
 
 #define RSS_IPV4_CAP_MASK						\
 	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1302,107 +1496,15 @@ struct bnx2x {
 	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
 /* func init flags */
-#define FUNC_FLG_STATS		0x0001
-#define FUNC_FLG_TPA		0x0002
-#define FUNC_FLG_SPQ		0x0004
-#define FUNC_FLG_LEADING	0x0008	/* PF only */
-
-struct rxq_pause_params {
-	u16		bd_th_lo;
-	u16		bd_th_hi;
-	u16		rcq_th_lo;
-	u16		rcq_th_hi;
-	u16		sge_th_lo; /* valid iff QUEUE_FLG_TPA */
-	u16		sge_th_hi; /* valid iff QUEUE_FLG_TPA */
-	u16		pri_map;
-};
+#define FUNC_FLG_RSS		0x0001
+#define FUNC_FLG_STATS		0x0002
+/* removed  FUNC_FLG_UNMATCHED	0x0004 */
+#define FUNC_FLG_TPA		0x0008
+#define FUNC_FLG_SPQ		0x0010
+#define FUNC_FLG_LEADING	0x0020	/* PF only */
 
-struct bnx2x_rxq_init_params {
-	/* cxt*/
-	struct eth_context *cxt;
-
-	/* dma */
-	dma_addr_t	dscr_map;
-	dma_addr_t	sge_map;
-	dma_addr_t	rcq_map;
-	dma_addr_t	rcq_np_map;
-
-	u16		flags;
-	u16		drop_flags;
-	u16		mtu;
-	u16		buf_sz;
-	u16		fw_sb_id;
-	u16		cl_id;
-	u16		spcl_id;
-	u16		cl_qzone_id;
-
-	/* valid iff QUEUE_FLG_STATS */
-	u16		stat_id;
-
-	/* valid iff QUEUE_FLG_TPA */
-	u16		tpa_agg_sz;
-	u16		sge_buf_sz;
-	u16		max_sges_pkt;
-
-	/* valid iff QUEUE_FLG_CACHE_ALIGN */
-	u8		cache_line_log;
-
-	u8		sb_cq_index;
-	u32		cid;
-
-	/* desired interrupts per sec. valid iff QUEUE_FLG_HC */
-	u32		hc_rate;
-};
-
-struct bnx2x_txq_init_params {
-	/* cxt*/
-	struct eth_context *cxt;
-
-	/* dma */
-	dma_addr_t	dscr_map;
-
-	u16		flags;
-	u16		fw_sb_id;
-	u8		sb_cq_index;
-	u8		cos;		/* valid iff QUEUE_FLG_COS */
-	u16		stat_id;	/* valid iff QUEUE_FLG_STATS */
-	u16		traffic_type;
-	u32		cid;
-	u16		hc_rate;	/* desired interrupts per sec.*/
-					/* valid iff QUEUE_FLG_HC */
-
-};
-
-struct bnx2x_client_ramrod_params {
-	int *pstate;
-	int state;
-	u16 index;
-	u16 cl_id;
-	u32 cid;
-	u8 poll;
-#define CLIENT_IS_FCOE			0x01
-#define CLIENT_IS_LEADING_RSS		0x02
-	u8 flags;
-};
-
-struct bnx2x_client_init_params {
-	struct rxq_pause_params pause;
-	struct bnx2x_rxq_init_params rxq_params;
-	struct bnx2x_txq_init_params txq_params;
-	struct bnx2x_client_ramrod_params ramrod_params;
-};
-
-struct bnx2x_rss_params {
-	int	mode;
-	u16	cap;
-	u16	result_mask;
-};
 
 struct bnx2x_func_init_params {
-
-	/* rss */
-	struct bnx2x_rss_params *rss;	/* valid iff FUNC_FLG_RSS */
-
 	/* dma */
 	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
 	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
@@ -1414,42 +1516,40 @@ struct bnx2x_func_init_params {
 };
 
 #define for_each_eth_queue(bp, var) \
-	for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
+	for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 
 #define for_each_nondefault_eth_queue(bp, var) \
-	for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
-
-#define for_each_napi_queue(bp, var) \
-	for (var = 0; \
-	     var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
-		if (skip_queue(bp, var))	\
-			continue;		\
-		else
+	for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 
 #define for_each_queue(bp, var) \
-	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_queue(bp, var))	\
 			continue;		\
 		else
 
+/* Skip forwarding FP */
 #define for_each_rx_queue(bp, var) \
-	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
		if (skip_rx_queue(bp, var))	\
 			continue;		\
 		else
 
+/* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
-	for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_tx_queue(bp, var))	\
 			continue;		\
 		else
 
 #define for_each_nondefault_queue(bp, var) \
-	for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
+	for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 		if (skip_queue(bp, var))	\
 			continue;		\
 		else
 
+#define for_each_cos_in_tx_queue(fp, var) \
+	for ((var) = 0; (var) < (fp)->max_cos; (var)++)
+
 /* skip rx queue
  * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
@@ -1462,11 +1562,66 @@ struct bnx2x_func_init_params {
 
 #define skip_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
-#define WAIT_RAMROD_POLL	0x01
-#define WAIT_RAMROD_COMMON	0x02
+
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp:			driver handle
+ * @mac:		MAC to configure
+ * @obj:		MAC object handle
+ * @set:		if 'true' add a new MAC, otherwise - delete
+ * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if operation has successfully completed, a positive value if
+ * the operation has been successfully scheduled and a negative - if a
+ * requested operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags);
+/**
+ * Deletes all MACs configured for the specific MAC object.
+ *
+ * @param bp Function driver instance
+ * @param mac_obj MAC object to cleanup
+ *
+ * @return zero if all MACs were cleaned
+ */
+
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp:			driver handle
+ * @mac_obj:		MAC object handle
+ * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp:	if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if operation has successfully completed, a positive value if
+ * the operation has been successfully scheduled and a negative - if a
+ * requested operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp);
+
+/* Init Function API */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 void bnx2x_read_mf_cfg(struct bnx2x *bp);
 
+
 /* dmae */
 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
@@ -1477,22 +1632,12 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 		      bool with_comp, u8 comp_type);
 
-int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
-int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
-u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
-		  u32 data_hi, u32 data_lo, int common);
-
-/* Clears multicast and unicast list configuration in the chip. */
-void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
-void bnx2x_invalidate_uc_list(struct bnx2x *bp);
-
+		  u32 data_hi, u32 data_lo, int cmd_type);
 void bnx2x_update_coalesce(struct bnx2x *bp);
-int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
 
 static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 			   int wait)
@@ -1648,7 +1793,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 
 /* must be used on a CID before placing it on a HW ring */
 #define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \
-					 (BP_E1HVN(bp) << 17) | (x))
+					 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+					 (x))
 
 #define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
 #define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
@@ -1718,12 +1864,14 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 	(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
-				 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
 #define HW_PRTY_ASSERT_SET_0	(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
-				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
+				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
 #define HW_INTERRUT_ASSERT_SET_1 \
 				(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
@@ -1736,17 +1884,22 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 				 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
-#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
+#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
-				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
+				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
 #define HW_INTERRUT_ASSERT_SET_2 \
 				(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
 				 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
@@ -1758,6 +1911,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 				 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
 				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
 				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
 
@@ -1766,6 +1920,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 				 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
 				 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
 #define RSS_FLAGS(bp) \
 		(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
 		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
@@ -1775,6 +1932,30 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 		  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 #define MULTI_MASK			0x7f
 
+
+#define DEF_USB_FUNC_OFF	offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF	offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF	offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF	offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+			offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+			offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+			offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+			offsetof(struct tstorm_def_status_block, segment)
+
 #define BNX2X_SP_DSB_INDEX \
 		(&bp->def_status_blk->sp_sb.\
 					index_values[HC_SP_INDEX_ETH_DEF_CONS])
@@ -1786,7 +1967,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 	} while (0)
 
 #define GET_FLAG(value, mask) \
-	(((value) &= (mask)) >> (mask##_SHIFT))
+	(((value) & (mask)) >> (mask##_SHIFT))
 
 #define GET_FIELD(value, fname) \
 	(((value) & (fname##_MASK)) >> (fname##_SHIFT))
@@ -1821,15 +2002,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 #define HC_SEG_ACCESS_ATTN		4
 #define HC_SEG_ACCESS_NORM		0   /*Driver decision 0-1*/
 
-#ifdef BNX2X_MAIN
-#define BNX2X_EXTERN
-#else
-#define BNX2X_EXTERN extern
-#endif
-
-BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
-
-extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
-void bnx2x_push_indir_table(struct bnx2x *bp);
+static const u32 dmae_reg_go_c[] = {
+	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
 #endif /* bnx2x.h */
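The SGE producer mask handling in bnx2x.h was generalized above into the BIT_VEC64_* helpers, which treat an array of u64 words as one long bit vector. A self-contained usage sketch (editor's illustration, not part of the patch; the macro bodies are copied from the hunk above, with uint64_t standing in for the kernel's u64 type):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define BIT_VEC64_ELEM_SZ	64
#define BIT_VEC64_ELEM_SHIFT	6
#define BIT_VEC64_ELEM_MASK	((u64)BIT_VEC64_ELEM_SZ - 1)

#define __BIT_VEC64_SET_BIT(el, bit) \
	do { \
		el = ((el) | ((u64)0x1 << (bit))); \
	} while (0)

#define BIT_VEC64_SET_BIT(vec64, idx) \
	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
			    (idx) & BIT_VEC64_ELEM_MASK)

#define BIT_VEC64_TEST_BIT(vec64, idx) \
	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
	  ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)

int main(void)
{
	u64 vec[4] = { 0 };	/* a 256-bit vector, like fp->sge_mask */

	BIT_VEC64_SET_BIT(vec, 130);	/* word 130 >> 6 = 2, bit 130 & 63 = 2 */
	printf("bit 130: %d, bit 131: %d\n",
	       (int)BIT_VEC64_TEST_BIT(vec, 130),
	       (int)BIT_VEC64_TEST_BIT(vec, 131));	/* prints: 1, 0 */
	return 0;
}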
@@ -46,6 +47,25 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) /* Restore the NAPI object as it has been already initialized */ fp->napi = orig_napi; + + fp->bp = bp; + fp->index = index; + if (IS_ETH_FP(fp)) + fp->max_cos = bp->max_cos; + else + /* Special queues support only one CoS */ + fp->max_cos = 1; + + /* + * set the tpa flag for each queue. The tpa flag determines the queue + * minimal size so it must be set prior to queue memory allocation + */ + fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); + +#ifdef BCM_CNIC + /* We don't want TPA on FCoE, FWD and OOO L2 rings */ + bnx2x_fcoe(bp, disable_tpa) = 1; +#endif } /** @@ -71,13 +91,15 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) to_fp->napi = orig_napi; } +int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ + /* free skb in the packet ring at pos idx * return idx of last bd freed */ -static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, u16 idx) { - struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx]; + struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx]; struct eth_tx_start_bd *tx_start_bd; struct eth_tx_bd *tx_data_bd; struct sk_buff *skb = tx_buf->skb; @@ -87,15 +109,16 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); - DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", - idx, tx_buf, skb); + DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + txdata->txq_index, idx, tx_buf, skb); /* unmap first bd */ DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); - tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; + tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) { @@ -122,7 +145,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, while (nbd > 0) { DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); - tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE); if (--nbd) @@ -138,20 +161,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, return new_cons; } -int bnx2x_tx_int(struct bnx2x_fastpath *fp) +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata) { - struct bnx2x *bp = fp->bp; struct netdev_queue *txq; - u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; + u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) return -1; #endif - txq = netdev_get_tx_queue(bp->dev, fp->index); - hw_cons = le16_to_cpu(*fp->tx_cons_sb); - sw_cons = fp->tx_pkt_cons; + txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); + hw_cons = le16_to_cpu(*txdata->tx_cons_sb); + sw_cons = txdata->tx_pkt_cons; while (sw_cons != hw_cons) { u16 pkt_cons; @@ -160,20 +182,23 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp) DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u " " pkt_cons %u\n", - fp->index, hw_cons, sw_cons, pkt_cons); + txdata->txq_index, hw_cons, sw_cons, pkt_cons); - bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); + bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons); sw_cons++; } - fp->tx_pkt_cons = sw_cons; 
- fp->tx_bd_cons = bd_cons; + txdata->tx_pkt_cons = sw_cons; + txdata->tx_bd_cons = bd_cons; /* Need to make the tx_bd_cons update visible to start_xmit() * before checking for netif_tx_queue_stopped(). Without the * memory barrier, there is a small possibility that * start_xmit() will miss it and cause the queue to be stopped * forever. + * On the other hand we need an rmb() here to ensure the proper + * ordering of bit testing in the following + * netif_tx_queue_stopped(txq) call. */ smp_mb(); @@ -192,7 +217,7 @@ int bnx2x_tx_int(struct bnx2x_fastpath *fp) if ((netif_tx_queue_stopped(txq)) && (bp->state == BNX2X_STATE_OPEN) && - (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) + (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)) netif_tx_wake_queue(txq); __netif_tx_unlock(txq); @@ -225,7 +250,7 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, /* First mark all used pages */ for (i = 0; i < sge_len; i++) - SGE_MASK_CLEAR_BIT(fp, + BIT_VEC64_CLEAR_BIT(fp->sge_mask, RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", @@ -237,8 +262,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); - last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT; - first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT; + last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; + first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; /* If ring is not full */ if (last_elem + 1 != first_elem) @@ -249,8 +274,8 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, if (likely(fp->sge_mask[i])) break; - fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK; - delta += RX_SGE_MASK_ELEM_SZ; + fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; + delta += BIT_VEC64_ELEM_SZ; } if (delta > 0) { @@ -265,33 +290,56 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, } static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, - struct sk_buff *skb, u16 cons, u16 prod) + struct sk_buff *skb, u16 cons, u16 prod, + struct eth_fast_path_rx_cqe *cqe) { struct bnx2x *bp = fp->bp; struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons]; struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod]; struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; dma_addr_t mapping; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *first_buf = &tpa_info->first_buf; - /* move empty skb from pool to prod and map it */ - prod_rx_buf->skb = fp->tpa_pool[queue].skb; - mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, - fp->rx_buf_size, DMA_FROM_DEVICE); - dma_unmap_addr_set(prod_rx_buf, mapping, mapping); - - /* move partial skb from cons to pool (don't unmap yet) */ - fp->tpa_pool[queue] = *cons_rx_buf; - - /* mark bin state as start - print error if current state != stop */ - if (fp->tpa_state[queue] != BNX2X_TPA_STOP) + /* print error if current state != stop */ + if (tpa_info->tpa_state != BNX2X_TPA_STOP) BNX2X_ERR("start of bin not in stop [%d]\n", queue); - fp->tpa_state[queue] = BNX2X_TPA_START; + /* Try to map an empty skb from the aggregation info */ + mapping = dma_map_single(&bp->pdev->dev, + first_buf->skb->data, + fp->rx_buf_size, DMA_FROM_DEVICE); + /* + * ...if it fails - move the skb from the consumer to the producer + * and set the current aggregation state as ERROR to drop it + * when TPA_STOP arrives. 
+	 */ + + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + /* Move the BD from the consumer to the producer */ + bnx2x_reuse_rx_skb(fp, cons, prod); + tpa_info->tpa_state = BNX2X_TPA_ERROR; + return; + } + /* move empty skb from pool to prod */ + prod_rx_buf->skb = first_buf->skb; + dma_unmap_addr_set(prod_rx_buf, mapping, mapping); /* point prod_bd to new skb */ prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); + /* move partial skb from cons to pool (don't unmap yet) */ + *first_buf = *cons_rx_buf; + + /* mark bin state as START */ + tpa_info->parsing_flags = + le16_to_cpu(cqe->pars_flags.flags); + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + tpa_info->tpa_state = BNX2X_TPA_START; + tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); + tpa_info->placement_offset = cqe->placement_offset; + #ifdef BNX2X_STOP_ON_ERROR fp->tpa_queue_used |= (1 << queue); #ifdef _ASM_GENERIC_INT_L64_H @@ -322,10 +370,17 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, u16 len_on_bd) { - /* TPA arrgregation won't have an IP options and TCP options - * other than timestamp. + /* + * TPA aggregation won't have either IP options or TCP options + * other than timestamp or IPv6 extension headers. */ - u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); + u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr); + + if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == + PRS_FLAG_OVERETH_IPV6) + hdrs_len += sizeof(struct ipv6hdr); + else /* IPv4 */ + hdrs_len += sizeof(struct iphdr); /* Check if there was a TCP timestamp, if there is it's will @@ -340,30 +395,30 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, } static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, - struct sk_buff *skb, - struct eth_fast_path_rx_cqe *fp_cqe, - u16 cqe_idx, u16 parsing_flags) + u16 queue, struct sk_buff *skb, + struct eth_end_agg_rx_cqe *cqe, + u16 cqe_idx) { struct sw_rx_page *rx_pg, old_rx_pg; u32 i, frag_len, frag_size, pages; int err; int j; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + u16 len_on_bd = tpa_info->len_on_bd; - frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; + frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; /* This is needed in order to enable forwarding support */ if (frag_size) skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, - parsing_flags, len_on_bd); + tpa_info->parsing_flags, len_on_bd); #ifdef BNX2X_STOP_ON_ERROR if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { BNX2X_ERR("SGL length is too long: %d. 
CQE index is %d\n", pages, cqe_idx); - BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", - fp_cqe->pkt_len, len_on_bd); + BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len); bnx2x_panic(); return -EINVAL; } @@ -371,8 +426,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* Run through the SGL and compose the fragmented skb */ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { - u16 sge_idx = - RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j])); + u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j])); /* FW gives the indices of the SGE as if the ring is an array (meaning that "next" element will consume 2 indices) */ @@ -407,13 +461,28 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, } static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, - u16 queue, int pad, int len, union eth_rx_cqe *cqe, + u16 queue, struct eth_end_agg_rx_cqe *cqe, u16 cqe_idx) { - struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; + struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue]; + struct sw_rx_bd *rx_buf = &tpa_info->first_buf; + u8 pad = tpa_info->placement_offset; + u16 len = tpa_info->len_on_bd; struct sk_buff *skb = rx_buf->skb; /* alloc new skb */ - struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); + struct sk_buff *new_skb; + u8 old_tpa_state = tpa_info->tpa_state; + + tpa_info->tpa_state = BNX2X_TPA_STOP; + + /* If there was an error during the handling of the TPA_START - + * drop this aggregation. + */ + if (old_tpa_state == BNX2X_TPA_ERROR) + goto drop; + + /* Try to allocate the new skb */ + new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); /* Unmap skb in the pool anyway, as we are going to change pool entry status to BNX2X_TPA_STOP even if new skb allocation @@ -422,11 +491,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, fp->rx_buf_size, DMA_FROM_DEVICE); if (likely(new_skb)) { - /* fix ip xsum and give it to the stack */ - /* (no need to map the new skb) */ - u16 parsing_flags = - le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); - prefetch(skb); prefetch(((char *)(skb)) + L1_CACHE_BYTES); @@ -446,21 +510,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, skb->protocol = eth_type_trans(skb, bp->dev); skb->ip_summed = CHECKSUM_UNNECESSARY; - { - struct iphdr *iph; - - iph = (struct iphdr *)skb->data; - iph->check = 0; - iph->check = ip_fast_csum((u8 *)iph, iph->ihl); - } - - if (!bnx2x_fill_frag_skb(bp, fp, skb, - &cqe->fast_path_cqe, cqe_idx, - parsing_flags)) { - if (parsing_flags & PARSING_FLAGS_VLAN) - __vlan_hwaccel_put_tag(skb, - le16_to_cpu(cqe->fast_path_cqe.
- vlan_tag)); + if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { + if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) + __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); napi_gro_receive(&fp->napi, skb); } else { DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" @@ -470,16 +522,16 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* put new skb in bin */ - fp->tpa_pool[queue].skb = new_skb; + rx_buf->skb = new_skb; - } else { - /* else drop the packet and keep the buffer in the bin */ - DP(NETIF_MSG_RX_STATUS, - "Failed to allocate new skb - dropping packet!\n"); - fp->eth_q_stats.rx_skb_alloc_failed++; + return; } - fp->tpa_state[queue] = BNX2X_TPA_STOP; +drop: + /* drop the packet and keep the buffer in the bin */ + DP(NETIF_MSG_RX_STATUS, + "Failed to allocate or map a new skb - dropping packet!\n"); + fp->eth_q_stats.rx_skb_alloc_failed++; } /* Set Toeplitz hash value in the skb using the value from the @@ -533,9 +585,16 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) struct sw_rx_bd *rx_buf = NULL; struct sk_buff *skb; union eth_rx_cqe *cqe; + struct eth_fast_path_rx_cqe *cqe_fp; u8 cqe_fp_flags; + enum eth_rx_cqe_type cqe_fp_type; u16 len, pad; +#ifdef BNX2X_STOP_ON_ERROR + if (unlikely(bp->panic)) + return 0; +#endif + comp_ring_cons = RCQ_BD(sw_comp_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); @@ -548,17 +607,18 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) PAGE_SIZE + 1)); cqe = &fp->rx_comp_ring[comp_ring_cons]; - cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; + cqe_fp = &cqe->fast_path_cqe; + cqe_fp_flags = cqe_fp->type_error_flags; + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), - cqe_fp_flags, cqe->fast_path_cqe.status_flags, - le32_to_cpu(cqe->fast_path_cqe.rss_hash_result), - le16_to_cpu(cqe->fast_path_cqe.vlan_tag), - le16_to_cpu(cqe->fast_path_cqe.pkt_len)); + cqe_fp_flags, cqe_fp->status_flags, + le32_to_cpu(cqe_fp->rss_hash_result), + le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); /* is this a slowpath msg? */ - if (unlikely(CQE_TYPE(cqe_fp_flags))) { + if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { bnx2x_sp_event(fp, cqe); goto next_cqe; @@ -567,61 +627,59 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) rx_buf = &fp->rx_buf_ring[bd_cons]; skb = rx_buf->skb; prefetch(skb); - len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); - pad = cqe->fast_path_cqe.placement_offset; - /* - If CQE is marked both TPA_START and TPA_END it is - * a non-TPA CQE. - * - FP CQE will always have either TPA_START or/and - * TPA_STOP flags set. 
- */ - if ((!fp->disable_tpa) && - (TPA_TYPE(cqe_fp_flags) != - (TPA_TYPE_START | TPA_TYPE_END))) { - u16 queue = cqe->fast_path_cqe.queue_index; + if (!CQE_TYPE_FAST(cqe_fp_type)) { +#ifdef BNX2X_STOP_ON_ERROR + /* sanity check */ + if (fp->disable_tpa && + (CQE_TYPE_START(cqe_fp_type) || + CQE_TYPE_STOP(cqe_fp_type))) + BNX2X_ERR("START/STOP packet while " + "disable_tpa type %x\n", + CQE_TYPE(cqe_fp_type)); +#endif - if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { + if (CQE_TYPE_START(cqe_fp_type)) { + u16 queue = cqe_fp->queue_index; DP(NETIF_MSG_RX_STATUS, "calling tpa_start on queue %d\n", queue); bnx2x_tpa_start(fp, queue, skb, - bd_cons, bd_prod); + bd_cons, bd_prod, + cqe_fp); - /* Set Toeplitz hash for an LRO skb */ + /* Set Toeplitz hash for LRO skb */ bnx2x_set_skb_rxhash(bp, cqe, skb); goto next_rx; - } else { /* TPA_STOP */ + + } else { + u16 queue = + cqe->end_agg_cqe.queue_index; DP(NETIF_MSG_RX_STATUS, "calling tpa_stop on queue %d\n", queue); - if (!BNX2X_RX_SUM_FIX(cqe)) - BNX2X_ERR("STOP on none TCP " - "data\n"); - - /* This is a size of the linear data - on this skb */ - len = le16_to_cpu(cqe->fast_path_cqe. - len_on_bd); - bnx2x_tpa_stop(bp, fp, queue, pad, - len, cqe, comp_ring_cons); + bnx2x_tpa_stop(bp, fp, queue, + &cqe->end_agg_cqe, + comp_ring_cons); #ifdef BNX2X_STOP_ON_ERROR if (bp->panic) return 0; #endif - bnx2x_update_sge_prod(fp, - &cqe->fast_path_cqe); + bnx2x_update_sge_prod(fp, cqe_fp); goto next_cqe; } } - - dma_sync_single_for_device(&bp->pdev->dev, + /* non TPA */ + len = le16_to_cpu(cqe_fp->pkt_len); + pad = cqe_fp->placement_offset; + dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - pad + RX_COPY_THRESH, - DMA_FROM_DEVICE); + pad + RX_COPY_THRESH, + DMA_FROM_DEVICE); prefetch(((char *)(skb)) + L1_CACHE_BYTES); /* is this an error packet? 
*/ @@ -640,8 +698,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) (len <= RX_COPY_THRESH)) { struct sk_buff *new_skb; - new_skb = netdev_alloc_skb(bp->dev, - len + pad); + new_skb = netdev_alloc_skb(bp->dev, len + pad); if (new_skb == NULL) { DP(NETIF_MSG_RX_ERR, "ERROR packet dropped " @@ -687,6 +744,7 @@ reuse_rx: skb_checksum_none_assert(skb); if (bp->dev->features & NETIF_F_RXCSUM) { + if (likely(BNX2X_RX_CSUM_OK(cqe))) skb->ip_summed = CHECKSUM_UNNECESSARY; else @@ -696,10 +754,10 @@ reuse_rx: skb_record_rx_queue(skb, fp->index); - if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & - PARSING_FLAGS_VLAN) + if (le16_to_cpu(cqe_fp->pars_flags.flags) & + PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, - le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); + le16_to_cpu(cqe_fp->vlan_tag)); napi_gro_receive(&fp->napi, skb); @@ -737,12 +795,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) { struct bnx2x_fastpath *fp = fp_cookie; struct bnx2x *bp = fp->bp; - - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return IRQ_HANDLED; - } + u8 cos; DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB " "[fp %d fw_sd %d igusb %d]\n", @@ -756,7 +809,10 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) /* Handle Rx and Tx according to MSI-X vector */ prefetch(fp->rx_cons_sb); - prefetch(fp->tx_cons_sb); + + for_each_cos_in_tx_queue(fp, cos) + prefetch(fp->txdata[cos].tx_cons_sb); + prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); @@ -931,7 +987,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) { int func = BP_FUNC(bp); int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H; + ETH_MAX_AGGREGATION_QUEUES_E1H_E2; u16 ring_prod; int i, j; @@ -943,11 +999,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); if (!fp->disable_tpa) { - /* Fill the per-aggregation pool */ + /* Fill the per-aggregation pool */ for (i = 0; i < max_agg_queues; i++) { - fp->tpa_pool[i].skb = - netdev_alloc_skb(bp->dev, fp->rx_buf_size); - if (!fp->tpa_pool[i].skb) { + struct bnx2x_agg_info *tpa_info = + &fp->tpa_info[i]; + struct sw_rx_bd *first_buf = + &tpa_info->first_buf; + + first_buf->skb = netdev_alloc_skb(bp->dev, + fp->rx_buf_size); + if (!first_buf->skb) { BNX2X_ERR("Failed to allocate TPA " "skb pool for queue[%d] - " "disabling TPA on this " @@ -956,10 +1017,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) fp->disable_tpa = 1; break; } - dma_unmap_addr_set((struct sw_rx_bd *) - &bp->fp->tpa_pool[i], - mapping, 0); - fp->tpa_state[i] = BNX2X_TPA_STOP; + dma_unmap_addr_set(first_buf, mapping, 0); + tpa_info->tpa_state = BNX2X_TPA_STOP; } /* "next page" elements initialization */ @@ -975,13 +1034,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) { BNX2X_ERR("was only able to allocate " "%d rx sges\n", i); - BNX2X_ERR("disabling TPA for" - " queue[%d]\n", j); + BNX2X_ERR("disabling TPA for " + "queue[%d]\n", j); /* Cleanup already allocated elements */ - bnx2x_free_rx_sge_range(bp, - fp, ring_prod); - bnx2x_free_tpa_pool(bp, - fp, max_agg_queues); + bnx2x_free_rx_sge_range(bp, fp, + ring_prod); + bnx2x_free_tpa_pool(bp, fp, + max_agg_queues); fp->disable_tpa = 1; ring_prod = 0; break; @@ -1009,7 +1068,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) if (j != 0) continue; - if (!CHIP_IS_E2(bp)) {
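
Note how the ring-init code above degrades gracefully: when a per-queue aggregation buffer cannot be allocated, TPA is disabled for that queue rather than failing the whole initialization. A standalone sketch of the same fallback shape, with toy types standing in for skbs and DMA mappings (names invented for the example):

#include <stdio.h>
#include <stdlib.h>

#define TOY_MAX_AGG 16

struct toy_queue {
	void *agg_buf[TOY_MAX_AGG];
	int disable_tpa;
};

static void toy_fill_agg_pool(struct toy_queue *q, size_t buf_size)
{
	for (int i = 0; i < TOY_MAX_AGG; i++) {
		q->agg_buf[i] = malloc(buf_size);
		if (!q->agg_buf[i]) {
			/* roll back and fall back to the non-TPA path */
			while (i--)
				free(q->agg_buf[i]);
			q->disable_tpa = 1;
			fprintf(stderr, "aggregation disabled on this queue\n");
			return;
		}
	}
}

int main(void)
{
	struct toy_queue q = { 0 };

	toy_fill_agg_pool(&q, 4096);
	printf("disable_tpa = %d\n", q.disable_tpa);

	if (!q.disable_tpa)
		for (int i = 0; i < TOY_MAX_AGG; i++)
			free(q.agg_buf[i]);
	return 0;
}
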
+ if (CHIP_IS_E1(bp)) { REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func), U64_LO(fp->rx_comp_mapping)); @@ -1023,17 +1082,22 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) static void bnx2x_free_tx_skbs(struct bnx2x *bp) { int i; + u8 cos; for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; - u16 bd_cons = fp->tx_bd_cons; - u16 sw_prod = fp->tx_pkt_prod; - u16 sw_cons = fp->tx_pkt_cons; + u16 bd_cons = txdata->tx_bd_cons; + u16 sw_prod = txdata->tx_pkt_prod; + u16 sw_cons = txdata->tx_pkt_cons; - while (sw_cons != sw_prod) { - bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons)); - sw_cons++; + while (sw_cons != sw_prod) { + bd_cons = bnx2x_free_tx_pkt(bp, txdata, + TX_BD(sw_cons)); + sw_cons++; + } } } } @@ -1053,7 +1117,6 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp) if (skb == NULL) continue; - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), fp->rx_buf_size, DMA_FROM_DEVICE); @@ -1075,7 +1138,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) if (!fp->disable_tpa) bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : - ETH_MAX_AGGREGATION_QUEUES_E1H); + ETH_MAX_AGGREGATION_QUEUES_E1H_E2); } } @@ -1102,30 +1165,43 @@ void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) } } -static void bnx2x_free_msix_irqs(struct bnx2x *bp) +/** + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors + * + * @bp: driver handle + * @nvecs: number of vectors to be released + */ +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs) { - int i, offset = 1; + int i, offset = 0; - free_irq(bp->msix_table[0].vector, bp->dev); + if (nvecs == offset) + return; + free_irq(bp->msix_table[offset].vector, bp->dev); DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", - bp->msix_table[0].vector); - + bp->msix_table[offset].vector); + offset++; #ifdef BCM_CNIC + if (nvecs == offset) + return; offset++; #endif + for_each_eth_queue(bp, i) { - DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " - "state %x\n", i, bp->msix_table[i + offset].vector, - bnx2x_fp(bp, i, state)); + if (nvecs == offset) + return; + DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d " + "irq\n", i, bp->msix_table[offset].vector); - free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]); + free_irq(bp->msix_table[offset++].vector, &bp->fp[i]); } } void bnx2x_free_irq(struct bnx2x *bp) { if (bp->flags & USING_MSIX_FLAG) - bnx2x_free_msix_irqs(bp); + bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + + CNIC_PRESENT + 1); else if (bp->flags & USING_MSI_FLAG) free_irq(bp->pdev->irq, bp->dev); else @@ -1147,6 +1223,7 @@ int bnx2x_enable_msix(struct bnx2x *bp) bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); msix_vec++; #endif + /* We need separate vectors for ETH queues only (not FCoE) */ for_each_eth_queue(bp, i) { bp->msix_table[msix_vec].entry = msix_vec; DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " @@ -1154,7 +1231,7 @@ int bnx2x_enable_msix(struct bnx2x *bp) msix_vec++; } - req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1; + req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); @@ -1198,9 +1275,10 @@ int bnx2x_enable_msix(struct bnx2x *bp) static int bnx2x_req_msix_irqs(struct bnx2x *bp) { - int i, rc, offset = 1; + int i, rc, offset = 0; - rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, + rc = 
request_irq(bp->msix_table[offset++].vector, + bnx2x_msix_sp_int, 0, bp->dev->name, bp->dev); if (rc) { BNX2X_ERR("request sp irq failed\n"); @@ -1218,17 +1296,17 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) rc = request_irq(bp->msix_table[offset].vector, bnx2x_msix_fp_int, 0, fp->name, fp); if (rc) { - BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc); - bnx2x_free_msix_irqs(bp); + BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i, + bp->msix_table[offset].vector, rc); + bnx2x_free_msix_irqs(bp, offset); return -EBUSY; } offset++; - fp->state = BNX2X_FP_STATE_IRQ; } i = BNX2X_NUM_ETH_QUEUES(bp); - offset = 1 + CNIC_CONTEXT_USE; + offset = 1 + CNIC_PRESENT; netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" " ... fp[%d] %d\n", bp->msix_table[0].vector, @@ -1264,42 +1342,56 @@ static int bnx2x_req_irq(struct bnx2x *bp) rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev); - if (!rc) - bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ; - return rc; } -static void bnx2x_napi_enable(struct bnx2x *bp) +static inline int bnx2x_setup_irqs(struct bnx2x *bp) +{ + int rc = 0; + if (bp->flags & USING_MSIX_FLAG) { + rc = bnx2x_req_msix_irqs(bp); + if (rc) + return rc; + } else { + bnx2x_ack_int(bp); + rc = bnx2x_req_irq(bp); + if (rc) { + BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc); + return rc; + } + if (bp->flags & USING_MSI_FLAG) { + bp->dev->irq = bp->pdev->irq; + netdev_info(bp->dev, "using MSI IRQ %d\n", + bp->pdev->irq); + } + } + + return 0; +} + +static inline void bnx2x_napi_enable(struct bnx2x *bp) { int i; - for_each_napi_queue(bp, i) + for_each_rx_queue(bp, i) napi_enable(&bnx2x_fp(bp, i, napi)); } -static void bnx2x_napi_disable(struct bnx2x *bp) +static inline void bnx2x_napi_disable(struct bnx2x *bp) { int i; - for_each_napi_queue(bp, i) + for_each_rx_queue(bp, i) napi_disable(&bnx2x_fp(bp, i, napi)); } void bnx2x_netif_start(struct bnx2x *bp) { - int intr_sem; - - intr_sem = atomic_dec_and_test(&bp->intr_sem); - smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ - - if (intr_sem) { - if (netif_running(bp->dev)) { - bnx2x_napi_enable(bp); - bnx2x_int_enable(bp); - if (bp->state == BNX2X_STATE_OPEN) - netif_tx_wake_all_queues(bp->dev); - } + if (netif_running(bp->dev)) { + bnx2x_napi_enable(bp); + bnx2x_int_enable(bp); + if (bp->state == BNX2X_STATE_OPEN) + netif_tx_wake_all_queues(bp->dev); } } @@ -1307,13 +1399,12 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) { bnx2x_int_disable_sync(bp, disable_hw); bnx2x_napi_disable(bp); - netif_tx_disable(bp->dev); } u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) { -#ifdef BCM_CNIC struct bnx2x *bp = netdev_priv(dev); +#ifdef BCM_CNIC if (NO_FCOE(bp)) return skb_tx_hash(dev, skb); else { @@ -1330,13 +1421,12 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) /* If ethertype is FCoE or FIP - use FCoE ring */ if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) - return bnx2x_fcoe(bp, index); + return bnx2x_fcoe_tx(bp, txq_index); } #endif /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring */ - return __skb_tx_hash(dev, skb, - dev->real_num_tx_queues - FCOE_CONTEXT_USE); + return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); } void bnx2x_set_num_queues(struct bnx2x *bp) @@ -1355,40 +1445,38 @@ void bnx2x_set_num_queues(struct bnx2x *bp) } /* Add special queues */ - bp->num_queues += NONE_ETH_CONTEXT_USE; + bp->num_queues += NON_ETH_CONTEXT_USE; } -#ifdef BCM_CNIC -static inline void 
bnx2x_set_fcoe_eth_macs(struct bnx2x *bp) +static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) { + int rc, tx, rx; + + tx = MAX_TXQS_PER_COS * bp->max_cos; + rx = BNX2X_NUM_ETH_QUEUES(bp); + +/* account for fcoe queue */ +#ifdef BCM_CNIC if (!NO_FCOE(bp)) { - if (!IS_MF_SD(bp)) - bnx2x_set_fip_eth_mac_addr(bp, 1); - bnx2x_set_all_enode_macs(bp, 1); - bp->flags |= FCOE_MACS_SET; + rx += FCOE_PRESENT; + tx += FCOE_PRESENT; } -} #endif -static void bnx2x_release_firmware(struct bnx2x *bp) -{ - kfree(bp->init_ops_offsets); - kfree(bp->init_ops); - kfree(bp->init_data); - release_firmware(bp->firmware); -} - -static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) -{ - int rc, num = bp->num_queues; + rc = netif_set_real_num_tx_queues(bp->dev, tx); + if (rc) { + BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc); + return rc; + } + rc = netif_set_real_num_rx_queues(bp->dev, rx); + if (rc) { + BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc); + return rc; + } -#ifdef BCM_CNIC - if (NO_FCOE(bp)) - num -= FCOE_CONTEXT_USE; + DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n", + tx, rx); -#endif - netif_set_real_num_tx_queues(bp->dev, num); - rc = netif_set_real_num_rx_queues(bp->dev, num); return rc; } @@ -1409,27 +1497,198 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) */ fp->rx_buf_size = BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + - BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; else fp->rx_buf_size = - bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + - IP_HEADER_ALIGNMENT_PADDING; + bp->dev->mtu + ETH_OVREHEAD + + BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + } +} + +static inline int bnx2x_init_rss_pf(struct bnx2x *bp) +{ + int i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); + + /* + * Prepare the initial contents of the indirection table if RSS is + * enabled + */ + if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { + for (i = 0; i < sizeof(ind_table); i++) + ind_table[i] = + bp->fp->cl_id + (i % num_eth_queues); + } + + /* + * For 57710 and 57711 SEARCHER configuration (rss_keys) is + * per-port, so if explicit configuration is needed, do it only + * for a PMF. + * + * For 57712 and newer on the other hand it's a per-function + * configuration. + */ + return bnx2x_config_rss_pf(bp, ind_table, + bp->port.pmf || !CHIP_IS_E1x(bp)); +} + +int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) +{ + struct bnx2x_config_rss_params params = {0}; + int i; + + /* Although RSS is meaningless when there is a single HW queue we + * still need it enabled in order to have HW Rx hash generated.
+ * + * if (!is_eth_multi(bp)) + * bp->multi_mode = ETH_RSS_MODE_DISABLED; + */ + + params.rss_obj = &bp->rss_conf_obj; + + __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); + + /* RSS mode */ + switch (bp->multi_mode) { + case ETH_RSS_MODE_DISABLED: + __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags); + break; + case ETH_RSS_MODE_REGULAR: + __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags); + break; + case ETH_RSS_MODE_VLAN_PRI: + __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags); + break; + case ETH_RSS_MODE_E1HOV_PRI: + __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags); + break; + case ETH_RSS_MODE_IP_DSCP: + __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags); + break; + default: + BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode); + return -EINVAL; + } + + /* If RSS is enabled */ + if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); + __set_bit(BNX2X_RSS_IPV6, &params.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags); + + /* Hash bits */ + params.rss_result_mask = MULTI_MASK; + + memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); + + if (config_hash) { + /* RSS keys */ + for (i = 0; i < sizeof(params.rss_key) / 4; i++) + params.rss_key[i] = random32(); + + __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); + } + } + + return bnx2x_config_rss(bp, &params); +} + +static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) +{ + struct bnx2x_func_state_params func_params = {0}; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_INIT; + + func_params.params.hw_init.load_phase = load_code; + + return bnx2x_func_state_change(bp, &func_params); +} + +/* + * Cleans the objects that have internal lists without sending + * ramrods. Should be run when interrupts are disabled. + */ +static void bnx2x_squeeze_objects(struct bnx2x *bp) +{ + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; + struct bnx2x_mcast_ramrod_params rparam = {0}; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; + + /***************** Cleanup MACs' object first *************************/ + + /* Wait for completion of the request */ + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Perform a dry cleanup */ + __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); + + /* Clean ETH primary MAC */ + __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) + BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); + + /* Cleanup UC list */ + vlan_mac_flags = 0; + __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags); + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, + &ramrod_flags); + if (rc != 0) + BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc); + + /***************** Now clean mcast object *****************************/ + rparam.mcast_obj = &bp->mcast_obj; + __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); + + /* Add a DEL command... 
*/ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to add a new DEL command to a multi-cast " + "object: %d\n", rc); + + /* ...and wait until all pending commands are cleared */ + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + while (rc != 0) { + if (rc < 0) { + BNX2X_ERR("Failed to clean multi-cast object: %d\n", + rc); + return; + } + + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); } } +#ifndef BNX2X_STOP_ON_ERROR +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + goto label; \ + } while (0) +#else +#define LOAD_ERROR_EXIT(bp, label) \ + do { \ + (bp)->state = BNX2X_STATE_ERROR; \ + (bp)->panic = 1; \ + return -EBUSY; \ + } while (0) +#endif + /* must be called with rtnl_lock */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) { + int port = BP_PORT(bp); u32 load_code; int i, rc; - /* Set init arrays */ - rc = bnx2x_init_firmware(bp); - if (rc) { - BNX2X_ERR("Error loading firmware\n"); - return rc; - } - #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) return -EPERM; @@ -1447,24 +1706,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* must be called before memory allocation and HW init */ bnx2x_ilt_set_info(bp); - /* zero fastpath structures preserving invariants like napi which are - * allocated only once + /* + * Zero fastpath structures preserving invariants like napi, which are + * allocated only once, fp index, max_cos, bp pointer. + * Also set fp->disable_tpa. */ for_each_queue(bp, i) bnx2x_bz_fp(bp, i); + /* Set the receive queues buffer size */ bnx2x_set_rx_buf_size(bp); - for_each_queue(bp, i) - bnx2x_fp(bp, i, disable_tpa) = - ((bp->flags & TPA_ENABLE_FLAG) == 0); - -#ifdef BCM_CNIC - /* We don't want TPA on FCoE L2 ring */ - bnx2x_fcoe(bp, disable_tpa) = 1; -#endif - if (bnx2x_alloc_mem(bp)) return -ENOMEM; @@ -1475,31 +1728,36 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) rc = bnx2x_set_real_num_queues(bp); if (rc) { BNX2X_ERR("Unable to set real_num_queues\n"); - goto load_error0; + LOAD_ERROR_EXIT(bp, load_error0); } + /* configure multi cos mappings in kernel. + * this configuration may be overridden by a multi class queue discipline + * or by a dcbx negotiation result.
+ */ + bnx2x_setup_tc(bp->dev, bp->max_cos); + bnx2x_napi_enable(bp); /* Send LOAD_REQUEST command to MCP - Returns the type of LOAD command: - if it is the first port to be initialized - common blocks should be initialized, otherwise - not - */ + * Returns the type of LOAD command: + * if it is the first port to be initialized + * common blocks should be initialized, otherwise - not + */ if (!BP_NOMCP(bp)) { load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EBUSY; - goto load_error1; + LOAD_ERROR_EXIT(bp, load_error1); } if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { rc = -EBUSY; /* other port in diagnostic mode */ - goto load_error1; + LOAD_ERROR_EXIT(bp, load_error1); } } else { int path = BP_PATH(bp); - int port = BP_PORT(bp); DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], @@ -1519,36 +1777,60 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || - (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) + (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { bp->port.pmf = 1; - else + /* + * We need the barrier to ensure the ordering between the + * writing to bp->port.pmf here and reading it from the + * bnx2x_periodic_task(). + */ + smp_mb(); + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + } else bp->port.pmf = 0; + DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + /* Init Function state controlling object */ + bnx2x__init_func_obj(bp); + /* Initialize HW */ rc = bnx2x_init_hw(bp, load_code); if (rc) { BNX2X_ERR("HW init failed, aborting\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - goto load_error2; + LOAD_ERROR_EXIT(bp, load_error2); } /* Connect to IRQs */ rc = bnx2x_setup_irqs(bp); if (rc) { bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); - goto load_error2; + LOAD_ERROR_EXIT(bp, load_error2); } /* Setup NIC internals and enable interrupts */ bnx2x_nic_init(bp, load_code); + /* Init per-function objects */ + bnx2x_init_bp_objs(bp); + if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) && - (bp->common.shmem2_base)) - SHMEM2_WR(bp, dcc_support, - (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | - SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + (bp->common.shmem2_base)) { + if (SHMEM2_HAS(bp, dcc_support)) + SHMEM2_WR(bp, dcc_support, + (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | + SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); + } + + bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; + rc = bnx2x_func_start(bp); + if (rc) { + BNX2X_ERR("Function start failed!\n"); + bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0); + LOAD_ERROR_EXIT(bp, load_error3); + } /* Send LOAD_DONE command to MCP */ if (!BP_NOMCP(bp)) { @@ -1556,74 +1838,38 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EBUSY; - goto load_error3; + LOAD_ERROR_EXIT(bp, load_error3); } } - bnx2x_dcbx_init(bp); - - bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; - - rc = bnx2x_func_start(bp); - if (rc) { - BNX2X_ERR("Function start failed!\n"); -#ifndef BNX2X_STOP_ON_ERROR - goto load_error3; -#else - bp->panic = 1; - return -EBUSY; -#endif - } - - rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */); + rc = bnx2x_setup_leading(bp); if (rc) { BNX2X_ERR("Setup leading failed!\n"); -#ifndef BNX2X_STOP_ON_ERROR - goto load_error3; -#else - bp->panic = 1; - return -EBUSY; -#endif - } - - 
if (!CHIP_IS_E1(bp) && - (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) { - DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); - bp->flags |= MF_FUNC_DIS; + LOAD_ERROR_EXIT(bp, load_error3); } #ifdef BCM_CNIC /* Enable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1); + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1); #endif for_each_nondefault_queue(bp, i) { - rc = bnx2x_setup_client(bp, &bp->fp[i], 0); + rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); if (rc) -#ifdef BCM_CNIC - goto load_error4; -#else - goto load_error3; -#endif + LOAD_ERROR_EXIT(bp, load_error4); } + rc = bnx2x_init_rss_pf(bp); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); + /* Now when Clients are configured we are ready to work */ bp->state = BNX2X_STATE_OPEN; -#ifdef BCM_CNIC - bnx2x_set_fcoe_eth_macs(bp); -#endif - - bnx2x_set_eth_mac(bp, 1); - - /* Clear MC configuration */ - if (CHIP_IS_E1(bp)) - bnx2x_invalidate_e1_mc_list(bp); - else - bnx2x_invalidate_e1h_mc_list(bp); - - /* Clear UC lists configuration */ - bnx2x_invalidate_uc_list(bp); + /* Configure a ucast MAC */ + rc = bnx2x_set_eth_mac(bp, true); + if (rc) + LOAD_ERROR_EXIT(bp, load_error4); if (bp->pending_max) { bnx2x_update_max_mf_config(bp, bp->pending_max); @@ -1633,15 +1879,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (bp->port.pmf) bnx2x_initial_phy_init(bp, load_mode); - /* Initialize Rx filtering */ + /* Start fast path */ + + /* Initialize Rx filter. */ + netif_addr_lock_bh(bp->dev); bnx2x_set_rx_mode(bp->dev); + netif_addr_unlock_bh(bp->dev); - /* Start fast path */ + /* Start the Tx */ switch (load_mode) { case LOAD_NORMAL: /* Tx queue should be only reenabled */ netif_tx_wake_all_queues(bp->dev); - /* Initialize the receive filter. */ break; case LOAD_OPEN: @@ -1670,18 +1919,28 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) #endif bnx2x_inc_load_cnt(bp); - bnx2x_release_firmware(bp); + /* Wait for all pending SP commands to complete */ + if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { + BNX2X_ERR("Timeout waiting for SP elements to complete\n"); + bnx2x_nic_unload(bp, UNLOAD_CLOSE); + return -EBUSY; + } + bnx2x_dcbx_init(bp); return 0; -#ifdef BCM_CNIC +#ifndef BNX2X_STOP_ON_ERROR load_error4: +#ifdef BCM_CNIC /* Disable Timer scan */ - REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0); + REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); #endif load_error3: bnx2x_int_disable_sync(bp, 1); + /* Clean queueable objects */ + bnx2x_squeeze_objects(bp); + /* Free SKBs, SGEs, TPA pool and driver internals */ bnx2x_free_skbs(bp); for_each_rx_queue(bp, i) @@ -1701,42 +1960,52 @@ load_error1: load_error0: bnx2x_free_mem(bp); - bnx2x_release_firmware(bp); - return rc; +#endif /* ! BNX2X_STOP_ON_ERROR */ } /* must be called with rtnl_lock */ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) { int i; - - if (bp->state == BNX2X_STATE_CLOSED) { - /* Interface has been removed - nothing to recover */ + bool global = false; + + if ((bp->state == BNX2X_STATE_CLOSED) || + (bp->state == BNX2X_STATE_ERROR)) { + /* We can get here if the driver has been unloaded + * during parity error recovery and is either waiting for a + * leader to complete or for other functions to unload and + * then ifdown has been issued. In this case we want to + * unload and let other functions complete a recovery + * process. 
+ */ bp->recovery_state = BNX2X_RECOVERY_DONE; bp->is_leader = 0; - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); - smp_wmb(); + bnx2x_release_leader_lock(bp); + smp_mb(); + + DP(NETIF_MSG_HW, "Releasing a leadership...\n"); return -EINVAL; } + /* Stop Tx */ + bnx2x_tx_disable(bp); + #ifdef BCM_CNIC bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); #endif bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; + smp_mb(); - /* Set "drop all" */ bp->rx_mode = BNX2X_RX_MODE_NONE; - bnx2x_set_storm_rx_mode(bp); - - /* Stop Tx */ - bnx2x_tx_disable(bp); del_timer_sync(&bp->timer); - SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, - (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); + /* Set ALWAYS_ALIVE bit in shmem */ + bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; + + bnx2x_drv_pulse(bp); bnx2x_stats_handle(bp, STATS_EVENT_STOP); @@ -1744,13 +2013,38 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) if (unload_mode != UNLOAD_RECOVERY) bnx2x_chip_cleanup(bp, unload_mode); else { - /* Disable HW interrupts, NAPI and Tx */ + /* Send the UNLOAD_REQUEST to the MCP */ + bnx2x_send_unload_req(bp, unload_mode); + + /* + * Prevent transactions to host from the functions on the + * engine that doesn't reset global blocks in case of global + * attention once global blocks are reset and gates are opened + * (the engine whose leader will perform the recovery + * last). + */ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_disable(bp); + + /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); /* Release IRQs */ bnx2x_free_irq(bp); + + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp); } + /* + * At this stage no more interrupts will arrive so we may safely clean + * the queueable objects here in case they failed to get cleaned so far. + */ + bnx2x_squeeze_objects(bp); + + /* There should be no more pending SP commands at this stage */ + bp->sp_state = 0; + bp->port.pmf = 0; /* Free SKBs, SGEs, TPA pool and driver internals */ @@ -1762,17 +2056,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) bp->state = BNX2X_STATE_CLOSED; + /* Check if there are pending parity attentions. If there are - set + * RECOVERY_IN_PROGRESS. + */ + if (bnx2x_chk_parity_attn(bp, &global, false)) { + bnx2x_set_reset_in_progress(bp); + + /* Set RESET_IS_GLOBAL if needed */ + if (global) + bnx2x_set_reset_global(bp); + } + + /* The last driver must disable a "close the gate" if there is no + * parity attention or "process kill" pending. 
*/ - if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) && - bnx2x_reset_is_done(bp)) + if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) bnx2x_disable_close_the_gate(bp); - /* Reset MCP mail box sequence if there is on going recovery */ - if (unload_mode == UNLOAD_RECOVERY) - bp->fw_seq = 0; - return 0; } @@ -1834,6 +2135,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) int bnx2x_poll(struct napi_struct *napi, int budget) { int work_done = 0; + u8 cos; struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, napi); struct bnx2x *bp = fp->bp; @@ -1846,8 +2148,10 @@ int bnx2x_poll(struct napi_struct *napi, int budget) } #endif - if (bnx2x_has_tx_work(fp)) - bnx2x_tx_int(fp); + for_each_cos_in_tx_queue(fp, cos) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + bnx2x_tx_int(bp, &fp->txdata[cos]); + if (bnx2x_has_rx_work(fp)) { work_done += bnx2x_rx_int(fp, budget - work_done); @@ -1909,7 +2213,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget) * in Other Operating Systems(TM) */ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, - struct bnx2x_fastpath *fp, + struct bnx2x_fp_txdata *txdata, struct sw_tx_bd *tx_buf, struct eth_tx_start_bd **tx_bd, u16 hlen, u16 bd_prod, int nbd) @@ -1930,7 +2234,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp, /* now get a new data BD * (after the pbd) and fill it */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd; + d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), le32_to_cpu(h_tx_bd->addr_lo)) + hlen; @@ -2148,6 +2452,22 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, sizeof(struct udphdr) - skb->data; } +static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb, + struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) +{ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; + + if (xmit_type & XMIT_CSUM_V4) + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IP_CSUM; + else + tx_start_bd->bd_flags.as_bitfield |= + ETH_TX_BD_FLAGS_IPV6; + + if (!(xmit_type & XMIT_CSUM_TCP)) + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP; +} + /** * bnx2x_set_pbd_csum - update PBD with checksum and return header length * @@ -2210,16 +2530,18 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb, netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_fastpath *fp; struct netdev_queue *txq; + struct bnx2x_fp_txdata *txdata; struct sw_tx_bd *tx_buf; - struct eth_tx_start_bd *tx_start_bd; + struct eth_tx_start_bd *tx_start_bd, *first_bd; struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; - int nbd, fp_index; + int nbd, txq_index, fp_index, txdata_index; dma_addr_t mapping; u32 xmit_type = bnx2x_xmit_type(bp, skb); int i; @@ -2233,12 +2555,43 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; #endif - fp_index = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, fp_index); + txq_index = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, txq_index); + BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); + + /* decode the fastpath index and the cos index from the txq */ + fp_index = TXQ_TO_FP(txq_index); + txdata_index = 
TXQ_TO_COS(txq_index); + +#ifdef BCM_CNIC + /* + * Override the above for the FCoE queue: + * - FCoE fp entry is right after the ETH entries. + * - FCoE L2 queue uses bp->txdata[0] only. + */ + if (unlikely(!NO_FCOE(bp) && (txq_index == + bnx2x_fcoe_tx(bp, txq_index)))) { + fp_index = FCOE_IDX; + txdata_index = 0; + } +#endif + + /* enable this debug print to view the transmission queue being used + DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d", + txq_index, fp_index, txdata_index); */ + + /* locate the fastpath and the txdata */ fp = &bp->fp[fp_index]; + txdata = &fp->txdata[txdata_index]; - if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { + /* enable this debug print to view the transmission details + DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d" + " tx_data ptr %p fp pointer %p", + txdata->cid, fp_index, txdata_index, txdata, fp); */ + + if (unlikely(bnx2x_tx_avail(bp, txdata) < + (skb_shinfo(skb)->nr_frags + 3))) { fp->eth_q_stats.driver_xoff++; netif_tx_stop_queue(txq); BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); @@ -2247,7 +2600,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x " "protocol(%x,%x) gso type %x xmit_type %x\n", - fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, + txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); eth = (struct ethhdr *)skb->data; @@ -2275,7 +2628,15 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #endif - + /* Map skb linear data for DMA */ + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - " + "silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } /* Please read carefully. First we use one BD which we mark as start, then we have a parsing info BD (used for TSO or xsum), And above all, all pdb sizes are in words - NOT DWORDS! 
*/ - pkt_prod = fp->tx_pkt_prod++; - bd_prod = TX_BD(fp->tx_bd_prod); + /* get current pkt produced now - advance it just before sending packet + * since mapping of pages may fail and cause packet to be dropped + */ + pkt_prod = txdata->tx_pkt_prod; + bd_prod = TX_BD(txdata->tx_bd_prod); - /* get a tx_buf and first BD */ - tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)]; - tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; + /* get a tx_buf and first BD + * tx_start_bd may be changed during SPLIT, + * but first_bd will always stay first + */ + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; + first_bd = tx_start_bd; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE, @@ -2300,13 +2668,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1); /* remember the first BD of the packet */ - tx_buf->first_bd = fp->tx_bd_prod; + tx_buf->first_bd = txdata->tx_bd_prod; tx_buf->skb = skb; tx_buf->flags = 0; DP(NETIF_MSG_TX_QUEUED, "sending pkt %u @%p next_idx %u bd %u @%p\n", - pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); + pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd); if (vlan_tx_tag_present(skb)) { tx_start_bd->vlan_or_ethertype = @@ -2319,31 +2687,33 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - if (xmit_type & XMIT_CSUM) { - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; - - if (xmit_type & XMIT_CSUM_V4) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IP_CSUM; - else - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IPV6; - - if (!(xmit_type & XMIT_CSUM_TCP)) - tx_start_bd->bd_flags.as_bitfield |= - ETH_TX_BD_FLAGS_IS_UDP; - } + if (xmit_type & XMIT_CSUM) + bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type); - if (CHIP_IS_E2(bp)) { - pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2; + if (!CHIP_IS_E1x(bp)) { + pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); /* Set PBD in checksum offload case */ if (xmit_type & XMIT_CSUM) hlen = bnx2x_set_pbd_csum_e2(bp, skb, &pbd_e2_parsing_data, xmit_type); + if (IS_MF_SI(bp)) { + /* + * fill in the MAC addresses in the PBD - for local + * switching + */ + bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi, + &pbd_e2->src_mac_addr_mid, + &pbd_e2->src_mac_addr_lo, + eth->h_source); + bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi, + &pbd_e2->dst_mac_addr_mid, + &pbd_e2->dst_mac_addr_lo, + eth->h_dest); + } } else { - pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; + pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); /* Set PBD in checksum offload case */ if (xmit_type & XMIT_CSUM) @@ -2351,15 +2721,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) } - /* Map skb linear data for DMA */ - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - /* Setup the data pointer of the first BD of the packet */ tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); - nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */ - tx_start_bd->nbd = cpu_to_le16(nbd); + nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ tx_start_bd->nbytes = 
cpu_to_le16(skb_headlen(skb)); pkt_size = tx_start_bd->nbytes; @@ -2380,9 +2745,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; if (unlikely(skb_headlen(skb) > hlen)) - bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, - hlen, bd_prod, ++nbd); - if (CHIP_IS_E2(bp)) + bd_prod = bnx2x_tx_split(bp, txdata, tx_buf, + &tx_start_bd, hlen, + bd_prod, ++nbd); + if (!CHIP_IS_E1x(bp)) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); else @@ -2401,19 +2767,35 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + mapping = dma_map_page(&bp->pdev->dev, frag->page, + frag->page_offset, frag->size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + + DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " + "dropping packet...\n"); + + /* we need to unmap all buffers already mapped + * for this SKB; + * first_bd->nbd needs to be properly updated + * before call to bnx2x_free_tx_pkt + */ + first_bd->nbd = cpu_to_le16(nbd); + bnx2x_free_tx_pkt(bp, txdata, + TX_BD(txdata->tx_pkt_prod)); + return NETDEV_TX_OK; + } + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd; + tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; if (total_pkt_bd == NULL) - total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; - - mapping = dma_map_page(&bp->pdev->dev, frag->page, - frag->page_offset, - frag->size, DMA_TO_DEVICE); + total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd; tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); tx_data_bd->nbytes = cpu_to_le16(frag->size); le16_add_cpu(&pkt_size, frag->size); + nbd++; DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p addr (%x:%x) nbytes %d\n", @@ -2423,6 +2805,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd); + /* update with actual num BDs */ + first_bd->nbd = cpu_to_le16(nbd); + bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); /* now send a tx doorbell, counting the next BD @@ -2431,6 +2816,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (TX_BD_POFF(bd_prod) < nbd) nbd++; + /* total_pkt_bytes should be set on the first data BD if + * it's not an LSO packet and there is more than one + * data BD. In this case pkt_size is limited by an MTU value. + * However we prefer to set it for an LSO packet (while we don't + * have to) in order to save some CPU cycles in a non-LSO + * case, when we care much more about them. + */ if (total_pkt_bd != NULL) total_pkt_bd->total_pkt_bytes = pkt_size; @@ -2451,6 +2843,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) pbd_e2->parsing_data); DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); + txdata->tx_pkt_prod++; /* * Make sure that the BD data is updated before updating the producer * since FW might read the BD right after the producer is updated. 
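
The comment above states the classic publish ordering: every BD write must be globally visible before the producer index and doorbell that announce it, which is what the wmb() ahead of DOORBELL() enforces. A self-contained C11 sketch of the same rule, with a release store standing in for the barrier (illustration only, with invented toy_ names, not the driver's code):

#include <stdatomic.h>
#include <stdint.h>

#define TOY_RING_SIZE 256

struct toy_desc {
	uint64_t addr;
	uint16_t len;
};

struct toy_txq {
	struct toy_desc ring[TOY_RING_SIZE];
	_Atomic uint16_t prod;		/* index the "hardware" polls */
};

static void toy_publish(struct toy_txq *q, uint64_t addr, uint16_t len)
{
	uint16_t p = atomic_load_explicit(&q->prod, memory_order_relaxed);
	struct toy_desc *d = &q->ring[p % TOY_RING_SIZE];

	/* fill the descriptor first... */
	d->addr = addr;
	d->len = len;

	/* ...then publish: the release store orders the descriptor
	 * writes before the new producer value, playing the role that
	 * wmb() + DOORBELL() play in the driver
	 */
	atomic_store_explicit(&q->prod, p + 1, memory_order_release);
}

int main(void)
{
	static struct toy_txq q;

	toy_publish(&q, 0x1000, 64);
	return 0;
}
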
@@ -2460,16 +2853,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) */ wmb(); - fp->tx_db.data.prod += nbd; + txdata->tx_db.data.prod += nbd; barrier(); - DOORBELL(bp, fp->cid, fp->tx_db.raw); + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); mmiowb(); - fp->tx_bd_prod += nbd; + txdata->tx_bd_prod += nbd; - if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { + if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) { netif_tx_stop_queue(txq); /* paired memory barrier is in bnx2x_tx_int(), we have to keep @@ -2478,34 +2871,110 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) smp_mb(); fp->eth_q_stats.driver_xoff++; - if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) + if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3) netif_tx_wake_queue(txq); } - fp->tx_pkt++; + txdata->tx_pkt++; return NETDEV_TX_OK; } +/** + * bnx2x_setup_tc - routine to configure net_device for multi tc + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + * + * callback connected to the ndo_setup_tc function pointer + */ +int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) +{ + int cos, prio, count, offset; + struct bnx2x *bp = netdev_priv(dev); + + /* setup tc must be called under rtnl lock */ + ASSERT_RTNL(); + + /* no traffic classes requested. aborting */ + if (!num_tc) { + netdev_reset_tc(dev); + return 0; + } + + /* requested to support too many traffic classes */ + if (num_tc > bp->max_cos) { + DP(NETIF_MSG_TX_ERR, "support for too many traffic classes" + " requested: %d. max supported is %d", + num_tc, bp->max_cos); + return -EINVAL; + } + + /* declare amount of supported traffic classes */ + if (netdev_set_num_tc(dev, num_tc)) { + DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes", + num_tc); + return -EINVAL; + } + + /* configure priority to traffic class mapping */ + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", + prio, bp->prio_to_cos[prio]); + } + + + /* Use this configuration to differentiate tc0 from other COSes This can be used for ets or pfc, and save the effort of setting up a multi class queue disc or negotiating DCBX with a switch netdev_set_prio_tc_map(dev, 0, 0); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0); + for (prio = 1; prio < 16; prio++) { + netdev_set_prio_tc_map(dev, prio, 1); + DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1); + } */ + + /* configure traffic class to transmission queue mapping */ + for (cos = 0; cos < bp->max_cos; cos++) { + count = BNX2X_NUM_ETH_QUEUES(bp); + offset = cos * MAX_TXQS_PER_COS; + netdev_set_tc_queue(dev, cos, count, offset); + DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d", + cos, offset, count); + } + + return 0; +} + /* called with rtnl_lock */ int bnx2x_change_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct bnx2x *bp = netdev_priv(dev); + int rc = 0; if (!is_valid_ether_addr((u8 *)(addr->sa_data))) return -EINVAL; + if (netif_running(dev)) { + rc = bnx2x_set_eth_mac(bp, false); + if (rc) + return rc; + } + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + if (netif_running(dev)) - bnx2x_set_eth_mac(bp, 1); + rc = bnx2x_set_eth_mac(bp, true); - return 0; + return rc; } static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) { union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk); struct bnx2x_fastpath *fp = &bp->fp[fp_index]; + u8 cos; /* 
 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 {
 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+	u8 cos;
 
 	/* Common */
 #ifdef BCM_CNIC
@@ -2516,7 +2985,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 	} else {
 #endif
 		/* status blocks */
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_FREE(sb->e2_sb,
 				       bnx2x_fp(bp, fp_index,
 						status_blk_mapping),
@@ -2554,10 +3023,18 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
 	/* Tx */
 	if (!skip_tx_queue(bp, fp_index)) {
 		/* fastpath tx rings: tx_buf tx_desc */
-		BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
-		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
-			       bnx2x_fp(bp, fp_index, tx_desc_mapping),
-			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+			DP(BNX2X_MSG_SP,
+			   "freeing tx memory of fp %d cos %d cid %d",
+			   fp_index, cos, txdata->cid);
+
+			BNX2X_FREE(txdata->tx_buf_ring);
+			BNX2X_PCI_FREE(txdata->tx_desc_ring,
+				txdata->tx_desc_mapping,
+				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+		}
 	}
 	/* end of fastpath */
 }
@@ -2572,7 +3049,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp)
 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		bnx2x_fp(bp, index, sb_index_values) =
 			(__le16 *)status_blk.e2_sb->sb.index_values;
 		bnx2x_fp(bp, index, sb_running_index) =
@@ -2590,26 +3067,24 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 	union host_hc_status_block *sb;
 	struct bnx2x_fastpath *fp = &bp->fp[index];
 	int ring_size = 0;
+	u8 cos;
 
 	/* if rx_ring_size specified - use it */
 	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-			   MAX_RX_AVAIL/bp->num_queues;
+			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
 
 	/* allocate at least number of buffers required by FW */
-	rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
 						    MIN_RX_SIZE_TPA,
 				  rx_ring_size);
 
-	bnx2x_fp(bp, index, bp) = bp;
-	bnx2x_fp(bp, index, index) = index;
-
 	/* Common */
 	sb = &bnx2x_fp(bp, index, status_blk);
 #ifdef BCM_CNIC
 	if (!IS_FCOE_IDX(index)) {
 #endif
 		/* status blocks */
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_ALLOC(sb->e2_sb,
 				&bnx2x_fp(bp, index, status_blk_mapping),
 				sizeof(struct host_hc_status_block_e2));
@@ -2620,16 +3095,29 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 #ifdef BCM_CNIC
 	}
 #endif
-	set_sb_shortcuts(bp, index);
+
+	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+	 * set shortcuts for it.
+	 */
+	if (!IS_FCOE_IDX(index))
+		set_sb_shortcuts(bp, index);
 
 	/* Tx */
 	if (!skip_tx_queue(bp, index)) {
 		/* fastpath tx rings: tx_buf tx_desc */
-		BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+			DP(BNX2X_MSG_SP, "allocating tx memory of "
+					 "fp %d cos %d",
+			   index, cos);
+
+			BNX2X_ALLOC(txdata->tx_buf_ring,
 				sizeof(struct sw_tx_bd) * NUM_TX_BD);
-		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
-				&bnx2x_fp(bp, index, tx_desc_mapping),
+			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
+				&txdata->tx_desc_mapping,
 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+		}
 	}
 
 	/* Rx */
@@ -2672,7 +3160,7 @@ alloc_mem_err:
 						index, ring_size);
 	/* FW will drop all packets if queue is not big enough,
 	 * In these cases we disable the queue
-	 * Min size diferent for TPA and non-TPA queues
+	 * Min size is different for OOO, TPA and non-TPA queues
 	 */
 	if (ring_size < (fp->disable_tpa ?
 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
@@ -2690,17 +3178,24 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 
 	/**
 	 * 1. Allocate FP for leading - fatal if error
 	 * 2. {CNIC} Allocate FCoE FP - fatal if error
-	 * 3. Allocate RSS - fix number of queues if error
+	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
+	 * 4. Allocate RSS - fix number of queues if error
 	 */
 
 	/* leading */
 	if (bnx2x_alloc_fp_mem_at(bp, 0))
 		return -ENOMEM;
+
 #ifdef BCM_CNIC
-	/* FCoE */
-	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
-		return -ENOMEM;
+	if (!NO_FCOE(bp))
+		/* FCoE */
+		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+			/* we will fail the load process instead of marking
+			 * NO_FCOE_FLAG
+			 */
+			return -ENOMEM;
 #endif
+
 	/* RSS */
 	for_each_nondefault_eth_queue(bp, i)
 		if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -2718,7 +3213,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 		 *	FCOE_IDX < FWD_IDX < OOO_IDX
 		 */
 
-		/* move FCoE fp */
+		/* move FCoE fp even if NO_FCOE_FLAG is on */
 		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
 #endif
 		bp->num_queues -= delta;
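/* A small illustration (not driver code) of the rx ring sizing policy in
 * bnx2x_alloc_fp_mem_at() above: a user-requested size wins, otherwise
 * MAX_RX_AVAIL is split across the RX queues, and the result is clamped
 * to the minimum the FW requires for the queue flavor.
 */
static int example_rx_ring_size(int requested, int num_rx_queues, bool tpa)
{
	int size = requested ? requested : MAX_RX_AVAIL / num_rx_queues;
	int min_size = tpa ? MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA;

	return size > min_size ? size : min_size;
}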
@@ -2729,30 +3224,6 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 	return 0;
 }
 
-static int bnx2x_setup_irqs(struct bnx2x *bp)
-{
-	int rc = 0;
-	if (bp->flags & USING_MSIX_FLAG) {
-		rc = bnx2x_req_msix_irqs(bp);
-		if (rc)
-			return rc;
-	} else {
-		bnx2x_ack_int(bp);
-		rc = bnx2x_req_irq(bp);
-		if (rc) {
-			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
-			return rc;
-		}
-		if (bp->flags & USING_MSI_FLAG) {
-			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI IRQ %d\n",
-				    bp->pdev->irq);
-		}
-	}
-
-	return 0;
-}
-
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
 	kfree(bp->fp);
@@ -2765,16 +3236,23 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	struct bnx2x_fastpath *fp;
 	struct msix_entry *tbl;
 	struct bnx2x_ilt *ilt;
+	int msix_table_size = 0;
+
+	/*
+	 * The biggest MSI-X table we might need is the maximum number of
+	 * fast path IGU SBs plus the default SB (for PF).
+	 */
+	msix_table_size = bp->igu_sb_cnt + 1;
 
-	/* fp array */
-	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
+	/* fp array: RSS plus CNIC related L2 queues */
+	fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+		     sizeof(*fp), GFP_KERNEL);
 	if (!fp)
 		goto alloc_err;
 	bp->fp = fp;
 
 	/* msix table */
-	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
-		      GFP_KERNEL);
+	tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
 	if (!tbl)
 		goto alloc_err;
 	bp->msix_table = tbl;
@@ -2792,7 +3270,7 @@ alloc_err:
 
 }
 
-static int bnx2x_reload_if_running(struct net_device *dev)
+int bnx2x_reload_if_running(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 
@@ -2803,6 +3281,78 @@ static int bnx2x_reload_if_running(struct net_device *dev)
 	return bnx2x_nic_load(bp, LOAD_NORMAL);
 }
 
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = 0;
+	if (bp->link_params.num_phys <= 1)
+		return INT_PHY;
+
+	if (bp->link_vars.link_up) {
+		sel_phy_idx = EXT_PHY1;
+		/* In case link is SERDES, check if the EXT_PHY2 is the one */
+		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+			sel_phy_idx = EXT_PHY2;
+	} else {
+
+		switch (bnx2x_phy_selection(&bp->link_params)) {
+		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		       sel_phy_idx = EXT_PHY1;
+		       break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		       sel_phy_idx = EXT_PHY2;
+		       break;
+		}
+	}
+
+	return sel_phy_idx;
+
+}
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+	/*
+	 * The selected active PHY is always after swapping (in case PHY
+	 * swapping is enabled). So when swapping is enabled, we need to
+	 * reverse the configuration
+	 */
+
+	if (bp->link_params.multi_phy_config &
+	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+		if (sel_phy_idx == EXT_PHY1)
+			sel_phy_idx = EXT_PHY2;
+		else if (sel_phy_idx == EXT_PHY2)
+			sel_phy_idx = EXT_PHY1;
+	}
+	return LINK_CONFIG_IDX(sel_phy_idx);
+}
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	switch (type) {
+	case NETDEV_FCOE_WWNN:
+		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+				cp->fcoe_wwn_node_name_lo);
+		break;
+	case NETDEV_FCOE_WWPN:
+		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+				cp->fcoe_wwn_port_name_lo);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
 /* called with rtnl_lock */
 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 {
@@ -2882,8 +3432,13 @@ void bnx2x_tx_timeout(struct net_device *dev)
 	if (!bp->panic)
 		bnx2x_panic();
 #endif
+
+	smp_mb__before_clear_bit();
+	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+
 	/* This allows the netif to be shutdown gracefully before resetting */
-	schedule_delayed_work(&bp->reset_task, 0);
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
 }
 
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -2954,3 +3509,57 @@ int bnx2x_resume(struct pci_dev *pdev)
 
 	return rc;
 }
+
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid)
+{
+	/* ustorm cxt validation */
+	cxt->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+	/* xcontext validation */
+	cxt->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+					   u8 fw_sb_id, u8 sb_index,
+					   u8 ticks)
+{
+
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+	REG_WR8(bp, addr, ticks);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+			  port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+					   u16 fw_sb_id, u8 sb_index,
+					   u8 disable)
+{
+	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+	u16 flags = REG_RD16(bp, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= enable_flag;
+	REG_WR16(bp, addr, flags);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+			  port, fw_sb_id, sb_index, disable);
+}
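/* A worked example (not driver code) of the usec-to-ticks conversion done
 * by bnx2x_update_coalesce_sb_index() below. BNX2X_BTR is the HC timeout
 * granularity in microseconds; its value is defined elsewhere in the
 * driver, and 4 is only assumed here to make the arithmetic concrete.
 */
static u8 example_hc_ticks(u16 usec)
{
	const u8 btr = 4;	/* assumed BNX2X_BTR granularity */

	return usec / btr;	/* e.g. 50us -> 12 ticks of 4us */
}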
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec)
+{
+	int port = BP_PORT(bp);
+	u8 ticks = usec / BNX2X_BTR;
+
+	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
+	disable = disable ? 1 : (usec ? 0 : 1);
+	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 1a3545bd8a9..223bfeebc59 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -18,11 +18,15 @@
 #define BNX2X_CMN_H
 
 #include <linux/types.h>
+#include <linux/pci.h>
 #include <linux/netdevice.h>
 
 
 #include "bnx2x.h"
 
+/* This is used as a replacement for an MCP if it's not present */
+extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+
 extern int num_queues;
 
 /************************ Macros ********************************/
@@ -61,6 +65,73 @@ extern int num_queues;
 /*********************** Interfaces ****************************
  *  Functions that need to be implemented by each driver version
  */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters.
+ *
+ * @bp:			driver handle
+ * @ind_table:		indirection table to configure
+ * @config_hash:	re-configure RSS hash keys configuration
+ */
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp:			driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to the fastpath structure
+ * @leading:	boolean
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		      bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp:		driver handle
+ * @command:	request
+ * @param:	request's parameter
+ *
+ * block until there is a reply
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
 
 /**
  * bnx2x_initial_phy_init - initialize link parameters structure variables.
@@ -88,6 +159,32 @@ void bnx2x_link_set(struct bnx2x *bp);
 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
 
 /**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp:		driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp:		driver handle
+ * @igu_sb_id:	SB id
+ * @segment:	SB segment
+ * @index:	SB index
+ * @op:		SB operation
+ * @update:	is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+
+/**
  * bnx2x__link_status_update - handles link status change.
  *
  * @bp:		driver handle
@@ -165,21 +262,6 @@ void bnx2x_int_enable(struct bnx2x *bp);
 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
 
 /**
- * bnx2x_init_firmware - loads device firmware
- *
- * @bp:		driver handle
- */
-int bnx2x_init_firmware(struct bnx2x *bp);
-
-/**
- * bnx2x_init_hw - init HW blocks according to current initialization stage.
- *
- * @bp:		driver handle
- * @load_code:	COMMON, PORT or FUNCTION
- */
-int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
-
-/**
  * bnx2x_nic_init - init driver internals.
  *
  * @bp:		driver handle
@@ -207,16 +289,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp);
 void bnx2x_free_mem(struct bnx2x *bp);
 
 /**
- * bnx2x_setup_client - setup eth client.
- *
- * @bp:		driver handle
- * @fp:		pointer to fastpath structure
- * @is_leading:	boolean
- */
-int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-		       int is_leading);
-
-/**
  * bnx2x_set_num_queues - set number of queues according to mode.
  *
  * @bp:		driver handle
@@ -252,36 +324,21 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
 
 /**
- * bnx2x_set_eth_mac - configure eth MAC address in the HW
- *
- * @bp:		driver handle
- * @set:	set or clear
- *
- * Configures according to the value in netdev->dev_addr.
- */
-void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-
-#ifdef BCM_CNIC
-/**
- * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
+ * bnx2x_release_leader_lock - release recovery leader lock
  *
  * @bp:		driver handle
- * @set:	set or clear the CAM entry
- *
- * Used next enties in the CAM after the ETH MAC(s).
- * This function will wait until the ramdord completion returns.
- * Return 0 if cussess, -ENODEV if ramrod doesn't return.
 */
-int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
+int bnx2x_release_leader_lock(struct bnx2x *bp);
 
 /**
- * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
  *
  * @bp:		driver handle
  * @set:	set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
 */
-int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
-#endif
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
 
 /**
  * bnx2x_set_rx_mode - set MAC filtering configurations.
@@ -289,6 +346,8 @@ int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
  * @dev:	netdevice
  *
  * called with netif_tx_lock from dev_mcast.c
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh()
 */
 void bnx2x_set_rx_mode(struct net_device *dev);
 
@@ -296,25 +355,38 @@ void bnx2x_set_rx_mode(struct net_device *dev);
  * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
  *
  * @bp:		driver handle
+ *
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
 */
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 
+/**
+ * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
+ *
+ * @bp:			driver handle
+ * @cl_id:		client id
+ * @rx_mode_flags:	rx mode configuration
+ * @rx_accept_flags:	rx accept configuration
+ * @tx_accept_flags:	tx accept configuration (tx switch)
+ * @ramrod_flags:	ramrod configuration
+ */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			 unsigned long rx_mode_flags,
+			 unsigned long rx_accept_flags,
+			 unsigned long tx_accept_flags,
+			 unsigned long ramrod_flags);
+
 /* Parity errors related */
 void bnx2x_inc_load_cnt(struct bnx2x *bp);
 u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
-bool bnx2x_chk_parity_attn(struct bnx2x *bp);
-bool bnx2x_reset_is_done(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
 void bnx2x_disable_close_the_gate(struct bnx2x *bp);
 
 /**
- * bnx2x_stats_handle - perform statistics handling according to event.
- *
- * @bp:		driver handle
- * @event:	bnx2x_stats_event
- */
-void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
-
-/**
  * bnx2x_sp_event - handle ramrods completion.
  *
  * @fp:		fastpath handle for the event
@@ -323,15 +395,6 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
 
 /**
- * bnx2x_func_start - init function
- *
- * @bp:		driver handle
- *
- * Must be called before sending CLIENT_SETUP for the first client.
- */
-int bnx2x_func_start(struct bnx2x *bp);
-
-/**
  * bnx2x_ilt_set_info - prepare ILT configurations.
  *
  * @bp:		driver handle
@@ -362,6 +425,10 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
  * @value:	new value
 */
 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp);
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
 
 /* dev_close main block */
 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
@@ -372,16 +439,25 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
 /* hard_xmit callback */
 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
 
+/* setup_tc callback */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
 
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
 int bnx2x_change_mac_addr(struct net_device *dev, void *p);
 
 /* NAPI poll Rx part */
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
 
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
+
 /* NAPI poll Tx part */
-int bnx2x_tx_int(struct bnx2x_fastpath *fp);
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
 
 /* suspend/resume callbacks */
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -392,7 +468,6 @@ void bnx2x_free_irq(struct bnx2x *bp);
 
 void bnx2x_free_fp_mem(struct bnx2x *bp);
 int bnx2x_alloc_fp_mem(struct bnx2x *bp);
-
 void bnx2x_init_rx_rings(struct bnx2x *bp);
 void bnx2x_free_skbs(struct bnx2x *bp);
 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
@@ -447,6 +522,17 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
 */
 int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
 
+#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE))
+/**
+ * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
+ *
+ * @dev:	net_device
+ * @wwn:	output buffer
+ * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
+ *
+ */
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
+#endif
 u32 bnx2x_fix_features(struct net_device *dev, u32 features);
 int bnx2x_set_features(struct net_device *dev, u32 features);
 
@@ -457,19 +543,20 @@ int bnx2x_set_features(struct net_device *dev, u32 features);
 */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 {
 	barrier(); /* status block is written to by the chip */
 	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
 }
 
-static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
-					struct bnx2x_fastpath *fp,
-					u16 bd_prod, u16 rx_comp_prod,
-					u16 rx_sge_prod)
+static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
+			struct bnx2x_fastpath *fp, u16 bd_prod,
+			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
 {
 	struct ustorm_eth_rx_producers rx_prods = {0};
-	int i;
+	u32 i;
 
 	/* Update producers */
 	rx_prods.bd_prod = bd_prod;
@@ -486,10 +573,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 	 */
 	wmb();
 
-	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
-		REG_WR(bp,
-		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
-		       ((u32 *)&rx_prods)[i]);
+	for (i = 0; i < sizeof(rx_prods)/4; i++)
+		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
 
 	mmiowb(); /* keep prod updates ordered */
 
@@ -519,7 +604,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
 	barrier();
 }
 
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
 					  u8 idu_sb_id, bool is_Pf)
 {
 	u32 data, ctl, cnt = 100;
@@ -527,7 +612,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
 	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
 	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
 	u32 sb_bit =  1 << (idu_sb_id%32);
-	u32 func_encode = BP_FUNC(bp) |
+	u32 func_encode = func |
 			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
 	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
 
@@ -590,15 +675,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
 	barrier();
 }
 
-static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
-				   u16 index, u8 op, u8 update)
-{
-	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
-	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
-			     igu_addr);
-}
-
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
 				u16 index, u8 op, u8 update)
 {
@@ -653,21 +729,22 @@ static inline u16 bnx2x_ack_int(struct bnx2x *bp)
 		return bnx2x_igu_ack_int(bp);
 }
 
-static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
 {
 	/* Tell compiler that consumer and producer can change */
 	barrier();
-	return fp->tx_pkt_prod != fp->tx_pkt_cons;
+	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
 }
 
-static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
+static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
+				 struct bnx2x_fp_txdata *txdata)
 {
 	s16 used;
 	u16 prod;
 	u16 cons;
 
-	prod = fp->tx_bd_prod;
-	cons = fp->tx_bd_cons;
+	prod = txdata->tx_bd_prod;
+	cons = txdata->tx_bd_cons;
 
 	/* NUM_TX_RINGS = number of "next-page" entries
 	   It will be used as a threshold */
@@ -675,21 +752,30 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
 
 #ifdef BNX2X_STOP_ON_ERROR
 	WARN_ON(used < 0);
-	WARN_ON(used > fp->bp->tx_ring_size);
-	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
+	WARN_ON(used > bp->tx_ring_size);
+	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
 #endif
 
-	return (s16)(fp->bp->tx_ring_size) - used;
+	return (s16)(bp->tx_ring_size) - used;
 }
 
-static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
 {
 	u16 hw_cons;
 
 	/* Tell compiler that status block fields can change */
 	barrier();
-	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
-	return hw_cons != fp->tx_pkt_cons;
+	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+	return hw_cons != txdata->tx_pkt_cons;
+}
+
+static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u8 cos;
+	for_each_cos_in_tx_queue(fp, cos)
+		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+			return true;
+	return false;
 }
 
 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
@@ -705,7 +791,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
 }
 
 /**
- * disables tx from stack point of view
+ * bnx2x_tx_disable - disables tx from stack point of view
  *
  * @bp:		driver handle
 */
@@ -740,7 +826,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 	int i;
 
 	/* Add NAPI objects */
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }
@@ -749,7 +835,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 
@@ -779,7 +865,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
 		int idx = RX_SGE_CNT * i - 1;
 
 		for (j = 0; j < 2; j++) {
-			SGE_MASK_CLEAR_BIT(fp, idx);
+			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
 			idx--;
 		}
 	}
@@ -789,7 +875,7 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 {
 	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
 	memset(fp->sge_mask, 0xff,
-	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
+	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
 
 	/* Clear the two last indices in the page to 1:
 	   these are the indices that correspond to the "next" element,
@@ -861,22 +947,69 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 				      u16 cons, u16 prod)
 {
-	struct bnx2x *bp = fp->bp;
 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	dma_sync_single_for_device(&bp->pdev->dev,
-				   dma_unmap_addr(cons_rx_buf, mapping),
-				   RX_COPY_THRESH, DMA_FROM_DEVICE);
-
-	prod_rx_buf->skb = cons_rx_buf->skb;
 	dma_unmap_addr_set(prod_rx_buf, mapping,
 			   dma_unmap_addr(cons_rx_buf, mapping));
+	prod_rx_buf->skb = cons_rx_buf->skb;
 	*prod_bd = *cons_bd;
 }
 
+/************************* Init ******************************************/
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp:		driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_start_params *start_params =
+		&func_params.params.start;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_START;
+
+	/* Function parameters */
+	start_params->mf_mode = bp->mf_mode;
+	start_params->sd_vlan_tag = bp->mf_ov;
+	if (CHIP_IS_E1x(bp))
+		start_params->network_cos_mode = OVERRIDE_COS;
+	else
+		start_params->network_cos_mode = STATIC_COS;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi:	pointer to upper part
+ * @fw_mid:	pointer to middle part
+ * @fw_lo:	pointer to lower part
+ * @mac:	pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
+					 u8 *mac)
+{
+	((u8 *)fw_hi)[0]  = mac[1];
+	((u8 *)fw_hi)[1]  = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lo)[0]  = mac[5];
+	((u8 *)fw_lo)[1]  = mac[4];
+}
+
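/* A worked illustration (not driver code) of the byte layout produced by
 * bnx2x_set_fw_mac_addr() above: each 16-bit FW word holds two MAC bytes
 * swapped, so 00:11:22:33:44:55 yields hi bytes {0x11,0x00},
 * mid bytes {0x33,0x22} and lo bytes {0x55,0x44}.
 */
static inline void example_fw_mac_layout(void)
{
	u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u16 hi, mid, lo;

	bnx2x_set_fw_mac_addr(&hi, &mid, &lo, mac);
	/* ((u8 *)&hi)[0] == 0x11, ((u8 *)&hi)[1] == 0x00, etc. */
}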
 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 					   struct bnx2x_fastpath *fp, int last)
 {
@@ -895,57 +1028,58 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 	int i;
 
 	for (i = 0; i < last; i++) {
-		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
-		struct sk_buff *skb = rx_buf->skb;
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		struct sk_buff *skb = first_buf->skb;
 
 		if (skb == NULL) {
 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
 			continue;
 		}
-
-		if (fp->tpa_state[i] == BNX2X_TPA_START)
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
+					 dma_unmap_addr(first_buf, mapping),
 					 fp->rx_buf_size, DMA_FROM_DEVICE);
-
 		dev_kfree_skb(skb);
-		rx_buf->skb = NULL;
+		first_buf->skb = NULL;
 	}
 }
 
-static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
+static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
 {
 	int i;
 
 	for (i = 1; i <= NUM_TX_RINGS; i++) {
 		struct eth_tx_next_bd *tx_next_bd =
-			&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
 
 		tx_next_bd->addr_hi =
-			cpu_to_le32(U64_HI(fp->tx_desc_mapping +
+			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
 				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
 		tx_next_bd->addr_lo =
-			cpu_to_le32(U64_LO(fp->tx_desc_mapping +
+			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
 				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
 	}
 
-	SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
-	fp->tx_db.data.zero_fill1 = 0;
-	fp->tx_db.data.prod = 0;
+	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+	txdata->tx_db.data.zero_fill1 = 0;
+	txdata->tx_db.data.prod = 0;
 
-	fp->tx_pkt_prod = 0;
-	fp->tx_pkt_cons = 0;
-	fp->tx_bd_prod = 0;
-	fp->tx_bd_cons = 0;
-	fp->tx_pkt = 0;
+	txdata->tx_pkt_prod = 0;
+	txdata->tx_pkt_cons = 0;
+	txdata->tx_bd_prod = 0;
+	txdata->tx_bd_cons = 0;
+	txdata->tx_pkt = 0;
 }
 
 static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
 {
 	int i;
+	u8 cos;
 
 	for_each_tx_queue(bp, i)
-		bnx2x_init_tx_ring_one(&bp->fp[i]);
+		for_each_cos_in_tx_queue(&bp->fp[i], cos)
+			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
 }
 
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
@@ -1038,31 +1172,220 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 	return i - fp->eth_q_stats.rx_skb_alloc_failed;
 }
 
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+	if (!CHIP_IS_E1x(fp->bp))
+		return fp->cl_id;
+	else
+		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+}
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+					       bnx2x_obj_type obj_type)
+{
+	struct bnx2x *bp = fp->bp;
+
+	/* Configure classification DBs */
+	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
+			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+			   bnx2x_sp_mapping(bp, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &bp->sp_state, obj_type,
+			   &bp->macs_pool);
+}
+
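/* A small sketch (not driver code) of the per-port statistics ID scheme in
 * bnx2x_stats_id() above: on E1x the client IDs are only unique per port,
 * so port 1 offsets them by FP_SB_MAX_E1x to get a chip-wide statistics ID,
 * while later chips already use globally unique client IDs.
 */
static inline u8 example_stats_id(bool is_e1x, u8 cl_id, u8 port,
				  u8 fp_sb_max_e1x)
{
	return is_e1x ? cl_id + port * fp_sb_max_e1x : cl_id;
}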
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp:		driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+	u8 func_num = 0, i;
+
+	/* 57710 has only one function per-port */
+	if (CHIP_IS_E1(bp))
+		return 1;
+
+	/* Calculate a number of functions enabled on the current
+	 * PATH/PORT.
+	 */
+	if (CHIP_REV_IS_SLOW(bp)) {
+		if (IS_MF(bp))
+			func_num = 4;
+		else
+			func_num = 2;
+	} else {
+		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+			u32 func_config =
+				MF_CFG_RD(bp,
+					  func_mf_config[BP_PORT(bp) + 2 * i].
+					  config);
+			func_num +=
+				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+		}
+	}
+
+	WARN_ON(!func_num);
+
+	return func_num;
+}
+
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+	/* RX_MODE controlling object */
+	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+	/* multicast configuration controlling object */
+	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+			     BP_FUNC(bp), BP_FUNC(bp),
+			     bnx2x_sp(bp, mcast_rdata),
+			     bnx2x_sp_mapping(bp, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+			     BNX2X_OBJ_TYPE_RX);
+
+	/* Setup CAM credit pools */
+	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+				   bnx2x_get_path_func_num(bp));
+
+	/* RSS configuration object */
+	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+				  bnx2x_sp(bp, rss_rdata),
+				  bnx2x_sp_mapping(bp, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+				  BNX2X_OBJ_TYPE_RX);
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+	else
+		return fp->cl_id;
+}
+
+static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+
+	if (!CHIP_IS_E1x(bp))
+		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+	else
+		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+}
+
+static inline void bnx2x_init_txdata(struct bnx2x *bp,
+	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
+	__le16 *tx_cons_sb)
+{
+	txdata->cid = cid;
+	txdata->txq_index = txq_index;
+	txdata->tx_cons_sb = tx_cons_sb;
+
+	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d",
+	   txdata->cid, txdata->txq_index);
+}
+
 #ifdef BCM_CNIC
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+	return bp->cnic_base_cl_id + cl_idx +
+		(bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+
+	/* the 'first' id is allocated for the cnic */
+	return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+	return bp->igu_base_sb;
+}
+
+
 static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 {
-	bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
-		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
+	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+	unsigned long q_type = 0;
+
+	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+						     BNX2X_FCOE_ETH_CL_ID_IDX);
+	/** Current BNX2X_FCOE_ETH_CID definition implies not more than
+	 *  16 ETH clients per function when CNIC is enabled!
+	 *
+	 *  Fix it ASAP!!!
	 */
 	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
 	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
 	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
-	bnx2x_fcoe(bp, bp) = bp;
-	bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
-	bnx2x_fcoe(bp, index) = FCOE_IDX;
 	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
+
+	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
+			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+
+	DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index);
+
 	/* qZone id equals to FW (per path) client id */
-	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
-		BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
-				ETH_MAX_RX_CLIENTS_E1H);
+	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+
 	/* init shortcut */
-	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
-	    USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
-	    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
+	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+		bnx2x_rx_ustorm_prods_offset(fp);
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+	/* No multi-CoS for FCoE L2 client */
+	BUG_ON(fp->max_cos != 1);
+
+	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
+			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
+			   "igu_sb %d\n",
+	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
 }
 #endif
 
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+				       struct bnx2x_fp_txdata *txdata)
+{
+	int cnt = 1000;
+
+	while (bnx2x_has_tx_work_unload(txdata)) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for queue[%d]: "
+				  "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+				  txdata->txq_index, txdata->tx_pkt_prod,
+				  txdata->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+			bnx2x_panic();
+			return -EBUSY;
+#else
+			break;
+#endif
+		}
+		cnt--;
+		usleep_range(1000, 1000);
+	}
+
+	return 0;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
 static inline void __storm_memset_struct(struct bnx2x *bp,
 					 u32 addr, size_t size, u32 *data)
 {
@@ -1071,42 +1394,78 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
 		REG_WR(bp, addr + (i * 4), data[i]);
 }
 
-static inline void storm_memset_mac_filters(struct bnx2x *bp,
-			struct tstorm_eth_mac_filter_config *mac_filters,
-			u16 abs_fid)
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+				struct tstorm_eth_function_common_config *tcfg,
+				u16 abs_fid)
 {
-	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
 
 	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
 
-	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
 }
 
 static inline void storm_memset_cmng(struct bnx2x *bp,
 				     struct cmng_struct_per_port *cmng,
 				     u8 port)
 {
-	size_t size =
-		sizeof(struct rate_shaping_vars_per_port) +
-		sizeof(struct fairness_vars_per_port) +
-		sizeof(struct safc_struct_per_port) +
-		sizeof(struct pfc_struct_per_port);
+	size_t size = sizeof(struct cmng_struct_per_port);
 
 	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
 
 	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
+}
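/* A hedged usage sketch (not driver code) of bnx2x_clean_tx_queue() above:
 * on unload, each CoS ring of every fastpath is drained before the queue
 * objects are torn down. The iterator macros are the driver's; the wrapper
 * itself is illustrative only.
 */
static inline int example_drain_fp(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp)
{
	u8 cos;

	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_clean_tx_queue(bp, &fp->txdata[cos]))
			return -EBUSY;	/* only with BNX2X_STOP_ON_ERROR */
	return 0;
}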
-
-	addr += size + 4 /* SKIP DCB+LLFC */;
-	size = sizeof(struct cmng_struct_per_port) -
-		size /* written */ - 4 /*skipped*/;
-
-	__storm_memset_struct(bp, addr, size,
-			      (u32 *)(cmng->traffic_type_to_priority_cos));
-}
 
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp:		driver handle
+ * @mask:	bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+	int tout = 5000; /* Wait for 5 secs tops */
+
+	while (tout--) {
+		smp_mb();
+		netif_addr_lock_bh(bp->dev);
+		if (!(bp->sp_state & mask)) {
+			netif_addr_unlock_bh(bp->dev);
+			return true;
+		}
+		netif_addr_unlock_bh(bp->dev);
+
+		usleep_range(1000, 1000);
+	}
+
+	smp_mb();
+
+	netif_addr_lock_bh(bp->dev);
+	if (bp->sp_state & mask) {
+		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
+			  "mask 0x%lx\n", bp->sp_state, mask);
+		netif_addr_unlock_bh(bp->dev);
+		return false;
+	}
+	netif_addr_unlock_bh(bp->dev);
+
+	return true;
+}
 
-/* HW Lock for shared dual port PHYs */
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp:		driver handle
+ * @cxt:	context of the connection on the host memory
+ * @cid:	SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec);
 void bnx2x_acquire_phy_lock(struct bnx2x *bp);
 void bnx2x_release_phy_lock(struct bnx2x *bp);
 
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 410a49e571a..a4ea35f6a45 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -19,20 +19,18 @@
 #include <linux/netdevice.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#ifdef BCM_DCBNL
-#include <linux/dcbnl.h>
-#endif
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
 
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dcb.h"
 
-
 /* forward declarations of dcbx related functions */
-static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
 					  u32 *set_configuration_ets_pg,
 					  u32 *pri_pg_tbl);
@@ -47,34 +45,56 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
 				      struct cos_help_data *cos_data,
 				      u32 *pg_pri_orginal_spread,
 				      struct dcbx_ets_feature *ets);
-static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+				 struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+			    u32 addr, u32 len)
+{
+	int i;
+	for (i = 0; i < len; i += 4, buff++)
+		*buff = REG_RD(bp, addr + i);
+}
+
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+			     u32 addr, u32 len)
+{
+	int i;
+	for (i = 0; i < len; i += 4, buff++)
+		REG_WR(bp, addr + i, *buff);
+}
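/* A short usage sketch (not driver code) of the helpers above: a shmem
 * structure is pulled into a local copy with bnx2x_read_data(), patched,
 * and pushed back with bnx2x_write_data(). The struct mirrors the later
 * admin-MIB code; the offset is assumed to be a valid shmem address.
 */
static void example_rmw_admin_mib(struct bnx2x *bp, u32 offset)
{
	struct lldp_admin_mib admin_mib;

	bnx2x_read_data(bp, (u32 *)&admin_mib, offset, sizeof(admin_mib));
	/* ... modify admin_mib fields here ... */
	bnx2x_write_data(bp, (u32 *)&admin_mib, offset, sizeof(admin_mib));
}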
 static void bnx2x_pfc_set(struct bnx2x *bp)
 {
 	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
 	u32 pri_bit, val = 0;
-	u8 pri;
+	int i;
 
-	/* Tx COS configuration */
-	if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
-		pfc_params.rx_cos0_priority_mask =
-			bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
-	if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
-		pfc_params.rx_cos1_priority_mask =
-			bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
+	pfc_params.num_of_rx_cos_priority_mask =
+					bp->dcbx_port_params.ets.num_of_cos;
 
+	/* Tx COS configuration */
+	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+		/*
+		 * We configure only the pauseable bits (non pauseable aren't
+		 * configured at all) it's done to avoid false pauses from
+		 * network
+		 */
+		pfc_params.rx_cos_priority_mask[i] =
+			bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+				& DCBX_PFC_PRI_PAUSE_MASK(bp);
 
-	/**
+	/*
 	 * Rx COS configuration
 	 * Changing PFC RX configuration .
 	 * In RX COS0 will always be configured to lossy and COS1 to lossless
	 */
-	for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) {
-		pri_bit = 1 << pri;
+	for (i = 0 ; i < MAX_PFC_PRIORITIES ; i++) {
+		pri_bit = 1 << i;
 
 		if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
-			val |= 1 << (pri * 4);
+			val |= 1 << (i * 4);
 	}
 
 	pfc_params.pkt_priority_to_cos = val;
@@ -200,7 +220,11 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
 	if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
 		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
 
-	if (app->enabled && !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) {
+	if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+
+	if (app->enabled &&
+	    !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
 
 		bp->dcbx_port_params.app.enabled = true;
 
@@ -253,12 +277,11 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
 
 
 	/* Clean up old settings of ets on COS */
-	for (i = 0; i < E2_NUM_OF_COS ; i++) {
-
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
 		cos_params[i].pauseable = false;
-		cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
+		cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
 		cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
-		cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
+		cos_params[i].pri_bitmask = 0;
 	}
 
 	if (bp->dcbx_port_params.app.enabled &&
@@ -296,7 +319,7 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
 		DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
 
 	if (bp->dcbx_port_params.app.enabled &&
-	    !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR) &&
+	    !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
 	    pfc->enabled) {
 		bp->dcbx_port_params.pfc.enabled = true;
 		bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
@@ -308,6 +331,32 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
 	}
 }
 
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+	int i;
+	u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+	struct bnx2x_dcbx_cos_params *cos_params =
+			bp->dcbx_port_params.ets.cos_params;
+
+	/* get unmapped priorities by clearing mapped bits */
+	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+		unmapped &= ~(1 << ttp[i]);
+
+	/* find cos for nw prio and extend it with unmapped */
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+		if (cos_params[i].pri_bitmask & nw_prio) {
+			/* extend the bitmask with unmapped */
+			DP(NETIF_MSG_LINK,
+			   "cos %d extended with 0x%08x", i, unmapped);
+			cos_params[i].pri_bitmask |= unmapped;
+			break;
+		}
+	}
+}
+
 static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
 				     struct dcbx_features *features,
 				     u32 error)
@@ -317,6 +366,8 @@ static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
 	bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
 
 	bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+	bnx2x_dcbx_map_nw(bp);
 }
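/* A worked illustration (not driver code) of the bitmask arithmetic in
 * bnx2x_dcbx_map_nw() above, with assumed example values: if the traffic
 * types map priorities {0,3,4}, then 0xE6 of the 8-bit priority space is
 * left unmapped, and the COS carrying the NW priority absorbs it.
 */
static u32 example_unmapped_mask(void)
{
	u32 unmapped = (1 << 8) - 1;		/* 8 PFC priorities: 0xFF */
	u32 mapped[] = { 0, 3, 4 };		/* assumed ttp[] contents */
	int i;

	for (i = 0; i < 3; i++)
		unmapped &= ~(1 << mapped[i]);	/* 0xFF & ~0x19 == 0xE6 */
	return unmapped;
}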
 
 #define DCBX_LOCAL_MIB_MAX_TRY_READ		(100)
@@ -325,8 +376,8 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
 			       u32 offset,
 			       int read_mib_type)
 {
-	int max_try_read = 0, i;
-	u32 *buff, mib_size, prefix_seq_num, suffix_seq_num;
+	int max_try_read = 0;
+	u32 mib_size, prefix_seq_num, suffix_seq_num;
 	struct lldp_remote_mib *remote_mib ;
 	struct lldp_local_mib  *local_mib;
 
@@ -345,9 +396,7 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
 	offset += BP_PORT(bp) * mib_size;
 
 	do {
-		buff = base_mib_addr;
-		for (i = 0; i < mib_size; i += 4, buff++)
-			*buff = REG_RD(bp, offset + i);
+		bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
 
 		max_try_read++;
 
@@ -378,60 +427,50 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
 
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
 {
-	if (CHIP_IS_E2(bp)) {
-		if (BP_PORT(bp)) {
-			BNX2X_ERR("4 port mode is not supported");
-			return;
-		}
+	if (bp->dcbx_port_params.pfc.enabled &&
+	    !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+		/*
+		 * 1. Fills up common PFC structures if required
+		 * 2. Configure NIG, MAC and BRB via the elink
+		 */
+		bnx2x_pfc_set(bp);
+	else
+		bnx2x_pfc_clear(bp);
+}
 
-		if (bp->dcbx_port_params.pfc.enabled)
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
 
-			/* 1. Fills up common PFC structures if required.*/
-			/* 2. Configure NIG, MAC and BRB via the elink:
-			 *    elink must first check if BMAC is not in reset
-			 *    and only then configures the BMAC
-			 *    Or, configure EMAC.
			 */
-			bnx2x_pfc_set(bp);
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_TX_STOP;
 
-		else
-			bnx2x_pfc_clear(bp);
-	}
+	DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+	return bnx2x_func_state_change(bp, &func_params);
 }
 
-static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 {
-	DP(NETIF_MSG_LINK, "sending STOP TRAFFIC\n");
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
-		      0 /* connectionless */,
-		      0 /* dataHi is zero */,
-		      0 /* dataLo is zero */,
-		      1 /* common */);
-}
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_tx_start_params *tx_params =
+		&func_params.params.tx_start;
 
-static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
-{
-	bnx2x_pfc_fw_struct_e2(bp);
-	DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
-		      0, /* connectionless */
-		      U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
-		      U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
-		      1  /* common */);
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_TX_START;
+
+	bnx2x_dcbx_fw_struct(bp, tx_params);
+
+	DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+	return bnx2x_func_state_change(bp, &func_params);
 }
 
-static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
 {
 	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
-	u8 status = 0;
-
-	bnx2x_ets_disabled(&bp->link_params);
-
-	if (!ets->enabled)
-		return;
+	int rc = 0;
 
-	if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
-		BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos);
+	if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+		BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
 		return;
 	}
 
@@ -440,9 +479,9 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
 		return;
 
 	/* sanity */
-	if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
+	if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
 	     (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
-	    ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
+	    ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
 	     (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
 		BNX2X_ERR("all COS should have at least bw_limit or strict"
 			  "ets->cos_params[0].strict= %x"
@@ -474,17 +513,71 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
 
 		bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
 	} else {
-		if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
-			status = bnx2x_ets_strict(&bp->link_params, 0);
+		if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 0);
 		else if (ets->cos_params[1].strict
-					== BNX2X_DCBX_COS_HIGH_STRICT)
-			status = bnx2x_ets_strict(&bp->link_params, 1);
-
-		if (status)
+					== BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 1);
+		if (rc)
 			BNX2X_ERR("update_ets_params failed\n");
 	}
 }
 
+/*
+ * In E3B0 the configuration may have more than 2 COS.
 */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	struct bnx2x_ets_params ets_params = { 0 };
+	u8 i;
+
+	ets_params.num_of_cos = ets->num_of_cos;
+
+	for (i = 0; i < ets->num_of_cos; i++) {
+		/* COS is SP */
+		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be both BW and SP\n");
+				return;
+			}
+			ets_params.cos[i].state = bnx2x_cos_state_strict;
+			ets_params.cos[i].params.sp_params.pri =
+						ets->cos_params[i].strict;
+		} else { /* COS is BW */
+			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS must be either BW or SP\n");
+				return;
+			}
+			ets_params.cos[i].state = bnx2x_cos_state_bw;
+			ets_params.cos[i].params.bw_params.bw =
+						(u8)ets->cos_params[i].bw_tbl;
+		}
+	}
+
+	/* Configure the ETS in HW */
+	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+				  &ets_params)) {
+		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+	}
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+	if (!bp->dcbx_port_params.ets.enabled ||
+	    (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+		return;
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_update_ets_config(bp);
+	else
+		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
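/* A hedged configuration sketch (not driver code) for the E3B0 path above:
 * two traffic classes, one strict-priority and one with a 40% bandwidth
 * share, expressed in the bnx2x_ets_params layout the code fills in.
 * The field values here are illustrative.
 */
static void example_fill_ets(struct bnx2x_ets_params *ets_params)
{
	ets_params->num_of_cos = 2;

	ets_params->cos[0].state = bnx2x_cos_state_strict;
	ets_params->cos[0].params.sp_params.pri = 0;	/* strict priority */

	ets_params->cos[1].state = bnx2x_cos_state_bw;
	ets_params->cos[1].params.bw_params.bw = 40;	/* percent */
}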
SET_FLAGS(drv_flags, flags); + else + RESET_FLAGS(drv_flags, flags); + + SHMEM2_WR(bp, drv_flags, drv_flags); + DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags); + bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS); + } +} + +static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp) +{ + u8 prio, cos; + for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) { + for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) { + if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask + & (1 << prio)) { + bp->prio_to_cos[prio] = cos; + DP(NETIF_MSG_LINK, + "tx_mapping %d --> %d\n", prio, cos); } - rcu_read_unlock(); } + } - /* fall through if no CNIC initialized */ - case BNX2X_DCBX_STATE_ISCSI_STOPPED: -#endif + /* setup tc must be called under rtnl lock, but we can't take it here + * as we are handling an attetntion on a work queue which must be + * flushed at some rtnl-locked contexts (e.g. if down) + */ + if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) + schedule_delayed_work(&bp->sp_rtnl_task, 0); +} +void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) +{ + switch (state) { + case BNX2X_DCBX_STATE_NEG_RECEIVED: { DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); #ifdef BCM_DCBNL @@ -646,102 +749,53 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat, bp->dcbx_error); - if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { -#ifdef BCM_DCBNL - /** - * Add new app tlvs to dcbnl - */ - bnx2x_dcbnl_update_applist(bp, false); -#endif - bnx2x_dcbx_stop_hw_tx(bp); - return; - } - /* fall through */ + /* mark DCBX result for PMF migration */ + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1); #ifdef BCM_DCBNL /** - * Invalidate the local app tlvs if they are not added - * to the dcbnl app list to avoid deleting them from - * the list later on + * Add new app tlvs to dcbnl */ - bnx2x_dcbx_invalidate_local_apps(bp); + bnx2x_dcbnl_update_applist(bp, false); #endif + bnx2x_dcbx_stop_hw_tx(bp); + + /* reconfigure the netdevice with the results of the new + * dcbx negotiation. 
+ */ + bnx2x_dcbx_update_tc_mapping(bp); + + return; } case BNX2X_DCBX_STATE_TX_PAUSED: DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); bnx2x_pfc_set_pfc(bp); bnx2x_dcbx_update_ets_params(bp); - if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { - bnx2x_dcbx_resume_hw_tx(bp); - return; - } - /* fall through */ + bnx2x_dcbx_resume_hw_tx(bp); + return; case BNX2X_DCBX_STATE_TX_RELEASED: DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n"); - if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) - bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); - + bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0); +#ifdef BCM_DCBNL + /* + * Send a notification for the new negotiated parameters + */ + dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0); +#endif return; default: BNX2X_ERR("Unknown DCBX_STATE\n"); } } - -#define LLDP_STATS_OFFSET(bp) (BP_PORT(bp)*\ - sizeof(struct lldp_dcbx_stat)) - -/* calculate struct offset in array according to chip information */ -#define LLDP_PARAMS_OFFSET(bp) (BP_PORT(bp)*sizeof(struct lldp_params)) - #define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \ BP_PORT(bp)*sizeof(struct lldp_admin_mib)) -static void bnx2x_dcbx_lldp_updated_params(struct bnx2x *bp, - u32 dcbx_lldp_params_offset) -{ - struct lldp_params lldp_params = {0}; - u32 i = 0, *buff = NULL; - u32 offset = dcbx_lldp_params_offset + LLDP_PARAMS_OFFSET(bp); - - DP(NETIF_MSG_LINK, "lldp_offset 0x%x\n", offset); - - if ((bp->lldp_config_params.overwrite_settings == - BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE)) { - /* Read the data first */ - buff = (u32 *)&lldp_params; - for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++) - *buff = REG_RD(bp, (offset + i)); - - lldp_params.msg_tx_hold = - (u8)bp->lldp_config_params.msg_tx_hold; - lldp_params.msg_fast_tx_interval = - (u8)bp->lldp_config_params.msg_fast_tx; - lldp_params.tx_crd_max = - (u8)bp->lldp_config_params.tx_credit_max; - lldp_params.msg_tx_interval = - (u8)bp->lldp_config_params.msg_tx_interval; - lldp_params.tx_fast = - (u8)bp->lldp_config_params.tx_fast; - - /* Write the data.*/ - buff = (u32 *)&lldp_params; - for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++) - REG_WR(bp, (offset + i) , *buff); - - - } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE == - bp->lldp_config_params.overwrite_settings) - bp->lldp_config_params.overwrite_settings = - BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID; -} - static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, u32 dcbx_lldp_params_offset) { struct lldp_admin_mib admin_mib; u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0; - u32 *buff; u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp); /*shortcuts*/ @@ -749,18 +803,18 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params; memset(&admin_mib, 0, sizeof(struct lldp_admin_mib)); - buff = (u32 *)&admin_mib; + /* Read the data first */ - for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++) - *buff = REG_RD(bp, (offset + i)); + bnx2x_read_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON) SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); else RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED); - if ((BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE == - dp->overwrite_settings)) { + if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) { + RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK); admin_mib.ver_cfg_flags 
|= (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) & @@ -856,19 +910,17 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, af->app.default_pri = (u8)dp->admin_default_priority; - } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE == - dp->overwrite_settings) - dp->overwrite_settings = BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID; + } /* Write the data. */ - buff = (u32 *)&admin_mib; - for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++) - REG_WR(bp, (offset + i), *buff); + bnx2x_write_data(bp, (u32 *)&admin_mib, offset, + sizeof(struct lldp_admin_mib)); + } void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) { - if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) { + if (!CHIP_IS_E1x(bp)) { bp->dcb_state = dcb_on; bp->dcbx_enabled = dcbx_enabled; } else { @@ -966,7 +1018,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp) DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n", bp->dcb_state, bp->port.pmf); - if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && + if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { dcbx_lldp_params_offset = SHMEM2_RD(bp, dcbx_lldp_params_offset); @@ -974,56 +1026,21 @@ void bnx2x_dcbx_init(struct bnx2x *bp) DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n", dcbx_lldp_params_offset); - if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { - bnx2x_dcbx_lldp_updated_params(bp, - dcbx_lldp_params_offset); + bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0); + if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { bnx2x_dcbx_admin_mib_updated_params(bp, dcbx_lldp_params_offset); - /* set default configuration BC has */ - bnx2x_dcbx_set_params(bp, - BNX2X_DCBX_STATE_NEG_RECEIVED); - + /* Let HW start negotiation */ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); } } } - -void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp) -{ - struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES]; - u32 i = 0, addr; - memset(pricos, 0, sizeof(pricos)); - /* Default initialization */ - for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++) - pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED; - - /* Store per port struct to internal memory */ - addr = BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + - offsetof(struct cmng_struct_per_port, - traffic_type_to_priority_cos); - __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos); - - - /* LLFC disabled.*/ - REG_WR8(bp , BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + - offsetof(struct cmng_struct_per_port, llfc_mode), - LLFC_MODE_NONE); - - /* DCBX disabled.*/ - REG_WR8(bp , BAR_XSTRORM_INTMEM + - XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) + - offsetof(struct cmng_struct_per_port, dcb_enabled), - DCB_DISABLED); -} - static void bnx2x_dcbx_print_cos_params(struct bnx2x *bp, - struct flow_control_configuration *pfc_fw_cfg) + struct bnx2x_func_tx_start_params *pfc_fw_cfg) { u8 pri = 0; u8 cos = 0; @@ -1171,7 +1188,7 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, /* If we join a group and one is strict * than the bw rulls */ cos_data->data[entry].strict = - BNX2X_DCBX_COS_HIGH_STRICT; + BNX2X_DCBX_STRICT_COS_HIGHEST; } if ((0 == cos_data->data[0].pri_join_mask) && (0 == cos_data->data[1].pri_join_mask)) @@ -1183,7 +1200,7 @@ static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp, #define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1)))) #endif -static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp, +static void 
bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
					    struct pg_help_data *pg_help_data,
					    struct cos_help_data *cos_data,
					    u32 pri_join_mask,
@@ -1263,14 +1280,16 @@ static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
 		if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
 		    DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
 			cos_data->data[0].strict =
-						BNX2X_DCBX_COS_HIGH_STRICT;
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 			cos_data->data[1].strict =
-						BNX2X_DCBX_COS_LOW_STRICT;
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
 		} else {
 			cos_data->data[0].strict =
-						BNX2X_DCBX_COS_LOW_STRICT;
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
 			cos_data->data[1].strict =
-						BNX2X_DCBX_COS_HIGH_STRICT;
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 		}
 		/* Pauseable */
 		cos_data->data[0].pausable = true;
@@ -1306,13 +1325,16 @@ static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
 			 * and that with the highest priority
 			 * gets the highest strict priority in the arbiter.
 			 */
-			cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
-			cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+			cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+			cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 		}
 	}
 }

-static void bnx2x_dcbx_two_pg_to_cos_params(
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
 			    struct bnx2x *bp,
 			    struct pg_help_data *pg_help_data,
 			    struct dcbx_ets_feature *ets,
@@ -1322,7 +1344,7 @@ static void bnx2x_dcbx_two_pg_to_cos_params(
 			    u8 num_of_dif_pri)
 {
 	u8 i = 0;
-	u8 pg[E2_NUM_OF_COS] = {0};
+	u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };

 	/* If there are both pauseable and non-pauseable priorities,
 	 * the pauseable priorities go to the first queue and
@@ -1378,16 +1400,68 @@ static void bnx2x_dcbx_two_pg_to_cos_params(
 	}

 	/* There can be only one strict pg */
-	for (i = 0 ; i < E2_NUM_OF_COS; i++) {
+	for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
 		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
 			cos_data->data[i].cos_bw =
 				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
 		else
-			cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
+			cos_data->data[i].strict =
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
 	}
 }

-static void bnx2x_dcbx_three_pg_to_cos_params(
+static int bnx2x_dcbx_join_pgs(
+			      struct bnx2x *bp,
+			      struct dcbx_ets_feature *ets,
+			      struct pg_help_data *pg_help_data,
+			      u8 required_num_of_pg)
+{
+	u8 entry_joined = pg_help_data->num_of_pg - 1;
+	u8 entry_removed = entry_joined + 1;
+	u8 pg_joined = 0;
+
+	if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+						<= pg_help_data->num_of_pg) {
+
+		BNX2X_ERR("required_num_of_pg can't be zero\n");
+		return -EINVAL;
+	}
+
+	while (required_num_of_pg < pg_help_data->num_of_pg) {
+		entry_joined = pg_help_data->num_of_pg - 2;
+		entry_removed = entry_joined + 1;
+		/* protect index */
+		entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+		pg_help_data->data[entry_joined].pg_priority |=
+			pg_help_data->data[entry_removed].pg_priority;
+
+		pg_help_data->data[entry_joined].num_of_dif_pri +=
+			pg_help_data->data[entry_removed].num_of_dif_pri;
+
+		if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+		    pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+			/* Entries joined strict priority rules */
+			pg_help_data->data[entry_joined].pg =
+							DCBX_STRICT_PRI_PG;
+		else {
+			/* Entries can be joined - combine their BW */
+			pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_joined].pg) +
+				    DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_removed].pg);
+
+			DCBX_PG_BW_SET(ets->pg_bw_tbl,
+				pg_help_data->data[entry_joined].pg, pg_joined);
+		}
+		/* Joined the entries */
+		pg_help_data->num_of_pg--;
+	}
+
+	return 0;
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 			      struct bnx2x *bp,
 			      struct pg_help_data *pg_help_data,
 			      struct dcbx_ets_feature *ets,
@@ -1459,102 +1533,271 @@ static void bnx2x_dcbx_three_pg_to_cos_params(
 				/* If we join a group and one is strict
 				 * than the bw rulls */
 				cos_data->data[1].strict =
-					BNX2X_DCBX_COS_HIGH_STRICT;
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
 			}
 		}
 	}
 }

-static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
 				       struct pg_help_data *help_data,
 				       struct dcbx_ets_feature *ets,
-				       u32 *pg_pri_orginal_spread)
+				       struct cos_help_data *cos_data,
+				       u32 *pg_pri_orginal_spread,
+				       u32 pri_join_mask,
+				       u8 num_of_dif_pri)
 {
-	struct cos_help_data cos_data ;
-	u8 i = 0;
-	u32 pri_join_mask = 0;
-	u8 num_of_dif_pri = 0;
-
-	memset(&cos_data, 0, sizeof(cos_data));
-	/* Validate the pg value */
-	for (i = 0; i < help_data->num_of_pg ; i++) {
-		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
-		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
-			BNX2X_ERR("Invalid pg[%d] data %x\n", i,
-				  help_data->data[i].pg);
-		pri_join_mask |= help_data->data[i].pg_priority;
-		num_of_dif_pri += help_data->data[i].num_of_dif_pri;
-	}
-	/* default settings */
-	cos_data.num_of_cos = 2;
-	for (i = 0; i < E2_NUM_OF_COS ; i++) {
-		cos_data.data[i].pri_join_mask = pri_join_mask;
-		cos_data.data[i].pausable = false;
-		cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
-		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
-	}
+	/* default E2 settings */
+	cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;

 	switch (help_data->num_of_pg) {
 	case 1:
-
-		bxn2x_dcbx_single_pg_to_cos_params(
+		bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
 				bp,
 				help_data,
-				&cos_data,
+				cos_data,
 				pri_join_mask,
 				num_of_dif_pri);
 		break;
 	case 2:
-		bnx2x_dcbx_two_pg_to_cos_params(
+		bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
 				bp,
 				help_data,
 				ets,
-				&cos_data,
+				cos_data,
 				pg_pri_orginal_spread,
 				pri_join_mask,
 				num_of_dif_pri);
 		break;
 	case 3:
-		bnx2x_dcbx_three_pg_to_cos_params(
+		bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
 				bp,
 				help_data,
 				ets,
-				&cos_data,
+				cos_data,
 				pg_pri_orginal_spread,
 				pri_join_mask,
 				num_of_dif_pri);
-		break;
 	default:
 		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
 		bnx2x_dcbx_ets_disabled_entry_data(bp,
-						   &cos_data, pri_join_mask);
+						   cos_data, pri_join_mask);
+	}
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+					struct cos_help_data *cos_data,
+					u8 entry,
+					u8 num_spread_of_entries,
+					u8 strict_app_pris)
+{
+	u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+	u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+	u8 app_pri_bit = 0;
+
+	while (num_spread_of_entries && num_of_app_pri > 0) {
+		app_pri_bit = 1 << (num_of_app_pri - 1);
+		if (app_pri_bit & strict_app_pris) {
+			struct cos_entry_help_data *data = &cos_data->
+								data[entry];
+			num_spread_of_entries--;
+			if (num_spread_of_entries == 0) {
+				/* last entry needed: put all the entries left */
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = strict_app_pris;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			} else {
+				strict_app_pris &= ~app_pri_bit;
+
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = app_pri_bit;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			}
+
+			strict_pri =
+			    BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+			entry++;
+		}
+
+		num_of_app_pri--;
+	}
+
+	if (num_spread_of_entries)
+		return -EINVAL;
+
+	return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+					 struct cos_help_data *cos_data,
+					 u8 entry,
+					 u8 num_spread_of_entries,
+					 u8 strict_app_pris)
+{
+
+	if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+					 num_spread_of_entries,
+					 strict_app_pris)) {
+		struct cos_entry_help_data *data = &cos_data->
+							data[entry];
+		/* Fill BW entry */
+		data->cos_bw = DCBX_INVALID_COS_BW;
+		data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+		data->pri_join_mask = strict_app_pris;
+		data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+				 data->pri_join_mask);
+		return 1;
+	}
+
+	return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+					   struct pg_help_data *help_data,
+					   struct dcbx_ets_feature *ets,
+					   struct cos_help_data *cos_data,
+					   u32 pri_join_mask)
+
+{
+	u8 need_num_of_entries = 0;
+	u8 i = 0;
+	u8 entry = 0;
+
+	/*
+	 * if the number of requested PG-s in CEE is greater than 3
+	 * then the results are not determined since this is a violation
+	 * of the standard.
+	 */
+	if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+		if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+					DCBX_COS_MAX_NUM_E3B0)) {
+			BNX2X_ERR("Unable to reduce the number of PGs - "
+				  "we will disable ETS\n");
+			bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+							   pri_join_mask);
+			return;
+		}
+	}
+
+	for (i = 0 ; i < help_data->num_of_pg; i++) {
+		struct pg_entry_help_data *pg = &help_data->data[i];
+		if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+			struct cos_entry_help_data *data = &cos_data->
+							data[entry];
+			/* Fill BW entry */
+			data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+			data->strict = BNX2X_DCBX_STRICT_INVALID;
+			data->pri_join_mask = pg->pg_priority;
+			data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+						data->pri_join_mask);
+
+			entry++;
+		} else {
+			need_num_of_entries = min_t(u8,
+				(u8)pg->num_of_dif_pri,
+				(u8)DCBX_COS_MAX_NUM_E3B0 -
+						 help_data->num_of_pg + 1);
+			/*
+			 * If there are still VOQ-s which have no associated PG,
+			 * then associate these VOQ-s to PG15. These PG-s will
+			 * be used for SP between priorities on PG15.
+ */ + entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data, + entry, need_num_of_entries, pg->pg_priority); + } } + /* the entry will represent the number of COSes used */ + cos_data->num_of_cos = entry; +} +static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp, + struct pg_help_data *help_data, + struct dcbx_ets_feature *ets, + u32 *pg_pri_orginal_spread) +{ + struct cos_help_data cos_data; + u8 i = 0; + u32 pri_join_mask = 0; + u8 num_of_dif_pri = 0; + + memset(&cos_data, 0, sizeof(cos_data)); + + /* Validate the pg value */ + for (i = 0; i < help_data->num_of_pg ; i++) { + if (DCBX_STRICT_PRIORITY != help_data->data[i].pg && + DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg) + BNX2X_ERR("Invalid pg[%d] data %x\n", i, + help_data->data[i].pg); + pri_join_mask |= help_data->data[i].pg_priority; + num_of_dif_pri += help_data->data[i].num_of_dif_pri; + } + + /* defaults */ + cos_data.num_of_cos = 1; + for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) { + cos_data.data[i].pri_join_mask = 0; + cos_data.data[i].pausable = false; + cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID; + cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW; + } + + if (CHIP_IS_E3B0(bp)) + bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets, + &cos_data, pri_join_mask); + else /* E2 + E3A0 */ + bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp, + help_data, ets, + &cos_data, + pg_pri_orginal_spread, + pri_join_mask, + num_of_dif_pri); + for (i = 0; i < cos_data.num_of_cos ; i++) { - struct bnx2x_dcbx_cos_params *params = + struct bnx2x_dcbx_cos_params *p = &bp->dcbx_port_params.ets.cos_params[i]; - params->pauseable = cos_data.data[i].pausable; - params->strict = cos_data.data[i].strict; - params->bw_tbl = cos_data.data[i].cos_bw; - if (params->pauseable) { - params->pri_bitmask = - DCBX_PFC_PRI_GET_PAUSE(bp, - cos_data.data[i].pri_join_mask); + p->strict = cos_data.data[i].strict; + p->bw_tbl = cos_data.data[i].cos_bw; + p->pri_bitmask = cos_data.data[i].pri_join_mask; + p->pauseable = cos_data.data[i].pausable; + + /* sanity */ + if (p->bw_tbl != DCBX_INVALID_COS_BW || + p->strict != BNX2X_DCBX_STRICT_INVALID) { + if (p->pri_bitmask == 0) + BNX2X_ERR("Invalid pri_bitmask for %d\n", i); + + if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) { + + if (p->pauseable && + DCBX_PFC_PRI_GET_NON_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for " + "pausable COS %d\n", i); + + if (!p->pauseable && + DCBX_PFC_PRI_GET_PAUSE(bp, + p->pri_bitmask) != 0) + BNX2X_ERR("Inconsistent config for " + "nonpausable COS %d\n", i); + } + } + + if (p->pauseable) DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n", i, cos_data.data[i].pri_join_mask); - } else { - params->pri_bitmask = - DCBX_PFC_PRI_GET_NON_PAUSE(bp, - cos_data.data[i].pri_join_mask); + else DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask " "0x%x\n", i, cos_data.data[i].pri_join_mask); - } } bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ; @@ -1574,30 +1817,26 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, } } -static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) +static void bnx2x_dcbx_fw_struct(struct bnx2x *bp, + struct bnx2x_func_tx_start_params *pfc_fw_cfg) { - struct flow_control_configuration *pfc_fw_cfg = NULL; u16 pri_bit = 0; u8 cos = 0, pri = 0; struct priority_cos *tt2cos; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; - pfc_fw_cfg = (struct flow_control_configuration *) - bnx2x_sp(bp, pfc_config); - memset(pfc_fw_cfg, 0, sizeof(struct flow_control_configuration)); + memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); + 
+	/* to disable DCB - the structure must be zeroed */
+	if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
+		return;

 	/*shortcut*/
 	tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;

 	/* Fw version should be incremented each update */
 	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
-	pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
-
-	/* Default initialization */
-	for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
-		tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
-		tt2cos[pri].cos = 0;
-	}
+	pfc_fw_cfg->dcb_enabled = 1;

 	/* Fill priority parameters */
 	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
@@ -1605,14 +1844,37 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
 		pri_bit = 1 << tt2cos[pri].priority;

 		/* Fill COS parameters based on COS calculated to
-		 * make it more generally for future use */
+		 * make it more general for future use */
 		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
 			if (bp->dcbx_port_params.ets.cos_params[cos].
 						pri_bitmask & pri_bit)
 					tt2cos[pri].cos = cos;
 	}
+
+	/* we never want the FW to add a 0 vlan tag */
+	pfc_fw_cfg->dont_add_pri_0_en = 1;
+
 	bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
 }
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+	/* if we need to synchronize DCBX result from prev PMF
+	 * read it from shmem and update bp accordingly
+	 */
+	if (SHMEM2_HAS(bp, drv_flags) &&
+	    GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+		/* Read neg results if dcbx is in the FW */
+		if (bnx2x_dcbx_read_shmem_neg_results(bp))
+			return;
+
+		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					  bp->dcbx_error);
+		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					 bp->dcbx_error);
+	}
+}
+
 /* DCB netlink */
 #ifdef BCM_DCBNL
@@ -1879,10 +2141,12 @@ static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
 	if (bp->dcb_state) {
 		switch (tcid) {
 		case DCB_NUMTCS_ATTR_PG:
-			*num = E2_NUM_OF_COS;
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
 			break;
 		case DCB_NUMTCS_ATTR_PFC:
-			*num = E2_NUM_OF_COS;
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
 			break;
 		default:
 			rval = -EINVAL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index bed369d67e0..2c6a3bca6f2 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -27,22 +27,30 @@ struct bnx2x_dcbx_app_params {
 	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
 };

-#define E2_NUM_OF_COS			2
-#define BNX2X_DCBX_COS_NOT_STRICT	0
-#define BNX2X_DCBX_COS_LOW_STRICT	1
-#define BNX2X_DCBX_COS_HIGH_STRICT	2
+#define DCBX_COS_MAX_NUM_E2	DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3, to be extended to 6 */
+#define BNX2X_MAX_COS_SUPPORT	3
+#define DCBX_COS_MAX_NUM_E3B0	BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM	BNX2X_MAX_COS_SUPPORT

 struct bnx2x_dcbx_cos_params {
 	u32	bw_tbl;
 	u32	pri_bitmask;
+	/*
+	 * strict priority: valid values are 0..5; 0 is highest priority.
+	 * There can't be two COSes with the same priority.
+ */ u8 strict; +#define BNX2X_DCBX_STRICT_INVALID DCBX_COS_MAX_NUM +#define BNX2X_DCBX_STRICT_COS_HIGHEST 0 +#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp) ((sp) + 1) u8 pauseable; }; struct bnx2x_dcbx_pg_params { u32 enabled; u8 num_of_cos; /* valid COS entries */ - struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS]; + struct bnx2x_dcbx_cos_params cos_params[DCBX_COS_MAX_NUM]; }; struct bnx2x_dcbx_pfc_params { @@ -60,6 +68,8 @@ struct bnx2x_dcbx_port_params { #define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0 #define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1 #define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE) +#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\ + (bp)->dcbx_port_params.ets.enabled) struct bnx2x_config_lldp_params { u32 overwrite_settings; @@ -132,7 +142,7 @@ struct cos_entry_help_data { }; struct cos_help_data { - struct cos_entry_help_data data[E2_NUM_OF_COS]; + struct cos_entry_help_data data[DCBX_COS_MAX_NUM]; u8 num_of_cos; }; @@ -148,6 +158,8 @@ struct cos_help_data { ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp))) #define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \ (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri)) +#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri) \ + (0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)) #define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \ (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri))) #define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\ @@ -170,22 +182,18 @@ struct pg_help_data { /* forward DCB/PFC related declarations */ struct bnx2x; -void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp); void bnx2x_dcbx_update(struct work_struct *work); void bnx2x_dcbx_init_params(struct bnx2x *bp); void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled); enum { BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1, -#ifdef BCM_CNIC - BNX2X_DCBX_STATE_ISCSI_STOPPED, -#endif BNX2X_DCBX_STATE_TX_PAUSED, BNX2X_DCBX_STATE_TX_RELEASED }; void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); - +void bnx2x_dcbx_pmf_update(struct bnx2x *bp); /* DCB netlink */ #ifdef BCM_DCBNL extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h index fb3ff7c4d7c..b983825d0ee 100644 --- a/drivers/net/bnx2x/bnx2x_dump.h +++ b/drivers/net/bnx2x/bnx2x_dump.h @@ -25,34 +25,94 @@ /*definitions */ -#define XSTORM_WAITP_ADDR 0x2b8a80 -#define TSTORM_WAITP_ADDR 0x1b8a80 -#define USTORM_WAITP_ADDR 0x338a80 -#define CSTORM_WAITP_ADDR 0x238a80 -#define TSTORM_CAM_MODE 0x1B1440 +#define XSTORM_WAITP_ADDR 0x2b8a80 +#define TSTORM_WAITP_ADDR 0x1b8a80 +#define USTORM_WAITP_ADDR 0x338a80 +#define CSTORM_WAITP_ADDR 0x238a80 +#define TSTORM_CAM_MODE 0x1B1440 + +#define MAX_TIMER_PENDING 200 +#define TIMER_SCAN_DONT_CARE 0xFF +#define RI_E1 0x1 +#define RI_E1H 0x2 +#define RI_E2 0x4 +#define RI_E3 0x8 +#define RI_E3B0 0x10 +#define RI_ONLINE 0x100 +#define RI_OFFLINE 0x0 +#define RI_PATH0_DUMP 0x200 +#define RI_PATH1_DUMP 0x400 -#define MAX_TIMER_PENDING 200 -#define TIMER_SCAN_DONT_CARE 0xFF -#define RI_E1 0x1 -#define RI_E1H 0x2 -#define RI_E2 0x4 -#define RI_ONLINE 0x100 -#define RI_PATH0_DUMP 0x200 -#define RI_PATH1_DUMP 0x400 -#define RI_E1_OFFLINE (RI_E1) #define RI_E1_ONLINE (RI_E1 | RI_ONLINE) -#define RI_E1H_OFFLINE (RI_E1H) #define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) -#define RI_E2_OFFLINE (RI_E2) -#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) -#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) #define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) -#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) -#define 
RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) -#define RI_E1E2_OFFLINE (RI_E2 | RI_E1) -#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) -#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) -#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) +#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) +#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE) +#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE) +#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) +#define RI_E3_ONLINE (RI_E3 | RI_ONLINE) +#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE) +#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE) +#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE) +#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE) +#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE) +#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) +#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE) +#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE) +#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE) +#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE) +#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE) +#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE) +#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE) +#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE) +#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE) +#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1E1HE2E3E3B0_ONLINE \ + (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE) +#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE) +#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE) +#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE) +#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE) +#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE) +#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE) +#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE) +#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE) +#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE) +#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE) +#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE) +#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE) +#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE) +#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE) +#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE) +#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE) +#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE) +#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE) +#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) 
+#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_E1E1HE2E3E3B0_OFFLINE \ + (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE) +#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE +#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE + +#define DBG_DMP_TRACE_BUFFER_SIZE 0x800 +#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \ + ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE) struct dump_sign { u32 time_stamp; @@ -86,628 +146,1011 @@ struct wreg_addr { u16 info; }; -#define REGS_COUNT 834 -static const struct reg_addr reg_addrs[REGS_COUNT] = { - { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, - { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, - { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE }, - { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE }, - { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE }, - { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE }, - { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE }, - { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE }, - { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE }, - { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE }, - { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE }, - { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE }, - { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE }, - { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE }, - { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE }, - { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE }, - { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE }, - { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE }, - { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE }, - { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE }, - { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE }, - { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE }, - { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE }, - { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE }, - { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE }, - { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE }, - { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE }, - { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE }, - { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, - { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE }, - { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE }, - { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE }, - { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE }, - { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE }, - { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE }, - { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE }, - { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE }, - { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE }, - { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE }, - { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE }, - { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE }, - { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE }, - { 0x18010, 35, 
RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE }, - { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE }, - { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE }, - { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE }, - { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE }, - { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE }, - { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE }, - { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE }, - { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE }, - { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE }, - { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE }, - { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE }, - { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE }, - { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE }, - { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE }, - { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE }, - { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE }, - { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE }, - { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE }, - { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, - { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, - { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, - { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE }, - { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE }, - { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE }, - { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE }, - { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE }, - { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE }, - { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE }, - { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE }, - { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE }, - { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE }, - { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE }, - { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE }, - { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE }, - { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, - { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, - { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE }, - { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE }, - { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE }, - { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE }, - { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE }, - { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE }, - { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, - { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE }, - { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, - { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, - { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE }, - { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE }, - { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE }, - { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE }, - { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE }, - { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, - { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, - { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE 
}, - { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE }, - { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE }, - { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE }, - { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, - { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, - { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE }, - { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE }, - { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE }, - { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE }, - { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE }, - { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE }, - { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE }, - { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE }, - { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, - { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE }, - { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, - { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, - { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, - { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, - { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE }, - { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE }, - { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE }, - { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, - { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE }, - { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE }, - { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE }, - { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE }, - { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, - { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE }, - { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE }, - { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE }, - { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE }, - { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE }, - { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE }, - { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE }, - { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE }, - { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE }, - { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE }, - { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE }, - { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE }, - { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE }, - { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE }, - { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE }, - { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE }, - { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE }, - { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE }, - { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE }, - { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE }, - { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE }, - { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE }, - { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE }, - { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE }, - { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE }, - { 
0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE }, - { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE }, - { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE }, - { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE }, - { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE }, - { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE }, - { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE }, - { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE }, - { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE }, - { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE }, - { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE }, - { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE }, - { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE }, - { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE }, - { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE }, - { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE }, - { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE }, - { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE }, - { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, - { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE }, - { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE }, - { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE }, - { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE }, - { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE }, - { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE }, - { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE }, - { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE }, - { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, - { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, - { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, - { 0x164238, 1, RI_ALL_ONLINE }, { 0x164240, 1, RI_ALL_ONLINE }, - { 0x164248, 1, RI_ALL_ONLINE }, { 0x164250, 1, RI_ALL_ONLINE }, - { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, - { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, - { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, - { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE }, - { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, - { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE }, - { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE }, - { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE }, - { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE }, - { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE }, - { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE }, - { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE }, - { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE }, - { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE }, - { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE }, - { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, - { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE }, - { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE }, - { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE }, - { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE }, - { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE }, - 
{ 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE }, - { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE }, - { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE }, - { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE }, - { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE }, - { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE }, - { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE }, - { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE }, - { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE }, - { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE }, - { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, - { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, - { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE }, - { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE }, - { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE }, - { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE }, - { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE }, - { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE }, - { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE }, - { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE }, - { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE }, - { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE }, - { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE }, - { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE }, - { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, - { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, - { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE }, - { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE }, - { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, - { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, - { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE }, - { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE }, - { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE }, - { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE }, - { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE }, - { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE }, - { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE }, - { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE }, - { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE }, - { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE }, - { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, - { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE }, - { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE }, - { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE }, - { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE }, - { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE }, - { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE }, - { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE }, - { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE }, - { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE }, - { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE }, - { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE }, - { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE }, 
- { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE }, - { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE }, - { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE }, - { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE }, - { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, - { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE }, - { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE }, - { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE }, - { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE }, - { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE }, - { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, - { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE }, - { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE}, - { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE }, - { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE }, - { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE }, - { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE }, - { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE }, - { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE }, - { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE }, - { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE }, - { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, - { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE }, - { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE }, - { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE }, - { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, - { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE }, - { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE }, - { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE }, - { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE }, - { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE }, - { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE }, - { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE }, - { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE }, - { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE }, - { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE }, - { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE }, - { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE }, - { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE }, - { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE }, - { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE }, - { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE }, - { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE }, - { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE }, - { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE }, - { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE }, - { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE }, - { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE }, - { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE }, - { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE }, - { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE }, - { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE }, - { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE 
}, - { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE }, - { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE }, - { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE }, - { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE }, - { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE }, - { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE }, - { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE }, - { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE }, - { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, - { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE }, - { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE}, - { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE }, - { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE }, - { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE }, - { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE }, - { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE }, - { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE }, - { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE }, - { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE }, - { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, - { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE }, - { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE }, - { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, - { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE }, - { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE }, - { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE }, - { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE }, - { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE }, - { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE }, - { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE }, - { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE }, - { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE }, - { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE }, - { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE }, - { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE }, - { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE }, - { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE }, - { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE }, - { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE }, - { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE }, - { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE }, - { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE }, - { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE }, - { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE }, - { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE }, - { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE }, - { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE }, - { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE }, - { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE }, - { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE }, - { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE }, - { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE }, - { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE }, 
- { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE }, - { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, - { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE }, - { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, - { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE }, - { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE }, - { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE }, - { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, - { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, - { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE }, - { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE }, - { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE }, - { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE }, - { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE }, - { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE }, - { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE }, - { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE }, - { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE }, - { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE }, - { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE }, - { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE }, - { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE }, - { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, - { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE }, - { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE }, - { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE }, - { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, - { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE }, - { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE }, - { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE }, - { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE }, - { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE }, - { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE }, - { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE }, - { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE }, - { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE }, - { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE }, - { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, - { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE }, - { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE }, - { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE }, - { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE }, - { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE }, - { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE }, - { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE }, - { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE }, - { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE }, - { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE }, - { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE }, - { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE }, - { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE }, - { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE }, - { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE 
}, -}; - -#define IDLE_REGS_COUNT 237 -static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { - { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE }, - { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, - { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE }, - { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE }, - { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE }, - { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE }, - { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE }, - { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE }, - { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, - { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE }, - { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE }, - { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE }, - { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE }, - { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE }, - { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE }, - { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE }, - { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE }, - { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE }, - { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE }, - { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE }, - { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE }, - { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE }, - { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE }, - { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE }, - { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE }, - { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE }, - { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE }, - { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE }, - { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE }, - { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE }, - { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE }, - { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE }, - { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE }, - { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE }, - { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE }, - { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE }, - { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE }, - { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE }, - { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE }, - { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE }, - { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE }, - { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE }, - { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE }, - { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE }, - { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE }, - { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE }, - { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, - { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, - { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, - { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, - { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE }, - { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE }, - { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE }, - { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, 
RI_ALL_ONLINE }, - { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE }, - { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE }, - { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE }, - { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE }, - { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, - { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, - { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, - { 0x120848, 1, RI_ALL_ONLINE }, { 0x120850, 1, RI_ALL_ONLINE }, - { 0x120858, 1, RI_ALL_ONLINE }, { 0x120860, 1, RI_ALL_ONLINE }, - { 0x120868, 1, RI_ALL_ONLINE }, { 0x120870, 1, RI_ALL_ONLINE }, - { 0x120878, 1, RI_ALL_ONLINE }, { 0x120880, 1, RI_ALL_ONLINE }, - { 0x120888, 1, RI_ALL_ONLINE }, { 0x120890, 1, RI_ALL_ONLINE }, - { 0x120898, 1, RI_ALL_ONLINE }, { 0x1208a0, 1, RI_ALL_ONLINE }, - { 0x1208a8, 1, RI_ALL_ONLINE }, { 0x1208b0, 1, RI_ALL_ONLINE }, - { 0x1208b8, 1, RI_ALL_ONLINE }, { 0x1208c0, 1, RI_ALL_ONLINE }, - { 0x1208c8, 1, RI_ALL_ONLINE }, { 0x1208d0, 1, RI_ALL_ONLINE }, - { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, - { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, - { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, - { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE }, - { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE }, - { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE }, - { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE }, - { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE }, - { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE }, - { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE }, - { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE }, - { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE }, - { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE }, - { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE }, - { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE }, - { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE }, - { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE }, - { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE }, - { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE }, - { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE }, - { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE }, - { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, - { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, - { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, - { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE }, - { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE }, - { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE }, - { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE }, - { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE }, - { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE }, - { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE }, - { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE }, - { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE }, - { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE }, - { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE }, - { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE }, - { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE }, - { 0x238000, 1, RI_ALL_ONLINE }, { 
0x238040, 1, RI_ALL_ONLINE }, - { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE }, - { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE }, - { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE }, - { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE }, - { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE }, - { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE }, - { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, - { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, - { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE }, - { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, - { 0x3380c0, 1, RI_ALL_ONLINE } -}; - -#define WREGS_COUNT_E1 1 -static const u32 read_reg_e1_0[] = { 0x1b1000 }; - -static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = { - { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } -}; - -#define WREGS_COUNT_E1H 1 -static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; - -static const struct wreg_addr wreg_addrs_e1h[WREGS_COUNT_E1H] = { - { 0x1b0c00, 256, 2, read_reg_e1h_0, RI_E1H_OFFLINE } +static const struct reg_addr reg_addrs[] = { + { 0x2000, 341, RI_ALL_ONLINE }, + { 0x2800, 103, RI_ALL_ONLINE }, + { 0x3000, 287, RI_ALL_ONLINE }, + { 0x3800, 331, RI_ALL_ONLINE }, + { 0x8800, 6, RI_ALL_ONLINE }, + { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x9000, 147, RI_E2E3E3B0_ONLINE }, + { 0x924c, 1, RI_E2_ONLINE }, + { 0x9250, 16, RI_E2E3E3B0_ONLINE }, + { 0x9400, 33, RI_E2E3E3B0_ONLINE }, + { 0x9484, 5, RI_E3E3B0_ONLINE }, + { 0xa000, 27, RI_ALL_ONLINE }, + { 0xa06c, 1, RI_E1E1H_ONLINE }, + { 0xa070, 71, RI_ALL_ONLINE }, + { 0xa18c, 4, RI_E1E1H_ONLINE }, + { 0xa19c, 62, RI_ALL_ONLINE }, + { 0xa294, 2, RI_E1E1H_ONLINE }, + { 0xa29c, 2, RI_ALL_ONLINE }, + { 0xa2a4, 2, RI_E1E1HE2_ONLINE }, + { 0xa2ac, 52, RI_ALL_ONLINE }, + { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3b8, 2, RI_E3E3B0_ONLINE }, + { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa400, 40, RI_ALL_ONLINE }, + { 0xa4a0, 1, RI_E1E1HE2_ONLINE }, + { 0xa4a4, 2, RI_ALL_ONLINE }, + { 0xa4ac, 2, RI_E1E1H_ONLINE }, + { 0xa4b4, 1, RI_E1E1HE2_ONLINE }, + { 0xa4b8, 2, RI_E1E1H_ONLINE }, + { 0xa4c0, 3, RI_ALL_ONLINE }, + { 0xa4cc, 5, RI_E1E1H_ONLINE }, + { 0xa4e0, 3, RI_ALL_ONLINE }, + { 0xa4fc, 2, RI_ALL_ONLINE }, + { 0xa504, 1, RI_E1E1H_ONLINE }, + { 0xa508, 3, RI_ALL_ONLINE }, + { 0xa518, 1, RI_ALL_ONLINE }, + { 0xa520, 1, RI_ALL_ONLINE }, + { 0xa528, 1, RI_ALL_ONLINE }, + { 0xa530, 1, RI_ALL_ONLINE }, + { 0xa538, 1, RI_ALL_ONLINE }, + { 0xa540, 1, RI_ALL_ONLINE }, + { 0xa548, 1, RI_E1E1H_ONLINE }, + { 0xa550, 1, RI_E1E1H_ONLINE }, + { 0xa558, 1, RI_E1E1H_ONLINE }, + { 0xa560, 1, RI_E1E1H_ONLINE }, + { 0xa568, 1, RI_E1E1H_ONLINE }, + { 0xa570, 1, RI_ALL_ONLINE }, + { 0xa580, 1, RI_ALL_ONLINE }, + { 0xa590, 1, RI_ALL_ONLINE }, + { 0xa5a0, 1, RI_E1E1HE2_ONLINE }, + { 0xa5c0, 1, RI_ALL_ONLINE }, + { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0xa5f8, 1, RI_E1HE2_ONLINE }, + { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE }, + { 0xa620, 6, RI_E2E3E3B0_ONLINE }, + { 0xa638, 20, RI_E2_ONLINE }, + { 0xa688, 42, RI_E2E3E3B0_ONLINE }, + { 0xa730, 1, RI_E2_ONLINE }, + { 0xa734, 2, RI_E2E3E3B0_ONLINE }, + { 0xa73c, 4, 
RI_E2_ONLINE }, + { 0xa74c, 5, RI_E2E3E3B0_ONLINE }, + { 0xa760, 5, RI_E2_ONLINE }, + { 0xa774, 7, RI_E2E3E3B0_ONLINE }, + { 0xa790, 15, RI_E2_ONLINE }, + { 0xa7cc, 4, RI_E2E3E3B0_ONLINE }, + { 0xa7e0, 6, RI_E3E3B0_ONLINE }, + { 0xa800, 18, RI_E2_ONLINE }, + { 0xa848, 33, RI_E2E3E3B0_ONLINE }, + { 0xa8cc, 2, RI_E3E3B0_ONLINE }, + { 0xa8d4, 4, RI_E2E3E3B0_ONLINE }, + { 0xa8e4, 1, RI_E3E3B0_ONLINE }, + { 0xa8e8, 1, RI_E2E3E3B0_ONLINE }, + { 0xa8f0, 1, RI_E2E3E3B0_ONLINE }, + { 0xa8f8, 30, RI_E3E3B0_ONLINE }, + { 0xa974, 73, RI_E3E3B0_ONLINE }, + { 0xac30, 1, RI_E3E3B0_ONLINE }, + { 0xac40, 1, RI_E3E3B0_ONLINE }, + { 0xac50, 1, RI_E3E3B0_ONLINE }, + { 0xac60, 1, RI_E3B0_ONLINE }, + { 0x10000, 9, RI_ALL_ONLINE }, + { 0x10024, 1, RI_E1E1HE2_ONLINE }, + { 0x10028, 5, RI_ALL_ONLINE }, + { 0x1003c, 6, RI_E1E1HE2_ONLINE }, + { 0x10054, 20, RI_ALL_ONLINE }, + { 0x100a4, 4, RI_E1E1HE2_ONLINE }, + { 0x100b4, 11, RI_ALL_ONLINE }, + { 0x100e0, 4, RI_E1E1HE2_ONLINE }, + { 0x100f0, 8, RI_ALL_ONLINE }, + { 0x10110, 6, RI_E1E1HE2_ONLINE }, + { 0x10128, 110, RI_ALL_ONLINE }, + { 0x102e0, 4, RI_E1E1HE2_ONLINE }, + { 0x102f0, 18, RI_ALL_ONLINE }, + { 0x10338, 20, RI_E1E1HE2_ONLINE }, + { 0x10388, 10, RI_ALL_ONLINE }, + { 0x10400, 6, RI_E1E1HE2_ONLINE }, + { 0x10418, 6, RI_ALL_ONLINE }, + { 0x10430, 10, RI_E1E1HE2_ONLINE }, + { 0x10458, 22, RI_ALL_ONLINE }, + { 0x104b0, 12, RI_E1E1HE2_ONLINE }, + { 0x104e0, 1, RI_ALL_ONLINE }, + { 0x104e8, 2, RI_ALL_ONLINE }, + { 0x104f4, 2, RI_ALL_ONLINE }, + { 0x10500, 146, RI_ALL_ONLINE }, + { 0x10750, 2, RI_E1E1HE2_ONLINE }, + { 0x10760, 2, RI_E1E1HE2_ONLINE }, + { 0x10770, 2, RI_E1E1HE2_ONLINE }, + { 0x10780, 2, RI_E1E1HE2_ONLINE }, + { 0x10790, 2, RI_ALL_ONLINE }, + { 0x107a0, 2, RI_E1E1HE2_ONLINE }, + { 0x107b0, 2, RI_E1E1HE2_ONLINE }, + { 0x107c0, 2, RI_E1E1HE2_ONLINE }, + { 0x107d0, 2, RI_E1E1HE2_ONLINE }, + { 0x107e0, 2, RI_ALL_ONLINE }, + { 0x10880, 2, RI_ALL_ONLINE }, + { 0x10900, 2, RI_ALL_ONLINE }, + { 0x16000, 1, RI_E1HE2_ONLINE }, + { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE }, + { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE }, + { 0x16090, 4, RI_E1HE2E3_ONLINE }, + { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE }, + { 0x160dc, 2, RI_E1HE2_ONLINE }, + { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE }, + { 0x1610c, 2, RI_E1HE2_ONLINE }, + { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE }, + { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE }, + { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x18010, 35, RI_E2E3E3B0_ONLINE }, + { 0x180a4, 2, RI_E2E3E3B0_ONLINE }, + { 0x180c0, 9, RI_E2E3E3B0_ONLINE }, + { 0x180e4, 1, RI_E2E3_ONLINE }, + { 0x180e8, 2, RI_E2E3E3B0_ONLINE }, + { 0x180f0, 1, RI_E2E3_ONLINE }, + { 0x180f4, 79, RI_E2E3E3B0_ONLINE }, + { 0x18230, 1, RI_E2E3_ONLINE }, + { 0x18234, 2, RI_E2E3E3B0_ONLINE }, + { 0x1823c, 1, RI_E2E3_ONLINE }, + { 0x18240, 13, RI_E2E3E3B0_ONLINE }, + { 0x18274, 1, RI_E2_ONLINE }, + { 0x18278, 81, RI_E2E3E3B0_ONLINE }, + { 0x18440, 63, RI_E2E3E3B0_ONLINE }, + { 0x18570, 42, RI_E3E3B0_ONLINE }, + { 0x18618, 25, RI_E3B0_ONLINE }, + { 0x18680, 44, RI_E3B0_ONLINE }, + { 0x18748, 12, RI_E3B0_ONLINE }, + { 0x18788, 1, RI_E3B0_ONLINE }, + { 0x1879c, 6, RI_E3B0_ONLINE }, + { 0x187c4, 51, RI_E3B0_ONLINE }, + { 0x18a00, 48, RI_E3B0_ONLINE }, + { 0x20000, 24, RI_ALL_ONLINE }, + { 0x20060, 8, RI_ALL_ONLINE }, + { 0x20080, 94, RI_ALL_ONLINE }, + { 0x201f8, 1, RI_E1E1H_ONLINE }, + { 0x201fc, 1, RI_ALL_ONLINE }, + { 0x20200, 1, RI_E1E1H_ONLINE }, + { 0x20204, 1, RI_ALL_ONLINE }, + { 
0x20208, 1, RI_E1E1H_ONLINE }, + { 0x2020c, 39, RI_ALL_ONLINE }, + { 0x202c8, 1, RI_E2E3E3B0_ONLINE }, + { 0x202d8, 4, RI_E2E3E3B0_ONLINE }, + { 0x202f0, 1, RI_E3B0_ONLINE }, + { 0x20400, 2, RI_ALL_ONLINE }, + { 0x2040c, 8, RI_ALL_ONLINE }, + { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE }, + { 0x20480, 1, RI_ALL_ONLINE }, + { 0x20500, 1, RI_ALL_ONLINE }, + { 0x20600, 1, RI_ALL_ONLINE }, + { 0x28000, 1, RI_ALL_ONLINE }, + { 0x28004, 8191, RI_ALL_OFFLINE }, + { 0x30000, 1, RI_ALL_ONLINE }, + { 0x30004, 16383, RI_ALL_OFFLINE }, + { 0x40000, 98, RI_ALL_ONLINE }, + { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE }, + { 0x401c8, 1, RI_E1H_ONLINE }, + { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x401d4, 2, RI_E2E3E3B0_ONLINE }, + { 0x40200, 4, RI_ALL_ONLINE }, + { 0x40220, 6, RI_E2E3E3B0_ONLINE }, + { 0x40238, 8, RI_E2E3_ONLINE }, + { 0x40258, 4, RI_E2E3E3B0_ONLINE }, + { 0x40268, 2, RI_E3E3B0_ONLINE }, + { 0x40270, 17, RI_E3B0_ONLINE }, + { 0x40400, 43, RI_ALL_ONLINE }, + { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE }, + { 0x404e0, 1, RI_E2E3E3B0_ONLINE }, + { 0x40500, 2, RI_ALL_ONLINE }, + { 0x40510, 2, RI_ALL_ONLINE }, + { 0x40520, 2, RI_ALL_ONLINE }, + { 0x40530, 2, RI_ALL_ONLINE }, + { 0x40540, 2, RI_ALL_ONLINE }, + { 0x40550, 10, RI_E2E3E3B0_ONLINE }, + { 0x40610, 2, RI_E2E3E3B0_ONLINE }, + { 0x42000, 164, RI_ALL_ONLINE }, + { 0x422c0, 4, RI_E2E3E3B0_ONLINE }, + { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE }, + { 0x422e8, 1, RI_E2E3E3B0_ONLINE }, + { 0x42400, 49, RI_ALL_ONLINE }, + { 0x424c8, 38, RI_ALL_ONLINE }, + { 0x42568, 2, RI_ALL_ONLINE }, + { 0x42640, 5, RI_E2E3E3B0_ONLINE }, + { 0x42800, 1, RI_ALL_ONLINE }, + { 0x50000, 1, RI_ALL_ONLINE }, + { 0x50004, 19, RI_ALL_ONLINE }, + { 0x50050, 8, RI_ALL_ONLINE }, + { 0x50070, 88, RI_ALL_ONLINE }, + { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE }, + { 0x50200, 2, RI_ALL_ONLINE }, + { 0x5020c, 7, RI_ALL_ONLINE }, + { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x50240, 1, RI_ALL_ONLINE }, + { 0x50280, 1, RI_ALL_ONLINE }, + { 0x50300, 1, RI_E2E3E3B0_ONLINE }, + { 0x5030c, 1, RI_E2E3E3B0_ONLINE }, + { 0x50318, 1, RI_E2E3E3B0_ONLINE }, + { 0x5031c, 1, RI_E2E3E3B0_ONLINE }, + { 0x50320, 2, RI_E2E3E3B0_ONLINE }, + { 0x50330, 1, RI_E3B0_ONLINE }, + { 0x52000, 1, RI_ALL_ONLINE }, + { 0x54000, 1, RI_ALL_ONLINE }, + { 0x54004, 3327, RI_ALL_OFFLINE }, + { 0x58000, 1, RI_ALL_ONLINE }, + { 0x58004, 8191, RI_E1E1H_OFFLINE }, + { 0x60000, 26, RI_ALL_ONLINE }, + { 0x60068, 8, RI_E1E1H_ONLINE }, + { 0x60088, 12, RI_ALL_ONLINE }, + { 0x600b8, 9, RI_E1E1H_ONLINE }, + { 0x600dc, 1, RI_ALL_ONLINE }, + { 0x600e0, 5, RI_E1E1H_ONLINE }, + { 0x600f4, 1, RI_E1E1HE2_ONLINE }, + { 0x600f8, 1, RI_E1E1H_ONLINE }, + { 0x600fc, 8, RI_ALL_ONLINE }, + { 0x6013c, 24, RI_E1H_ONLINE }, + { 0x6019c, 2, RI_E2E3E3B0_ONLINE }, + { 0x601ac, 18, RI_E2E3E3B0_ONLINE }, + { 0x60200, 1, RI_ALL_ONLINE }, + { 0x60204, 2, RI_ALL_OFFLINE }, + { 0x60210, 13, RI_E2E3E3B0_ONLINE }, + { 0x60244, 16, RI_E3B0_ONLINE }, + { 0x61000, 1, RI_ALL_ONLINE }, + { 0x61004, 511, RI_ALL_OFFLINE }, + { 0x61800, 512, RI_E3E3B0_OFFLINE }, + { 0x70000, 8, RI_ALL_ONLINE }, + { 0x70020, 8184, RI_ALL_OFFLINE }, + { 0x78000, 8192, RI_E3E3B0_OFFLINE }, + { 0x85000, 3, RI_ALL_ONLINE }, + { 0x8501c, 7, RI_ALL_ONLINE }, + { 0x85048, 1, RI_ALL_ONLINE }, + { 0x85200, 32, RI_ALL_ONLINE }, + { 0xb0000, 16384, RI_E1H_ONLINE }, + { 0xc1000, 7, RI_ALL_ONLINE }, + { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, + { 0xc1800, 2, RI_ALL_ONLINE }, + { 0xc2000, 164, RI_ALL_ONLINE }, + { 0xc22c0, 5, RI_E2E3E3B0_ONLINE }, + { 0xc22d8, 4, RI_E2E3E3B0_ONLINE }, + { 0xc2400, 49, RI_ALL_ONLINE }, + { 
0xc24c8, 38, RI_ALL_ONLINE }, + { 0xc2568, 2, RI_ALL_ONLINE }, + { 0xc2600, 1, RI_ALL_ONLINE }, + { 0xc4000, 165, RI_ALL_ONLINE }, + { 0xc42d8, 2, RI_E2E3E3B0_ONLINE }, + { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE }, + { 0xc42fc, 1, RI_E2E3E3B0_ONLINE }, + { 0xc4400, 51, RI_ALL_ONLINE }, + { 0xc44d0, 38, RI_ALL_ONLINE }, + { 0xc4570, 2, RI_ALL_ONLINE }, + { 0xc4578, 5, RI_E2E3E3B0_ONLINE }, + { 0xc4600, 1, RI_ALL_ONLINE }, + { 0xd0000, 19, RI_ALL_ONLINE }, + { 0xd004c, 8, RI_ALL_ONLINE }, + { 0xd006c, 91, RI_ALL_ONLINE }, + { 0xd01fc, 1, RI_E2E3E3B0_ONLINE }, + { 0xd0200, 2, RI_ALL_ONLINE }, + { 0xd020c, 7, RI_ALL_ONLINE }, + { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE }, + { 0xd0280, 1, RI_ALL_ONLINE }, + { 0xd0300, 1, RI_ALL_ONLINE }, + { 0xd0400, 1, RI_ALL_ONLINE }, + { 0xd0818, 1, RI_E3B0_ONLINE }, + { 0xd4000, 1, RI_ALL_ONLINE }, + { 0xd4004, 2559, RI_ALL_OFFLINE }, + { 0xd8000, 1, RI_ALL_ONLINE }, + { 0xd8004, 8191, RI_ALL_OFFLINE }, + { 0xe0000, 21, RI_ALL_ONLINE }, + { 0xe0054, 8, RI_ALL_ONLINE }, + { 0xe0074, 49, RI_ALL_ONLINE }, + { 0xe0138, 1, RI_E1E1H_ONLINE }, + { 0xe013c, 35, RI_ALL_ONLINE }, + { 0xe01f4, 1, RI_E2_ONLINE }, + { 0xe01f8, 1, RI_E2E3E3B0_ONLINE }, + { 0xe0200, 2, RI_ALL_ONLINE }, + { 0xe020c, 8, RI_ALL_ONLINE }, + { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE }, + { 0xe0280, 1, RI_ALL_ONLINE }, + { 0xe0300, 1, RI_ALL_ONLINE }, + { 0xe0400, 1, RI_E3B0_ONLINE }, + { 0xe1000, 1, RI_ALL_ONLINE }, + { 0xe2000, 1, RI_ALL_ONLINE }, + { 0xe2004, 2047, RI_ALL_OFFLINE }, + { 0xf0000, 1, RI_ALL_ONLINE }, + { 0xf0004, 16383, RI_ALL_OFFLINE }, + { 0x101000, 12, RI_ALL_ONLINE }, + { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x101054, 3, RI_E2E3E3B0_ONLINE }, + { 0x101100, 1, RI_ALL_ONLINE }, + { 0x101800, 8, RI_ALL_ONLINE }, + { 0x102000, 18, RI_ALL_ONLINE }, + { 0x102068, 6, RI_E2E3E3B0_ONLINE }, + { 0x102080, 17, RI_ALL_ONLINE }, + { 0x1020c8, 8, RI_E1H_ONLINE }, + { 0x1020e8, 9, RI_E2E3E3B0_ONLINE }, + { 0x102400, 1, RI_ALL_ONLINE }, + { 0x103000, 26, RI_ALL_ONLINE }, + { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE }, + { 0x1030ac, 2, RI_E2E3E3B0_ONLINE }, + { 0x1030b4, 1, RI_E2_ONLINE }, + { 0x1030b8, 7, RI_E2E3E3B0_ONLINE }, + { 0x1030d8, 8, RI_E2E3E3B0_ONLINE }, + { 0x103400, 1, RI_E2E3E3B0_ONLINE }, + { 0x103404, 135, RI_E2E3E3B0_OFFLINE }, + { 0x103800, 8, RI_ALL_ONLINE }, + { 0x104000, 63, RI_ALL_ONLINE }, + { 0x10411c, 16, RI_E2E3E3B0_ONLINE }, + { 0x104200, 17, RI_ALL_ONLINE }, + { 0x104400, 64, RI_ALL_ONLINE }, + { 0x104500, 192, RI_ALL_OFFLINE }, + { 0x104800, 64, RI_ALL_ONLINE }, + { 0x104900, 192, RI_ALL_OFFLINE }, + { 0x105000, 256, RI_ALL_ONLINE }, + { 0x105400, 768, RI_ALL_OFFLINE }, + { 0x107000, 7, RI_E2E3E3B0_ONLINE }, + { 0x10701c, 1, RI_E3E3B0_ONLINE }, + { 0x108000, 33, RI_E1E1H_ONLINE }, + { 0x1080ac, 5, RI_E1H_ONLINE }, + { 0x108100, 5, RI_E1E1H_ONLINE }, + { 0x108120, 5, RI_E1E1H_ONLINE }, + { 0x108200, 74, RI_E1E1H_ONLINE }, + { 0x108400, 74, RI_E1E1H_ONLINE }, + { 0x108800, 152, RI_E1E1H_ONLINE }, + { 0x110000, 111, RI_E2E3E3B0_ONLINE }, + { 0x1101dc, 1, RI_E3E3B0_ONLINE }, + { 0x110200, 4, RI_E2E3E3B0_ONLINE }, + { 0x120000, 2, RI_ALL_ONLINE }, + { 0x120008, 4, RI_ALL_ONLINE }, + { 0x120018, 3, RI_ALL_ONLINE }, + { 0x120024, 4, RI_ALL_ONLINE }, + { 0x120034, 3, RI_ALL_ONLINE }, + { 0x120040, 4, RI_ALL_ONLINE }, + { 0x120050, 3, RI_ALL_ONLINE }, + { 0x12005c, 4, RI_ALL_ONLINE }, + { 0x12006c, 3, RI_ALL_ONLINE }, + { 0x120078, 4, RI_ALL_ONLINE }, + { 0x120088, 3, RI_ALL_ONLINE }, + { 0x120094, 4, RI_ALL_ONLINE }, + { 0x1200a4, 3, RI_ALL_ONLINE }, + { 0x1200b0, 4, RI_ALL_ONLINE }, 
+ { 0x1200c0, 3, RI_ALL_ONLINE }, + { 0x1200cc, 4, RI_ALL_ONLINE }, + { 0x1200dc, 3, RI_ALL_ONLINE }, + { 0x1200e8, 4, RI_ALL_ONLINE }, + { 0x1200f8, 3, RI_ALL_ONLINE }, + { 0x120104, 4, RI_ALL_ONLINE }, + { 0x120114, 1, RI_ALL_ONLINE }, + { 0x120118, 22, RI_ALL_ONLINE }, + { 0x120170, 2, RI_E1E1H_ONLINE }, + { 0x120178, 243, RI_ALL_ONLINE }, + { 0x120544, 4, RI_E1E1H_ONLINE }, + { 0x120554, 6, RI_ALL_ONLINE }, + { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE }, + { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE }, + { 0x1205f4, 1, RI_E1HE2_ONLINE }, + { 0x1205f8, 4, RI_E2E3E3B0_ONLINE }, + { 0x120618, 1, RI_E2E3E3B0_ONLINE }, + { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE }, + { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE }, + { 0x120698, 3, RI_E2E3E3B0_ONLINE }, + { 0x1206a4, 1, RI_E2_ONLINE }, + { 0x1206a8, 1, RI_E2E3E3B0_ONLINE }, + { 0x1206b0, 75, RI_E2E3E3B0_ONLINE }, + { 0x1207dc, 1, RI_E2_ONLINE }, + { 0x1207fc, 1, RI_E2E3E3B0_ONLINE }, + { 0x12080c, 65, RI_ALL_ONLINE }, + { 0x120910, 7, RI_E2E3E3B0_ONLINE }, + { 0x120930, 9, RI_E2E3E3B0_ONLINE }, + { 0x12095c, 37, RI_E3E3B0_ONLINE }, + { 0x120a00, 2, RI_E1E1HE2_ONLINE }, + { 0x120b00, 1, RI_E3E3B0_ONLINE }, + { 0x122000, 2, RI_ALL_ONLINE }, + { 0x122008, 2046, RI_E1_OFFLINE }, + { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE }, + { 0x130000, 35, RI_E2E3E3B0_ONLINE }, + { 0x130100, 29, RI_E2E3E3B0_ONLINE }, + { 0x130180, 1, RI_E2E3E3B0_ONLINE }, + { 0x130200, 1, RI_E2E3E3B0_ONLINE }, + { 0x130280, 1, RI_E2E3E3B0_ONLINE }, + { 0x130300, 5, RI_E2E3E3B0_ONLINE }, + { 0x130380, 1, RI_E2E3E3B0_ONLINE }, + { 0x130400, 1, RI_E2E3E3B0_ONLINE }, + { 0x130480, 5, RI_E2E3E3B0_ONLINE }, + { 0x130800, 72, RI_E2E3E3B0_ONLINE }, + { 0x131000, 136, RI_E2E3E3B0_ONLINE }, + { 0x132000, 148, RI_E2E3E3B0_ONLINE }, + { 0x134000, 544, RI_E2E3E3B0_ONLINE }, + { 0x140000, 1, RI_ALL_ONLINE }, + { 0x140004, 9, RI_E1E1HE2E3_ONLINE }, + { 0x140028, 8, RI_ALL_ONLINE }, + { 0x140048, 10, RI_E1E1HE2E3_ONLINE }, + { 0x140070, 1, RI_ALL_ONLINE }, + { 0x140074, 10, RI_E1E1HE2E3_ONLINE }, + { 0x14009c, 1, RI_ALL_ONLINE }, + { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE }, + { 0x1400b4, 7, RI_ALL_ONLINE }, + { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE }, + { 0x1400f8, 2, RI_ALL_ONLINE }, + { 0x140100, 5, RI_E1E1H_ONLINE }, + { 0x140114, 5, RI_E1E1HE2E3_ONLINE }, + { 0x140128, 7, RI_ALL_ONLINE }, + { 0x140144, 9, RI_E1E1HE2E3_ONLINE }, + { 0x140168, 8, RI_ALL_ONLINE }, + { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, + { 0x140194, 13, RI_ALL_ONLINE }, + { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, + { 0x140220, 4, RI_E2E3_ONLINE }, + { 0x140240, 4, RI_E2E3_ONLINE }, + { 0x140260, 4, RI_E2E3_ONLINE }, + { 0x140280, 4, RI_E2E3_ONLINE }, + { 0x1402a0, 4, RI_E2E3_ONLINE }, + { 0x1402c0, 4, RI_E2E3_ONLINE }, + { 0x1402e0, 2, RI_E2E3_ONLINE }, + { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, + { 0x1402f0, 9, RI_E2E3_ONLINE }, + { 0x140314, 44, RI_E3B0_ONLINE }, + { 0x1403d0, 70, RI_E3B0_ONLINE }, + { 0x144000, 4, RI_E1E1H_ONLINE }, + { 0x148000, 4, RI_E1E1H_ONLINE }, + { 0x14c000, 4, RI_E1E1H_ONLINE }, + { 0x150000, 4, RI_E1E1H_ONLINE }, + { 0x154000, 4, RI_E1E1H_ONLINE }, + { 0x158000, 4, RI_E1E1H_ONLINE }, + { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x15c008, 5, RI_E1H_ONLINE }, + { 0x15c020, 8, RI_E2E3E3B0_ONLINE }, + { 0x15c040, 1, RI_E2E3_ONLINE }, + { 0x15c044, 2, RI_E2E3E3B0_ONLINE }, + { 0x15c04c, 8, RI_E2E3_ONLINE }, + { 0x15c06c, 8, RI_E2E3E3B0_ONLINE }, + { 0x15c090, 13, RI_E2E3E3B0_ONLINE }, + { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE }, + { 0x15c128, 2, RI_E2E3_ONLINE 
}, + { 0x15c130, 8, RI_E2E3E3B0_ONLINE }, + { 0x15c150, 2, RI_E3E3B0_ONLINE }, + { 0x15c158, 2, RI_E3_ONLINE }, + { 0x15c160, 149, RI_E3B0_ONLINE }, + { 0x161000, 7, RI_ALL_ONLINE }, + { 0x16103c, 2, RI_E2E3E3B0_ONLINE }, + { 0x161800, 2, RI_ALL_ONLINE }, + { 0x162000, 54, RI_E3E3B0_ONLINE }, + { 0x162200, 60, RI_E3E3B0_ONLINE }, + { 0x162400, 54, RI_E3E3B0_ONLINE }, + { 0x162600, 60, RI_E3E3B0_ONLINE }, + { 0x162800, 54, RI_E3E3B0_ONLINE }, + { 0x162a00, 60, RI_E3E3B0_ONLINE }, + { 0x162c00, 54, RI_E3E3B0_ONLINE }, + { 0x162e00, 60, RI_E3E3B0_ONLINE }, + { 0x164000, 60, RI_ALL_ONLINE }, + { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x164118, 15, RI_E2E3E3B0_ONLINE }, + { 0x164200, 1, RI_ALL_ONLINE }, + { 0x164208, 1, RI_ALL_ONLINE }, + { 0x164210, 1, RI_ALL_ONLINE }, + { 0x164218, 1, RI_ALL_ONLINE }, + { 0x164220, 1, RI_ALL_ONLINE }, + { 0x164228, 1, RI_ALL_ONLINE }, + { 0x164230, 1, RI_ALL_ONLINE }, + { 0x164238, 1, RI_ALL_ONLINE }, + { 0x164240, 1, RI_ALL_ONLINE }, + { 0x164248, 1, RI_ALL_ONLINE }, + { 0x164250, 1, RI_ALL_ONLINE }, + { 0x164258, 1, RI_ALL_ONLINE }, + { 0x164260, 1, RI_ALL_ONLINE }, + { 0x164270, 2, RI_ALL_ONLINE }, + { 0x164280, 2, RI_ALL_ONLINE }, + { 0x164800, 2, RI_ALL_ONLINE }, + { 0x165000, 2, RI_ALL_ONLINE }, + { 0x166000, 164, RI_ALL_ONLINE }, + { 0x1662cc, 7, RI_E2E3E3B0_ONLINE }, + { 0x166400, 49, RI_ALL_ONLINE }, + { 0x1664c8, 38, RI_ALL_ONLINE }, + { 0x166568, 2, RI_ALL_ONLINE }, + { 0x166570, 5, RI_E2E3E3B0_ONLINE }, + { 0x166800, 1, RI_ALL_ONLINE }, + { 0x168000, 137, RI_ALL_ONLINE }, + { 0x168224, 2, RI_E1E1H_ONLINE }, + { 0x16822c, 29, RI_ALL_ONLINE }, + { 0x1682a0, 12, RI_E1E1H_ONLINE }, + { 0x1682d0, 12, RI_ALL_ONLINE }, + { 0x168300, 2, RI_E1E1H_ONLINE }, + { 0x168308, 68, RI_ALL_ONLINE }, + { 0x168418, 2, RI_E1E1H_ONLINE }, + { 0x168420, 6, RI_ALL_ONLINE }, + { 0x168800, 19, RI_ALL_ONLINE }, + { 0x168900, 1, RI_ALL_ONLINE }, + { 0x168a00, 128, RI_ALL_ONLINE }, + { 0x16a000, 1, RI_ALL_ONLINE }, + { 0x16a004, 1535, RI_ALL_OFFLINE }, + { 0x16c000, 1, RI_ALL_ONLINE }, + { 0x16c004, 1535, RI_ALL_OFFLINE }, + { 0x16e000, 16, RI_E1H_ONLINE }, + { 0x16e040, 8, RI_E2E3E3B0_ONLINE }, + { 0x16e100, 1, RI_E1H_ONLINE }, + { 0x16e200, 2, RI_E1H_ONLINE }, + { 0x16e400, 161, RI_E1H_ONLINE }, + { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x16e68c, 12, RI_E1H_ONLINE }, + { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE }, + { 0x16e6cc, 4, RI_E1H_ONLINE }, + { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE }, + { 0x16e6e8, 5, RI_E2E3_ONLINE }, + { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE }, + { 0x16e768, 17, RI_E2E3E3B0_ONLINE }, + { 0x16e7ac, 12, RI_E3B0_ONLINE }, + { 0x170000, 24, RI_ALL_ONLINE }, + { 0x170060, 4, RI_E1E1H_ONLINE }, + { 0x170070, 65, RI_ALL_ONLINE }, + { 0x170194, 11, RI_E2E3E3B0_ONLINE }, + { 0x1701c4, 1, RI_E2E3E3B0_ONLINE }, + { 0x1701cc, 7, RI_E2E3E3B0_ONLINE }, + { 0x1701e8, 1, RI_E3E3B0_ONLINE }, + { 0x1701ec, 1, RI_E2E3E3B0_ONLINE }, + { 0x1701f4, 1, RI_E2E3E3B0_ONLINE }, + { 0x170200, 4, RI_ALL_ONLINE }, + { 0x170214, 1, RI_ALL_ONLINE }, + { 0x170218, 77, RI_E2E3E3B0_ONLINE }, + { 0x170400, 64, RI_E2E3E3B0_ONLINE }, + { 0x178000, 1, RI_ALL_ONLINE }, + { 0x180000, 61, RI_ALL_ONLINE }, + { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x180200, 58, RI_ALL_ONLINE }, + { 0x180340, 4, RI_ALL_ONLINE }, + { 0x180380, 1, RI_E2E3E3B0_ONLINE }, + { 0x180388, 1, RI_E2E3E3B0_ONLINE }, + { 0x180390, 1, RI_E2E3E3B0_ONLINE }, + { 0x180398, 1, RI_E2E3E3B0_ONLINE }, + { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, + { 0x1803b4, 2, RI_E3E3B0_ONLINE }, + { 0x180400, 1, RI_ALL_ONLINE }, + { 0x180404, 255, 
RI_E1E1H_OFFLINE }, + { 0x181000, 4, RI_ALL_ONLINE }, + { 0x181010, 1020, RI_ALL_OFFLINE }, + { 0x182000, 4, RI_E3E3B0_ONLINE }, + { 0x1a0000, 1, RI_ALL_ONLINE }, + { 0x1a0004, 5631, RI_ALL_OFFLINE }, + { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x1a8000, 1, RI_ALL_ONLINE }, + { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x1b0000, 1, RI_ALL_ONLINE }, + { 0x1b0004, 15, RI_E1H_OFFLINE }, + { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b0044, 239, RI_E1H_OFFLINE }, + { 0x1b0400, 1, RI_ALL_ONLINE }, + { 0x1b0404, 255, RI_E1H_OFFLINE }, + { 0x1b0800, 1, RI_ALL_ONLINE }, + { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b0c00, 1, RI_ALL_ONLINE }, + { 0x1b1000, 1, RI_ALL_ONLINE }, + { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b1400, 1, RI_ALL_ONLINE }, + { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b1800, 128, RI_ALL_OFFLINE }, + { 0x1b1c00, 128, RI_ALL_OFFLINE }, + { 0x1b2000, 1, RI_ALL_ONLINE }, + { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x1b8000, 1, RI_ALL_ONLINE }, + { 0x1b8040, 1, RI_ALL_ONLINE }, + { 0x1b8080, 1, RI_ALL_ONLINE }, + { 0x1b80c0, 1, RI_ALL_ONLINE }, + { 0x1b8100, 1, RI_ALL_ONLINE }, + { 0x1b8140, 1, RI_ALL_ONLINE }, + { 0x1b8180, 1, RI_ALL_ONLINE }, + { 0x1b81c0, 1, RI_ALL_ONLINE }, + { 0x1b8200, 1, RI_ALL_ONLINE }, + { 0x1b8240, 1, RI_ALL_ONLINE }, + { 0x1b8280, 1, RI_ALL_ONLINE }, + { 0x1b82c0, 1, RI_ALL_ONLINE }, + { 0x1b8300, 1, RI_ALL_ONLINE }, + { 0x1b8340, 1, RI_ALL_ONLINE }, + { 0x1b8380, 1, RI_ALL_ONLINE }, + { 0x1b83c0, 1, RI_ALL_ONLINE }, + { 0x1b8400, 1, RI_ALL_ONLINE }, + { 0x1b8440, 1, RI_ALL_ONLINE }, + { 0x1b8480, 1, RI_ALL_ONLINE }, + { 0x1b84c0, 1, RI_ALL_ONLINE }, + { 0x1b8500, 1, RI_ALL_ONLINE }, + { 0x1b8540, 1, RI_ALL_ONLINE }, + { 0x1b8580, 1, RI_ALL_ONLINE }, + { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x1b8800, 1, RI_ALL_ONLINE }, + { 0x1b8840, 1, RI_ALL_ONLINE }, + { 0x1b8880, 1, RI_ALL_ONLINE }, + { 0x1b88c0, 1, RI_ALL_ONLINE }, + { 0x1b8900, 1, RI_ALL_ONLINE }, + { 0x1b8940, 1, RI_ALL_ONLINE }, + { 0x1b8980, 1, RI_ALL_ONLINE }, + { 0x1b89c0, 1, RI_ALL_ONLINE }, + { 0x1b8a00, 1, RI_ALL_ONLINE }, + { 0x1b8a40, 1, RI_ALL_ONLINE }, + { 0x1b8a80, 1, RI_ALL_ONLINE }, + { 0x1b8ac0, 1, RI_ALL_ONLINE }, + { 0x1b8b00, 1, RI_ALL_ONLINE }, + { 0x1b8b40, 1, RI_ALL_ONLINE }, + { 0x1b8b80, 1, RI_ALL_ONLINE }, + { 0x1b8bc0, 1, RI_ALL_ONLINE }, + { 0x1b8c00, 1, RI_ALL_ONLINE }, + { 0x1b8c40, 1, RI_ALL_ONLINE }, + { 0x1b8c80, 1, RI_ALL_ONLINE }, + { 0x1b8cc0, 1, RI_ALL_ONLINE }, + { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x1b8d00, 1, RI_ALL_ONLINE }, + { 0x1b8d40, 1, RI_ALL_ONLINE }, + { 0x1b8d80, 1, RI_ALL_ONLINE }, + { 0x1b8dc0, 1, RI_ALL_ONLINE }, + { 0x1b8e00, 1, RI_ALL_ONLINE }, + { 0x1b8e40, 1, RI_ALL_ONLINE }, + { 0x1b8e80, 1, RI_ALL_ONLINE }, + { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x1b8fe8, 2, RI_E3E3B0_ONLINE }, + { 0x1b9000, 1, RI_E2E3E3B0_ONLINE }, + { 0x1b9040, 3, RI_E2E3E3B0_ONLINE }, + { 0x1b905c, 1, RI_E3E3B0_ONLINE }, + { 0x1b9064, 1, RI_E3B0_ONLINE }, + { 0x1b9080, 10, RI_E3B0_ONLINE }, + { 0x1b9400, 14, RI_E2E3E3B0_ONLINE }, + { 0x1b943c, 19, RI_E2E3E3B0_ONLINE }, + { 0x1b9490, 10, RI_E2E3E3B0_ONLINE }, + { 0x1c0000, 2, 
RI_ALL_ONLINE }, + { 0x200000, 65, RI_ALL_ONLINE }, + { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x200200, 58, RI_ALL_ONLINE }, + { 0x200340, 4, RI_ALL_ONLINE }, + { 0x200380, 1, RI_E2E3E3B0_ONLINE }, + { 0x200388, 1, RI_E2E3E3B0_ONLINE }, + { 0x200390, 1, RI_E2E3E3B0_ONLINE }, + { 0x200398, 1, RI_E2E3E3B0_ONLINE }, + { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, + { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, + { 0x200400, 1, RI_ALL_ONLINE }, + { 0x200404, 255, RI_E1E1H_OFFLINE }, + { 0x202000, 4, RI_ALL_ONLINE }, + { 0x202010, 2044, RI_ALL_OFFLINE }, + { 0x204000, 4, RI_E3E3B0_ONLINE }, + { 0x220000, 1, RI_ALL_ONLINE }, + { 0x220004, 5631, RI_ALL_OFFLINE }, + { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x228000, 1, RI_ALL_ONLINE }, + { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x230000, 1, RI_ALL_ONLINE }, + { 0x230004, 15, RI_E1H_OFFLINE }, + { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x230044, 239, RI_E1H_OFFLINE }, + { 0x230400, 1, RI_ALL_ONLINE }, + { 0x230404, 255, RI_E1H_OFFLINE }, + { 0x230800, 1, RI_ALL_ONLINE }, + { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x230c00, 1, RI_ALL_ONLINE }, + { 0x231000, 1, RI_ALL_ONLINE }, + { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x231400, 1, RI_ALL_ONLINE }, + { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x231800, 128, RI_ALL_OFFLINE }, + { 0x231c00, 128, RI_ALL_OFFLINE }, + { 0x232000, 1, RI_ALL_ONLINE }, + { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x232404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x238000, 1, RI_ALL_ONLINE }, + { 0x238040, 1, RI_ALL_ONLINE }, + { 0x238080, 1, RI_ALL_ONLINE }, + { 0x2380c0, 1, RI_ALL_ONLINE }, + { 0x238100, 1, RI_ALL_ONLINE }, + { 0x238140, 1, RI_ALL_ONLINE }, + { 0x238180, 1, RI_ALL_ONLINE }, + { 0x2381c0, 1, RI_ALL_ONLINE }, + { 0x238200, 1, RI_ALL_ONLINE }, + { 0x238240, 1, RI_ALL_ONLINE }, + { 0x238280, 1, RI_ALL_ONLINE }, + { 0x2382c0, 1, RI_ALL_ONLINE }, + { 0x238300, 1, RI_ALL_ONLINE }, + { 0x238340, 1, RI_ALL_ONLINE }, + { 0x238380, 1, RI_ALL_ONLINE }, + { 0x2383c0, 1, RI_ALL_ONLINE }, + { 0x238400, 1, RI_ALL_ONLINE }, + { 0x238440, 1, RI_ALL_ONLINE }, + { 0x238480, 1, RI_ALL_ONLINE }, + { 0x2384c0, 1, RI_ALL_ONLINE }, + { 0x238500, 1, RI_ALL_ONLINE }, + { 0x238540, 1, RI_ALL_ONLINE }, + { 0x238580, 1, RI_ALL_ONLINE }, + { 0x2385c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x238800, 1, RI_ALL_ONLINE }, + { 0x238840, 1, RI_ALL_ONLINE }, + { 0x238880, 1, RI_ALL_ONLINE }, + { 0x2388c0, 1, RI_ALL_ONLINE }, + { 0x238900, 1, RI_ALL_ONLINE }, + { 0x238940, 1, RI_ALL_ONLINE }, + { 0x238980, 1, RI_ALL_ONLINE }, + { 0x2389c0, 1, RI_ALL_ONLINE }, + { 0x238a00, 1, RI_ALL_ONLINE }, + { 0x238a40, 1, RI_ALL_ONLINE }, + { 0x238a80, 1, RI_ALL_ONLINE }, + { 0x238ac0, 1, RI_ALL_ONLINE }, + { 0x238b00, 1, RI_ALL_ONLINE }, + { 0x238b40, 1, RI_ALL_ONLINE }, + { 0x238b80, 1, RI_ALL_ONLINE }, + { 0x238bc0, 1, RI_ALL_ONLINE }, + { 0x238c00, 1, RI_ALL_ONLINE }, + { 0x238c40, 1, RI_ALL_ONLINE }, + { 0x238c80, 1, RI_ALL_ONLINE }, + { 0x238cc0, 1, RI_ALL_ONLINE }, + { 0x238cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x238d00, 1, RI_ALL_ONLINE }, + { 0x238d40, 1, RI_ALL_ONLINE }, + { 0x238d80, 1, RI_ALL_ONLINE }, + { 0x238dc0, 1, RI_ALL_ONLINE }, + { 0x238e00, 1, RI_ALL_ONLINE }, + { 0x238e40, 1, RI_ALL_ONLINE }, + { 0x238e80, 1, RI_ALL_ONLINE }, + { 0x238e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x238fc0, 1, 
RI_E1HE2E3E3B0_ONLINE }, + { 0x238fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x238fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x238fe8, 2, RI_E3E3B0_ONLINE }, + { 0x239000, 1, RI_E2E3E3B0_ONLINE }, + { 0x239040, 3, RI_E2E3E3B0_ONLINE }, + { 0x23905c, 1, RI_E3E3B0_ONLINE }, + { 0x239064, 1, RI_E3B0_ONLINE }, + { 0x239080, 10, RI_E3B0_ONLINE }, + { 0x240000, 2, RI_ALL_ONLINE }, + { 0x280000, 65, RI_ALL_ONLINE }, + { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x280200, 58, RI_ALL_ONLINE }, + { 0x280340, 4, RI_ALL_ONLINE }, + { 0x280380, 1, RI_E2E3E3B0_ONLINE }, + { 0x280388, 1, RI_E2E3E3B0_ONLINE }, + { 0x280390, 1, RI_E2E3E3B0_ONLINE }, + { 0x280398, 1, RI_E2E3E3B0_ONLINE }, + { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, + { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, + { 0x280400, 1, RI_ALL_ONLINE }, + { 0x280404, 255, RI_E1E1H_OFFLINE }, + { 0x282000, 4, RI_ALL_ONLINE }, + { 0x282010, 2044, RI_ALL_OFFLINE }, + { 0x284000, 4, RI_E3E3B0_ONLINE }, + { 0x2a0000, 1, RI_ALL_ONLINE }, + { 0x2a0004, 5631, RI_ALL_OFFLINE }, + { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x2a8000, 1, RI_ALL_ONLINE }, + { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x2b0000, 1, RI_ALL_ONLINE }, + { 0x2b0004, 15, RI_E1H_OFFLINE }, + { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b0044, 239, RI_E1H_OFFLINE }, + { 0x2b0400, 1, RI_ALL_ONLINE }, + { 0x2b0404, 255, RI_E1H_OFFLINE }, + { 0x2b0800, 1, RI_ALL_ONLINE }, + { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b0c00, 1, RI_ALL_ONLINE }, + { 0x2b1000, 1, RI_ALL_ONLINE }, + { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b1400, 1, RI_ALL_ONLINE }, + { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b1800, 128, RI_ALL_OFFLINE }, + { 0x2b1c00, 128, RI_ALL_OFFLINE }, + { 0x2b2000, 1, RI_ALL_ONLINE }, + { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x2b8000, 1, RI_ALL_ONLINE }, + { 0x2b8040, 1, RI_ALL_ONLINE }, + { 0x2b8080, 1, RI_ALL_ONLINE }, + { 0x2b80c0, 1, RI_ALL_ONLINE }, + { 0x2b8100, 1, RI_ALL_ONLINE }, + { 0x2b8140, 1, RI_ALL_ONLINE }, + { 0x2b8180, 1, RI_ALL_ONLINE }, + { 0x2b81c0, 1, RI_ALL_ONLINE }, + { 0x2b8200, 1, RI_ALL_ONLINE }, + { 0x2b8240, 1, RI_ALL_ONLINE }, + { 0x2b8280, 1, RI_ALL_ONLINE }, + { 0x2b82c0, 1, RI_ALL_ONLINE }, + { 0x2b8300, 1, RI_ALL_ONLINE }, + { 0x2b8340, 1, RI_ALL_ONLINE }, + { 0x2b8380, 1, RI_ALL_ONLINE }, + { 0x2b83c0, 1, RI_ALL_ONLINE }, + { 0x2b8400, 1, RI_ALL_ONLINE }, + { 0x2b8440, 1, RI_ALL_ONLINE }, + { 0x2b8480, 1, RI_ALL_ONLINE }, + { 0x2b84c0, 1, RI_ALL_ONLINE }, + { 0x2b8500, 1, RI_ALL_ONLINE }, + { 0x2b8540, 1, RI_ALL_ONLINE }, + { 0x2b8580, 1, RI_ALL_ONLINE }, + { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x2b8800, 1, RI_ALL_ONLINE }, + { 0x2b8840, 1, RI_ALL_ONLINE }, + { 0x2b8880, 1, RI_ALL_ONLINE }, + { 0x2b88c0, 1, RI_ALL_ONLINE }, + { 0x2b8900, 1, RI_ALL_ONLINE }, + { 0x2b8940, 1, RI_ALL_ONLINE }, + { 0x2b8980, 1, RI_ALL_ONLINE }, + { 0x2b89c0, 1, RI_ALL_ONLINE }, + { 0x2b8a00, 1, RI_ALL_ONLINE }, + { 0x2b8a40, 1, RI_ALL_ONLINE }, + { 0x2b8a80, 1, RI_ALL_ONLINE }, + { 0x2b8ac0, 1, RI_ALL_ONLINE }, + { 0x2b8b00, 1, RI_ALL_ONLINE }, + { 0x2b8b40, 1, RI_ALL_ONLINE }, + { 0x2b8b80, 1, RI_ALL_ONLINE }, + { 0x2b8bc0, 1, RI_ALL_ONLINE }, + { 0x2b8c00, 1, RI_ALL_ONLINE }, + { 0x2b8c40, 1, RI_ALL_ONLINE }, + { 0x2b8c80, 1, RI_ALL_ONLINE }, + { 0x2b8cc0, 1, RI_ALL_ONLINE }, + { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x2b8d00, 1, RI_ALL_ONLINE }, + { 0x2b8d40, 1, RI_ALL_ONLINE }, + { 0x2b8d80, 1, RI_ALL_ONLINE }, + { 0x2b8dc0, 1, RI_ALL_ONLINE 
}, + { 0x2b8e00, 1, RI_ALL_ONLINE }, + { 0x2b8e40, 1, RI_ALL_ONLINE }, + { 0x2b8e80, 1, RI_ALL_ONLINE }, + { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x2b8fe8, 2, RI_E3E3B0_ONLINE }, + { 0x2b9000, 1, RI_E2E3E3B0_ONLINE }, + { 0x2b9040, 3, RI_E2E3E3B0_ONLINE }, + { 0x2b905c, 1, RI_E3E3B0_ONLINE }, + { 0x2b9064, 1, RI_E3B0_ONLINE }, + { 0x2b9080, 10, RI_E3B0_ONLINE }, + { 0x2b9400, 14, RI_E2E3E3B0_ONLINE }, + { 0x2b943c, 19, RI_E2E3E3B0_ONLINE }, + { 0x2b9490, 10, RI_E2E3E3B0_ONLINE }, + { 0x2c0000, 2, RI_ALL_ONLINE }, + { 0x300000, 65, RI_ALL_ONLINE }, + { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE }, + { 0x300200, 58, RI_ALL_ONLINE }, + { 0x300340, 4, RI_ALL_ONLINE }, + { 0x300380, 1, RI_E2E3E3B0_ONLINE }, + { 0x300388, 1, RI_E2E3E3B0_ONLINE }, + { 0x300390, 1, RI_E2E3E3B0_ONLINE }, + { 0x300398, 1, RI_E2E3E3B0_ONLINE }, + { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, + { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, + { 0x300400, 1, RI_ALL_ONLINE }, + { 0x300404, 255, RI_E1E1H_OFFLINE }, + { 0x302000, 4, RI_ALL_ONLINE }, + { 0x302010, 2044, RI_ALL_OFFLINE }, + { 0x304000, 4, RI_E3E3B0_ONLINE }, + { 0x320000, 1, RI_ALL_ONLINE }, + { 0x320004, 5631, RI_ALL_OFFLINE }, + { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE }, + { 0x328000, 1, RI_ALL_ONLINE }, + { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE }, + { 0x330000, 1, RI_ALL_ONLINE }, + { 0x330004, 15, RI_E1H_OFFLINE }, + { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x330044, 239, RI_E1H_OFFLINE }, + { 0x330400, 1, RI_ALL_ONLINE }, + { 0x330404, 255, RI_E1H_OFFLINE }, + { 0x330800, 1, RI_ALL_ONLINE }, + { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x330c00, 1, RI_ALL_ONLINE }, + { 0x331000, 1, RI_ALL_ONLINE }, + { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x331400, 1, RI_ALL_ONLINE }, + { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x331800, 128, RI_ALL_OFFLINE }, + { 0x331c00, 128, RI_ALL_OFFLINE }, + { 0x332000, 1, RI_ALL_ONLINE }, + { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x332404, 5631, RI_E2E3E3B0_OFFLINE }, + { 0x338000, 1, RI_ALL_ONLINE }, + { 0x338040, 1, RI_ALL_ONLINE }, + { 0x338080, 1, RI_ALL_ONLINE }, + { 0x3380c0, 1, RI_ALL_ONLINE }, + { 0x338100, 1, RI_ALL_ONLINE }, + { 0x338140, 1, RI_ALL_ONLINE }, + { 0x338180, 1, RI_ALL_ONLINE }, + { 0x3381c0, 1, RI_ALL_ONLINE }, + { 0x338200, 1, RI_ALL_ONLINE }, + { 0x338240, 1, RI_ALL_ONLINE }, + { 0x338280, 1, RI_ALL_ONLINE }, + { 0x3382c0, 1, RI_ALL_ONLINE }, + { 0x338300, 1, RI_ALL_ONLINE }, + { 0x338340, 1, RI_ALL_ONLINE }, + { 0x338380, 1, RI_ALL_ONLINE }, + { 0x3383c0, 1, RI_ALL_ONLINE }, + { 0x338400, 1, RI_ALL_ONLINE }, + { 0x338440, 1, RI_ALL_ONLINE }, + { 0x338480, 1, RI_ALL_ONLINE }, + { 0x3384c0, 1, RI_ALL_ONLINE }, + { 0x338500, 1, RI_ALL_ONLINE }, + { 0x338540, 1, RI_ALL_ONLINE }, + { 0x338580, 1, RI_ALL_ONLINE }, + { 0x3385c0, 19, RI_E2E3E3B0_ONLINE }, + { 0x338800, 1, RI_ALL_ONLINE }, + { 0x338840, 1, RI_ALL_ONLINE }, + { 0x338880, 1, RI_ALL_ONLINE }, + { 0x3388c0, 1, RI_ALL_ONLINE }, + { 0x338900, 1, RI_ALL_ONLINE }, + { 0x338940, 1, RI_ALL_ONLINE }, + { 0x338980, 1, RI_ALL_ONLINE }, + { 0x3389c0, 1, RI_ALL_ONLINE }, + { 0x338a00, 1, RI_ALL_ONLINE }, + { 0x338a40, 1, RI_ALL_ONLINE }, + { 0x338a80, 1, RI_ALL_ONLINE }, + { 0x338ac0, 1, RI_ALL_ONLINE 
}, + { 0x338b00, 1, RI_ALL_ONLINE }, + { 0x338b40, 1, RI_ALL_ONLINE }, + { 0x338b80, 1, RI_ALL_ONLINE }, + { 0x338bc0, 1, RI_ALL_ONLINE }, + { 0x338c00, 1, RI_ALL_ONLINE }, + { 0x338c40, 1, RI_ALL_ONLINE }, + { 0x338c80, 1, RI_ALL_ONLINE }, + { 0x338cc0, 1, RI_ALL_ONLINE }, + { 0x338cc4, 1, RI_E2E3E3B0_ONLINE }, + { 0x338d00, 1, RI_ALL_ONLINE }, + { 0x338d40, 1, RI_ALL_ONLINE }, + { 0x338d80, 1, RI_ALL_ONLINE }, + { 0x338dc0, 1, RI_ALL_ONLINE }, + { 0x338e00, 1, RI_ALL_ONLINE }, + { 0x338e40, 1, RI_ALL_ONLINE }, + { 0x338e80, 1, RI_ALL_ONLINE }, + { 0x338e84, 1, RI_E2E3E3B0_ONLINE }, + { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE }, + { 0x338fc4, 2, RI_E2E3E3B0_ONLINE }, + { 0x338fd0, 6, RI_E2E3E3B0_ONLINE }, + { 0x338fe8, 2, RI_E3E3B0_ONLINE }, + { 0x339000, 1, RI_E2E3E3B0_ONLINE }, + { 0x339040, 3, RI_E2E3E3B0_ONLINE }, + { 0x33905c, 1, RI_E3E3B0_ONLINE }, + { 0x339064, 1, RI_E3B0_ONLINE }, + { 0x339080, 10, RI_E3B0_ONLINE }, + { 0x340000, 2, RI_ALL_ONLINE }, }; +#define REGS_COUNT ARRAY_SIZE(reg_addrs) -#define WREGS_COUNT_E2 1 -static const u32 read_reg_e2_0[] = { 0x1b1040, 0x1b1000 }; - -static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = { - { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } -}; - -static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a }; - -#define TIMER_REGS_COUNT_E1 2 - -static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = { - 0x164014, 0x164018 }; -static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = { - 0x1640d0, 0x1640d4 }; - -#define TIMER_REGS_COUNT_E1H 2 +static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a }; -static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = { - 0x164014, 0x164018 }; -static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = { - 0x1640d0, 0x1640d4 }; +static const u32 page_vals_e2[] = { 0, 128 }; +#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2) -#define TIMER_REGS_COUNT_E2 2 +static const u32 page_write_regs_e2[] = { 328476 }; +#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2) -static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = { - 0x164014, 0x164018 }; -static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = { - 0x1640d0, 0x1640d4 }; - -#define PAGE_MODE_VALUES_E1 0 - -#define PAGE_READ_REGS_E1 0 - -#define PAGE_WRITE_REGS_E1 0 - -static const u32 page_vals_e1[] = { 0 }; - -static const u32 page_write_regs_e1[] = { 0 }; - -static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } }; - -#define PAGE_MODE_VALUES_E1H 0 - -#define PAGE_READ_REGS_E1H 0 - -#define PAGE_WRITE_REGS_E1H 0 - -static const u32 page_vals_e1h[] = { 0 }; - -static const u32 page_write_regs_e1h[] = { 0 }; - -static const struct reg_addr page_read_regs_e1h[] = { - { 0x0, 0, RI_E1H_ONLINE } }; - -#define PAGE_MODE_VALUES_E2 2 - -#define PAGE_READ_REGS_E2 1 - -#define PAGE_WRITE_REGS_E2 1 +static const struct reg_addr page_read_regs_e2[] = { + { 0x58000, 4608, RI_E2_ONLINE } }; +#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2) -static const u32 page_vals_e2[PAGE_MODE_VALUES_E2] = { 0, 128 }; +static const u32 page_vals_e3[] = { 0, 128 }; +#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3) -static const u32 page_write_regs_e2[PAGE_WRITE_REGS_E2] = { 328476 }; +static const u32 page_write_regs_e3[] = { 328476 }; +#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3) -static const 
struct reg_addr page_read_regs_e2[PAGE_READ_REGS_E2] = { - { 0x58000, 4608, RI_E2_ONLINE } }; +static const struct reg_addr page_read_regs_e3[] = { + { 0x58000, 4608, RI_E3E3B0_ONLINE } }; +#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3) #endif /* BNX2X_DUMP_H */ diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 727fe89ff37..221863059da 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -25,6 +25,7 @@ #include "bnx2x_cmn.h" #include "bnx2x_dump.h" #include "bnx2x_init.h" +#include "bnx2x_sp.h" /* Note: in the format strings below %s is replaced by the queue-name which is * either its index or 'fcoe' for the fcoe queue. Make sure the format string @@ -37,8 +38,6 @@ static const struct { char string[ETH_GSTRING_LEN]; } bnx2x_q_stats_arr[] = { /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" }, - { Q_STATS_OFFSET32(error_bytes_received_hi), - 8, "[%s]: rx_error_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "[%s]: rx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_received_hi), @@ -52,13 +51,18 @@ static const struct { 4, "[%s]: rx_skb_alloc_discard" }, { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" }, -/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, - { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), + { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" }, +/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "[%s]: tx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "[%s]: tx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), - 8, "[%s]: tx_bcast_packets" } + 8, "[%s]: tx_bcast_packets" }, + { Q_STATS_OFFSET32(total_tpa_aggregations_hi), + 8, "[%s]: tpa_aggregations" }, + { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), + 8, "[%s]: tpa_aggregated_frames"}, + { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"} }; #define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) @@ -98,8 +102,8 @@ static const struct { 8, STATS_FLAGS_BOTH, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, - { STATS_OFFSET32(xxoverflow_discard), - 4, STATS_FLAGS_PORT, "rx_fw_discards" }, + { STATS_OFFSET32(mf_tag_discard), + 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), @@ -158,10 +162,43 @@ static const struct { { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), - 8, STATS_FLAGS_PORT, "tx_pause_frames" } + 8, STATS_FLAGS_PORT, "tx_pause_frames" }, + { STATS_OFFSET32(total_tpa_aggregations_hi), + 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, + { STATS_OFFSET32(total_tpa_aggregated_frames_hi), + 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, + { STATS_OFFSET32(total_tpa_bytes_hi), + 8, STATS_FLAGS_FUNC, "tpa_bytes"} }; #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr) +static int bnx2x_get_port_type(struct bnx2x *bp) +{ + int port_type; + u32 phy_idx = bnx2x_get_cur_phy_idx(bp); + switch (bp->link_params.phy[phy_idx].media_type) { + case ETH_PHY_SFP_FIBER: + case ETH_PHY_XFP_FIBER: + case ETH_PHY_KR: + case ETH_PHY_CX4: + port_type = PORT_FIBRE; + break; + case ETH_PHY_DA_TWINAX: + port_type = PORT_DA; + break; + case ETH_PHY_BASE_T: + port_type = PORT_TP; + break; + case 
ETH_PHY_NOT_PRESENT:
+		port_type = PORT_NONE;
+		break;
+	case ETH_PHY_UNSPECIFIED:
+	default:
+		port_type = PORT_OTHER;
+		break;
+	}
+	return port_type;
+}
 
 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
@@ -188,12 +225,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (IS_MF(bp))
 		ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
 
-	if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
-		cmd->port = PORT_TP;
-	else if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
-		cmd->port = PORT_FIBRE;
-	else
-		BNX2X_ERR("XGXS PHY Failure detected\n");
+	cmd->port = bnx2x_get_port_type(bp);
 
 	cmd->phy_address = bp->mdio.prtad;
 	cmd->transceiver = XCVR_INTERNAL;
@@ -468,78 +500,179 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 #define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
 #define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
 #define IS_E2_ONLINE(info)	(((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
+#define IS_E3_ONLINE(info)	(((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
+#define IS_E3B0_ONLINE(info)	(((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+
+static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
+				       const struct reg_addr *reg_info)
+{
+	if (CHIP_IS_E1(bp))
+		return IS_E1_ONLINE(reg_info->info);
+	else if (CHIP_IS_E1H(bp))
+		return IS_E1H_ONLINE(reg_info->info);
+	else if (CHIP_IS_E2(bp))
+		return IS_E2_ONLINE(reg_info->info);
+	else if (CHIP_IS_E3A0(bp))
+		return IS_E3_ONLINE(reg_info->info);
+	else if (CHIP_IS_E3B0(bp))
+		return IS_E3B0_ONLINE(reg_info->info);
+	else
+		return false;
+}
+
+/******* Paged registers info selectors ********/
+static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_vals_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_vals_e3;
+	else
+		return NULL;
+}
+
+static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_MODE_VALUES_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_MODE_VALUES_E3;
+	else
+		return 0;
+}
+
+static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_write_regs_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_write_regs_e3;
+	else
+		return NULL;
+}
+
+static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_WRITE_REGS_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_WRITE_REGS_E3;
+	else
+		return 0;
+}
+
+static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_read_regs_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_read_regs_e3;
+	else
+		return NULL;
+}
+
+static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_READ_REGS_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_READ_REGS_E3;
+	else
+		return 0;
+}
+
+static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+	int num_pages = __bnx2x_get_page_reg_num(bp);
+	int page_write_num = __bnx2x_get_page_write_num(bp);
+	const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
+	int page_read_num = __bnx2x_get_page_read_num(bp);
+	int regdump_len = 0;
+	int i, j, k;
+
+	for (i = 0; i < REGS_COUNT; i++)
+		if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+			regdump_len += reg_addrs[i].size;
+
+	for (i = 0; i < num_pages; i++)
+		for (j = 0; j < page_write_num; j++)
+			for (k = 0; k < page_read_num; k++)
+				if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
+					regdump_len += page_read_addr[k].size;
+
+	return regdump_len;
+}
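Aside on the arithmetic above, since the selector indirection obscures it: __bnx2x_get_regs_len() counts 32-bit dwords, and a paged window is counted once per (page value, write address) pair; only bnx2x_get_regs_len() below scales the result to bytes and prepends the dump header. A minimal standalone sketch of the same counting scheme follows; the sample tables, sizes and the RI_E2_ONLINE value are invented for illustration and are not the driver's:

	#include <stdio.h>
	#include <stdint.h>

	#define RI_E2_ONLINE 0x4		/* invented "online on E2" flag */

	struct reg_addr {
		uint32_t addr;
		uint32_t size;			/* size in 32-bit dwords */
		uint16_t info;			/* per-chip online flags */
	};

	/* two ordinary blocks and one paged window, all values made up */
	static const struct reg_addr regs[] = {
		{ 0x2000, 341, RI_E2_ONLINE },
		{ 0x9000, 147, RI_E2_ONLINE },
	};
	static const struct reg_addr paged[] = {
		{ 0x58000, 4608, RI_E2_ONLINE },
	};

	int main(void)
	{
		size_t num_pages = 2;		/* page values, e.g. { 0, 128 } */
		size_t write_num = 1;		/* one page-select register */
		size_t dwords = 0, i;

		/* an ordinary online block contributes its size exactly once */
		for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
			if (regs[i].info & RI_E2_ONLINE)
				dwords += regs[i].size;

		/* a paged window is re-read for every (page, write reg) pair */
		for (i = 0; i < sizeof(paged) / sizeof(paged[0]); i++)
			if (paged[i].info & RI_E2_ONLINE)
				dwords += num_pages * write_num * paged[i].size;

		/* the driver then scales to bytes and prepends struct dump_hdr */
		printf("%zu dwords, %zu bytes before the header\n",
		       dwords, dwords * 4);
		return 0;
	}

With these sample numbers that is 341 + 147 + 2 * 1 * 4608 = 9704 dwords, i.e. 38816 bytes plus the header.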
 
 static int bnx2x_get_regs_len(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	int regdump_len = 0;
-	int i, j, k;
 
-	if (CHIP_IS_E1(bp)) {
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E1_ONLINE(reg_addrs[i].info))
-				regdump_len += reg_addrs[i].size;
-
-		for (i = 0; i < WREGS_COUNT_E1; i++)
-			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
-				regdump_len += wreg_addrs_e1[i].size *
-					(1 + wreg_addrs_e1[i].read_regs_count);
-
-	} else if (CHIP_IS_E1H(bp)) {
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E1H_ONLINE(reg_addrs[i].info))
-				regdump_len += reg_addrs[i].size;
-
-		for (i = 0; i < WREGS_COUNT_E1H; i++)
-			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
-				regdump_len += wreg_addrs_e1h[i].size *
-					(1 + wreg_addrs_e1h[i].read_regs_count);
-	} else if (CHIP_IS_E2(bp)) {
-		for (i = 0; i < REGS_COUNT; i++)
-			if (IS_E2_ONLINE(reg_addrs[i].info))
-				regdump_len += reg_addrs[i].size;
-
-		for (i = 0; i < WREGS_COUNT_E2; i++)
-			if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
-				regdump_len += wreg_addrs_e2[i].size *
-					(1 + wreg_addrs_e2[i].read_regs_count);
-
-		for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
-			for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
-				for (k = 0; k < PAGE_READ_REGS_E2; k++)
-					if (IS_E2_ONLINE(page_read_regs_e2[k].
-							 info))
-						regdump_len +=
-						page_read_regs_e2[k].size;
-			}
-	}
+	regdump_len = __bnx2x_get_regs_len(bp);
 
 	regdump_len *= 4;
 	regdump_len += sizeof(struct dump_hdr);
 
 	return regdump_len;
 }
 
-static inline void bnx2x_read_pages_regs_e2(struct bnx2x *bp, u32 *p)
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp: device handle
+ * @p: output buffer
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
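The kernel-doc above is the only prose description of the paging scheme, so here is a self-contained toy version of the write-then-read sequence; the in-memory BAR, the page-select offset and the shrunken window are all invented for illustration, and the driver's real loop is bnx2x_read_pages_regs() immediately below:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t fake_bar[0x60000 / 4];	/* stand-in for the PCI BAR */

	static void reg_wr(uint32_t off, uint32_t val) { fake_bar[off / 4] = val; }
	static uint32_t reg_rd(uint32_t off) { return fake_bar[off / 4]; }

	int main(void)
	{
		/* two page values and one page-select ("write") register, as on
		 * E2; the 0x50000 offset is invented, the driver's lives in
		 * page_write_regs_e2[] */
		const uint32_t page_vals[] = { 0, 128 };
		const uint32_t write_reg = 0x50000;
		/* a 4-dword stand-in for the real { 0x58000, 4608 } window */
		const uint32_t read_base = 0x58000, read_dwords = 4;
		uint32_t out[2 * 4], *p = out;
		size_t i, n;

		for (i = 0; i < 2; i++) {		/* for every page value... */
			reg_wr(write_reg, page_vals[i]);  /* ...select the page, */
			for (n = 0; n < read_dwords; n++) /* then dump the window */
				*p++ = reg_rd(read_base + n * 4);
		}

		printf("collected %zu dwords\n", (size_t)(p - out));
		return 0;
	}

The toy reads the same (zeroed) memory twice; on real hardware the write to the page-select register swaps which 4608-dword slice appears at 0x58000, which is why the window must be re-read once per page value.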
+static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
 {
 	u32 i, j, k, n;
-
-	for (i = 0; i < PAGE_MODE_VALUES_E2; i++) {
-		for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
-			REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
-			for (k = 0; k < PAGE_READ_REGS_E2; k++)
-				if (IS_E2_ONLINE(page_read_regs_e2[k].info))
+	/* addresses of the paged registers */
+	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+	/* number of paged registers */
+	int num_pages = __bnx2x_get_page_reg_num(bp);
+	/* write addresses */
+	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+	/* number of write addresses */
+	int write_num = __bnx2x_get_page_write_num(bp);
+	/* read addresses info */
+	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+	/* number of read addresses */
+	int read_num = __bnx2x_get_page_read_num(bp);
+
+	for (i = 0; i < num_pages; i++) {
+		for (j = 0; j < write_num; j++) {
+			REG_WR(bp, write_addr[j], page_addr[i]);
+			for (k = 0; k < read_num; k++)
+				if (bnx2x_is_reg_online(bp, &read_addr[k]))
 					for (n = 0; n <
-					    page_read_regs_e2[k].size; n++)
+					    read_addr[k].size; n++)
 						*p++ = REG_RD(bp,
-					page_read_regs_e2[k].addr + n*4);
+						      read_addr[k].addr + n*4);
 		}
 	}
 }
 
+static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+	u32 i, j;
+
+	/* Read the regular registers */
+	for (i = 0; i < REGS_COUNT; i++)
+		if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+			for (j = 0; j < reg_addrs[i].size; j++)
+				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+
+	/* Read "paged" registers */
+	bnx2x_read_pages_regs(bp, p);
+}
+
 static void bnx2x_get_regs(struct net_device *dev,
 			   struct ethtool_regs *regs, void *_p)
 {
-	u32 *p = _p, i, j;
+	u32 *p = _p;
 	struct bnx2x *bp = netdev_priv(dev);
 	struct dump_hdr dump_hdr = {0};
 
@@ -566,44 +699,21 @@ static void bnx2x_get_regs(struct net_device *dev,
 		dump_hdr.info = RI_E1_ONLINE;
 	else if (CHIP_IS_E1H(bp))
 		dump_hdr.info = RI_E1H_ONLINE;
-	else if (CHIP_IS_E2(bp))
+	else if (!CHIP_IS_E1x(bp))
 		dump_hdr.info = RI_E2_ONLINE |
 				(BP_PATH(bp) ?
RI_PATH1_DUMP : RI_PATH0_DUMP); memcpy(p, &dump_hdr, sizeof(struct dump_hdr)); p += dump_hdr.hdr_size + 1; - if (CHIP_IS_E1(bp)) { - for (i = 0; i < REGS_COUNT; i++) - if (IS_E1_ONLINE(reg_addrs[i].info)) - for (j = 0; j < reg_addrs[i].size; j++) - *p++ = REG_RD(bp, - reg_addrs[i].addr + j*4); - - } else if (CHIP_IS_E1H(bp)) { - for (i = 0; i < REGS_COUNT; i++) - if (IS_E1H_ONLINE(reg_addrs[i].info)) - for (j = 0; j < reg_addrs[i].size; j++) - *p++ = REG_RD(bp, - reg_addrs[i].addr + j*4); - - } else if (CHIP_IS_E2(bp)) { - for (i = 0; i < REGS_COUNT; i++) - if (IS_E2_ONLINE(reg_addrs[i].info)) - for (j = 0; j < reg_addrs[i].size; j++) - *p++ = REG_RD(bp, - reg_addrs[i].addr + j*4); - - bnx2x_read_pages_regs_e2(bp, p); - } + /* Actually read the registers */ + __bnx2x_get_regs(bp, p); + /* Re-enable parity attentions */ bnx2x_clear_blocks_parity(bp); - if (CHIP_PARITY_ENABLED(bp)) - bnx2x_enable_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); } -#define PHY_FW_VER_LEN 20 - static void bnx2x_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -682,8 +792,12 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level) { struct bnx2x *bp = netdev_priv(dev); - if (capable(CAP_NET_ADMIN)) + if (capable(CAP_NET_ADMIN)) { + /* dump MCP trace */ + if (level & BNX2X_MSG_MCP) + bnx2x_fw_dump_lvl(bp, KERN_INFO); bp->msg_enable = level; + } } static int bnx2x_nway_reset(struct net_device *dev) @@ -725,7 +839,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) u32 val = 0; /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; + count = BNX2X_NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(bp)) count *= 100; @@ -756,7 +870,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp) u32 val = 0; /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; + count = BNX2X_NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(bp)) count *= 100; @@ -824,7 +938,7 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val, REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; + count = BNX2X_NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(bp)) count *= 100; @@ -947,7 +1061,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ - count = NVRAM_TIMEOUT_COUNT; + count = BNX2X_NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(bp)) count *= 100; @@ -1051,9 +1165,9 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, while ((written_so_far < buf_size) && (rc == 0)) { if (written_so_far == (buf_size - sizeof(u32))) cmd_flags |= MCPR_NVM_COMMAND_LAST; - else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) + else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0) cmd_flags |= MCPR_NVM_COMMAND_LAST; - else if ((offset % NVRAM_PAGE_SIZE) == 0) + else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0) cmd_flags |= MCPR_NVM_COMMAND_FIRST; memcpy(&val, data_buf, 4); @@ -1212,7 +1326,6 @@ static int bnx2x_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { struct bnx2x *bp = netdev_priv(dev); - int rc = 0; if (bp->recovery_state != BNX2X_RECOVERY_DONE) { printk(KERN_ERR "Handling parity error recovery. 
Try again later\n"); @@ -1229,12 +1342,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, bp->rx_ring_size = ering->rx_pending; bp->tx_ring_size = ering->tx_pending; - if (netif_running(dev)) { - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); - } - - return rc; + return bnx2x_reload_if_running(dev); } static void bnx2x_get_pauseparam(struct net_device *dev, @@ -1313,60 +1421,129 @@ static const struct { { "idle check (online)" } }; +enum { + BNX2X_CHIP_E1_OFST = 0, + BNX2X_CHIP_E1H_OFST, + BNX2X_CHIP_E2_OFST, + BNX2X_CHIP_E3_OFST, + BNX2X_CHIP_E3B0_OFST, + BNX2X_CHIP_MAX_OFST +}; + +#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST) +#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST) +#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST) +#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST) +#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST) + +#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1) +#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H) + static int bnx2x_test_registers(struct bnx2x *bp) { int idx, i, rc = -ENODEV; - u32 wr_val = 0; + u32 wr_val = 0, hw; int port = BP_PORT(bp); static const struct { + u32 hw; u32 offset0; u32 offset1; u32 mask; } reg_tbl[] = { -/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, - { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, - { HC_REG_AGG_INT_0, 4, 0x000003ff }, - { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, - { PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, - { PRS_REG_CID_PORT_0, 4, 0x00ffffff }, - { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, - { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, - { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, - { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, -/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, - { QM_REG_CONNNUM_0, 4, 0x000fffff }, - { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, - { SRC_REG_KEYRSS0_0, 40, 0xffffffff }, - { SRC_REG_KEYRSS0_7, 40, 0xffffffff }, - { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, - { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, - { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, - { NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, - { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, -/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, - { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, - { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, - { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, - { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, - { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, - { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, - { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, - { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, - { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, -/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, - { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, - { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, - { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 }, - { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, - { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, - { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, - - { 0xffffffff, 0, 0x00000000 } +/* 0 */ { BNX2X_CHIP_MASK_ALL, + BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, + { BNX2X_CHIP_MASK_ALL, + DORQ_REG_DB_ADDR0, 4, 0xffffffff }, + { BNX2X_CHIP_MASK_E1X, + HC_REG_AGG_INT_0, 4, 0x000003ff }, + { BNX2X_CHIP_MASK_ALL, + PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3, + PBF_REG_P0_INIT_CRD, 4, 0x000007ff }, + { BNX2X_CHIP_MASK_E3B0, + PBF_REG_INIT_CRD_Q0, 4, 0x000007ff }, + { 
BNX2X_CHIP_MASK_ALL, + PRS_REG_CID_PORT_0, 4, 0x00ffffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff }, +/* 10 */ { BNX2X_CHIP_MASK_ALL, + PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + QM_REG_CONNNUM_0, 4, 0x000fffff }, + { BNX2X_CHIP_MASK_ALL, + TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff }, + { BNX2X_CHIP_MASK_ALL, + SRC_REG_KEYRSS0_0, 40, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + SRC_REG_KEYRSS0_7, 40, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 }, + { BNX2X_CHIP_MASK_ALL, + XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_T_BIT, 4, 0x00000001 }, +/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_EMAC0_IN_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_BMAC0_IN_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_XCM0_OUT_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_BRB0_OUT_EN, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 }, +/* 30 */ { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 }, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001}, + { BNX2X_CHIP_MASK_ALL, + NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 }, + { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2, + NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f }, + + { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 } }; if (!netif_running(bp->dev)) return rc; + if (CHIP_IS_E1(bp)) + hw = BNX2X_CHIP_MASK_E1; + else if (CHIP_IS_E1H(bp)) + hw = BNX2X_CHIP_MASK_E1H; + else if (CHIP_IS_E2(bp)) + hw = BNX2X_CHIP_MASK_E2; + else if (CHIP_IS_E3B0(bp)) + hw = BNX2X_CHIP_MASK_E3B0; + else /* e3 A0 */ + hw = BNX2X_CHIP_MASK_E3; + /* Repeat the test twice: First by writing 0x00000000, second by writing 0xffffffff */ for (idx = 0; idx < 2; idx++) { @@ -1382,8 +1559,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { u32 offset, mask, save_val, val; - if (CHIP_IS_E2(bp) && - reg_tbl[i].offset0 == HC_REG_AGG_INT_0) + if (!(hw & reg_tbl[i].hw)) continue; offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; @@ -1400,7 +1576,7 @@ static int bnx2x_test_registers(struct bnx2x *bp) /* verify value is as expected */ if ((val & mask) != (wr_val & mask)) { - DP(NETIF_MSG_PROBE, + DP(NETIF_MSG_HW, "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n", offset, val, wr_val, mask); goto test_reg_exit; @@ -1417,7 +1593,7 @@ test_reg_exit: static int bnx2x_test_memory(struct 
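With a hw mask on every row of reg_tbl, bnx2x_test_registers() no longer needs one-off exclusions such as the old E2 check for HC_REG_AGG_INT_0: the running chip is resolved to a single bit once, and a row is skipped unless its mask contains that bit. A compilable toy version of that filter, with abbreviated chip bits and table rows:

#include <stdint.h>
#include <stdio.h>

#define CHIP_E1   (1u << 0)
#define CHIP_E1H  (1u << 1)
#define CHIP_E2   (1u << 2)
#define CHIP_ALL  (CHIP_E1 | CHIP_E1H | CHIP_E2)

struct reg_test { uint32_t hw; uint32_t offset; uint32_t mask; };

static const struct reg_test reg_tbl[] = {
    { CHIP_ALL,           0x1000, 0x000003ff },
    { CHIP_E1 | CHIP_E1H, 0x2000, 0x000003ff }, /* e.g. HC_REG_AGG_INT_0 */
    { 0, 0xffffffff, 0 }                        /* terminator */
};

int main(void)
{
    uint32_t hw = CHIP_E2;  /* resolved once from the chip revision */

    for (int i = 0; reg_tbl[i].offset != 0xffffffff; i++) {
        if (!(hw & reg_tbl[i].hw))
            continue;       /* register absent on this chip */
        printf("would test 0x%x\n", (unsigned)reg_tbl[i].offset);
    }
    return 0;
}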
bnx2x *bp) { int i, j, rc = -ENODEV; - u32 val; + u32 val, index; static const struct { u32 offset; int size; @@ -1432,32 +1608,44 @@ static int bnx2x_test_memory(struct bnx2x *bp) { 0xffffffff, 0 } }; + static const struct { char *name; u32 offset; - u32 e1_mask; - u32 e1h_mask; - u32 e2_mask; + u32 hw_mask[BNX2X_CHIP_MAX_OFST]; } prty_tbl[] = { - { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0, 0 }, - { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2, 0 }, - { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0, 0 }, - { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0, 0 }, - { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0, 0 }, - { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0, 0 }, - - { NULL, 0xffffffff, 0, 0, 0 } + { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, + {0x2, 0x2, 0, 0} }, + { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, + {0, 0, 0, 0} }, + { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, + {0x3ffc0, 0, 0, 0} }, + { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, + {0x3ffc1, 0, 0, 0} }, + + { NULL, 0xffffffff, {0, 0, 0, 0} } }; if (!netif_running(bp->dev)) return rc; + if (CHIP_IS_E1(bp)) + index = BNX2X_CHIP_E1_OFST; + else if (CHIP_IS_E1H(bp)) + index = BNX2X_CHIP_E1H_OFST; + else if (CHIP_IS_E2(bp)) + index = BNX2X_CHIP_E2_OFST; + else /* e3 */ + index = BNX2X_CHIP_E3_OFST; + /* pre-Check the parity status */ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); - if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || - (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) || - (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) { + if (val & ~(prty_tbl[i].hw_mask[index])) { DP(NETIF_MSG_HW, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; @@ -1472,9 +1660,7 @@ static int bnx2x_test_memory(struct bnx2x *bp) /* Check the parity status */ for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { val = REG_RD(bp, prty_tbl[i].offset); - if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) || - (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask))) || - (CHIP_IS_E2(bp) && (val & ~(prty_tbl[i].e2_mask)))) { + if (val & ~(prty_tbl[i].hw_mask[index])) { DP(NETIF_MSG_HW, "%s is 0x%x\n", prty_tbl[i].name, val); goto test_mem_exit; @@ -1491,28 +1677,33 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) { int cnt = 1400; - if (link_up) + if (link_up) { while (bnx2x_link_test(bp, is_serdes) && cnt--) - msleep(10); + msleep(20); + + if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) + DP(NETIF_MSG_LINK, "Timeout waiting for link up\n"); + } } -static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) +static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) { unsigned int pkt_size, num_pkts, i; struct sk_buff *skb; unsigned char *packet; struct bnx2x_fastpath *fp_rx = &bp->fp[0]; struct bnx2x_fastpath *fp_tx = &bp->fp[0]; + struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; u16 tx_start_idx, tx_idx; u16 rx_start_idx, rx_idx; - u16 pkt_prod, bd_prod; + u16 pkt_prod, bd_prod, rx_comp_cons; struct sw_tx_bd *tx_buf; struct eth_tx_start_bd *tx_start_bd; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; dma_addr_t mapping; union eth_rx_cqe *cqe; - u8 cqe_fp_flags; + u8 cqe_fp_flags, cqe_fp_type; struct sw_rx_bd *rx_buf; u16 len; int rc = -ENODEV; @@ -1524,7 +1715,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) return 
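bnx2x_test_memory() solves the same per-chip problem the other way round: each prty_tbl row carries an array of masks, the chip is resolved once to an array index, and both the pre-check and the post-check collapse to a single expression. A sketch of that shape, with toy values rather than the driver's tables:

#include <stdint.h>
#include <stdio.h>

enum { E1_OFST, E1H_OFST, E2_OFST, E3_OFST, MAX_OFST };

struct prty { uint32_t offset; uint32_t hw_mask[MAX_OFST]; };

static const struct prty prty_tbl[] = {
    { 0x4000, { 0x3ffc0, 0, 0, 0 } },   /* bits maskable on E1 only */
};

int main(void)
{
    unsigned index = E1H_OFST;          /* resolved once per chip */
    uint32_t val = 0x40;                /* pretend parity status read */

    /* one expression replaces the old three-way CHIP_IS_* chain */
    if (val & ~prty_tbl[0].hw_mask[index])
        printf("unexpected parity bits: 0x%x\n", (unsigned)val);
    return 0;
}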
-EINVAL; break; case BNX2X_MAC_LOOPBACK: - bp->link_params.loopback_mode = LOOPBACK_BMAC; + bp->link_params.loopback_mode = CHIP_IS_E3(bp) ? + LOOPBACK_XMAC : LOOPBACK_BMAC; bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; default: @@ -1545,22 +1737,28 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN)); for (i = ETH_HLEN; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); + mapping = dma_map_single(&bp->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + rc = -ENOMEM; + dev_kfree_skb(skb); + BNX2X_ERR("Unable to map SKB\n"); + goto test_loopback_exit; + } /* send the loopback packet */ num_pkts = 0; - tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb); + tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb); rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb); - pkt_prod = fp_tx->tx_pkt_prod++; - tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)]; - tx_buf->first_bd = fp_tx->tx_bd_prod; + pkt_prod = txdata->tx_pkt_prod++; + tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)]; + tx_buf->first_bd = txdata->tx_bd_prod; tx_buf->skb = skb; tx_buf->flags = 0; - bd_prod = TX_BD(fp_tx->tx_bd_prod); - tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; - mapping = dma_map_single(&bp->pdev->dev, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); + bd_prod = TX_BD(txdata->tx_bd_prod); + tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd; tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ @@ -1577,26 +1775,27 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) /* turn on parsing and get a BD */ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod)); - pbd_e1x = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e1x; - pbd_e2 = &fp_tx->tx_desc_ring[bd_prod].parse_bd_e2; + pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x; + pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); wmb(); - fp_tx->tx_db.data.prod += 2; + txdata->tx_db.data.prod += 2; barrier(); - DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw); + DOORBELL(bp, txdata->cid, txdata->tx_db.raw); mmiowb(); + barrier(); num_pkts++; - fp_tx->tx_bd_prod += 2; /* start + pbd */ + txdata->tx_bd_prod += 2; /* start + pbd */ udelay(100); - tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb); + tx_idx = le16_to_cpu(*txdata->tx_cons_sb); if (tx_idx != tx_start_idx + num_pkts) goto test_loopback_exit; @@ -1610,7 +1809,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) * bnx2x_tx_int()), as both are taking netif_tx_lock(). 
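One behavioural fix in bnx2x_run_loopback() above: the frame is now DMA-mapped, and the mapping checked, before any producer state is touched, so a mapping failure can simply free the skb and bail out with nothing to unwind. The ordering in a stripped-down, self-contained form; map_single() and mapping_error() stand in for the DMA API, and the ring is reduced to a producer index.

#include <stdint.h>
#include <stddef.h>
#include <errno.h>
#include <stdio.h>

static uint64_t map_single(const void *buf, size_t len)
{
    (void)len;
    return (uint64_t)(uintptr_t)buf;    /* stub DMA mapping */
}
static int mapping_error(uint64_t mapping) { return mapping == 0; }

struct ring { uint32_t prod; uint64_t addr[16]; };

static int send_test_pkt(struct ring *r, const void *buf, size_t len)
{
    uint64_t mapping = map_single(buf, len);

    /* bail out before the producer moves; nothing to unwind yet */
    if (mapping_error(mapping))
        return -ENOMEM;

    r->addr[r->prod & 15] = mapping;    /* fill the BD first... */
    r->prod++;                          /* ...then publish it */
    return 0;
}

int main(void)
{
    struct ring r = { 0 };
    char pkt[60] = "loopback";
    printf("rc=%d prod=%u\n", send_test_pkt(&r, pkt, sizeof(pkt)), r.prod);
    return 0;
}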
*/ local_bh_disable(); - bnx2x_tx_int(fp_tx); + bnx2x_tx_int(bp, txdata); local_bh_enable(); } @@ -1618,9 +1817,11 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) if (rx_idx != rx_start_idx + num_pkts) goto test_loopback_exit; - cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; + rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons); + cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)]; cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; - if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) + cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; + if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) goto test_loopback_rx_exit; len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); @@ -1628,6 +1829,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) goto test_loopback_rx_exit; rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)]; + dma_sync_single_for_cpu(&bp->pdev->dev, + dma_unmap_addr(rx_buf, mapping), + fp_rx->rx_buf_size, DMA_FROM_DEVICE); skb = rx_buf->skb; skb_reserve(skb, cqe->fast_path_cqe.placement_offset); for (i = ETH_HLEN; i < pkt_size; i++) @@ -1653,7 +1857,7 @@ test_loopback_exit: return rc; } -static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) +static int bnx2x_test_loopback(struct bnx2x *bp) { int rc = 0, res; @@ -1666,13 +1870,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up) bnx2x_netif_stop(bp, 1); bnx2x_acquire_phy_lock(bp); - res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up); + res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK); if (res) { DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res); rc |= BNX2X_PHY_LOOPBACK_FAILED; } - res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up); + res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK); if (res) { DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res); rc |= BNX2X_MAC_LOOPBACK_FAILED; @@ -1744,39 +1948,20 @@ test_nvram_exit: return rc; } +/* Send an EMPTY ramrod on the first queue */ static int bnx2x_test_intr(struct bnx2x *bp) { - struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); - int i, rc; + struct bnx2x_queue_state_params params = {0}; if (!netif_running(bp->dev)) return -ENODEV; - config->hdr.length = 0; - if (CHIP_IS_E1(bp)) - config->hdr.offset = (BP_PORT(bp) ? 
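The receive side of the loopback test gains a dma_sync_single_for_cpu() before the CPU compares the frame: on non-coherent platforms the device's writes to a streaming mapping are not guaranteed visible until the buffer is synced back. A minimal model of that rule, where sync_for_cpu() is a stand-in for the real sync call:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* models dma_sync_single_for_cpu(): hands the buffer back to the CPU */
static void sync_for_cpu(const void *buf, size_t len) { (void)buf; (void)len; }

static int verify_pkt(const uint8_t *rx_buf, const uint8_t *sent, size_t len)
{
    sync_for_cpu(rx_buf, len);      /* device writes visible from here on */
    return memcmp(rx_buf, sent, len) ? -1 : 0;
}

int main(void)
{
    uint8_t sent[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, rx[8];
    memcpy(rx, sent, sizeof(rx));   /* pretend the NIC looped it back */
    printf("match=%d\n", verify_pkt(rx, sent, sizeof(rx)) == 0);
    return 0;
}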
32 : 0); - else - config->hdr.offset = BP_FUNC(bp); - config->hdr.client_id = bp->fp->cl_id; - config->hdr.reserved1 = 0; - - bp->set_mac_pending = 1; - smp_wmb(); - rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mac_config)), - U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); - if (rc == 0) { - for (i = 0; i < 10; i++) { - if (!bp->set_mac_pending) - break; - smp_rmb(); - msleep_interruptible(10); - } - if (i == 10) - rc = -ENODEV; - } + params.q_obj = &bp->fp->q_obj; + params.cmd = BNX2X_Q_CMD_EMPTY; - return rc; + __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); + + return bnx2x_queue_state_change(bp, &params); }
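The rewritten bnx2x_test_intr() above leans on the new slow-path state machine instead of hand-posting a SET_MAC ramrod and polling a pending flag: with RAMROD_COMP_WAIT set, bnx2x_queue_state_change() blocks until the EMPTY command completes. A generic sketch of that command-object pattern; the types below are stand-ins, not the driver's.

#include <stdbool.h>
#include <stdio.h>

enum q_cmd { Q_CMD_EMPTY, Q_CMD_SETUP, Q_CMD_HALT };

struct q_params {
    void *q_obj;        /* which queue's state machine */
    enum q_cmd cmd;     /* requested transition */
    bool comp_wait;     /* block until the ramrod completes */
};

/* stub: a real implementation posts the ramrod and optionally sleeps */
static int queue_state_change(const struct q_params *p)
{
    (void)p;
    return 0;
}

static int test_intr(void *q_obj)
{
    struct q_params params = { .q_obj = q_obj, .cmd = Q_CMD_EMPTY,
                               .comp_wait = true };

    /* EMPTY is a no-op ramrod: completing it proves the slow path works */
    return queue_state_change(&params);
}

int main(void) { int q; printf("rc=%d\n", test_intr(&q)); return 0; }

 static void bnx2x_self_test(struct net_device *dev, @@ -1815,7 +2000,7 @@ static void bnx2x_self_test(struct net_device *dev, bnx2x_nic_unload(bp, UNLOAD_NORMAL); bnx2x_nic_load(bp, LOAD_DIAG); /* wait until link state is restored */ - bnx2x_wait_for_link(bp, link_up, is_serdes); + bnx2x_wait_for_link(bp, 1, is_serdes); if (bnx2x_test_registers(bp) != 0) { buf[0] = 1; @@ -1826,7 +2011,7 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_FAILED; } - buf[2] = bnx2x_test_loopback(bp, link_up); + buf[2] = bnx2x_test_loopback(bp); if (buf[2] != 0) etest->flags |= ETH_TEST_FL_FAILED; @@ -1864,6 +2049,14 @@ static void bnx2x_self_test(struct net_device *dev, #define IS_MF_MODE_STAT(bp) \ (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) +/* ethtool statistics are displayed for all regular ethernet queues and the + * fcoe L2 queue if not disabled + */ +static inline int bnx2x_num_stat_queues(struct bnx2x *bp) +{ + return BNX2X_NUM_ETH_QUEUES(bp); +} + static int bnx2x_get_sset_count(struct net_device *dev, int stringset) { struct bnx2x *bp = netdev_priv(dev); @@ -1872,7 +2065,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) switch (stringset) { case ETH_SS_STATS: if (is_multi(bp)) { - num_stats = BNX2X_NUM_STAT_QUEUES(bp) * + num_stats = bnx2x_num_stat_queues(bp) * BNX2X_NUM_Q_STATS; if (!IS_MF_MODE_STAT(bp)) num_stats += BNX2X_NUM_STATS; @@ -1905,14 +2098,9 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) case ETH_SS_STATS: if (is_multi(bp)) { k = 0; - for_each_napi_queue(bp, i) { + for_each_eth_queue(bp, i) { memset(queue_name, 0, sizeof(queue_name)); - - if (IS_FCOE_IDX(i)) - sprintf(queue_name, "fcoe"); - else - sprintf(queue_name, "%d", i); - + sprintf(queue_name, "%d", i); for (j = 0; j < BNX2X_NUM_Q_STATS; j++) snprintf(buf + (k + j)*ETH_GSTRING_LEN, ETH_GSTRING_LEN, @@ -1951,7 +2139,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, if (is_multi(bp)) { k = 0; - for_each_napi_queue(bp, i) { + for_each_eth_queue(bp, i) { hw_stats = (u32 *)&bp->fp[i].eth_q_stats; for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { if (bnx2x_q_stats_arr[j].size == 0) { @@ -2069,14 +2257,30 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); size_t copy_size = - min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE); + min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE); + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + size_t i; if (bp->multi_mode == ETH_RSS_MODE_DISABLED) return -EOPNOTSUPP; - indir->size = TSTORM_INDIRECTION_TABLE_SIZE; - memcpy(indir->ring_index, bp->rx_indir_table, - copy_size * sizeof(bp->rx_indir_table[0])); + /* Get the current configuration of the RSS indirection table */ + bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table); + + /* + * We can't use a memcpy() as an internal storage of an +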
* indirection table is a u8 array while indir->ring_index + * points to an array of u32. + * + * Indirection table contains the FW Client IDs, so we need to + * align the returned table to the Client ID of the leading RSS + * queue. + */ + for (i = 0; i < copy_size; i++) + indir->ring_index[i] = ind_table[i] - bp->fp->cl_id; + + indir->size = T_ETH_INDIRECTION_TABLE_SIZE; + return 0; } @@ -2085,21 +2289,33 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); size_t i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; + u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); if (bp->multi_mode == ETH_RSS_MODE_DISABLED) return -EOPNOTSUPP; - /* Validate size and indices */ - if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE) + /* validate the size */ + if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE) return -EINVAL; - for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) - if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp)) + + for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { + /* validate the indices */ + if (indir->ring_index[i] >= num_eth_queues) return -EINVAL; + /* + * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy() + * as an internal storage of an indirection table is a u8 array + * while indir->ring_index points to an array of u32. + * + * Indirection table contains the FW Client IDs, so we need to + * align the received table to the Client ID of the leading RSS + * queue + */ + ind_table[i] = indir->ring_index[i] + bp->fp->cl_id; + } - memcpy(bp->rx_indir_table, indir->ring_index, - indir->size * sizeof(bp->rx_indir_table[0])); - bnx2x_push_indir_table(bp); - return 0; + return bnx2x_config_rss_pf(bp, ind_table, false); } static const struct ethtool_ops bnx2x_ethtool_ops = { diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h index 9fe367836a5..998652a1b85 100644 --- a/drivers/net/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/bnx2x/bnx2x_fw_defs.h @@ -10,249 +10,221 @@ #ifndef BNX2X_FW_DEFS_H #define BNX2X_FW_DEFS_H -#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[142].base) +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[141].base + ((assertListEntry) * IRO[141].m1)) -#define CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ - (IRO[144].base + ((pfId) * IRO[144].m1)) + (IRO[147].base + ((assertListEntry) * IRO[147].m1)) #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ - (IRO[149].base + (((pfId)>>1) * IRO[149].m1) + (((pfId)&1) * \ - IRO[149].m2)) + (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ + IRO[153].m2)) #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ - (IRO[150].base + (((pfId)>>1) * IRO[150].m1) + (((pfId)&1) * \ - IRO[150].m2)) + (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ + IRO[154].m2)) #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ - (IRO[156].base + ((funcId) * IRO[156].m1)) + (IRO[159].base + ((funcId) * IRO[159].m1)) #define CSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[146].base + ((funcId) * IRO[146].m1)) -#define CSTORM_FUNCTION_MODE_OFFSET (IRO[153].base) -#define CSTORM_IGU_MODE_OFFSET (IRO[154].base) + (IRO[149].base + ((funcId) * IRO[149].m1)) +#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[311].base + ((pfId) * IRO[311].m1)) + (IRO[315].base + ((pfId) * IRO[315].m1)) #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[312].base + ((pfId) * IRO[312].m1)) - #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[304].base + ((pfId) * 
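Both rxfh hooks above translate between two numbering spaces: ethtool speaks in queue indices relative to the leading RSS queue, while the firmware table stores absolute client IDs, so the driver adds bp->fp->cl_id on set and subtracts it on get. The round trip, with invented values:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 128    /* T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
    uint8_t fw_table[TABLE_SIZE];     /* firmware view: client IDs */
    uint32_t user_table[TABLE_SIZE];  /* ethtool view: queue indices */
    uint8_t base_cl_id = 17;          /* invented cl_id of the leading queue */

    /* set path: queue index -> firmware client ID */
    for (int i = 0; i < TABLE_SIZE; i++) {
        user_table[i] = i % 4;                    /* spread over 4 queues */
        fw_table[i] = user_table[i] + base_cl_id;
    }

    /* get path: firmware client ID -> queue index */
    for (int i = 0; i < TABLE_SIZE; i++)
        user_table[i] = fw_table[i] - base_cl_id;

    printf("entry 5 maps to queue %u\n", (unsigned)user_table[5]);
    return 0;
}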
IRO[304].m1) + ((iscsiEqId) * \ - IRO[304].m2)) - #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[306].base + ((pfId) * IRO[306].m1) + ((iscsiEqId) * \ - IRO[306].m2)) - #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[305].base + ((pfId) * IRO[305].m1) + ((iscsiEqId) * \ - IRO[305].m2)) - #define \ - CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * \ - IRO[307].m2)) - #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[303].base + ((pfId) * IRO[303].m1) + ((iscsiEqId) * \ - IRO[303].m2)) - #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * \ - IRO[309].m2)) - #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * \ - IRO[308].m2)) + (IRO[316].base + ((pfId) * IRO[316].m1)) +#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ + (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2)) +#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ + (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) +#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ + (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) +#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ + (IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) +#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ + (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[310].base + ((pfId) * IRO[310].m1)) + (IRO[314].base + ((pfId) * IRO[314].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) + (IRO[306].base + ((pfId) * IRO[306].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) + (IRO[305].base + ((pfId) * IRO[305].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) -#define CSTORM_PATH_ID_OFFSET (IRO[159].base) + (IRO[304].base + ((pfId) * IRO[304].m1)) +#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[151].base + ((funcId) * IRO[151].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ - (IRO[137].base + ((pfId) * IRO[137].m1)) + (IRO[142].base + ((pfId) * IRO[142].m1)) +#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ + (IRO[143].base + ((pfId) * IRO[143].m1)) #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ - (IRO[136].base + ((pfId) * IRO[136].m1)) -#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[136].size) + (IRO[141].base + ((pfId) * IRO[141].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ - (IRO[138].base + ((pfId) * IRO[138].m1)) -#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[138].size) -#define CSTORM_STATS_FLAGS_OFFSET(pfId) \ - (IRO[143].base + ((pfId) * IRO[143].m1)) + (IRO[144].base + ((pfId) * IRO[144].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) +#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ + (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ - (IRO[129].base + 
((sbId) * IRO[129].m1)) + (IRO[133].base + ((sbId) * IRO[133].m1)) +#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ + (IRO[134].base + ((sbId) * IRO[134].m1)) +#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ + (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ - (IRO[128].base + ((sbId) * IRO[128].m1)) -#define CSTORM_STATUS_BLOCK_SIZE (IRO[128].size) -#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ (IRO[132].base + ((sbId) * IRO[132].m1)) -#define CSTORM_SYNC_BLOCK_SIZE (IRO[132].size) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) +#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ + (IRO[137].base + ((sbId) * IRO[137].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ - (IRO[151].base + ((vfId) * IRO[151].m1)) + (IRO[155].base + ((vfId) * IRO[155].m1)) #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ - (IRO[152].base + ((vfId) * IRO[152].m1)) + (IRO[156].base + ((vfId) * IRO[156].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[147].base + ((funcId) * IRO[147].m1)) -#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[199].base) + (IRO[150].base + ((funcId) * IRO[150].m1)) +#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ - (IRO[198].base + ((pfId) * IRO[198].m1)) -#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[99].base) + (IRO[203].base + ((pfId) * IRO[203].m1)) +#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[98].base + ((assertListEntry) * IRO[98].m1)) - #define TSTORM_CLIENT_CONFIG_OFFSET(portId, clientId) \ - (IRO[197].base + ((portId) * IRO[197].m1) + ((clientId) * \ - IRO[197].m2)) -#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[104].base) + (IRO[101].base + ((assertListEntry) * IRO[101].m1)) +#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base) #define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \ - (IRO[105].base) -#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ - (IRO[96].base + ((pfId) * IRO[96].m1)) -#define TSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[101].base + ((funcId) * IRO[101].m1)) + (IRO[108].base) #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ - (IRO[195].base + ((pfId) * IRO[195].m1)) -#define TSTORM_FUNCTION_MODE_OFFSET (IRO[103].base) -#define TSTORM_INDIRECTION_TABLE_OFFSET(pfId) \ - (IRO[91].base + ((pfId) * IRO[91].m1)) -#define TSTORM_INDIRECTION_TABLE_SIZE (IRO[91].size) - #define \ - TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfId, iscsiConBufPblEntry) \ - (IRO[260].base + ((pfId) * IRO[260].m1) + ((iscsiConBufPblEntry) \ - * IRO[260].m2)) + (IRO[201].base + ((pfId) * IRO[201].m1)) +#define TSTORM_FUNC_EN_OFFSET(funcId) \ + (IRO[103].base + ((funcId) * IRO[103].m1)) #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[264].base + ((pfId) * IRO[264].m1)) + (IRO[271].base + ((pfId) * IRO[271].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ - (IRO[265].base + ((pfId) * IRO[265].m1)) + (IRO[272].base + ((pfId) * IRO[272].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ - (IRO[266].base + ((pfId) * IRO[266].m1)) + (IRO[273].base + ((pfId) * IRO[273].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ - (IRO[267].base + ((pfId) * IRO[267].m1)) + (IRO[274].base + ((pfId) * IRO[274].m1)) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[263].base + ((pfId) * IRO[263].m1)) + (IRO[270].base + ((pfId) * IRO[270].m1)) #define 
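Every *_OFFSET macro in this block expands to the same affine form, a base plus one stride per parameter, all taken from the firmware-supplied IRO table; that is why a firmware update only renumbers the indices without touching the callers. The shape as a compilable sketch, with a toy table entry rather than real firmware data:

#include <stdint.h>
#include <stdio.h>

struct iro { uint32_t base, m1, m2, size; };

/* toy entry; real values ship with the firmware blob */
static const struct iro IRO[309] = {
    [308] = { 0x6000, 0x80, 0x10, 0 },
};

/* same shape as e.g. CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) */
static uint32_t iscsi_eq_cons_offset(uint32_t pf_id, uint32_t eq_id)
{
    return IRO[308].base + pf_id * IRO[308].m1 + eq_id * IRO[308].m2;
}

int main(void)
{
    printf("pf 1, eq 2 -> 0x%x\n", (unsigned)iscsi_eq_cons_offset(1, 2));
    return 0;
}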
TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[262].base + ((pfId) * IRO[262].m1)) + (IRO[269].base + ((pfId) * IRO[269].m1)) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[261].base + ((pfId) * IRO[261].m1)) + (IRO[268].base + ((pfId) * IRO[268].m1)) #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[259].base + ((pfId) * IRO[259].m1)) + (IRO[267].base + ((pfId) * IRO[267].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[269].base + ((pfId) * IRO[269].m1)) + (IRO[276].base + ((pfId) * IRO[276].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[256].base + ((pfId) * IRO[256].m1)) + (IRO[263].base + ((pfId) * IRO[263].m1)) #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[257].base + ((pfId) * IRO[257].m1)) + (IRO[264].base + ((pfId) * IRO[264].m1)) +#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ + (IRO[265].base + ((pfId) * IRO[265].m1)) #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[258].base + ((pfId) * IRO[258].m1)) + (IRO[266].base + ((pfId) * IRO[266].m1)) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ - (IRO[196].base + ((pfId) * IRO[196].m1)) - #define TSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, tStatCntId) \ - (IRO[100].base + ((portId) * IRO[100].m1) + ((tStatCntId) * \ - IRO[100].m2)) -#define TSTORM_STATS_FLAGS_OFFSET(pfId) \ - (IRO[95].base + ((pfId) * IRO[95].m1)) + (IRO[202].base + ((pfId) * IRO[202].m1)) +#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[105].base + ((funcId) * IRO[105].m1)) #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[211].base + ((pfId) * IRO[211].m1)) + (IRO[216].base + ((pfId) * IRO[216].m1)) #define TSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[102].base + ((funcId) * IRO[102].m1)) -#define USTORM_AGG_DATA_OFFSET (IRO[201].base) -#define USTORM_AGG_DATA_SIZE (IRO[201].size) -#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[170].base) + (IRO[104].base + ((funcId) * IRO[104].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[206].base) +#define USTORM_AGG_DATA_SIZE (IRO[206].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[169].base + ((assertListEntry) * IRO[169].m1)) + (IRO[176].base + ((assertListEntry) * IRO[176].m1)) +#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \ + (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \ + IRO[205].m2)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ - (IRO[178].base + ((portId) * IRO[178].m1)) -#define USTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ - (IRO[172].base + ((pfId) * IRO[172].m1)) + (IRO[183].base + ((portId) * IRO[183].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[313].base + ((pfId) * IRO[313].m1)) + (IRO[317].base + ((pfId) * IRO[317].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[174].base + ((funcId) * IRO[174].m1)) -#define USTORM_FUNCTION_MODE_OFFSET (IRO[177].base) + (IRO[178].base + ((funcId) * IRO[178].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[277].base + ((pfId) * IRO[277].m1)) + (IRO[281].base + ((pfId) * IRO[281].m1)) #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) -#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ (IRO[282].base + ((pfId) * IRO[282].m1)) +#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ + (IRO[286].base + ((pfId) * IRO[286].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) + (IRO[283].base + ((pfId) * IRO[283].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[275].base 
+ ((pfId) * IRO[275].m1)) + (IRO[279].base + ((pfId) * IRO[279].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[274].base + ((pfId) * IRO[274].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[273].base + ((pfId) * IRO[273].m1)) + (IRO[277].base + ((pfId) * IRO[277].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[276].base + ((pfId) * IRO[276].m1)) -#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ (IRO[280].base + ((pfId) * IRO[280].m1)) +#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ + (IRO[284].base + ((pfId) * IRO[284].m1)) #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ - (IRO[176].base + ((pfId) * IRO[176].m1)) - #define USTORM_PER_COUNTER_ID_STATS_OFFSET(portId, uStatCntId) \ - (IRO[173].base + ((portId) * IRO[173].m1) + ((uStatCntId) * \ - IRO[173].m2)) - #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ - (IRO[204].base + ((portId) * IRO[204].m1) + ((clientId) * \ - IRO[204].m2)) + (IRO[182].base + ((pfId) * IRO[182].m1)) +#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[180].base + ((funcId) * IRO[180].m1)) +#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ + (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ + IRO[209].m2)) #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ - (IRO[205].base + ((qzoneId) * IRO[205].m1)) -#define USTORM_STATS_FLAGS_OFFSET(pfId) \ - (IRO[171].base + ((pfId) * IRO[171].m1)) -#define USTORM_TPA_BTR_OFFSET (IRO[202].base) -#define USTORM_TPA_BTR_SIZE (IRO[202].size) + (IRO[210].base + ((qzoneId) * IRO[210].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[207].base) +#define USTORM_TPA_BTR_SIZE (IRO[207].size) #define USTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[175].base + ((funcId) * IRO[175].m1)) -#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[59].base) -#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[58].base) -#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[54].base) + (IRO[179].base + ((funcId) * IRO[179].m1)) +#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) +#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) +#define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) #define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[53].base + ((assertListEntry) * IRO[53].m1)) + (IRO[50].base + ((assertListEntry) * IRO[50].m1)) #define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \ - (IRO[47].base + ((portId) * IRO[47].m1)) -#define XSTORM_E1HOV_OFFSET(pfId) \ - (IRO[55].base + ((pfId) * IRO[55].m1)) -#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(pfId) \ - (IRO[45].base + ((pfId) * IRO[45].m1)) + (IRO[43].base + ((portId) * IRO[43].m1)) #define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \ - (IRO[49].base + ((pfId) * IRO[49].m1)) + (IRO[45].base + ((pfId) * IRO[45].m1)) #define XSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[51].base + ((funcId) * IRO[51].m1)) -#define XSTORM_FUNCTION_MODE_OFFSET (IRO[56].base) + (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) + (IRO[294].base + ((pfId) * IRO[294].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * 
IRO[295].m1)) + (IRO[299].base + ((pfId) * IRO[299].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[300].base + ((pfId) * IRO[300].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[301].base + ((pfId) * IRO[301].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) + (IRO[302].base + ((pfId) * IRO[302].m1)) #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) + (IRO[303].base + ((pfId) * IRO[303].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) + (IRO[293].base + ((pfId) * IRO[293].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[292].base + ((pfId) * IRO[292].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[291].base + ((pfId) * IRO[291].m1)) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) + (IRO[290].base + ((pfId) * IRO[290].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[283].base + ((pfId) * IRO[283].m1)) -#define XSTORM_PATH_ID_OFFSET (IRO[65].base) - #define XSTORM_PER_COUNTER_ID_STATS_OFFSET(portId, xStatCntId) \ - (IRO[50].base + ((portId) * IRO[50].m1) + ((xStatCntId) * \ - IRO[50].m2)) + (IRO[287].base + ((pfId) * IRO[287].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ - (IRO[48].base + ((pfId) * IRO[48].m1)) + (IRO[44].base + ((pfId) * IRO[44].m1)) +#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ + (IRO[49].base + ((funcId) * IRO[49].m1)) #define XSTORM_SPQ_DATA_OFFSET(funcId) \ (IRO[32].base + ((funcId) * IRO[32].m1)) #define XSTORM_SPQ_DATA_SIZE (IRO[32].size) @@ -260,42 +232,37 @@ (IRO[30].base + ((funcId) * IRO[30].m1)) #define XSTORM_SPQ_PROD_OFFSET(funcId) \ (IRO[31].base + ((funcId) * IRO[31].m1)) -#define XSTORM_STATS_FLAGS_OFFSET(pfId) \ - (IRO[43].base + ((pfId) * IRO[43].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ - (IRO[206].base + ((portId) * IRO[206].m1)) + (IRO[211].base + ((portId) * IRO[211].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ - (IRO[207].base + ((portId) * IRO[207].m1)) + (IRO[212].base + ((portId) * IRO[212].m1)) #define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ - (IRO[209].base + (((pfId)>>1) * IRO[209].m1) + (((pfId)&1) * \ - IRO[209].m2)) + (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ + IRO[214].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[52].base + ((funcId) * IRO[52].m1)) + (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 -/* RSS hash types */ -#define DEFAULT_HASH_TYPE 0 -#define IPV4_HASH_TYPE 1 -#define TCP_IPV4_HASH_TYPE 2 -#define IPV6_HASH_TYPE 3 -#define TCP_IPV6_HASH_TYPE 4 -#define VLAN_PRI_HASH_TYPE 5 -#define E1HOV_PRI_HASH_TYPE 6 -#define DSCP_HASH_TYPE 7 +/** +* 
This file defines HSI constants for the ETH flow +*/ +#ifdef _EVEREST_MICROCODE +#include "Microcode\Generated\DataTypes\eth_rx_bd.h" +#include "Microcode\Generated\DataTypes\eth_tx_bd.h" +#include "Microcode\Generated\DataTypes\eth_rx_cqe.h" +#include "Microcode\Generated\DataTypes\eth_rx_sge.h" +#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h" +#endif /* Ethernet Ring parameters */ #define X_ETH_LOCAL_RING_SIZE 13 -#define FIRST_BD_IN_PKT 0 +#define FIRST_BD_IN_PKT 0 #define PARSE_BD_INDEX 1 #define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8)) #define U_ETH_NUM_OF_SGES_TO_FETCH 8 #define U_ETH_MAX_SGES_FOR_PACKET 3 -/*Tx params*/ -#define X_ETH_NO_VLAN 0 -#define X_ETH_OUTBAND_VLAN 1 -#define X_ETH_INBAND_VLAN 2 /* Rx ring params */ #define U_ETH_LOCAL_BD_RING_SIZE 8 #define U_ETH_LOCAL_SGE_RING_SIZE 10 @@ -311,79 +278,64 @@ #define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8)) #define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8)) -#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) -#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) +#define U_ETH_BDS_PER_PAGE_MASK (U_ETH_BDS_PER_PAGE-1) +#define U_ETH_CQE_PER_PAGE_MASK (TU_ETH_CQES_PER_PAGE-1) #define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1) #define U_ETH_UNDEFINED_Q 0xFF -/* values of command IDs in the ramrod message */ -#define RAMROD_CMD_ID_ETH_UNUSED 0 -#define RAMROD_CMD_ID_ETH_CLIENT_SETUP 1 -#define RAMROD_CMD_ID_ETH_UPDATE 2 -#define RAMROD_CMD_ID_ETH_HALT 3 -#define RAMROD_CMD_ID_ETH_FORWARD_SETUP 4 -#define RAMROD_CMD_ID_ETH_ACTIVATE 5 -#define RAMROD_CMD_ID_ETH_DEACTIVATE 6 -#define RAMROD_CMD_ID_ETH_EMPTY 7 -#define RAMROD_CMD_ID_ETH_TERMINATE 8 - -/* command values for set mac command */ -#define T_ETH_MAC_COMMAND_SET 0 -#define T_ETH_MAC_COMMAND_INVALIDATE 1 - #define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY 10 +#define ETH_NUM_OF_RSS_ENGINES_E2 72 + +#define FILTER_RULES_COUNT 16 +#define MULTICAST_RULES_COUNT 16 +#define CLASSIFY_RULES_COUNT 16 /*The CRC32 seed, that is used for the hash(reduction) multicast address */ -#define T_ETH_CRC32_HASH_SEED 0x00000000 +#define ETH_CRC32_HASH_SEED 0x00000000 + +#define ETH_CRC32_HASH_BIT_SIZE (8) +#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1) /* Maximal L2 clients supported */ #define ETH_MAX_RX_CLIENTS_E1 18 #define ETH_MAX_RX_CLIENTS_E1H 28 +#define ETH_MAX_RX_CLIENTS_E2 152 + +/* Maximal statistics client Ids */ +#define MAX_STAT_COUNTER_ID_E1 36 +#define MAX_STAT_COUNTER_ID_E1H 56 +#define MAX_STAT_COUNTER_ID_E2 140 + +#define MAX_MAC_CREDIT_E1 192 /* Per Chip */ +#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */ +#define MAX_MAC_CREDIT_E2 272 /* Per Path */ +#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */ +#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */ +#define MAX_VLAN_CREDIT_E2 272 /* Per Path */ -#define MAX_STAT_COUNTER_ID ETH_MAX_RX_CLIENTS_E1H /* Maximal aggregation queues supported */ #define ETH_MAX_AGGREGATION_QUEUES_E1 32 -#define ETH_MAX_AGGREGATION_QUEUES_E1H 64 +#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64 -/* ETH RSS modes */ -#define ETH_RSS_MODE_DISABLED 0 -#define ETH_RSS_MODE_REGULAR 1 -#define ETH_RSS_MODE_VLAN_PRI 2 -#define ETH_RSS_MODE_E1HOV_PRI 3 -#define ETH_RSS_MODE_IP_DSCP 4 -#define ETH_RSS_MODE_E2_INTEG 5 +#define ETH_NUM_OF_MCAST_BINS 256 +#define ETH_NUM_OF_MCAST_ENGINES_E2 72 -/* ETH vlan filtering modes */ -#define ETH_VLAN_FILTER_ANY_VLAN 0 /* Don't filter by vlan */ -#define ETH_VLAN_FILTER_SPECIFIC_VLAN \ - 1 /* Only the vlan_id is allowed 
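The new ETH_CRC32_HASH_* constants describe how a multicast MAC is reduced to one of ETH_NUM_OF_MCAST_BINS approximate-match bins: a CRC-32 over the address seeded with zero, masked down to ETH_CRC32_HASH_BIT_SIZE bits. A hedged userspace model follows; the exact CRC flavour and bit selection the firmware uses are not shown by these constants, so only the seed-and-mask reduction is illustrated.

#include <stdint.h>
#include <stdio.h>

#define CRC32_HASH_SEED     0x00000000
#define CRC32_HASH_BIT_SIZE 8
#define CRC32_HASH_MASK     ((1u << CRC32_HASH_BIT_SIZE) - 1)

/* plain reflected CRC-32 (poly 0xEDB88320); the firmware's CRC flavour
 * may differ, this only models the reduction */
static uint32_t crc32(uint32_t crc, const uint8_t *buf, int len)
{
    crc = ~crc;
    while (len--) {
        crc ^= *buf++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
    }
    return ~crc;
}

static unsigned mcast_bin(const uint8_t mac[6])
{
    return crc32(CRC32_HASH_SEED, mac, 6) & CRC32_HASH_MASK; /* 0..255 */
}

int main(void)
{
    const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
    printf("bin %u of 256\n", mcast_bin(mac));
    return 0;
}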
*/ -#define ETH_VLAN_FILTER_CLASSIFY \ - 2 /* vlan will be added to CAM for classification */ +#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3) +#define ETH_MIN_RX_CQES_WITH_TPA_E1 \ + (ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA) +#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \ + (ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA) -/* Fast path CQE selection */ -#define ETH_FP_CQE_REGULAR 0 -#define ETH_FP_CQE_SGL 1 -#define ETH_FP_CQE_RAW 2 +#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 /** -* This file defines HSI constants common to all microcode flows -*/ - -/* Connection types */ -#define ETH_CONNECTION_TYPE 0 -#define TOE_CONNECTION_TYPE 1 -#define RDMA_CONNECTION_TYPE 2 -#define ISCSI_CONNECTION_TYPE 3 -#define FCOE_CONNECTION_TYPE 4 -#define RESERVED_CONNECTION_TYPE_0 5 -#define RESERVED_CONNECTION_TYPE_1 6 -#define RESERVED_CONNECTION_TYPE_2 7 -#define NONE_CONNECTION_TYPE 8 - + * This file defines HSI constants common to all microcode flows + */ #define PROTOCOL_STATE_BIT_OFFSET 6 @@ -391,25 +343,9 @@ #define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) #define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET) -/* values of command IDs in the ramrod message */ -#define RAMROD_CMD_ID_COMMON_FUNCTION_START 1 -#define RAMROD_CMD_ID_COMMON_FUNCTION_STOP 2 -#define RAMROD_CMD_ID_COMMON_CFC_DEL 3 -#define RAMROD_CMD_ID_COMMON_CFC_DEL_WB 4 -#define RAMROD_CMD_ID_COMMON_SET_MAC 5 -#define RAMROD_CMD_ID_COMMON_STAT_QUERY 6 -#define RAMROD_CMD_ID_COMMON_STOP_TRAFFIC 7 -#define RAMROD_CMD_ID_COMMON_START_TRAFFIC 8 - /* microcode fixed page page size 4K (chains and ring segments) */ #define MC_PAGE_SIZE 4096 - -/* Host coalescing constants */ -#define HC_IGU_BC_MODE 0 -#define HC_IGU_NBC_MODE 1 -/* Host coalescing constants. 
E1 includes E1H as well */ - /* Number of indices per slow-path SB */ #define HC_SP_SB_MAX_INDICES 16 @@ -418,30 +354,17 @@ #define HC_SB_MAX_INDICES_E2 8 #define HC_SB_MAX_SB_E1X 32 -#define HC_SB_MAX_SB_E2 136 +#define HC_SB_MAX_SB_E2 136 #define HC_SP_SB_ID 0xde -#define HC_REGULAR_SEGMENT 0 -#define HC_DEFAULT_SEGMENT 1 #define HC_SB_MAX_SM 2 #define HC_SB_MAX_DYNAMIC_INDICES 4 -#define HC_FUNCTION_DISABLED 0xff -/* used by the driver to get the SB offset */ -#define USTORM_ID 0 -#define CSTORM_ID 1 -#define XSTORM_ID 2 -#define TSTORM_ID 3 -#define ATTENTION_ID 4 /* max number of slow path commands per port */ #define MAX_RAMRODS_PER_PORT 8 -/* values for RX ETH CQE type field */ -#define RX_ETH_CQE_TYPE_ETH_FASTPATH 0 -#define RX_ETH_CQE_TYPE_ETH_RAMROD 1 - /**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ @@ -451,7 +374,7 @@ #define XSEMI_CLK1_RESUL_CHIP (1e-3) -#define SDM_TIMER_TICK_RESUL_CHIP (4*(1e-6)) +#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ @@ -460,72 +383,28 @@ #define FW_LOG_LIST_SIZE 50 -#define NUM_OF_PROTOCOLS 4 #define NUM_OF_SAFC_BITS 16 #define MAX_COS_NUMBER 4 - -#define FAIRNESS_COS_WRR_MODE 0 -#define FAIRNESS_COS_ETS_MODE 1 - - -/* Priority Flow Control (PFC) */ +#define MAX_TRAFFIC_TYPES 8 #define MAX_PFC_PRIORITIES 8 -#define MAX_PFC_TRAFFIC_TYPES 8 - -/* Available Traffic Types for Link Layer Flow Control */ -#define LLFC_TRAFFIC_TYPE_NW 0 -#define LLFC_TRAFFIC_TYPE_FCOE 1 -#define LLFC_TRAFFIC_TYPE_ISCSI 2 - /***************** START OF E2 INTEGRATION \ - CODE***************************************/ -#define LLFC_TRAFFIC_TYPE_NW_COS1_E2INTEG 3 - /***************** END OF E2 INTEGRATION \ - CODE***************************************/ -#define LLFC_TRAFFIC_TYPE_MAX 4 /* used by array traffic_type_to_priority[] to mark traffic type \ that is not mapped to priority*/ #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF -#define LLFC_MODE_NONE 0 -#define LLFC_MODE_PFC 1 -#define LLFC_MODE_SAFC 2 - -#define DCB_DISABLED 0 -#define DCB_ENABLED 1 -#define UNKNOWN_ADDRESS 0 -#define UNICAST_ADDRESS 1 -#define MULTICAST_ADDRESS 2 -#define BROADCAST_ADDRESS 3 +#define C_ERES_PER_PAGE \ + (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) +#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) -#define SINGLE_FUNCTION 0 -#define MULTI_FUNCTION_SD 1 -#define MULTI_FUNCTION_SI 2 +#define STATS_QUERY_CMD_COUNT 16 -#define IP_V4 0 -#define IP_V6 1 +#define NIV_LIST_TABLE_SIZE 4096 +#define INVALID_VNIC_ID 0xFF -#define C_ERES_PER_PAGE \ - (PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem))) -#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1) -#define EVENT_RING_OPCODE_VF_PF_CHANNEL 0 -#define EVENT_RING_OPCODE_FUNCTION_START 1 -#define EVENT_RING_OPCODE_FUNCTION_STOP 2 -#define EVENT_RING_OPCODE_CFC_DEL 3 -#define EVENT_RING_OPCODE_CFC_DEL_WB 4 -#define EVENT_RING_OPCODE_SET_MAC 5 -#define EVENT_RING_OPCODE_STAT_QUERY 6 -#define EVENT_RING_OPCODE_STOP_TRAFFIC 7 -#define EVENT_RING_OPCODE_START_TRAFFIC 8 -#define EVENT_RING_OPCODE_FORWARD_SETUP 9 - -#define VF_PF_CHANNEL_STATE_READY 0 -#define VF_PF_CHANNEL_STATE_WAITING_FOR_ACK 1 - -#define VF_PF_CHANNEL_STATE_MAX_NUMBER 2 +#define UNDEF_IRO 0x80000000 #endif /* BNX2X_FW_DEFS_H */ diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index cdf19fe7c7f..06727f32e50 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h @@ -11,7 +11,7 @@ #include "bnx2x_fw_defs.h" -#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e +#define 
FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e struct license_key { u32 reserved[6]; @@ -33,201 +33,366 @@ struct license_key { u32 reserved_b[4]; }; -#define PORT_0 0 -#define PORT_1 1 -#define PORT_MAX 2 + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_MAX 2 /**************************************************************************** - * Shared HW configuration * + * Shared HW configuration * ****************************************************************************/ -struct shared_hw_cfg { /* NVRAM Offset */ +#define PIN_CFG_NA 0x00000000 +#define PIN_CFG_GPIO0_P0 0x00000001 +#define PIN_CFG_GPIO1_P0 0x00000002 +#define PIN_CFG_GPIO2_P0 0x00000003 +#define PIN_CFG_GPIO3_P0 0x00000004 +#define PIN_CFG_GPIO0_P1 0x00000005 +#define PIN_CFG_GPIO1_P1 0x00000006 +#define PIN_CFG_GPIO2_P1 0x00000007 +#define PIN_CFG_GPIO3_P1 0x00000008 +#define PIN_CFG_EPIO0 0x00000009 +#define PIN_CFG_EPIO1 0x0000000a +#define PIN_CFG_EPIO2 0x0000000b +#define PIN_CFG_EPIO3 0x0000000c +#define PIN_CFG_EPIO4 0x0000000d +#define PIN_CFG_EPIO5 0x0000000e +#define PIN_CFG_EPIO6 0x0000000f +#define PIN_CFG_EPIO7 0x00000010 +#define PIN_CFG_EPIO8 0x00000011 +#define PIN_CFG_EPIO9 0x00000012 +#define PIN_CFG_EPIO10 0x00000013 +#define PIN_CFG_EPIO11 0x00000014 +#define PIN_CFG_EPIO12 0x00000015 +#define PIN_CFG_EPIO13 0x00000016 +#define PIN_CFG_EPIO14 0x00000017 +#define PIN_CFG_EPIO15 0x00000018 +#define PIN_CFG_EPIO16 0x00000019 +#define PIN_CFG_EPIO17 0x0000001a +#define PIN_CFG_EPIO18 0x0000001b +#define PIN_CFG_EPIO19 0x0000001c +#define PIN_CFG_EPIO20 0x0000001d +#define PIN_CFG_EPIO21 0x0000001e +#define PIN_CFG_EPIO22 0x0000001f +#define PIN_CFG_EPIO23 0x00000020 +#define PIN_CFG_EPIO24 0x00000021 +#define PIN_CFG_EPIO25 0x00000022 +#define PIN_CFG_EPIO26 0x00000023 +#define PIN_CFG_EPIO27 0x00000024 +#define PIN_CFG_EPIO28 0x00000025 +#define PIN_CFG_EPIO29 0x00000026 +#define PIN_CFG_EPIO30 0x00000027 +#define PIN_CFG_EPIO31 0x00000028 + +/* EPIO definition */ +#define EPIO_CFG_NA 0x00000000 +#define EPIO_CFG_EPIO0 0x00000001 +#define EPIO_CFG_EPIO1 0x00000002 +#define EPIO_CFG_EPIO2 0x00000003 +#define EPIO_CFG_EPIO3 0x00000004 +#define EPIO_CFG_EPIO4 0x00000005 +#define EPIO_CFG_EPIO5 0x00000006 +#define EPIO_CFG_EPIO6 0x00000007 +#define EPIO_CFG_EPIO7 0x00000008 +#define EPIO_CFG_EPIO8 0x00000009 +#define EPIO_CFG_EPIO9 0x0000000a +#define EPIO_CFG_EPIO10 0x0000000b +#define EPIO_CFG_EPIO11 0x0000000c +#define EPIO_CFG_EPIO12 0x0000000d +#define EPIO_CFG_EPIO13 0x0000000e +#define EPIO_CFG_EPIO14 0x0000000f +#define EPIO_CFG_EPIO15 0x00000010 +#define EPIO_CFG_EPIO16 0x00000011 +#define EPIO_CFG_EPIO17 0x00000012 +#define EPIO_CFG_EPIO18 0x00000013 +#define EPIO_CFG_EPIO19 0x00000014 +#define EPIO_CFG_EPIO20 0x00000015 +#define EPIO_CFG_EPIO21 0x00000016 +#define EPIO_CFG_EPIO22 0x00000017 +#define EPIO_CFG_EPIO23 0x00000018 +#define EPIO_CFG_EPIO24 0x00000019 +#define EPIO_CFG_EPIO25 0x0000001a +#define EPIO_CFG_EPIO26 0x0000001b +#define EPIO_CFG_EPIO27 0x0000001c +#define EPIO_CFG_EPIO28 0x0000001d +#define EPIO_CFG_EPIO29 0x0000001e +#define EPIO_CFG_EPIO30 0x0000001f +#define EPIO_CFG_EPIO31 0x00000020 + + +struct shared_hw_cfg { /* NVRAM Offset */ /* Up to 16 bytes of NULL-terminated string */ - u8 part_num[16]; /* 0x104 */ + u8 part_num[16]; /* 0x104 */ + + u32 config; /* 0x114 */ + #define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 + #define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 + #define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 + #define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 + #define 
SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 - u32 config; /* 0x114 */ -#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK 0x00000001 -#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT 0 -#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V 0x00000000 -#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V 0x00000001 -#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN 0x00000002 + #define SHARED_HW_CFG_PORT_SWAP 0x00000004 -#define SHARED_HW_CFG_PORT_SWAP 0x00000004 + #define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 -#define SHARED_HW_CFG_BEACON_WOL_EN 0x00000008 + #define SHARED_HW_CFG_PCIE_GEN3_DISABLED 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN3_ENABLED 0x00000010 -#define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 -#define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 + #define SHARED_HW_CFG_MFW_SELECT_MASK 0x00000700 + #define SHARED_HW_CFG_MFW_SELECT_SHIFT 8 /* Whatever MFW found in NVM (if multiple found, priority order is: NC-SI, UMP, IPMI) */ -#define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 -#define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 -#define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 -#define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 + #define SHARED_HW_CFG_MFW_SELECT_DEFAULT 0x00000000 + #define SHARED_HW_CFG_MFW_SELECT_NC_SI 0x00000100 + #define SHARED_HW_CFG_MFW_SELECT_UMP 0x00000200 + #define SHARED_HW_CFG_MFW_SELECT_IPMI 0x00000300 /* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ -#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI 0x00000400 /* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ -#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI 0x00000500 /* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP (can only be used when an add-in board, not BMC, pulls-down SPIO4) */ -#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 - -#define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 -#define SHARED_HW_CFG_LED_MODE_SHIFT 16 -#define SHARED_HW_CFG_LED_MAC1 0x00000000 -#define SHARED_HW_CFG_LED_PHY1 0x00010000 -#define SHARED_HW_CFG_LED_PHY2 0x00020000 -#define SHARED_HW_CFG_LED_PHY3 0x00030000 -#define SHARED_HW_CFG_LED_MAC2 0x00040000 -#define SHARED_HW_CFG_LED_PHY4 0x00050000 -#define SHARED_HW_CFG_LED_PHY5 0x00060000 -#define SHARED_HW_CFG_LED_PHY6 0x00070000 -#define SHARED_HW_CFG_LED_MAC3 0x00080000 -#define SHARED_HW_CFG_LED_PHY7 0x00090000 -#define SHARED_HW_CFG_LED_PHY9 0x000a0000 -#define SHARED_HW_CFG_LED_PHY11 0x000b0000 -#define SHARED_HW_CFG_LED_MAC4 0x000c0000 -#define SHARED_HW_CFG_LED_PHY8 0x000d0000 -#define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 - - -#define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 -#define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 -#define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 -#define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 -#define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 -#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 -#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 -#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 - - u32 config2; /* 0x118 */ + #define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP 0x00000600 + + #define SHARED_HW_CFG_LED_MODE_MASK 0x000f0000 + #define SHARED_HW_CFG_LED_MODE_SHIFT 16 + #define SHARED_HW_CFG_LED_MAC1 0x00000000 + #define SHARED_HW_CFG_LED_PHY1 0x00010000 + #define SHARED_HW_CFG_LED_PHY2 0x00020000 + #define SHARED_HW_CFG_LED_PHY3 0x00030000 + #define SHARED_HW_CFG_LED_MAC2 0x00040000 + 
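Every field in shared_hw_cfg follows the MASK/SHIFT convention used above: the named values already carry the shift, so a field is tested by masking the config word and comparing against the named constants, and only converted to a plain number when the shift is applied. For the LED mode field, for instance:

#include <stdint.h>
#include <stdio.h>

#define SHARED_HW_CFG_LED_MODE_MASK  0x000f0000
#define SHARED_HW_CFG_LED_MODE_SHIFT 16
#define SHARED_HW_CFG_LED_PHY1       0x00010000

int main(void)
{
    uint32_t cfg = 0x00010004;      /* made-up sample NVRAM word */

    /* named values already carry the shift, so compare before shifting */
    if ((cfg & SHARED_HW_CFG_LED_MODE_MASK) == SHARED_HW_CFG_LED_PHY1)
        printf("LED mode: PHY1 (raw field %u)\n",
               (unsigned)((cfg & SHARED_HW_CFG_LED_MODE_MASK) >>
                          SHARED_HW_CFG_LED_MODE_SHIFT));
    return 0;
}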
#define SHARED_HW_CFG_LED_PHY4 0x00050000 + #define SHARED_HW_CFG_LED_PHY5 0x00060000 + #define SHARED_HW_CFG_LED_PHY6 0x00070000 + #define SHARED_HW_CFG_LED_MAC3 0x00080000 + #define SHARED_HW_CFG_LED_PHY7 0x00090000 + #define SHARED_HW_CFG_LED_PHY9 0x000a0000 + #define SHARED_HW_CFG_LED_PHY11 0x000b0000 + #define SHARED_HW_CFG_LED_MAC4 0x000c0000 + #define SHARED_HW_CFG_LED_PHY8 0x000d0000 + #define SHARED_HW_CFG_LED_EXTPHY1 0x000e0000 + + + #define SHARED_HW_CFG_AN_ENABLE_MASK 0x3f000000 + #define SHARED_HW_CFG_AN_ENABLE_SHIFT 24 + #define SHARED_HW_CFG_AN_ENABLE_CL37 0x01000000 + #define SHARED_HW_CFG_AN_ENABLE_CL73 0x02000000 + #define SHARED_HW_CFG_AN_ENABLE_BAM 0x04000000 + #define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 0x08000000 + #define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 0x10000000 + #define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 0x20000000 + + #define SHARED_HW_CFG_SRIOV_MASK 0x40000000 + #define SHARED_HW_CFG_SRIOV_DISABLED 0x00000000 + #define SHARED_HW_CFG_SRIOV_ENABLED 0x40000000 + + #define SHARED_HW_CFG_ATC_MASK 0x80000000 + #define SHARED_HW_CFG_ATC_DISABLED 0x00000000 + #define SHARED_HW_CFG_ATC_ENABLED 0x80000000 + + u32 config2; /* 0x118 */ /* one time auto detect grace period (in sec) */ -#define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff -#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 + #define SHARED_HW_CFG_GRACE_PERIOD_MASK 0x000000ff + #define SHARED_HW_CFG_GRACE_PERIOD_SHIFT 0 -#define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 + #define SHARED_HW_CFG_PCIE_GEN2_ENABLED 0x00000100 + #define SHARED_HW_CFG_PCIE_GEN2_DISABLED 0x00000000 /* The default value for the core clock is 250MHz and it is achieved by setting the clock change to 4 */ -#define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 -#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 + #define SHARED_HW_CFG_CLOCK_CHANGE_MASK 0x00000e00 + #define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT 9 -#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 -#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 + #define SHARED_HW_CFG_SMBUS_TIMING_MASK 0x00001000 + #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 + #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 -#define SHARED_HW_CFG_HIDE_PORT1 0x00002000 + #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 - /* The fan failure mechanism is usually related to the PHY type - since the power consumption of the board is determined by the PHY. - Currently, fan is required for most designs with SFX7101, BCM8727 - and BCM8481. If a fan is not required for a board which uses one - of those PHYs, this field should be set to "Disabled". If a fan is - required for a different PHY type, this option should be set to - "Enabled". 
- The fan failure indication is expected on - SPIO5 */ -#define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 -#define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 -#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 -#define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 -#define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 - - /* Set the MDC/MDIO access for the first external phy */ -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 - - /* Set the MDC/MDIO access for the second external phy */ -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 -#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 - u32 power_dissipated; /* 0x11c */ -#define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 -#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 - -#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 -#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 -#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 -#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 -#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 -#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 - - u32 ump_nc_si_config; /* 0x120 */ -#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 -#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 -#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 -#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 -#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 -#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 - -#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 -#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 - -#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 -#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 -#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 -#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 - - u32 board; /* 0x124 */ -#define SHARED_HW_CFG_BOARD_REV_MASK 0x00FF0000 -#define SHARED_HW_CFG_BOARD_REV_SHIFT 16 - -#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0F000000 -#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 - -#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xF0000000 -#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 - - u32 reserved; /* 0x128 */ + #define SHARED_HW_CFG_WOL_CAPABLE_MASK 0x00004000 + #define SHARED_HW_CFG_WOL_CAPABLE_DISABLED 0x00000000 + #define SHARED_HW_CFG_WOL_CAPABLE_ENABLED 0x00004000 + /* Output low when PERST is asserted */ + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK 0x00008000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED 0x00000000 + #define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED 0x00008000 + + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK 0x00070000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT 16 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW 0x00000000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB 0x00010000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB 0x00020000 + #define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB 0x00030000 + + /* The 
fan failure mechanism is usually related to the PHY type + since the power consumption of the board is determined by the PHY. + Currently, a fan is required for most designs with SFX7101, BCM8727 + and BCM8481. If a fan is not required for a board which uses one + of those PHYs, this field should be set to "Disabled". If a fan is + required for a different PHY type, this option should be set to + "Enabled". The fan failure indication is expected on SPIO5 */ + #define SHARED_HW_CFG_FAN_FAILURE_MASK 0x00180000 + #define SHARED_HW_CFG_FAN_FAILURE_SHIFT 19 + #define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_FAN_FAILURE_DISABLED 0x00080000 + #define SHARED_HW_CFG_FAN_FAILURE_ENABLED 0x00100000 + + /* ASPM Power Management support */ + #define SHARED_HW_CFG_ASPM_SUPPORT_MASK 0x00600000 + #define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT 21 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED 0x00000000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED 0x00200000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED 0x00400000 + #define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED 0x00600000 + + /* The value of PM_TL_IGNORE_REQS (bit0) in PCI register + tl_control_0 (register 0x2800) */ + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK 0x00800000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED 0x00000000 + #define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED 0x00800000 + + #define SHARED_HW_CFG_PORT_MODE_MASK 0x01000000 + #define SHARED_HW_CFG_PORT_MODE_2 0x00000000 + #define SHARED_HW_CFG_PORT_MODE_4 0x01000000 + + #define SHARED_HW_CFG_PATH_SWAP_MASK 0x02000000 + #define SHARED_HW_CFG_PATH_SWAP_DISABLED 0x00000000 + #define SHARED_HW_CFG_PATH_SWAP_ENABLED 0x02000000 + + /* Set the MDC/MDIO access for the first external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK 0x1C000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT 26 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0 0x04000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1 0x08000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH 0x0c000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED 0x10000000 + + /* Set the MDC/MDIO access for the second external phy */ + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK 0xE0000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT 29 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE 0x00000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0 0x20000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1 0x40000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 + #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 + + + u32 power_dissipated; /* 0x11c */ + #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 + #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 + #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 + #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 + #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 + #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 + + #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 + #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 + + u32 ump_nc_si_config; /* 0x120 */ + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT 0 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY 0x00000001 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII 0x00000002 + + #define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK 0x00000f00 + #define
SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT 8 + + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK 0x00ff0000 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT 16 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE 0x00000000 + #define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000 + + u32 board; /* 0x124 */ + #define SHARED_HW_CFG_E3_I2C_MUX0_MASK 0x0000003F + #define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT 0 + #define SHARED_HW_CFG_E3_I2C_MUX1_MASK 0x00000FC0 + #define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT 6 + /* Use the PIN_CFG_XXX defines on top */ + #define SHARED_HW_CFG_BOARD_REV_MASK 0x00ff0000 + #define SHARED_HW_CFG_BOARD_REV_SHIFT 16 + + #define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK 0x0f000000 + #define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT 24 + + #define SHARED_HW_CFG_BOARD_MINOR_VER_MASK 0xf0000000 + #define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT 28 + + u32 wc_lane_config; /* 0x128 */ + #define SHARED_HW_CFG_LANE_SWAP_CFG_MASK 0x0000FFFF + #define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_32103210 0x00001b1b + #define SHARED_HW_CFG_LANE_SWAP_CFG_32100123 0x00001be4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_01233210 0x0000e41b + #define SHARED_HW_CFG_LANE_SWAP_CFG_01230123 0x0000e4e4 + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000FF + #define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000FF00 + #define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + + /* TX lane Polarity swap */ + #define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED 0x00010000 + #define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED 0x00020000 + #define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED 0x00040000 + #define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED 0x00080000 + /* RX lane Polarity swap */ + #define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED 0x00100000 + #define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED 0x00200000 + #define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED 0x00400000 + #define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED 0x00800000 + + /* Selects the port layout of the board */ + #define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK 0x0F000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT 24 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01 0x00000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10 0x01000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123 0x02000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032 0x03000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301 0x04000000 + #define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210 0x05000000 }; /**************************************************************************** - * Port HW configuration * + * Port HW configuration * ****************************************************************************/ -struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ +struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u32 pci_id; -#define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 -#define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff + #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 + #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff u32 pci_sub_id; -#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 -#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff + #define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK 0xffff0000 + #define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK 0x0000ffff u32 power_dissipated; -#define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 -#define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 -#define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 -#define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 -#define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 -#define
PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 -#define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff -#define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_DIS_D0_MASK 0x000000ff + #define PORT_HW_CFG_POWER_DIS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_DIS_D1_MASK 0x0000ff00 + #define PORT_HW_CFG_POWER_DIS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_DIS_D2_MASK 0x00ff0000 + #define PORT_HW_CFG_POWER_DIS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_DIS_D3_MASK 0xff000000 + #define PORT_HW_CFG_POWER_DIS_D3_SHIFT 24 u32 power_consumed; -#define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 -#define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 -#define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 -#define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 -#define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 -#define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 -#define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff -#define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_CONS_D0_MASK 0x000000ff + #define PORT_HW_CFG_POWER_CONS_D0_SHIFT 0 + #define PORT_HW_CFG_POWER_CONS_D1_MASK 0x0000ff00 + #define PORT_HW_CFG_POWER_CONS_D1_SHIFT 8 + #define PORT_HW_CFG_POWER_CONS_D2_MASK 0x00ff0000 + #define PORT_HW_CFG_POWER_CONS_D2_SHIFT 16 + #define PORT_HW_CFG_POWER_CONS_D3_MASK 0xff000000 + #define PORT_HW_CFG_POWER_CONS_D3_SHIFT 24 u32 mac_upper; -#define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff -#define PORT_HW_CFG_UPPERMAC_SHIFT 0 + #define PORT_HW_CFG_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_UPPERMAC_SHIFT 0 u32 mac_lower; u32 iscsi_mac_upper; /* Upper 16 bits are always zeroes */ @@ -237,642 +402,807 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u32 rdma_mac_lower; u32 serdes_config; -#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000FFFF -#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 - -#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xFFFF0000 -#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 - - - u32 Reserved0[3]; /* 0x158 */ - /* Controls the TX laser of the SFP+ module */ - u32 sfp_ctrl; /* 0x164 */ -#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF -#define PORT_HW_CFG_TX_LASER_SHIFT 0 -#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 -#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 -#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 -#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 -#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 - - /* Controls the fault module LED of the SFP+ */ -#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 -#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 -#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 -#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 - u32 Reserved01[12]; /* 0x158 */ - /* for external PHY, or forced mode or during AN */ - u16 xgxs_config_rx[4]; /* 0x198 */ - - u16 xgxs_config_tx[4]; /* 0x1A0 */ - - u32 Reserved1[56]; /* 0x1A8 */ - u32 default_cfg; /* 0x288 */ -#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 -#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 -#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 -#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 -#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 - -#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C -#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 -#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 -#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 -#define 
PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c - -#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 -#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 -#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 -#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 -#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 - -#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 -#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 -#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 -#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 -#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 -#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff + #define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT 0 + + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK 0xffff0000 + #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 + + + /* Default values: 2P-64, 4P-32 */ + u32 pf_config; /* 0x158 */ + #define PORT_HW_CFG_PF_NUM_VF_MASK 0x0000007F + #define PORT_HW_CFG_PF_NUM_VF_SHIFT 0 + + /* Default values: 17 */ + #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK 0x00007F00 + #define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT 8 + + #define PORT_HW_CFG_ENABLE_FLR_MASK 0x00010000 + #define PORT_HW_CFG_FLR_ENABLED 0x00010000 + + u32 vf_config; /* 0x15C */ + #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK 0x0000007F + #define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT 0 + + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK 0xFFFF0000 + #define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT 16 + + u32 mf_pci_id; /* 0x160 */ + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK 0x0000FFFF + #define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT 0 + + /* Controls the TX laser of the SFP+ module */ + u32 sfp_ctrl; /* 0x164 */ + #define PORT_HW_CFG_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_TX_LASER_SHIFT 0 + #define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 + #define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 + #define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 + #define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 + #define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 + + /* Controls the fault module LED of the SFP+ */ + #define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 + #define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 + #define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 + + /* The output pin TX_DIS that controls the TX laser of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + u32 e3_sfp_ctrl; /* 0x168 */ + #define PORT_HW_CFG_E3_TX_LASER_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_LASER_SHIFT 0 + + /* The output pin for SFPP_TYPE which turns on the Fault module LED */ + #define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT 8 + + /* The input pin MOD_ABS that indicates whether SFP+ module is + present or not. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_MOD_ABS_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_MOD_ABS_SHIFT 16 + + /* The output pin PWRDIS_SFP_X which disables the power of the SFP+ + module. Use the PIN_CFG_XXX defines on top */ + #define PORT_HW_CFG_E3_PWR_DIS_MASK 0xFF000000 + #define PORT_HW_CFG_E3_PWR_DIS_SHIFT 24 /* - * When KR link is required to be set to force which is not - * KR-compliant, this parameter determine what is the trigger for it. - * When GPIO is selected, low input will force the speed. Currently - * default speed is 1G.
In the future, it may be widen to select the - * forced speed in with another parameter. Note when force-1G is - * enabled, it override option 56: Link Speed option. + * The input pin which signals module transmit fault. Use the + * PIN_CFG_XXX defines on top */ -#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 -#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 -#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 -#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 -#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 - /* Enable to determine with which GPIO to reset the external phy */ -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 -#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 + u32 e3_cmn_pin_cfg; /* 0x16C */ + #define PORT_HW_CFG_E3_TX_FAULT_MASK 0x000000FF + #define PORT_HW_CFG_E3_TX_FAULT_SHIFT 0 + + /* The output pin which resets the PHY. Use the PIN_CFG_XXX defines on + top */ + #define PORT_HW_CFG_E3_PHY_RESET_MASK 0x0000FF00 + #define PORT_HW_CFG_E3_PHY_RESET_SHIFT 8 + + /* + * The output pin which powers down the PHY. Use the PIN_CFG_XXX + * defines on top + */ + #define PORT_HW_CFG_E3_PWR_DOWN_MASK 0x00FF0000 + #define PORT_HW_CFG_E3_PWR_DOWN_SHIFT 16 + + /* The output pin values BSC_SEL which select the I2C for this port + in the I2C Mux */ + #define PORT_HW_CFG_E3_I2C_MUX0_MASK 0x01000000 + #define PORT_HW_CFG_E3_I2C_MUX1_MASK 0x02000000 + + + /* + * The input pin I_FAULT which indicates over-current has occurred. + * Use the PIN_CFG_XXX defines on top + */ + u32 e3_cmn_pin_cfg1; /* 0x170 */ + #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF + #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 + u32 reserved0[7]; /* 0x174 */ + + u32 aeu_int_mask; /* 0x190 */ + + u32 media_type; /* 0x194 */ + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK 0x000000FF + #define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT 0 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK 0x0000FF00 + #define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT 8 + + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK 0x00FF0000 + #define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT 16 + + /* 4 times 16 bits for all 4 lanes. In case external PHY is present + (not direct mode), those values will not take effect on the 4 XGXS + lanes. For some external PHYs (such as 8706 and 8726) the values + will be used to configure the external PHY; in those cases, not + all 4 values are needed.
*/ + u16 xgxs_config_rx[4]; /* 0x198 */ + u16 xgxs_config_tx[4]; /* 0x1A0 */ + + /* For storing the FCoE MAC in shared memory */ + u32 fcoe_fip_mac_upper; + #define PORT_HW_CFG_FCOE_UPPERMAC_MASK 0x0000ffff + #define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT 0 + u32 fcoe_fip_mac_lower; + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 Reserved1[49]; /* 0x1C0 */ + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default), + 84833 only */ + u32 xgbt_phy_cfg; /* 0x284 */ + #define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK 0x000000FF + #define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT 0 + + u32 default_cfg; /* 0x288 */ + #define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 + #define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 + #define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 + #define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 + #define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 + + #define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C + #define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 + #define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 + #define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 + #define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c + + #define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 + #define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 + #define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 + #define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 + #define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 + + #define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 + #define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 + #define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 + #define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 + #define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 + #define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + + /* When the KR link is required to be forced to a mode which is not + KR-compliant, this parameter determines what the trigger for it is. + When GPIO is selected, a low input will force the speed. Currently + the default speed is 1G. In the future, it may be widened to select + the forced speed with another parameter. Note that when force-1G is + enabled, it overrides option 56: the Link Speed option.
*/ + #define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 + #define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 + #define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 + #define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 + #define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 + /* Enable to determine with which GPIO to reset the external phy */ + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 + #define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 + /* Enable BAM on KR */ -#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 -#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 -#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 -#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 /* Enable Common Mode Sense */ -#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 -#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 -#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 -#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + #define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 + #define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 + #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 + #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + + /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */ + #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000 + #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22 + #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000 + #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000 + + /* Determine the Serdes electrical interface */ + #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 + #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 + #define PORT_HW_CFG_NET_SERDES_IF_SGMII 0x00000000 + #define PORT_HW_CFG_NET_SERDES_IF_XFI 0x01000000 + #define PORT_HW_CFG_NET_SERDES_IF_SFI 0x02000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR 0x03000000 + #define PORT_HW_CFG_NET_SERDES_IF_DXGXS 0x04000000 + #define PORT_HW_CFG_NET_SERDES_IF_KR2 0x05000000 + u32 speed_capability_mask2; /* 0x28C */ -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 -#define 
PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12G 0x00000080 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_12_DOT_5G 0x00000100 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_13G 0x00000200 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_15G 0x00000400 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_16G 0x00000800 - -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12G 0x00800000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_12_DOT_5G 0x01000000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_13G 0x02000000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_15G 0x04000000 -#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_16G 0x08000000 - - /* In the case where two media types (e.g. copper and fiber) are - present and electrically active at the same time, PHY Selection - will determine which of the two PHYs will be designated as the - Active PHY and used for a connection to the network. */ - u32 multi_phy_config; /* 0x290 */ -#define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 -#define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 -#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 -#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 -#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 -#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 -#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 - - /* When enabled, all second phy nvram parameters will be swapped - with the first phy parameters */ -#define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 -#define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 -#define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 -#define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 - - - /* Address of the second external phy */ - u32 external_phy_config2; /* 0x294 */ -#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF -#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 - - /* The second XGXS external PHY type */ -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 -#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 - - /* 4 times 16 bits for all 4 lanes. 
For some external PHYs (such as - 8706, 8726 and 8727) not all 4 values are needed. */ - u16 xgxs_config2_rx[4]; /* 0x296 */ - u16 xgxs_config2_tx[4]; /* 0x2A0 */ + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3__ 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3___ 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G 0x00000080 + + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK 0xFFFF0000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0__ 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0___ 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G 0x00800000 + + + /* In the case where two media types (e.g. copper and fiber) are + present and electrically active at the same time, PHY Selection + will determine which of the two PHYs will be designated as the + Active PHY and used for a connection to the network. */ + u32 multi_phy_config; /* 0x290 */ + #define PORT_HW_CFG_PHY_SELECTION_MASK 0x00000007 + #define PORT_HW_CFG_PHY_SELECTION_SHIFT 0 + #define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT 0x00000000 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY 0x00000001 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY 0x00000002 + #define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003 + #define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004 + + /* When enabled, all second phy nvram parameters will be swapped + with the first phy parameters */ + #define PORT_HW_CFG_PHY_SWAPPED_MASK 0x00000008 + #define PORT_HW_CFG_PHY_SWAPPED_SHIFT 3 + #define PORT_HW_CFG_PHY_SWAPPED_DISABLED 0x00000000 + #define PORT_HW_CFG_PHY_SWAPPED_ENABLED 0x00000008 + + + /* Address of the second external phy */ + u32 external_phy_config2; /* 0x294 */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK 0x000000FF + #define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT 0 + + /* The second XGXS external PHY type */ + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK 0x0000FF00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833 0x00000d00 + #define 
PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 + + + /* 4 times 16 bits for all 4 lanes. For some external PHYs (such as + 8706, 8726 and 8727) not all 4 values are needed. */ + u16 xgxs_config2_rx[4]; /* 0x296 */ + u16 xgxs_config2_tx[4]; /* 0x2A0 */ u32 lane_config; -#define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff -#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 - -#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff -#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 -#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 -#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 -#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 -#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 - /* AN and forced */ -#define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b - /* forced only */ -#define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 - /* forced only */ -#define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 - /* forced only */ -#define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 - /* Indicate whether to swap the external phy polarity */ -#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 -#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 -#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASK 0x0000ffff + #define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT 0 + /* AN and forced */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01230123 0x00001b1b + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_01233210 0x00001be4 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_31203120 0x0000d8d8 + /* forced only */ + #define PORT_HW_CFG_LANE_SWAP_CFG_32103210 0x0000e4e4 + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK 0x000000ff + #define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT 0 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK 0x0000ff00 + #define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT 8 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK 0x0000c000 + #define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT 14 + + /* Indicate whether to swap the external phy polarity */ + #define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK 0x00010000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED 0x00000000 + #define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED 0x00010000 + u32 external_phy_config; -#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 -#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 -#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 -#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 -#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 - -#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 -#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 - -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 -#define 
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 -#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 - -#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff -#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff + #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT 0 + + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK 0x0000ff00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT 8 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071 0x00000100 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072 0x00000200 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073 0x00000300 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705 0x00000400 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726 0x00000600 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640 0x00000c00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 + #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 + + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK 0x00ff0000 + #define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT 16 + + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK 0xff000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT 24 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT 0x00000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482 0x01000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD 0x02000000 + #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN 0xff000000 u32 speed_capability_mask; -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12G 0x00800000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_12_5G 0x01000000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_13G 0x02000000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_15G 0x04000000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_16G 0x08000000 -#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 - -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 
-#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12G 0x00000080 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_12_5G 0x00000100 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_13G 0x00000200 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_15G 0x00000400 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_16G 0x00000800 -#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 - - u32 reserved[2]; + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK 0x0000ffff + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT 0 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL 0x00000001 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF 0x00000002 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF 0x00000004 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL 0x00000008 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G 0x00000010 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G 0x00000020 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G 0x00000040 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G 0x00000080 + #define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED 0x0000f000 + + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK 0xffff0000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT 16 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL 0x00010000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF 0x00020000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF 0x00040000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL 0x00080000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G 0x00100000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G 0x00200000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G 0x00400000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G 0x00800000 + #define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED 0xf0000000 + + /* A place to hold the original MAC address as a backup */ + u32 backup_mac_upper; /* 0x2B4 */ + u32 backup_mac_lower; /* 0x2B8 */ }; /**************************************************************************** - * Shared Feature configuration * + * Shared Feature configuration * ****************************************************************************/ -struct shared_feat_cfg { /* NVRAM Offset */ +struct shared_feat_cfg { /* NVRAM Offset */ + + u32 config; /* 0x450 */ + #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 + + /* Use NVRAM values instead of HW default values */ + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \ + 0x00000002 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \ + 0x00000000 + #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \ + 0x00000002 - u32 config; /* 0x450 */ -#define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK 0x00000008 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO 0x00000000 + #define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM 0x00000008 - /* Use the values from options 47 and 48 instead of the HW default - values */ -#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 -#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 + #define SHARED_FEAT_CFG_NCSI_ID_MASK 0x00000030 + #define SHARED_FEAT_CFG_NCSI_ID_SHIFT 4 -#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 -#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 -#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 -#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 -#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 -#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + /* Override the OTP back to single function mode. 
When using GPIO, + high means only SF, 0 is according to CLP configuration */ + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 + + /* The interval in seconds between sending LLDP packets. Set to zero + to disable the feature */ + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK 0x00ff0000 + #define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT 16 + + /* The assigned device type ID for LLDP usage */ + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK 0xff000000 + #define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT 24 }; /**************************************************************************** - * Port Feature configuration * + * Port Feature configuration * ****************************************************************************/ -struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ +struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ u32 config; -#define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f -#define PORT_FEATURE_BAR1_SIZE_SHIFT 0 -#define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 -#define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 -#define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 -#define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 -#define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 -#define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 -#define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 -#define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 -#define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 -#define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 -#define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a -#define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b -#define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c -#define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d -#define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e -#define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f -#define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 -#define PORT_FEATURE_BAR2_SIZE_SHIFT 4 -#define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 -#define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 -#define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 -#define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 -#define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 -#define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 -#define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 -#define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 -#define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 -#define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 -#define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 -#define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 -#define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 -#define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 -#define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 -#define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 -#define PORT_FEATURE_EN_SIZE_MASK 0x07000000 -#define PORT_FEATURE_EN_SIZE_SHIFT 24 -#define PORT_FEATURE_WOL_ENABLED 0x01000000 -#define PORT_FEATURE_MBA_ENABLED 0x02000000 -#define PORT_FEATURE_MFW_ENABLED 0x04000000 - - /* Reserved bits: 28-29 */ - /* Check the optic vendor via i2c against a list of approved modules - in a separate nvram image */ -#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xE0000000 -#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 -#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT 0x00000000 -#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER 0x20000000 -#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 -#define 
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 - + #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f + #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 + #define PORT_FEATURE_BAR1_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_BAR1_SIZE_64K 0x00000001 + #define PORT_FEATURE_BAR1_SIZE_128K 0x00000002 + #define PORT_FEATURE_BAR1_SIZE_256K 0x00000003 + #define PORT_FEATURE_BAR1_SIZE_512K 0x00000004 + #define PORT_FEATURE_BAR1_SIZE_1M 0x00000005 + #define PORT_FEATURE_BAR1_SIZE_2M 0x00000006 + #define PORT_FEATURE_BAR1_SIZE_4M 0x00000007 + #define PORT_FEATURE_BAR1_SIZE_8M 0x00000008 + #define PORT_FEATURE_BAR1_SIZE_16M 0x00000009 + #define PORT_FEATURE_BAR1_SIZE_32M 0x0000000a + #define PORT_FEATURE_BAR1_SIZE_64M 0x0000000b + #define PORT_FEATURE_BAR1_SIZE_128M 0x0000000c + #define PORT_FEATURE_BAR1_SIZE_256M 0x0000000d + #define PORT_FEATURE_BAR1_SIZE_512M 0x0000000e + #define PORT_FEATURE_BAR1_SIZE_1G 0x0000000f + #define PORT_FEATURE_BAR2_SIZE_MASK 0x000000f0 + #define PORT_FEATURE_BAR2_SIZE_SHIFT 4 + #define PORT_FEATURE_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_BAR2_SIZE_64K 0x00000010 + #define PORT_FEATURE_BAR2_SIZE_128K 0x00000020 + #define PORT_FEATURE_BAR2_SIZE_256K 0x00000030 + #define PORT_FEATURE_BAR2_SIZE_512K 0x00000040 + #define PORT_FEATURE_BAR2_SIZE_1M 0x00000050 + #define PORT_FEATURE_BAR2_SIZE_2M 0x00000060 + #define PORT_FEATURE_BAR2_SIZE_4M 0x00000070 + #define PORT_FEATURE_BAR2_SIZE_8M 0x00000080 + #define PORT_FEATURE_BAR2_SIZE_16M 0x00000090 + #define PORT_FEATURE_BAR2_SIZE_32M 0x000000a0 + #define PORT_FEATURE_BAR2_SIZE_64M 0x000000b0 + #define PORT_FEATURE_BAR2_SIZE_128M 0x000000c0 + #define PORT_FEATURE_BAR2_SIZE_256M 0x000000d0 + #define PORT_FEATURE_BAR2_SIZE_512M 0x000000e0 + #define PORT_FEATURE_BAR2_SIZE_1G 0x000000f0 + + #define PORT_FEAT_CFG_DCBX_MASK 0x00000100 + #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 + #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 + + #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200 + #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9 + #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000 + #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200 + + #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 + #define PORT_FEATURE_EN_SIZE_SHIFT 24 + #define PORT_FEATURE_WOL_ENABLED 0x01000000 + #define PORT_FEATURE_MBA_ENABLED 0x02000000 + #define PORT_FEATURE_MFW_ENABLED 0x04000000 + + /* Advertise expansion ROM even if MBA is disabled */ + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK 0x08000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED 0x00000000 + #define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED 0x08000000 + + /* Check the optic vendor via i2c against a list of approved modules + in a separate nvram image */ + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK 0xe0000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT 29 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \ + 0x00000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \ + 0x20000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG 0x40000000 + #define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN 0x60000000 u32 wol_config; /* Default is used when driver sets to "auto" mode */ -#define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 -#define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 -#define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 -#define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 -#define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 -#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 -#define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 -#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 
0x00000008 -#define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 + #define PORT_FEATURE_WOL_DEFAULT_MASK 0x00000003 + #define PORT_FEATURE_WOL_DEFAULT_SHIFT 0 + #define PORT_FEATURE_WOL_DEFAULT_DISABLE 0x00000000 + #define PORT_FEATURE_WOL_DEFAULT_MAGIC 0x00000001 + #define PORT_FEATURE_WOL_DEFAULT_ACPI 0x00000002 + #define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI 0x00000003 + #define PORT_FEATURE_WOL_RES_PAUSE_CAP 0x00000004 + #define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP 0x00000008 + #define PORT_FEATURE_WOL_ACPI_UPON_MGMT 0x00000010 u32 mba_config; -#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 0x00000003 -#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 -#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 -#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 -#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 -#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 -#define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 -#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 -#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 -#define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 -#define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 -#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 -#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 -#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 -#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 -#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 -#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 -#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 -#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 -#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 -#define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 -#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 -#define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 -#define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 -#define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 -#define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 -#define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 -#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 -#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 -#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 -#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KX4 0x20000000 -#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_KR 0x24000000 -#define PORT_FEATURE_MBA_LINK_SPEED_12GBPS 0x28000000 -#define PORT_FEATURE_MBA_LINK_SPEED_12_5GBPS 0x2c000000 -#define PORT_FEATURE_MBA_LINK_SPEED_13GBPS 0x30000000 -#define PORT_FEATURE_MBA_LINK_SPEED_15GBPS 0x34000000 -#define PORT_FEATURE_MBA_LINK_SPEED_16GBPS 0x38000000 - + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK 
0x00000007 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT 0 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE 0x00000000 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL 0x00000001 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP 0x00000002 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB 0x00000003 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT 0x00000004 + #define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE 0x00000007 + + #define PORT_FEATURE_MBA_BOOT_RETRY_MASK 0x00000038 + #define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT 3 + + #define PORT_FEATURE_MBA_RES_PAUSE_CAP 0x00000100 + #define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP 0x00000200 + #define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE 0x00000400 + #define PORT_FEATURE_MBA_HOTKEY_MASK 0x00000800 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_S 0x00000000 + #define PORT_FEATURE_MBA_HOTKEY_CTRL_B 0x00000800 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK 0x000ff000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT 12 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED 0x00000000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K 0x00001000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K 0x00002000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K 0x00003000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K 0x00004000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K 0x00005000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K 0x00006000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K 0x00007000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K 0x00008000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K 0x00009000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M 0x0000a000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M 0x0000b000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M 0x0000c000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M 0x0000d000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M 0x0000e000 + #define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M 0x0000f000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK 0x00f00000 + #define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT 20 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK 0x03000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT 24 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO 0x00000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS 0x01000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H 0x02000000 + #define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H 0x03000000 + #define PORT_FEATURE_MBA_LINK_SPEED_MASK 0x3c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_SHIFT 26 + #define PORT_FEATURE_MBA_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10HD 0x04000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10FD 0x08000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100HD 0x0c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_100FD 0x10000000 + #define PORT_FEATURE_MBA_LINK_SPEED_1GBPS 0x14000000 + #define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS 0x18000000 + #define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4 0x1c000000 + #define PORT_FEATURE_MBA_LINK_SPEED_20GBPS 0x20000000 u32 bmc_config; -#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 -#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK 0x00000001 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT 0x00000000 + #define PORT_FEATURE_BMC_LINK_OVERRIDE_EN 0x00000001 u32 mba_vlan_cfg; -#define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff -#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 -#define PORT_FEATURE_MBA_VLAN_EN 0x00010000 + #define PORT_FEATURE_MBA_VLAN_TAG_MASK 0x0000ffff + #define PORT_FEATURE_MBA_VLAN_TAG_SHIFT 0 + #define PORT_FEATURE_MBA_VLAN_EN 0x00010000 u32 resource_cfg; -#define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 
-#define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 -#define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 -#define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 -#define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 + #define PORT_FEATURE_RESOURCE_CFG_VALID 0x00000001 + #define PORT_FEATURE_RESOURCE_CFG_DIAG 0x00000002 + #define PORT_FEATURE_RESOURCE_CFG_L2 0x00000004 + #define PORT_FEATURE_RESOURCE_CFG_ISCSI 0x00000008 + #define PORT_FEATURE_RESOURCE_CFG_RDMA 0x00000010 u32 smbus_config; - /* Obsolete */ -#define PORT_FEATURE_SMBUS_EN 0x00000001 -#define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe -#define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 - - u32 reserved1; + #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe + #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 + + u32 vf_config; + #define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK 0x0000000f + #define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT 0 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED 0x00000000 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4K 0x00000001 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8K 0x00000002 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16K 0x00000003 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32K 0x00000004 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64K 0x00000005 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_128K 0x00000006 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_256K 0x00000007 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_512K 0x00000008 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_1M 0x00000009 + #define PORT_FEAT_CFG_VF_BAR2_SIZE_2M 0x0000000a + #define PORT_FEAT_CFG_VF_BAR2_SIZE_4M 0x0000000b + #define PORT_FEAT_CFG_VF_BAR2_SIZE_8M 0x0000000c + #define PORT_FEAT_CFG_VF_BAR2_SIZE_16M 0x0000000d + #define PORT_FEAT_CFG_VF_BAR2_SIZE_32M 0x0000000e + #define PORT_FEAT_CFG_VF_BAR2_SIZE_64M 0x0000000f u32 link_config; /* Used as HW defaults for the driver */ -#define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 -#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 - /* (forced) low speed switch (< 10G) */ -#define PORT_FEATURE_CON_SWITCH_1G_SWITCH 0x00000000 - /* (forced) high speed switch (>= 10G) */ -#define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 -#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 -#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 - -#define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 -#define PORT_FEATURE_LINK_SPEED_SHIFT 16 -#define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 -#define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 -#define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 -#define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 -#define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 -#define PORT_FEATURE_LINK_SPEED_1G 0x00050000 -#define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 -#define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 -#define PORT_FEATURE_LINK_SPEED_10G_KX4 0x00080000 -#define PORT_FEATURE_LINK_SPEED_10G_KR 0x00090000 -#define PORT_FEATURE_LINK_SPEED_12G 0x000a0000 -#define PORT_FEATURE_LINK_SPEED_12_5G 0x000b0000 -#define PORT_FEATURE_LINK_SPEED_13G 0x000c0000 -#define PORT_FEATURE_LINK_SPEED_15G 0x000d0000 -#define PORT_FEATURE_LINK_SPEED_16G 0x000e0000 - -#define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 -#define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 -#define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 -#define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 -#define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 -#define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 -#define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 + #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 + #define PORT_FEATURE_CONNECTED_SWITCH_SHIFT 24 + /* (forced) low speed switch (< 10G) */ + #define PORT_FEATURE_CON_SWITCH_1G_SWITCH 
0x00000000 + /* (forced) high speed switch (>= 10G) */ + #define PORT_FEATURE_CON_SWITCH_10G_SWITCH 0x01000000 + #define PORT_FEATURE_CON_SWITCH_AUTO_DETECT 0x02000000 + #define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT 0x03000000 + + #define PORT_FEATURE_LINK_SPEED_MASK 0x000f0000 + #define PORT_FEATURE_LINK_SPEED_SHIFT 16 + #define PORT_FEATURE_LINK_SPEED_AUTO 0x00000000 + #define PORT_FEATURE_LINK_SPEED_10M_FULL 0x00010000 + #define PORT_FEATURE_LINK_SPEED_10M_HALF 0x00020000 + #define PORT_FEATURE_LINK_SPEED_100M_HALF 0x00030000 + #define PORT_FEATURE_LINK_SPEED_100M_FULL 0x00040000 + #define PORT_FEATURE_LINK_SPEED_1G 0x00050000 + #define PORT_FEATURE_LINK_SPEED_2_5G 0x00060000 + #define PORT_FEATURE_LINK_SPEED_10G_CX4 0x00070000 + #define PORT_FEATURE_LINK_SPEED_20G 0x00080000 + + #define PORT_FEATURE_FLOW_CONTROL_MASK 0x00000700 + #define PORT_FEATURE_FLOW_CONTROL_SHIFT 8 + #define PORT_FEATURE_FLOW_CONTROL_AUTO 0x00000000 + #define PORT_FEATURE_FLOW_CONTROL_TX 0x00000100 + #define PORT_FEATURE_FLOW_CONTROL_RX 0x00000200 + #define PORT_FEATURE_FLOW_CONTROL_BOTH 0x00000300 + #define PORT_FEATURE_FLOW_CONTROL_NONE 0x00000400 /* The default for MCP link configuration, - uses the same defines as link_config */ + uses the same defines as link_config */ u32 mfw_wol_link_cfg; + /* The default for the driver of the second external phy, - uses the same defines as link_config */ - u32 link_config2; /* 0x47C */ + uses the same defines as link_config */ + u32 link_config2; /* 0x47C */ /* The default for MCP of the second external phy, - uses the same defines as link_config */ - u32 mfw_wol_link_cfg2; /* 0x480 */ + uses the same defines as link_config */ + u32 mfw_wol_link_cfg2; /* 0x480 */ - u32 Reserved2[17]; /* 0x484 */ + u32 Reserved2[17]; /* 0x484 */ }; /**************************************************************************** - * Device Information * + * Device Information * ****************************************************************************/ -struct shm_dev_info { /* size */ +struct shm_dev_info { /* size */ u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ - struct shared_hw_cfg shared_hw_config; /* 40 */ + struct shared_hw_cfg shared_hw_config; /* 40 */ - struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ + struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ - struct shared_feat_cfg shared_feature_config; /* 4 */ + struct shared_feat_cfg shared_feature_config; /* 4 */ - struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ + struct port_feat_cfg port_feature_config[PORT_MAX];/* 116*2=232 */ }; -#define FUNC_0 0 -#define FUNC_1 1 -#define FUNC_2 2 -#define FUNC_3 3 -#define FUNC_4 4 -#define FUNC_5 5 -#define FUNC_6 6 -#define FUNC_7 7 -#define E1_FUNC_MAX 2 -#define E1H_FUNC_MAX 8 -#define E2_FUNC_MAX 4 /* per path */ - -#define VN_0 0 -#define VN_1 1 -#define VN_2 2 -#define VN_3 3 -#define E1VN_MAX 1 -#define E1HVN_MAX 4 +#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) + #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." 
+#endif -#define E2_VF_MAX 64 +#define FUNC_0 0 +#define FUNC_1 1 +#define FUNC_2 2 +#define FUNC_3 3 +#define FUNC_4 4 +#define FUNC_5 5 +#define FUNC_6 6 +#define FUNC_7 7 +#define E1_FUNC_MAX 2 +#define E1H_FUNC_MAX 8 +#define E2_FUNC_MAX 4 /* per path */ + +#define VN_0 0 +#define VN_1 1 +#define VN_2 2 +#define VN_3 3 +#define E1VN_MAX 1 +#define E1HVN_MAX 4 + +#define E2_VF_MAX 64 /* HC_REG_VF_CONFIGURATION_SIZE */ /* This value (in milliseconds) determines the frequency of the driver * issuing the PULSE message code. The firmware monitors this periodic * pulse to determine when to switch to an OS-absent mode. */ -#define DRV_PULSE_PERIOD_MS 250 +#define DRV_PULSE_PERIOD_MS 250 /* This value (in milliseconds) determines how long the driver should * wait for an acknowledgement from the firmware before timing out. Once * the firmware has timed out, the driver will assume there is no firmware * running and there won't be any firmware-driver synchronization during a * driver reset. */ -#define FW_ACK_TIME_OUT_MS 5000 +#define FW_ACK_TIME_OUT_MS 5000 -#define FW_ACK_POLL_TIME_MS 1 +#define FW_ACK_POLL_TIME_MS 1 -#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) +#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) /* LED Blink rate that will achieve ~15.9Hz */ -#define LED_BLINK_RATE_VAL 480 +#define LED_BLINK_RATE_VAL 480 /**************************************************************************** - * Driver <-> FW Mailbox * + * Driver <-> FW Mailbox * ****************************************************************************/ struct drv_port_mb { u32 link_status; /* Driver should update this field on any link change event */ -#define LINK_STATUS_LINK_FLAG_MASK 0x00000001 -#define LINK_STATUS_LINK_UP 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E -#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1) -#define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1) - -#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 - -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 - -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 -#define 
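
FW_ACK_NUM_OF_POLL above is simply the total timeout divided by the poll interval (5000 ms / 1 ms = 5000 iterations), so waiting for a firmware ack is a bounded polling loop; once it expires the driver assumes no firmware is running. A runnable sketch of that loop, with a stub standing in for the real mailbox read (the function names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_ACK_TIME_OUT_MS      5000
#define FW_ACK_POLL_TIME_MS     1
#define FW_ACK_NUM_OF_POLL      (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)

/* Stand-in for the real mailbox read: pretend fw answers on the 3rd poll. */
static uint32_t read_fw_ack(void)
{
        static int calls;
        return ++calls >= 3 ? 1 : 0;
}

static bool wait_for_fw_ack(void)
{
        for (int cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
                if (read_fw_ack())
                        return true;    /* firmware responded */
                /* the real driver sleeps FW_ACK_POLL_TIME_MS here */
        }
        return false;   /* timed out: assume no firmware is running */
}

int main(void)
{
        printf("fw ack: %s\n", wait_for_fw_ack() ? "ok" : "timeout");
        return 0;
}
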
LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 -#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 -#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 -#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 -#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 - -#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 -#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 - -#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 -#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 - -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 -#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) -#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) -#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) -#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) - -#define LINK_STATUS_SERDES_LINK 0x00100000 - -#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 -#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 -#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 -#define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000 -#define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000 -#define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000 -#define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 -#define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 + #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 + #define LINK_STATUS_LINK_UP 0x00000001 + #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E + #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD (11<<1) + #define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD (11<<1) + + #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 + #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + + #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 + #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 + #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + + #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 + #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 + #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 + #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 + #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 + #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 + #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 + + #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 + #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 + + #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 + #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 + + #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 + #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) + #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) + #define 
LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18)
+ #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18)
+
+ #define LINK_STATUS_SERDES_LINK 0x00100000
+
+ #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000
+ #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000
+ #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000
+ #define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE 0x10000000
+
+ #define LINK_STATUS_PFC_ENABLED 0x20000000

 u32 port_stx;

@@ -887,138 +1217,159 @@ struct drv_port_mb {
 struct drv_func_mb {

 u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
-#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_DCC_OK 0x30000000
-#define DRV_MSG_CODE_DCC_FAILURE 0x31000000
-#define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
-#define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
-#define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
-#define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
-#define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
-#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
-#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
- /*
- * The optic module verification commands require bootcode
- * v5.0.6 or later
- */
-#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
-#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_MASK 0xffff0000
+ #define DRV_MSG_CODE_LOAD_REQ 0x10000000
+ #define DRV_MSG_CODE_LOAD_DONE 0x11000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000
+ #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000
+ #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+ #define DRV_MSG_CODE_DCC_OK 0x30000000
+ #define DRV_MSG_CODE_DCC_FAILURE 0x31000000
+ #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000
+ #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000
+ #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000
+ #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000
+ #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000
+ #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
+ #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
 /*
- * The specific optic module verification command requires bootcode
- * v5.2.12 or later
+ * The optic module verification command requires bootcode
+ * v5.0.6 or later; the specific optic module verification command
+ * requires bootcode v5.2.12 or later
 */
-#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
-#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL 0xa0000000
+ #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
+ #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
+ #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
+ #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014

-#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
-#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
-#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
-#define REQ_BC_VER_4_SET_MF_BW 0x00060202
-#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
-#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
-#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
-#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
-#define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+ #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
+ #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000

-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+ #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+
+ #define 
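
Decoding link_status follows directly from the defines above: bit 0 is the up/down flag and bits 1-4 carry the speed/duplex code (note the *TFD/*XFD pairs deliberately share codes, so only one alias per speed can appear in a C switch). A small illustrative decoder, assuming nothing beyond the defines themselves:

#include <stdint.h>
#include <stdio.h>

#define LINK_STATUS_LINK_UP                     0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK       0x0000001E
#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD     (10<<1)
#define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD     (11<<1)

int main(void)
{
        /* sample status word: link up at 10G full duplex */
        uint32_t link_status = LINK_STATUS_LINK_UP |
                               LINK_STATUS_SPEED_AND_DUPLEX_10GXFD;

        if (!(link_status & LINK_STATUS_LINK_UP)) {
                printf("link down\n");
                return 0;
        }
        switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
        case LINK_STATUS_SPEED_AND_DUPLEX_10GXFD:
                printf("link up, 10G full duplex\n");
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20GXFD:
                printf("link up, 20G full duplex\n");
                break;
        default:
                printf("link up, other speed code\n");
                break;
        }
        return 0;
}
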
DRV_MSG_CODE_SET_MF_BW 0xe0000000 + #define REQ_BC_VER_4_SET_MF_BW 0x00060202 + #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 + + #define DRV_MSG_CODE_LINK_STATUS_CHANGED 0x01000000 + + #define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 + #define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 + #define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 + #define BIOS_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 + + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; + #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 + #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 u32 fw_mb_header; -#define FW_MSG_CODE_MASK 0xffff0000 -#define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 -#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 -#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 - /* Load common chip is supported from bc 6.0.0 */ -#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 -#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 -#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 -#define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 -#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 -#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 -#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 -#define FW_MSG_CODE_DCC_DONE 0x30100000 -#define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 -#define FW_MSG_CODE_DIAG_REFUSE 0x50200000 -#define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 -#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 -#define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 -#define FW_MSG_CODE_GET_KEY_DONE 0x80100000 -#define FW_MSG_CODE_NO_KEY 0x80f00000 -#define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 -#define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 -#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 -#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 -#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 -#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 -#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000 -#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 -#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 - -#define FW_MSG_CODE_LIC_CHALLENGE 0xff010000 -#define FW_MSG_CODE_LIC_RESPONSE 0xff020000 -#define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000 -#define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000 - -#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + #define FW_MSG_CODE_MASK 0xffff0000 + #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 + #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 + #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 + /* Load common chip is supported from bc 6.0.0 */ + #define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP 0x00060000 + #define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP 0x10130000 + + #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 + #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 + #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 + #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 + #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 + #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 + #define FW_MSG_CODE_DCC_DONE 0x30100000 + #define FW_MSG_CODE_LLDP_DONE 0x40100000 + #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 + #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 + #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 + #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 + #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 + #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 + #define FW_MSG_CODE_NO_KEY 0x80f00000 + #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 + #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 + #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 + #define 
FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000
+ #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000
+ #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS 0xa0100000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
+ #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
+ #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+
+ #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
+ #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
+
+ #define FW_MSG_CODE_LINK_CHANGED_ACK 0x01100000
+
+ #define FW_MSG_CODE_LIC_CHALLENGE 0xff010000
+ #define FW_MSG_CODE_LIC_RESPONSE 0xff020000
+ #define FW_MSG_CODE_VIRT_MAC_PRIM 0xff030000
+ #define FW_MSG_CODE_VIRT_MAC_ISCSI 0xff040000
+
+ #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff

 u32 fw_mb_param;

 u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
- /* The system time is in the format of
- * (year-2001)*12*32 + month*32 + day. */
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
- /* Indicate to the firmware not to go into the
+ #define DRV_PULSE_SEQ_MASK 0x00007fff
+ #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+ /*
+ * The system time is in the format of
+ * (year-2001)*12*32 + month*32 + day.
+ */
+ #define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+ /*
+ * Indicate to the firmware not to go into the
 * OS-absent mode when it is not getting driver pulse.
- * This is used for debugging as well for PXE(MBA). */
+ * This is used for debugging as well as for PXE(MBA).
+ */

 u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+ #define MCP_PULSE_SEQ_MASK 0x00007fff
+ #define MCP_PULSE_ALWAYS_ALIVE 0x00008000
 /* Indicates to the driver not to assert due to lack
 * of MCP response */
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+ #define MCP_EVENT_MASK 0xffff0000
+ #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000

 u32 iscsi_boot_signature;
 u32 iscsi_boot_block_offset;

 u32 drv_status;
-#define DRV_STATUS_PMF 0x00000001
-#define DRV_STATUS_SET_MF_BW 0x00000004
-
-#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
-#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
-#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
-#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
-#define DRV_STATUS_DCC_RESERVED1 0x00000800
-#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
-#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
-#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
-#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_PMF 0x00000001
+ #define DRV_STATUS_VF_DISABLED 0x00000002
+ #define DRV_STATUS_SET_MF_BW 0x00000004
+ #define DRV_STATUS_LINK_EVENT 0x00000008
+
+ #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
+ #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
+ #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200
+ #define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS 0x00000400
+ #define DRV_STATUS_DCC_RESERVED1 0x00000800
+ #define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
+ #define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
+
+ #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
+ #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000

 u32 virt_mac_upper;
-#define VIRT_MAC_SIGN_MASK 0xffff0000
-#define VIRT_MAC_SIGNATURE 0x564d0000
+ #define VIRT_MAC_SIGN_MASK 0xffff0000
+ #define VIRT_MAC_SIGNATURE 0x564d0000

 u32 virt_mac_lower;
};

/****************************************************************************
- * Management firmware state *
+ * Management firmware state *
****************************************************************************/ /* Allocate 440 bytes for management firmware */ -#define MGMTFW_STATE_WORD_SIZE 110 +#define MGMTFW_STATE_WORD_SIZE 110 struct mgmtfw_state { u32 opaque[MGMTFW_STATE_WORD_SIZE]; @@ -1026,25 +1377,25 @@ struct mgmtfw_state { /**************************************************************************** - * Multi-Function configuration * + * Multi-Function configuration * ****************************************************************************/ struct shared_mf_cfg { u32 clp_mb; -#define SHARED_MF_CLP_SET_DEFAULT 0x00000000 + #define SHARED_MF_CLP_SET_DEFAULT 0x00000000 /* set by CLP */ -#define SHARED_MF_CLP_EXIT 0x00000001 + #define SHARED_MF_CLP_EXIT 0x00000001 /* set by MCP */ -#define SHARED_MF_CLP_EXIT_DONE 0x00010000 + #define SHARED_MF_CLP_EXIT_DONE 0x00010000 }; struct port_mf_cfg { - u32 dynamic_cfg; /* device control channel */ -#define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff -#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 -#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK + u32 dynamic_cfg; /* device control channel */ + #define PORT_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 + #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK u32 reserved[3]; @@ -1055,57 +1406,58 @@ struct func_mf_cfg { u32 config; /* E/R/I/D */ /* function 0 of each port cannot be hidden */ -#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 + #define FUNC_MF_CFG_FUNC_HIDE 0x00000001 -#define FUNC_MF_CFG_PROTOCOL_MASK 0x00000007 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 -#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 -#define FUNC_MF_CFG_PROTOCOL_DEFAULT\ - FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA + #define FUNC_MF_CFG_PROTOCOL_MASK 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000000 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000002 + #define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004 + #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000006 + #define FUNC_MF_CFG_PROTOCOL_DEFAULT \ + FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA -#define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 + #define FUNC_MF_CFG_FUNC_DISABLED 0x00000008 + #define FUNC_MF_CFG_FUNC_DELETED 0x00000010 /* PRI */ /* 0 - low priority, 3 - high priority */ -#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 -#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 -#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK 0x00000300 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT 8 + #define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT 0x00000000 /* MINBW, MAXBW */ /* value range - 0..100, increments in 100Mbps */ -#define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 -#define FUNC_MF_CFG_MIN_BW_SHIFT 16 -#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 -#define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 -#define FUNC_MF_CFG_MAX_BW_SHIFT 24 -#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 - - u32 mac_upper; /* MAC */ -#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff -#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 -#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + #define FUNC_MF_CFG_MIN_BW_MASK 0x00ff0000 + #define FUNC_MF_CFG_MIN_BW_SHIFT 16 + #define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 + #define FUNC_MF_CFG_MAX_BW_MASK 0xff000000 + #define FUNC_MF_CFG_MAX_BW_SHIFT 24 + #define FUNC_MF_CFG_MAX_BW_DEFAULT 0x64000000 + + u32 mac_upper; /* MAC */ + #define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff + #define FUNC_MF_CFG_UPPERMAC_SHIFT 0 + #define 
FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK u32 mac_lower; -#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + #define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff u32 e1hov_tag; /* VNI */ -#define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff -#define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 -#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK + #define FUNC_MF_CFG_E1HOV_TAG_MASK 0x0000ffff + #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 + #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK u32 reserved[2]; - }; /* This structure is not applicable and should not be accessed on 57711 */ struct func_ext_cfg { u32 func_cfg; -#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF -#define MACP_FUNC_CFG_FLAGS_SHIFT 0 -#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 -#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 -#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 -#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 + #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF + #define MACP_FUNC_CFG_FLAGS_SHIFT 0 + #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 + #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 + #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 + #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 u32 iscsi_mac_addr_upper; u32 iscsi_mac_addr_lower; @@ -1120,73 +1472,99 @@ struct func_ext_cfg { u32 fcoe_wwn_node_name_lower; u32 preserve_data; -#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) -#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) -#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) -#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) -#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) + #define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0) + #define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1) + #define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3) + #define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4) + #define MF_FUNC_CFG_PRESERVE_TX_BW (1<<5) }; struct mf_cfg { - struct shared_mf_cfg shared_mf_config; - struct port_mf_cfg port_mf_config[PORT_MAX]; - struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; - - struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; -}; - + struct shared_mf_cfg shared_mf_config; /* 0x4 */ + struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ + /* for all chips, there are 8 mf functions */ + struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ + /* + * Extended configuration per function - this array does not exist and + * should not be accessed on 57711 + */ + struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/ +}; /* 0x224 */ /**************************************************************************** - * Shared Memory Region * + * Shared Memory Region * ****************************************************************************/ -struct shmem_region { /* SharedMem Offset (size) */ +struct shmem_region { /* SharedMem Offset (size) */ - u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ -#define SHR_MEM_FORMAT_REV_ID ('A'<<24) -#define SHR_MEM_FORMAT_REV_MASK 0xff000000 + u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ + #define SHR_MEM_FORMAT_REV_MASK 0xff000000 + #define SHR_MEM_FORMAT_REV_ID ('A'<<24) /* validity bits */ -#define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 -#define SHR_MEM_VALIDITY_MB 0x00200000 -#define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 -#define SHR_MEM_VALIDITY_RESERVED 0x00000007 + #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 + #define SHR_MEM_VALIDITY_MB 0x00200000 + #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 + #define SHR_MEM_VALIDITY_RESERVED 0x00000007 /* One licensing bit should be set */ 
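
In the mac_upper/mac_lower pair above, the low 16 bits of mac_upper carry MAC bytes 0-1 and mac_lower carries bytes 2-5; an all-ones pair (the *_DEFAULT values) means no address has been provisioned. A sketch of the recombination (the helper name and sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Rebuild the 6-byte station address from the two shmem words (sketch). */
static void mf_cfg_to_mac(uint32_t mac_upper, uint32_t mac_lower,
                          uint8_t mac[6])
{
        mac[0] = (uint8_t)(mac_upper >> 8);
        mac[1] = (uint8_t)mac_upper;
        mac[2] = (uint8_t)(mac_lower >> 24);
        mac[3] = (uint8_t)(mac_lower >> 16);
        mac[4] = (uint8_t)(mac_lower >> 8);
        mac[5] = (uint8_t)mac_lower;
}

int main(void)
{
        uint8_t mac[6];

        mf_cfg_to_mac(0x0010, 0x18a7e5f2, mac); /* sample values */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
}
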
-#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
-#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
-#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
-#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
+ #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
+ #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
+ #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
+ #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
 /* Active MFW */
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
-#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0
+ #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0

- struct shm_dev_info dev_info; /* 0x8 (0x438) */
+ struct shm_dev_info dev_info; /* 0x8 (0x438) */

- struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+ struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */

 /* FW information (for internal FW use) */
- u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
- struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+ u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
+ struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */
+
+ struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */

- struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */
- struct drv_func_mb func_mb[]; /* 0x684
- (44*2/4/8=0x58/0xb0/0x160) */
+#ifdef BMAPI
+ /* This is a variable length array */
+ /* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[1]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+ /* the number of functions depends on the chip type */
+ struct drv_func_mb func_mb[]; /* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */

}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */

+/****************************************************************************
+ * Shared Memory 2 Region *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way: */
+/* 8 bit: PF ack */
+/* 64 bit: VF ack */
+/* 8 bit: iov_dis_ack */
+/* In order to maintain endianness in the mailbox hsi, we want to keep */
+/* using u32. The fw must have the VF ack right after the PF ack since */
+/* this is how it accesses the arrays (it always expects the VF to reside */
+/* after the PF, and that makes the calculation much easier for it). */
+/* In order to satisfy both limitations and keep the struct small, the */
+/* code will abuse the structure defined here to achieve the actual */
+/* partition above. */
+/****************************************************************************/
struct fw_flr_ack {
- u32 pf_ack;
- u32 vf_ack[1];
- u32 iov_dis_ack;
+ u32 pf_ack;
+ u32 vf_ack[1];
+ u32 iov_dis_ack;
};

struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- struct fw_flr_ack ack;
+ u32 aggint;
+ u32 opgen_addr;
+ struct fw_flr_ack ack;
};

/**** SUPPORT FOR SHMEM ARRAYS ***
@@ -1210,36 +1588,36 @@ struct fw_flr_mb {
 *
 * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
 *
- * | | | |
- * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
- * | | | |
+ * | | | |
+ * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | | | |
 *
 * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
 *
- * | | | |
- * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
- * | | | |
+ * | | | |
+ * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
+ * | | | |
 *
 * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
 *
- * | | | |
- * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
- * | | | |
+ * | | | |
+ * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
+ * | | | |
 */
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
 ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
 (((i)%((fb)/(eb))) * (eb)))

-#define SHMEM_ARRAY_GET(a, i, eb, fb) \
+#define SHMEM_ARRAY_GET(a, i, eb, fb) \
 ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
 SHMEM_ARRAY_MASK(eb))

-#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
do { \
 a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
- SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
 a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
- SHMEM_ARRAY_BITPOS(i, eb, fb)); \
+ SHMEM_ARRAY_BITPOS(i, eb, fb)); \
} while (0)

@@ -1263,23 +1641,30 @@ do { \
#define ISCSI_APP_IDX 1
#define PREDEFINED_APP_IDX_MAX 2

+
+/* Big/Little endian have the same representation. */
struct dcbx_ets_feature {
+ /*
+ * For Admin MIB - is this feature supported by the
+ * driver | For Local MIB - should this feature be enabled.
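
As a quick numeric check of SHMEM_ARRAY_BITPOS: for 4-bit elements with per-byte flipping (eb=4, fb=8), element 0 lands at bit 24 and element 1 at bit 28 of the first u32, i.e. in the topmost byte, matching the "1 | 0 | 3 | 2 ..." diagram above. A runnable sketch; SHMEM_ARRAY_ENTRY and SHMEM_ARRAY_MASK are reproduced here with their assumed definitions from elsewhere in this header:

#include <stdio.h>

/* assumed companion helpers from the same header */
#define SHMEM_ARRAY_MASK(eb)            ((1<<(eb))-1)
#define SHMEM_ARRAY_ENTRY(i, eb)        ((i)/(32/(eb)))
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
        ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
        (((i)%((fb)/(eb))) * (eb)))

int main(void)
{
        int i;

        /* 4-bit elements, flipped per byte: prints bits 24,28,16,20,... */
        for (i = 0; i < 8; i++)
                printf("i=%d -> u32 #%d, bit %2d\n",
                       i, SHMEM_ARRAY_ENTRY(i, 4),
                       SHMEM_ARRAY_BITPOS(i, 4, 8));
        return 0;
}
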
+ */ u32 enabled; u32 pg_bw_tbl[2]; u32 pri_pg_tbl[1]; }; +/* Driver structure in LE */ struct dcbx_pfc_feature { #ifdef __BIG_ENDIAN u8 pri_en_bitmap; -#define DCBX_PFC_PRI_0 0x01 -#define DCBX_PFC_PRI_1 0x02 -#define DCBX_PFC_PRI_2 0x04 -#define DCBX_PFC_PRI_3 0x08 -#define DCBX_PFC_PRI_4 0x10 -#define DCBX_PFC_PRI_5 0x20 -#define DCBX_PFC_PRI_6 0x40 -#define DCBX_PFC_PRI_7 0x80 + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 u8 pfc_caps; u8 reserved; u8 enabled; @@ -1288,39 +1673,41 @@ struct dcbx_pfc_feature { u8 reserved; u8 pfc_caps; u8 pri_en_bitmap; -#define DCBX_PFC_PRI_0 0x01 -#define DCBX_PFC_PRI_1 0x02 -#define DCBX_PFC_PRI_2 0x04 -#define DCBX_PFC_PRI_3 0x08 -#define DCBX_PFC_PRI_4 0x10 -#define DCBX_PFC_PRI_5 0x20 -#define DCBX_PFC_PRI_6 0x40 -#define DCBX_PFC_PRI_7 0x80 + #define DCBX_PFC_PRI_0 0x01 + #define DCBX_PFC_PRI_1 0x02 + #define DCBX_PFC_PRI_2 0x04 + #define DCBX_PFC_PRI_3 0x08 + #define DCBX_PFC_PRI_4 0x10 + #define DCBX_PFC_PRI_5 0x20 + #define DCBX_PFC_PRI_6 0x40 + #define DCBX_PFC_PRI_7 0x80 #endif }; struct dcbx_app_priority_entry { #ifdef __BIG_ENDIAN - u16 app_id; - u8 pri_bitmap; - u8 appBitfield; -#define DCBX_APP_ENTRY_VALID 0x01 -#define DCBX_APP_ENTRY_SF_MASK 0x30 -#define DCBX_APP_ENTRY_SF_SHIFT 4 -#define DCBX_APP_SF_ETH_TYPE 0x10 -#define DCBX_APP_SF_PORT 0x20 + u16 app_id; + u8 pri_bitmap; + u8 appBitfield; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 #elif defined(__LITTLE_ENDIAN) u8 appBitfield; -#define DCBX_APP_ENTRY_VALID 0x01 -#define DCBX_APP_ENTRY_SF_MASK 0x30 -#define DCBX_APP_ENTRY_SF_SHIFT 4 -#define DCBX_APP_SF_ETH_TYPE 0x10 -#define DCBX_APP_SF_PORT 0x20 - u8 pri_bitmap; - u16 app_id; + #define DCBX_APP_ENTRY_VALID 0x01 + #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_SF_ETH_TYPE 0x10 + #define DCBX_APP_SF_PORT 0x20 + u8 pri_bitmap; + u16 app_id; #endif }; + +/* FW structure in BE */ struct dcbx_app_priority_feature { #ifdef __BIG_ENDIAN u8 reserved; @@ -1336,302 +1723,403 @@ struct dcbx_app_priority_feature { struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; }; +/* FW structure in BE */ struct dcbx_features { + /* PG feature */ struct dcbx_ets_feature ets; + /* PFC feature */ struct dcbx_pfc_feature pfc; + /* APP feature */ struct dcbx_app_priority_feature app; }; +/* LLDP protocol parameters */ +/* FW structure in BE */ struct lldp_params { #ifdef __BIG_ENDIAN - u8 msg_fast_tx_interval; - u8 msg_tx_hold; - u8 msg_tx_interval; - u8 admin_status; -#define LLDP_TX_ONLY 0x01 -#define LLDP_RX_ONLY 0x02 -#define LLDP_TX_RX 0x03 -#define LLDP_DISABLED 0x04 - u8 reserved1; - u8 tx_fast; - u8 tx_crd_max; - u8 tx_crd; + u8 msg_fast_tx_interval; + u8 msg_tx_hold; + u8 msg_tx_interval; + u8 admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + u8 reserved1; + u8 tx_fast; + u8 tx_crd_max; + u8 tx_crd; #elif defined(__LITTLE_ENDIAN) - u8 admin_status; -#define LLDP_TX_ONLY 0x01 -#define LLDP_RX_ONLY 0x02 -#define LLDP_TX_RX 0x03 -#define LLDP_DISABLED 0x04 - u8 msg_tx_interval; - u8 msg_tx_hold; - u8 msg_fast_tx_interval; - u8 tx_crd; - u8 tx_crd_max; - u8 tx_fast; - u8 reserved1; + u8 
admin_status; + #define LLDP_TX_ONLY 0x01 + #define LLDP_RX_ONLY 0x02 + #define LLDP_TX_RX 0x03 + #define LLDP_DISABLED 0x04 + u8 msg_tx_interval; + u8 msg_tx_hold; + u8 msg_fast_tx_interval; + u8 tx_crd; + u8 tx_crd_max; + u8 tx_fast; + u8 reserved1; #endif -#define REM_CHASSIS_ID_STAT_LEN 4 -#define REM_PORT_ID_STAT_LEN 4 + #define REM_CHASSIS_ID_STAT_LEN 4 + #define REM_PORT_ID_STAT_LEN 4 + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */ u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. */ u32 peer_port_id[REM_PORT_ID_STAT_LEN]; }; struct lldp_dcbx_stat { -#define LOCAL_CHASSIS_ID_STAT_LEN 2 -#define LOCAL_PORT_ID_STAT_LEN 2 + #define LOCAL_CHASSIS_ID_STAT_LEN 2 + #define LOCAL_PORT_ID_STAT_LEN 2 + /* Holds local Chassis ID 8B payload of constant subtype 4. */ u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID 8B payload of constant subtype 3. */ u32 local_port_id[LOCAL_PORT_ID_STAT_LEN]; + /* Number of DCBX frames transmitted. */ u32 num_tx_dcbx_pkts; + /* Number of DCBX frames received. */ u32 num_rx_dcbx_pkts; }; +/* ADMIN MIB - DCBX local machine default configuration. */ struct lldp_admin_mib { - u32 ver_cfg_flags; -#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 -#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 -#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 -#define DCBX_ETS_RECO_TX_ENABLED 0x00000008 -#define DCBX_ETS_RECO_VALID 0x00000010 -#define DCBX_ETS_WILLING 0x00000020 -#define DCBX_PFC_WILLING 0x00000040 -#define DCBX_APP_WILLING 0x00000080 -#define DCBX_VERSION_CEE 0x00000100 -#define DCBX_VERSION_IEEE 0x00000200 -#define DCBX_DCBX_ENABLED 0x00000400 -#define DCBX_CEE_VERSION_MASK 0x0000f000 -#define DCBX_CEE_VERSION_SHIFT 12 -#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 -#define DCBX_CEE_MAX_VERSION_SHIFT 16 - struct dcbx_features features; -}; - + u32 ver_cfg_flags; + #define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001 + #define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002 + #define DCBX_APP_CONFIG_TX_ENABLED 0x00000004 + #define DCBX_ETS_RECO_TX_ENABLED 0x00000008 + #define DCBX_ETS_RECO_VALID 0x00000010 + #define DCBX_ETS_WILLING 0x00000020 + #define DCBX_PFC_WILLING 0x00000040 + #define DCBX_APP_WILLING 0x00000080 + #define DCBX_VERSION_CEE 0x00000100 + #define DCBX_VERSION_IEEE 0x00000200 + #define DCBX_DCBX_ENABLED 0x00000400 + #define DCBX_CEE_VERSION_MASK 0x0000f000 + #define DCBX_CEE_VERSION_SHIFT 12 + #define DCBX_CEE_MAX_VERSION_MASK 0x000f0000 + #define DCBX_CEE_MAX_VERSION_SHIFT 16 + struct dcbx_features features; +}; + +/* REMOTE MIB - remote machine DCBX configuration. 
*/ struct lldp_remote_mib { u32 prefix_seq_num; u32 flags; -#define DCBX_ETS_TLV_RX 0x00000001 -#define DCBX_PFC_TLV_RX 0x00000002 -#define DCBX_APP_TLV_RX 0x00000004 -#define DCBX_ETS_RX_ERROR 0x00000010 -#define DCBX_PFC_RX_ERROR 0x00000020 -#define DCBX_APP_RX_ERROR 0x00000040 -#define DCBX_ETS_REM_WILLING 0x00000100 -#define DCBX_PFC_REM_WILLING 0x00000200 -#define DCBX_APP_REM_WILLING 0x00000400 -#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 + #define DCBX_ETS_TLV_RX 0x00000001 + #define DCBX_PFC_TLV_RX 0x00000002 + #define DCBX_APP_TLV_RX 0x00000004 + #define DCBX_ETS_RX_ERROR 0x00000010 + #define DCBX_PFC_RX_ERROR 0x00000020 + #define DCBX_APP_RX_ERROR 0x00000040 + #define DCBX_ETS_REM_WILLING 0x00000100 + #define DCBX_PFC_REM_WILLING 0x00000200 + #define DCBX_APP_REM_WILLING 0x00000400 + #define DCBX_REMOTE_ETS_RECO_VALID 0x00001000 + #define DCBX_REMOTE_MIB_VALID 0x00002000 struct dcbx_features features; u32 suffix_seq_num; }; +/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */ struct lldp_local_mib { u32 prefix_seq_num; + /* Indicates if there is mismatch with negotiation results. */ u32 error; -#define DCBX_LOCAL_ETS_ERROR 0x00000001 -#define DCBX_LOCAL_PFC_ERROR 0x00000002 -#define DCBX_LOCAL_APP_ERROR 0x00000004 -#define DCBX_LOCAL_PFC_MISMATCH 0x00000010 -#define DCBX_LOCAL_APP_MISMATCH 0x00000020 + #define DCBX_LOCAL_ETS_ERROR 0x00000001 + #define DCBX_LOCAL_PFC_ERROR 0x00000002 + #define DCBX_LOCAL_APP_ERROR 0x00000004 + #define DCBX_LOCAL_PFC_MISMATCH 0x00000010 + #define DCBX_LOCAL_APP_MISMATCH 0x00000020 + #define DCBX_REMOTE_MIB_ERROR 0x00000040 struct dcbx_features features; u32 suffix_seq_num; }; /***END OF DCBX STRUCTURES DECLARATIONS***/ +struct ncsi_oem_fcoe_features { + u32 fcoe_features1; + #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF + #define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET 0 + + #define FCOE_FEATURES1_LOGINS_PER_PORT_MASK 0xFFFF0000 + #define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET 16 + + u32 fcoe_features2; + #define FCOE_FEATURES2_EXCHANGES_MASK 0x0000FFFF + #define FCOE_FEATURES2_EXCHANGES_OFFSET 0 + + #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK 0xFFFF0000 + #define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET 16 + + u32 fcoe_features3; + #define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK 0x0000FFFF + #define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET 0 + + #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK 0xFFFF0000 + #define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET 16 + + u32 fcoe_features4; + #define FCOE_FEATURES4_FEATURE_SETTINGS_MASK 0x0000000F + #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET 0 +}; + +struct ncsi_oem_data { + u32 driver_version[4]; + struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features; +}; + struct shmem2_region { - u32 size; - - u32 dcc_support; -#define SHMEM_DCC_SUPPORT_NONE 0x00000000 -#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 -#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 -#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 -#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 0x00000040 -#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080 -#define SHMEM_DCC_SUPPORT_DEFAULT SHMEM_DCC_SUPPORT_NONE - u32 ext_phy_fw_version2[PORT_MAX]; + u32 size; /* 0x0000 */ + + u32 dcc_support; /* 0x0004 */ + #define SHMEM_DCC_SUPPORT_NONE 0x00000000 + #define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV 0x00000001 + #define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV 0x00000004 + #define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV 0x00000008 + #define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV 
0x00000040
+ #define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV 0x00000080
+
+ u32 ext_phy_fw_version2[PORT_MAX]; /* 0x0008 */
 /*
 * For backwards compatibility, if the mf_cfg_addr does not exist
 * (the size field is smaller than 0xc) the mf_cfg resides at the
 * end of struct shmem_region
- */
- u32 mf_cfg_addr;
-#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
-
- struct fw_flr_mb flr_mb;
- u32 dcbx_lldp_params_offset;
-#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
- u32 dcbx_neg_res_offset;
-#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
- u32 dcbx_remote_mib_offset;
-#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
+ */
+ u32 mf_cfg_addr; /* 0x0010 */
+ #define SHMEM_MF_CFG_ADDR_NONE 0x00000000
+
+ struct fw_flr_mb flr_mb; /* 0x0014 */
+ u32 dcbx_lldp_params_offset; /* 0x0028 */
+ #define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
+ u32 dcbx_neg_res_offset; /* 0x002c */
+ #define SHMEM_DCBX_NEG_RES_NONE 0x00000000
+ u32 dcbx_remote_mib_offset; /* 0x0030 */
+ #define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
 /*
 * The other shmemX_base_addr holds the other path's shmem address
 * required for example in case of common phy init, or for path1 to know
 * the address of the mcp debug trace, which is located at an offset from
 * the shmem of path0
 */
- u32 other_shmem_base_addr;
- u32 other_shmem2_base_addr;
- u32 reserved1[E2_VF_MAX / 32];
- u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
- u32 dcbx_lldp_dcbx_stat_offset;
-#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+ u32 other_shmem_base_addr; /* 0x0034 */
+ u32 other_shmem2_base_addr; /* 0x0038 */
+ /*
+ * mcp_vf_disabled is set by the MCP to indicate to the driver which
+ * VFs were disabled/FLRed
+ */
+ u32 mcp_vf_disabled[E2_VF_MAX / 32]; /* 0x003c */
+
+ /*
+ * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+ * VFs
+ */
+ u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+ u32 dcbx_lldp_dcbx_stat_offset; /* 0x0064 */
+ #define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
+
+ /*
+ * The edebug_driver_if field is used to transfer messages between the
+ * edebug app and the driver through shmem2.
+ *
+ * message format:
+ * bits 0-2 - function number / instance of driver to perform request
+ * bits 3-5 - op code / is_ack?
+ * bits 6-63 - data + */ + u32 edebug_driver_if[2]; /* 0x0068 */ + #define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR 1 + #define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR 2 + #define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT 3 + + u32 nvm_retain_bitmap_addr; /* 0x0070 */ + + u32 reserved1; /* 0x0074 */ + + u32 reserved2[E2_FUNC_MAX]; + + u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ + u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ + + u32 swim_base_addr; /* 0x0108 */ + u32 swim_funcs; + u32 swim_main_cb; + + u32 reserved5[2]; + + /* generic flags controlled by the driver */ + u32 drv_flags; + #define DRV_FLAGS_DCB_CONFIGURED 0x1 + + /* pointer to extended dev_info shared data copied from nvm image */ + u32 extended_dev_info_shared_addr; + u32 ncsi_oem_data_addr; + + u32 ocsd_host_addr; + u32 ocbb_host_addr; + u32 ocsd_req_update_interval; }; struct emac_stats { - u32 rx_stat_ifhcinoctets; - u32 rx_stat_ifhcinbadoctets; - u32 rx_stat_etherstatsfragments; - u32 rx_stat_ifhcinucastpkts; - u32 rx_stat_ifhcinmulticastpkts; - u32 rx_stat_ifhcinbroadcastpkts; - u32 rx_stat_dot3statsfcserrors; - u32 rx_stat_dot3statsalignmenterrors; - u32 rx_stat_dot3statscarriersenseerrors; - u32 rx_stat_xonpauseframesreceived; - u32 rx_stat_xoffpauseframesreceived; - u32 rx_stat_maccontrolframesreceived; - u32 rx_stat_xoffstateentered; - u32 rx_stat_dot3statsframestoolong; - u32 rx_stat_etherstatsjabbers; - u32 rx_stat_etherstatsundersizepkts; - u32 rx_stat_etherstatspkts64octets; - u32 rx_stat_etherstatspkts65octetsto127octets; - u32 rx_stat_etherstatspkts128octetsto255octets; - u32 rx_stat_etherstatspkts256octetsto511octets; - u32 rx_stat_etherstatspkts512octetsto1023octets; - u32 rx_stat_etherstatspkts1024octetsto1522octets; - u32 rx_stat_etherstatspktsover1522octets; - - u32 rx_stat_falsecarriererrors; - - u32 tx_stat_ifhcoutoctets; - u32 tx_stat_ifhcoutbadoctets; - u32 tx_stat_etherstatscollisions; - u32 tx_stat_outxonsent; - u32 tx_stat_outxoffsent; - u32 tx_stat_flowcontroldone; - u32 tx_stat_dot3statssinglecollisionframes; - u32 tx_stat_dot3statsmultiplecollisionframes; - u32 tx_stat_dot3statsdeferredtransmissions; - u32 tx_stat_dot3statsexcessivecollisions; - u32 tx_stat_dot3statslatecollisions; - u32 tx_stat_ifhcoutucastpkts; - u32 tx_stat_ifhcoutmulticastpkts; - u32 tx_stat_ifhcoutbroadcastpkts; - u32 tx_stat_etherstatspkts64octets; - u32 tx_stat_etherstatspkts65octetsto127octets; - u32 tx_stat_etherstatspkts128octetsto255octets; - u32 tx_stat_etherstatspkts256octetsto511octets; - u32 tx_stat_etherstatspkts512octetsto1023octets; - u32 tx_stat_etherstatspkts1024octetsto1522octets; - u32 tx_stat_etherstatspktsover1522octets; - u32 tx_stat_dot3statsinternalmactransmiterrors; + u32 rx_stat_ifhcinoctets; + u32 rx_stat_ifhcinbadoctets; + u32 rx_stat_etherstatsfragments; + u32 rx_stat_ifhcinucastpkts; + u32 rx_stat_ifhcinmulticastpkts; + u32 rx_stat_ifhcinbroadcastpkts; + u32 rx_stat_dot3statsfcserrors; + u32 rx_stat_dot3statsalignmenterrors; + u32 rx_stat_dot3statscarriersenseerrors; + u32 rx_stat_xonpauseframesreceived; + u32 rx_stat_xoffpauseframesreceived; + u32 rx_stat_maccontrolframesreceived; + u32 rx_stat_xoffstateentered; + u32 rx_stat_dot3statsframestoolong; + u32 rx_stat_etherstatsjabbers; + u32 rx_stat_etherstatsundersizepkts; + u32 rx_stat_etherstatspkts64octets; + u32 rx_stat_etherstatspkts65octetsto127octets; + u32 rx_stat_etherstatspkts128octetsto255octets; + u32 rx_stat_etherstatspkts256octetsto511octets; + u32 rx_stat_etherstatspkts512octetsto1023octets; + u32 rx_stat_etherstatspkts1024octetsto1522octets; + u32 
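
The edebug_driver_if message described above is one 64-bit value spread across the field's two u32 entries. A sketch of the field extraction follows; which entry holds the low 32 bits is an assumption here, as is the decoding helper itself:

#include <stdint.h>
#include <stdio.h>

#define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR  1
#define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR   2
#define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT   3

int main(void)
{
        /* sample request; entry [0] assumed to hold the low 32 bits */
        uint32_t edebug_driver_if[2] = { 0x00000051, 0x00000000 };
        uint64_t msg = ((uint64_t)edebug_driver_if[1] << 32) |
                       edebug_driver_if[0];

        unsigned int func = msg & 0x7;          /* bits 0-2 */
        unsigned int op = (msg >> 3) & 0x7;     /* bits 3-5 */
        uint64_t data = msg >> 6;               /* bits 6-63 */

        /* sample decodes to func 1, op 2 (GET_BUS_ADDR), data 0x1 */
        printf("func %u op %u data 0x%llx\n", func, op,
               (unsigned long long)data);
        return 0;
}
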
rx_stat_etherstatspktsover1522octets; + + u32 rx_stat_falsecarriererrors; + + u32 tx_stat_ifhcoutoctets; + u32 tx_stat_ifhcoutbadoctets; + u32 tx_stat_etherstatscollisions; + u32 tx_stat_outxonsent; + u32 tx_stat_outxoffsent; + u32 tx_stat_flowcontroldone; + u32 tx_stat_dot3statssinglecollisionframes; + u32 tx_stat_dot3statsmultiplecollisionframes; + u32 tx_stat_dot3statsdeferredtransmissions; + u32 tx_stat_dot3statsexcessivecollisions; + u32 tx_stat_dot3statslatecollisions; + u32 tx_stat_ifhcoutucastpkts; + u32 tx_stat_ifhcoutmulticastpkts; + u32 tx_stat_ifhcoutbroadcastpkts; + u32 tx_stat_etherstatspkts64octets; + u32 tx_stat_etherstatspkts65octetsto127octets; + u32 tx_stat_etherstatspkts128octetsto255octets; + u32 tx_stat_etherstatspkts256octetsto511octets; + u32 tx_stat_etherstatspkts512octetsto1023octets; + u32 tx_stat_etherstatspkts1024octetsto1522octets; + u32 tx_stat_etherstatspktsover1522octets; + u32 tx_stat_dot3statsinternalmactransmiterrors; }; struct bmac1_stats { - u32 tx_stat_gtpkt_lo; - u32 tx_stat_gtpkt_hi; - u32 tx_stat_gtxpf_lo; - u32 tx_stat_gtxpf_hi; - u32 tx_stat_gtfcs_lo; - u32 tx_stat_gtfcs_hi; - u32 tx_stat_gtmca_lo; - u32 tx_stat_gtmca_hi; - u32 tx_stat_gtbca_lo; - u32 tx_stat_gtbca_hi; - u32 tx_stat_gtfrg_lo; - u32 tx_stat_gtfrg_hi; - u32 tx_stat_gtovr_lo; - u32 tx_stat_gtovr_hi; - u32 tx_stat_gt64_lo; - u32 tx_stat_gt64_hi; - u32 tx_stat_gt127_lo; - u32 tx_stat_gt127_hi; - u32 tx_stat_gt255_lo; - u32 tx_stat_gt255_hi; - u32 tx_stat_gt511_lo; - u32 tx_stat_gt511_hi; - u32 tx_stat_gt1023_lo; - u32 tx_stat_gt1023_hi; - u32 tx_stat_gt1518_lo; - u32 tx_stat_gt1518_hi; - u32 tx_stat_gt2047_lo; - u32 tx_stat_gt2047_hi; - u32 tx_stat_gt4095_lo; - u32 tx_stat_gt4095_hi; - u32 tx_stat_gt9216_lo; - u32 tx_stat_gt9216_hi; - u32 tx_stat_gt16383_lo; - u32 tx_stat_gt16383_hi; - u32 tx_stat_gtmax_lo; - u32 tx_stat_gtmax_hi; - u32 tx_stat_gtufl_lo; - u32 tx_stat_gtufl_hi; - u32 tx_stat_gterr_lo; - u32 tx_stat_gterr_hi; - u32 tx_stat_gtbyt_lo; - u32 tx_stat_gtbyt_hi; - - u32 rx_stat_gr64_lo; - u32 rx_stat_gr64_hi; - u32 rx_stat_gr127_lo; - u32 rx_stat_gr127_hi; - u32 rx_stat_gr255_lo; - u32 rx_stat_gr255_hi; - u32 rx_stat_gr511_lo; - u32 rx_stat_gr511_hi; - u32 rx_stat_gr1023_lo; - u32 rx_stat_gr1023_hi; - u32 rx_stat_gr1518_lo; - u32 rx_stat_gr1518_hi; - u32 rx_stat_gr2047_lo; - u32 rx_stat_gr2047_hi; - u32 rx_stat_gr4095_lo; - u32 rx_stat_gr4095_hi; - u32 rx_stat_gr9216_lo; - u32 rx_stat_gr9216_hi; - u32 rx_stat_gr16383_lo; - u32 rx_stat_gr16383_hi; - u32 rx_stat_grmax_lo; - u32 rx_stat_grmax_hi; - u32 rx_stat_grpkt_lo; - u32 rx_stat_grpkt_hi; - u32 rx_stat_grfcs_lo; - u32 rx_stat_grfcs_hi; - u32 rx_stat_grmca_lo; - u32 rx_stat_grmca_hi; - u32 rx_stat_grbca_lo; - u32 rx_stat_grbca_hi; - u32 rx_stat_grxcf_lo; - u32 rx_stat_grxcf_hi; - u32 rx_stat_grxpf_lo; - u32 rx_stat_grxpf_hi; - u32 rx_stat_grxuo_lo; - u32 rx_stat_grxuo_hi; - u32 rx_stat_grjbr_lo; - u32 rx_stat_grjbr_hi; - u32 rx_stat_grovr_lo; - u32 rx_stat_grovr_hi; - u32 rx_stat_grflr_lo; - u32 rx_stat_grflr_hi; - u32 rx_stat_grmeg_lo; - u32 rx_stat_grmeg_hi; - u32 rx_stat_grmeb_lo; - u32 rx_stat_grmeb_hi; - u32 rx_stat_grbyt_lo; - u32 rx_stat_grbyt_hi; - u32 rx_stat_grund_lo; - u32 rx_stat_grund_hi; - u32 rx_stat_grfrg_lo; - u32 rx_stat_grfrg_hi; - u32 rx_stat_grerb_lo; - u32 rx_stat_grerb_hi; - u32 rx_stat_grfre_lo; - u32 rx_stat_grfre_hi; - u32 rx_stat_gripj_lo; - u32 rx_stat_gripj_hi; + u32 tx_stat_gtpkt_lo; + u32 tx_stat_gtpkt_hi; + u32 tx_stat_gtxpf_lo; + u32 tx_stat_gtxpf_hi; + u32 tx_stat_gtfcs_lo; + u32 
tx_stat_gtfcs_hi;
+ u32 tx_stat_gtmca_lo;
+ u32 tx_stat_gtmca_hi;
+ u32 tx_stat_gtbca_lo;
+ u32 tx_stat_gtbca_hi;
+ u32 tx_stat_gtfrg_lo;
+ u32 tx_stat_gtfrg_hi;
+ u32 tx_stat_gtovr_lo;
+ u32 tx_stat_gtovr_hi;
+ u32 tx_stat_gt64_lo;
+ u32 tx_stat_gt64_hi;
+ u32 tx_stat_gt127_lo;
+ u32 tx_stat_gt127_hi;
+ u32 tx_stat_gt255_lo;
+ u32 tx_stat_gt255_hi;
+ u32 tx_stat_gt511_lo;
+ u32 tx_stat_gt511_hi;
+ u32 tx_stat_gt1023_lo;
+ u32 tx_stat_gt1023_hi;
+ u32 tx_stat_gt1518_lo;
+ u32 tx_stat_gt1518_hi;
+ u32 tx_stat_gt2047_lo;
+ u32 tx_stat_gt2047_hi;
+ u32 tx_stat_gt4095_lo;
+ u32 tx_stat_gt4095_hi;
+ u32 tx_stat_gt9216_lo;
+ u32 tx_stat_gt9216_hi;
+ u32 tx_stat_gt16383_lo;
+ u32 tx_stat_gt16383_hi;
+ u32 tx_stat_gtmax_lo;
+ u32 tx_stat_gtmax_hi;
+ u32 tx_stat_gtufl_lo;
+ u32 tx_stat_gtufl_hi;
+ u32 tx_stat_gterr_lo;
+ u32 tx_stat_gterr_hi;
+ u32 tx_stat_gtbyt_lo;
+ u32 tx_stat_gtbyt_hi;
+
+ u32 rx_stat_gr64_lo;
+ u32 rx_stat_gr64_hi;
+ u32 rx_stat_gr127_lo;
+ u32 rx_stat_gr127_hi;
+ u32 rx_stat_gr255_lo;
+ u32 rx_stat_gr255_hi;
+ u32 rx_stat_gr511_lo;
+ u32 rx_stat_gr511_hi;
+ u32 rx_stat_gr1023_lo;
+ u32 rx_stat_gr1023_hi;
+ u32 rx_stat_gr1518_lo;
+ u32 rx_stat_gr1518_hi;
+ u32 rx_stat_gr2047_lo;
+ u32 rx_stat_gr2047_hi;
+ u32 rx_stat_gr4095_lo;
+ u32 rx_stat_gr4095_hi;
+ u32 rx_stat_gr9216_lo;
+ u32 rx_stat_gr9216_hi;
+ u32 rx_stat_gr16383_lo;
+ u32 rx_stat_gr16383_hi;
+ u32 rx_stat_grmax_lo;
+ u32 rx_stat_grmax_hi;
+ u32 rx_stat_grpkt_lo;
+ u32 rx_stat_grpkt_hi;
+ u32 rx_stat_grfcs_lo;
+ u32 rx_stat_grfcs_hi;
+ u32 rx_stat_grmca_lo;
+ u32 rx_stat_grmca_hi;
+ u32 rx_stat_grbca_lo;
+ u32 rx_stat_grbca_hi;
+ u32 rx_stat_grxcf_lo;
+ u32 rx_stat_grxcf_hi;
+ u32 rx_stat_grxpf_lo;
+ u32 rx_stat_grxpf_hi;
+ u32 rx_stat_grxuo_lo;
+ u32 rx_stat_grxuo_hi;
+ u32 rx_stat_grjbr_lo;
+ u32 rx_stat_grjbr_hi;
+ u32 rx_stat_grovr_lo;
+ u32 rx_stat_grovr_hi;
+ u32 rx_stat_grflr_lo;
+ u32 rx_stat_grflr_hi;
+ u32 rx_stat_grmeg_lo;
+ u32 rx_stat_grmeg_hi;
+ u32 rx_stat_grmeb_lo;
+ u32 rx_stat_grmeb_hi;
+ u32 rx_stat_grbyt_lo;
+ u32 rx_stat_grbyt_hi;
+ u32 rx_stat_grund_lo;
+ u32 rx_stat_grund_hi;
+ u32 rx_stat_grfrg_lo;
+ u32 rx_stat_grfrg_hi;
+ u32 rx_stat_grerb_lo;
+ u32 rx_stat_grerb_hi;
+ u32 rx_stat_grfre_lo;
+ u32 rx_stat_grfre_hi;
+ u32 rx_stat_gripj_lo;
+ u32 rx_stat_gripj_hi;
};

struct bmac2_stats {
@@ -1750,187 +2238,316 @@ struct bmac2_stats {
 u32 rx_stat_gripj_hi;
};

+struct mstat_stats {
+ struct {
+ /* NOTE: MSTAT on E3 has a bug where this register's contents are
+ * actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
+ */
+ u32 tx_gtxpok_lo;
+ u32 tx_gtxpok_hi;
+ u32 tx_gtxpf_lo;
+ u32 tx_gtxpf_hi;
+ u32 tx_gtxpp_lo;
+ u32 tx_gtxpp_hi;
+ u32 tx_gtfcs_lo;
+ u32 tx_gtfcs_hi;
+ u32 tx_gtuca_lo;
+ u32 tx_gtuca_hi;
+ u32 tx_gtmca_lo;
+ u32 tx_gtmca_hi;
+ u32 tx_gtgca_lo;
+ u32 tx_gtgca_hi;
+ u32 tx_gtpkt_lo;
+ u32 tx_gtpkt_hi;
+ u32 tx_gt64_lo;
+ u32 tx_gt64_hi;
+ u32 tx_gt127_lo;
+ u32 tx_gt127_hi;
+ u32 tx_gt255_lo;
+ u32 tx_gt255_hi;
+ u32 tx_gt511_lo;
+ u32 tx_gt511_hi;
+ u32 tx_gt1023_lo;
+ u32 tx_gt1023_hi;
+ u32 tx_gt1518_lo;
+ u32 tx_gt1518_hi;
+ u32 tx_gt2047_lo;
+ u32 tx_gt2047_hi;
+ u32 tx_gt4095_lo;
+ u32 tx_gt4095_hi;
+ u32 tx_gt9216_lo;
+ u32 tx_gt9216_hi;
+ u32 tx_gt16383_lo;
+ u32 tx_gt16383_hi;
+ u32 tx_gtufl_lo;
+ u32 tx_gtufl_hi;
+ u32 tx_gterr_lo;
+ u32 tx_gterr_hi;
+ u32 tx_gtbyt_lo;
+ u32 tx_gtbyt_hi;
+ u32 tx_collisions_lo;
+ u32 tx_collisions_hi;
+ u32 tx_singlecollision_lo;
+ u32 tx_singlecollision_hi;
+ u32 tx_multiplecollisions_lo;
+ u32 tx_multiplecollisions_hi;
+ u32 
tx_deferred_lo; + u32 tx_deferred_hi; + u32 tx_excessivecollisions_lo; + u32 tx_excessivecollisions_hi; + u32 tx_latecollisions_lo; + u32 tx_latecollisions_hi; + } stats_tx; + + struct { + u32 rx_gr64_lo; + u32 rx_gr64_hi; + u32 rx_gr127_lo; + u32 rx_gr127_hi; + u32 rx_gr255_lo; + u32 rx_gr255_hi; + u32 rx_gr511_lo; + u32 rx_gr511_hi; + u32 rx_gr1023_lo; + u32 rx_gr1023_hi; + u32 rx_gr1518_lo; + u32 rx_gr1518_hi; + u32 rx_gr2047_lo; + u32 rx_gr2047_hi; + u32 rx_gr4095_lo; + u32 rx_gr4095_hi; + u32 rx_gr9216_lo; + u32 rx_gr9216_hi; + u32 rx_gr16383_lo; + u32 rx_gr16383_hi; + u32 rx_grpkt_lo; + u32 rx_grpkt_hi; + u32 rx_grfcs_lo; + u32 rx_grfcs_hi; + u32 rx_gruca_lo; + u32 rx_gruca_hi; + u32 rx_grmca_lo; + u32 rx_grmca_hi; + u32 rx_grbca_lo; + u32 rx_grbca_hi; + u32 rx_grxpf_lo; + u32 rx_grxpf_hi; + u32 rx_grxpp_lo; + u32 rx_grxpp_hi; + u32 rx_grxuo_lo; + u32 rx_grxuo_hi; + u32 rx_grovr_lo; + u32 rx_grovr_hi; + u32 rx_grxcf_lo; + u32 rx_grxcf_hi; + u32 rx_grflr_lo; + u32 rx_grflr_hi; + u32 rx_grpok_lo; + u32 rx_grpok_hi; + u32 rx_grbyt_lo; + u32 rx_grbyt_hi; + u32 rx_grund_lo; + u32 rx_grund_hi; + u32 rx_grfrg_lo; + u32 rx_grfrg_hi; + u32 rx_grerb_lo; + u32 rx_grerb_hi; + u32 rx_grfre_lo; + u32 rx_grfre_hi; + + u32 rx_alignmenterrors_lo; + u32 rx_alignmenterrors_hi; + u32 rx_falsecarrier_lo; + u32 rx_falsecarrier_hi; + u32 rx_llfcmsgcnt_lo; + u32 rx_llfcmsgcnt_hi; + } stats_rx; +}; + union mac_stats { - struct emac_stats emac_stats; - struct bmac1_stats bmac1_stats; - struct bmac2_stats bmac2_stats; + struct emac_stats emac_stats; + struct bmac1_stats bmac1_stats; + struct bmac2_stats bmac2_stats; + struct mstat_stats mstat_stats; }; struct mac_stx { - /* in_bad_octets */ - u32 rx_stat_ifhcinbadoctets_hi; - u32 rx_stat_ifhcinbadoctets_lo; - - /* out_bad_octets */ - u32 tx_stat_ifhcoutbadoctets_hi; - u32 tx_stat_ifhcoutbadoctets_lo; - - /* crc_receive_errors */ - u32 rx_stat_dot3statsfcserrors_hi; - u32 rx_stat_dot3statsfcserrors_lo; - /* alignment_errors */ - u32 rx_stat_dot3statsalignmenterrors_hi; - u32 rx_stat_dot3statsalignmenterrors_lo; - /* carrier_sense_errors */ - u32 rx_stat_dot3statscarriersenseerrors_hi; - u32 rx_stat_dot3statscarriersenseerrors_lo; - /* false_carrier_detections */ - u32 rx_stat_falsecarriererrors_hi; - u32 rx_stat_falsecarriererrors_lo; - - /* runt_packets_received */ - u32 rx_stat_etherstatsundersizepkts_hi; - u32 rx_stat_etherstatsundersizepkts_lo; - /* jabber_packets_received */ - u32 rx_stat_dot3statsframestoolong_hi; - u32 rx_stat_dot3statsframestoolong_lo; - - /* error_runt_packets_received */ - u32 rx_stat_etherstatsfragments_hi; - u32 rx_stat_etherstatsfragments_lo; - /* error_jabber_packets_received */ - u32 rx_stat_etherstatsjabbers_hi; - u32 rx_stat_etherstatsjabbers_lo; - - /* control_frames_received */ - u32 rx_stat_maccontrolframesreceived_hi; - u32 rx_stat_maccontrolframesreceived_lo; - u32 rx_stat_bmac_xpf_hi; - u32 rx_stat_bmac_xpf_lo; - u32 rx_stat_bmac_xcf_hi; - u32 rx_stat_bmac_xcf_lo; - - /* xoff_state_entered */ - u32 rx_stat_xoffstateentered_hi; - u32 rx_stat_xoffstateentered_lo; - /* pause_xon_frames_received */ - u32 rx_stat_xonpauseframesreceived_hi; - u32 rx_stat_xonpauseframesreceived_lo; - /* pause_xoff_frames_received */ - u32 rx_stat_xoffpauseframesreceived_hi; - u32 rx_stat_xoffpauseframesreceived_lo; - /* pause_xon_frames_transmitted */ - u32 tx_stat_outxonsent_hi; - u32 tx_stat_outxonsent_lo; - /* pause_xoff_frames_transmitted */ - u32 tx_stat_outxoffsent_hi; - u32 tx_stat_outxoffsent_lo; - /* flow_control_done */ - u32 
tx_stat_flowcontroldone_hi; - u32 tx_stat_flowcontroldone_lo; - - /* ether_stats_collisions */ - u32 tx_stat_etherstatscollisions_hi; - u32 tx_stat_etherstatscollisions_lo; - /* single_collision_transmit_frames */ - u32 tx_stat_dot3statssinglecollisionframes_hi; - u32 tx_stat_dot3statssinglecollisionframes_lo; - /* multiple_collision_transmit_frames */ - u32 tx_stat_dot3statsmultiplecollisionframes_hi; - u32 tx_stat_dot3statsmultiplecollisionframes_lo; - /* deferred_transmissions */ - u32 tx_stat_dot3statsdeferredtransmissions_hi; - u32 tx_stat_dot3statsdeferredtransmissions_lo; - /* excessive_collision_frames */ - u32 tx_stat_dot3statsexcessivecollisions_hi; - u32 tx_stat_dot3statsexcessivecollisions_lo; - /* late_collision_frames */ - u32 tx_stat_dot3statslatecollisions_hi; - u32 tx_stat_dot3statslatecollisions_lo; - - /* frames_transmitted_64_bytes */ - u32 tx_stat_etherstatspkts64octets_hi; - u32 tx_stat_etherstatspkts64octets_lo; - /* frames_transmitted_65_127_bytes */ - u32 tx_stat_etherstatspkts65octetsto127octets_hi; - u32 tx_stat_etherstatspkts65octetsto127octets_lo; - /* frames_transmitted_128_255_bytes */ - u32 tx_stat_etherstatspkts128octetsto255octets_hi; - u32 tx_stat_etherstatspkts128octetsto255octets_lo; - /* frames_transmitted_256_511_bytes */ - u32 tx_stat_etherstatspkts256octetsto511octets_hi; - u32 tx_stat_etherstatspkts256octetsto511octets_lo; - /* frames_transmitted_512_1023_bytes */ - u32 tx_stat_etherstatspkts512octetsto1023octets_hi; - u32 tx_stat_etherstatspkts512octetsto1023octets_lo; - /* frames_transmitted_1024_1522_bytes */ - u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; - u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; - /* frames_transmitted_1523_9022_bytes */ - u32 tx_stat_etherstatspktsover1522octets_hi; - u32 tx_stat_etherstatspktsover1522octets_lo; - u32 tx_stat_bmac_2047_hi; - u32 tx_stat_bmac_2047_lo; - u32 tx_stat_bmac_4095_hi; - u32 tx_stat_bmac_4095_lo; - u32 tx_stat_bmac_9216_hi; - u32 tx_stat_bmac_9216_lo; - u32 tx_stat_bmac_16383_hi; - u32 tx_stat_bmac_16383_lo; - - /* internal_mac_transmit_errors */ - u32 tx_stat_dot3statsinternalmactransmiterrors_hi; - u32 tx_stat_dot3statsinternalmactransmiterrors_lo; - - /* if_out_discards */ - u32 tx_stat_bmac_ufl_hi; - u32 tx_stat_bmac_ufl_lo; -}; - - -#define MAC_STX_IDX_MAX 2 + /* in_bad_octets */ + u32 rx_stat_ifhcinbadoctets_hi; + u32 rx_stat_ifhcinbadoctets_lo; + + /* out_bad_octets */ + u32 tx_stat_ifhcoutbadoctets_hi; + u32 tx_stat_ifhcoutbadoctets_lo; + + /* crc_receive_errors */ + u32 rx_stat_dot3statsfcserrors_hi; + u32 rx_stat_dot3statsfcserrors_lo; + /* alignment_errors */ + u32 rx_stat_dot3statsalignmenterrors_hi; + u32 rx_stat_dot3statsalignmenterrors_lo; + /* carrier_sense_errors */ + u32 rx_stat_dot3statscarriersenseerrors_hi; + u32 rx_stat_dot3statscarriersenseerrors_lo; + /* false_carrier_detections */ + u32 rx_stat_falsecarriererrors_hi; + u32 rx_stat_falsecarriererrors_lo; + + /* runt_packets_received */ + u32 rx_stat_etherstatsundersizepkts_hi; + u32 rx_stat_etherstatsundersizepkts_lo; + /* jabber_packets_received */ + u32 rx_stat_dot3statsframestoolong_hi; + u32 rx_stat_dot3statsframestoolong_lo; + + /* error_runt_packets_received */ + u32 rx_stat_etherstatsfragments_hi; + u32 rx_stat_etherstatsfragments_lo; + /* error_jabber_packets_received */ + u32 rx_stat_etherstatsjabbers_hi; + u32 rx_stat_etherstatsjabbers_lo; + + /* control_frames_received */ + u32 rx_stat_maccontrolframesreceived_hi; + u32 rx_stat_maccontrolframesreceived_lo; + u32 rx_stat_mac_xpf_hi; + u32 
rx_stat_mac_xpf_lo; + u32 rx_stat_mac_xcf_hi; + u32 rx_stat_mac_xcf_lo; + + /* xoff_state_entered */ + u32 rx_stat_xoffstateentered_hi; + u32 rx_stat_xoffstateentered_lo; + /* pause_xon_frames_received */ + u32 rx_stat_xonpauseframesreceived_hi; + u32 rx_stat_xonpauseframesreceived_lo; + /* pause_xoff_frames_received */ + u32 rx_stat_xoffpauseframesreceived_hi; + u32 rx_stat_xoffpauseframesreceived_lo; + /* pause_xon_frames_transmitted */ + u32 tx_stat_outxonsent_hi; + u32 tx_stat_outxonsent_lo; + /* pause_xoff_frames_transmitted */ + u32 tx_stat_outxoffsent_hi; + u32 tx_stat_outxoffsent_lo; + /* flow_control_done */ + u32 tx_stat_flowcontroldone_hi; + u32 tx_stat_flowcontroldone_lo; + + /* ether_stats_collisions */ + u32 tx_stat_etherstatscollisions_hi; + u32 tx_stat_etherstatscollisions_lo; + /* single_collision_transmit_frames */ + u32 tx_stat_dot3statssinglecollisionframes_hi; + u32 tx_stat_dot3statssinglecollisionframes_lo; + /* multiple_collision_transmit_frames */ + u32 tx_stat_dot3statsmultiplecollisionframes_hi; + u32 tx_stat_dot3statsmultiplecollisionframes_lo; + /* deferred_transmissions */ + u32 tx_stat_dot3statsdeferredtransmissions_hi; + u32 tx_stat_dot3statsdeferredtransmissions_lo; + /* excessive_collision_frames */ + u32 tx_stat_dot3statsexcessivecollisions_hi; + u32 tx_stat_dot3statsexcessivecollisions_lo; + /* late_collision_frames */ + u32 tx_stat_dot3statslatecollisions_hi; + u32 tx_stat_dot3statslatecollisions_lo; + + /* frames_transmitted_64_bytes */ + u32 tx_stat_etherstatspkts64octets_hi; + u32 tx_stat_etherstatspkts64octets_lo; + /* frames_transmitted_65_127_bytes */ + u32 tx_stat_etherstatspkts65octetsto127octets_hi; + u32 tx_stat_etherstatspkts65octetsto127octets_lo; + /* frames_transmitted_128_255_bytes */ + u32 tx_stat_etherstatspkts128octetsto255octets_hi; + u32 tx_stat_etherstatspkts128octetsto255octets_lo; + /* frames_transmitted_256_511_bytes */ + u32 tx_stat_etherstatspkts256octetsto511octets_hi; + u32 tx_stat_etherstatspkts256octetsto511octets_lo; + /* frames_transmitted_512_1023_bytes */ + u32 tx_stat_etherstatspkts512octetsto1023octets_hi; + u32 tx_stat_etherstatspkts512octetsto1023octets_lo; + /* frames_transmitted_1024_1522_bytes */ + u32 tx_stat_etherstatspkts1024octetsto1522octets_hi; + u32 tx_stat_etherstatspkts1024octetsto1522octets_lo; + /* frames_transmitted_1523_9022_bytes */ + u32 tx_stat_etherstatspktsover1522octets_hi; + u32 tx_stat_etherstatspktsover1522octets_lo; + u32 tx_stat_mac_2047_hi; + u32 tx_stat_mac_2047_lo; + u32 tx_stat_mac_4095_hi; + u32 tx_stat_mac_4095_lo; + u32 tx_stat_mac_9216_hi; + u32 tx_stat_mac_9216_lo; + u32 tx_stat_mac_16383_hi; + u32 tx_stat_mac_16383_lo; + + /* internal_mac_transmit_errors */ + u32 tx_stat_dot3statsinternalmactransmiterrors_hi; + u32 tx_stat_dot3statsinternalmactransmiterrors_lo; + + /* if_out_discards */ + u32 tx_stat_mac_ufl_hi; + u32 tx_stat_mac_ufl_lo; +}; + + +#define MAC_STX_IDX_MAX 2 struct host_port_stats { - u32 host_port_stats_start; + u32 host_port_stats_start; - struct mac_stx mac_stx[MAC_STX_IDX_MAX]; + struct mac_stx mac_stx[MAC_STX_IDX_MAX]; - u32 brb_drop_hi; - u32 brb_drop_lo; + u32 brb_drop_hi; + u32 brb_drop_lo; - u32 host_port_stats_end; + u32 host_port_stats_end; }; struct host_func_stats { - u32 host_func_stats_start; + u32 host_func_stats_start; - u32 total_bytes_received_hi; - u32 total_bytes_received_lo; + u32 total_bytes_received_hi; + u32 total_bytes_received_lo; - u32 total_bytes_transmitted_hi; - u32 total_bytes_transmitted_lo; + u32 total_bytes_transmitted_hi; + u32 
total_bytes_transmitted_lo; - u32 total_unicast_packets_received_hi; - u32 total_unicast_packets_received_lo; + u32 total_unicast_packets_received_hi; + u32 total_unicast_packets_received_lo; - u32 total_multicast_packets_received_hi; - u32 total_multicast_packets_received_lo; + u32 total_multicast_packets_received_hi; + u32 total_multicast_packets_received_lo; - u32 total_broadcast_packets_received_hi; - u32 total_broadcast_packets_received_lo; + u32 total_broadcast_packets_received_hi; + u32 total_broadcast_packets_received_lo; - u32 total_unicast_packets_transmitted_hi; - u32 total_unicast_packets_transmitted_lo; + u32 total_unicast_packets_transmitted_hi; + u32 total_unicast_packets_transmitted_lo; - u32 total_multicast_packets_transmitted_hi; - u32 total_multicast_packets_transmitted_lo; + u32 total_multicast_packets_transmitted_hi; + u32 total_multicast_packets_transmitted_lo; - u32 total_broadcast_packets_transmitted_hi; - u32 total_broadcast_packets_transmitted_lo; + u32 total_broadcast_packets_transmitted_hi; + u32 total_broadcast_packets_transmitted_lo; - u32 valid_bytes_received_hi; - u32 valid_bytes_received_lo; + u32 valid_bytes_received_hi; + u32 valid_bytes_received_lo; - u32 host_func_stats_end; + u32 host_func_stats_end; }; +/* VIC definitions */ +#define VICSTATST_UIF_INDEX 2 -#define BCM_5710_FW_MAJOR_VERSION 6 -#define BCM_5710_FW_MINOR_VERSION 2 -#define BCM_5710_FW_REVISION_VERSION 9 -#define BCM_5710_FW_ENGINEERING_VERSION 0 +#define BCM_5710_FW_MAJOR_VERSION 7 +#define BCM_5710_FW_MINOR_VERSION 0 +#define BCM_5710_FW_REVISION_VERSION 23 +#define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -1948,6 +2565,115 @@ struct atten_sp_status_block { /* + * The eth aggregative context of Cstorm + */ +struct cstorm_eth_ag_context { + u32 __reserved0[10]; +}; + + +/* + * dmae command structure + */ +struct dmae_command { + u32 opcode; +#define DMAE_COMMAND_SRC (0x1<<0) +#define DMAE_COMMAND_SRC_SHIFT 0 +#define DMAE_COMMAND_DST (0x3<<1) +#define DMAE_COMMAND_DST_SHIFT 1 +#define DMAE_COMMAND_C_DST (0x1<<3) +#define DMAE_COMMAND_C_DST_SHIFT 3 +#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) +#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) +#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) +#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 +#define DMAE_COMMAND_ENDIANITY (0x3<<9) +#define DMAE_COMMAND_ENDIANITY_SHIFT 9 +#define DMAE_COMMAND_PORT (0x1<<11) +#define DMAE_COMMAND_PORT_SHIFT 11 +#define DMAE_COMMAND_CRC_RESET (0x1<<12) +#define DMAE_COMMAND_CRC_RESET_SHIFT 12 +#define DMAE_COMMAND_SRC_RESET (0x1<<13) +#define DMAE_COMMAND_SRC_RESET_SHIFT 13 +#define DMAE_COMMAND_DST_RESET (0x1<<14) +#define DMAE_COMMAND_DST_RESET_SHIFT 14 +#define DMAE_COMMAND_E1HVN (0x3<<15) +#define DMAE_COMMAND_E1HVN_SHIFT 15 +#define DMAE_COMMAND_DST_VN (0x3<<17) +#define DMAE_COMMAND_DST_VN_SHIFT 17 +#define DMAE_COMMAND_C_FUNC (0x1<<19) +#define DMAE_COMMAND_C_FUNC_SHIFT 19 +#define DMAE_COMMAND_ERR_POLICY (0x3<<20) +#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 +#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) +#define DMAE_COMMAND_RESERVED0_SHIFT 22 + u32 src_addr_lo; + u32 src_addr_hi; + u32 dst_addr_lo; + u32 dst_addr_hi; +#if defined(__BIG_ENDIAN) + u16 opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) +#define 
DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 + u16 len; +#elif defined(__LITTLE_ENDIAN) + u16 len; + u16 opcode_iov; +#define DMAE_COMMAND_SRC_VFID (0x3F<<0) +#define DMAE_COMMAND_SRC_VFID_SHIFT 0 +#define DMAE_COMMAND_SRC_VFPF (0x1<<6) +#define DMAE_COMMAND_SRC_VFPF_SHIFT 6 +#define DMAE_COMMAND_RESERVED1 (0x1<<7) +#define DMAE_COMMAND_RESERVED1_SHIFT 7 +#define DMAE_COMMAND_DST_VFID (0x3F<<8) +#define DMAE_COMMAND_DST_VFID_SHIFT 8 +#define DMAE_COMMAND_DST_VFPF (0x1<<14) +#define DMAE_COMMAND_DST_VFPF_SHIFT 14 +#define DMAE_COMMAND_RESERVED2 (0x1<<15) +#define DMAE_COMMAND_RESERVED2_SHIFT 15 +#endif + u32 comp_addr_lo; + u32 comp_addr_hi; + u32 comp_val; + u32 crc32; + u32 crc32_c; +#if defined(__BIG_ENDIAN) + u16 crc16_c; + u16 crc16; +#elif defined(__LITTLE_ENDIAN) + u16 crc16; + u16 crc16_c; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 crc_t10; +#elif defined(__LITTLE_ENDIAN) + u16 crc_t10; + u16 reserved3; +#endif +#if defined(__BIG_ENDIAN) + u16 xsum8; + u16 xsum16; +#elif defined(__LITTLE_ENDIAN) + u16 xsum16; + u16 xsum8; +#endif +}; + + +/* * common data for all protocols */ struct doorbell_hdr { @@ -1963,33 +2689,29 @@ struct doorbell_hdr { }; /* - * doorbell message sent to the chip - */ -struct doorbell { -#if defined(__BIG_ENDIAN) - u16 zero_fill2; - u8 zero_fill1; - struct doorbell_hdr header; -#elif defined(__LITTLE_ENDIAN) - struct doorbell_hdr header; - u8 zero_fill1; - u16 zero_fill2; -#endif -}; - - -/* - * doorbell message sent to the chip + * Ethernet doorbell */ -struct doorbell_set_prod { +struct eth_tx_doorbell { #if defined(__BIG_ENDIAN) - u16 prod; - u8 zero_fill1; - struct doorbell_hdr header; + u16 npackets; + u8 params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + struct doorbell_hdr hdr; #elif defined(__LITTLE_ENDIAN) - struct doorbell_hdr header; - u8 zero_fill1; - u16 prod; + struct doorbell_hdr hdr; + u8 params; +#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) +#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) +#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 +#define ETH_TX_DOORBELL_SPARE (0x1<<7) +#define ETH_TX_DOORBELL_SPARE_SHIFT 7 + u16 npackets; #endif }; @@ -2000,7 +2722,7 @@ struct doorbell_set_prod { struct hc_status_block_e1x { __le16 index_values[HC_SB_MAX_INDICES_E1X]; __le16 running_index[HC_SB_MAX_SM]; - u32 rsrv; + __le32 rsrv[11]; }; /* @@ -2017,7 +2739,7 @@ struct host_hc_status_block_e1x { struct hc_status_block_e2 { __le16 index_values[HC_SB_MAX_INDICES_E2]; __le16 running_index[HC_SB_MAX_SM]; - u32 reserved; + __le32 reserved[11]; }; /* @@ -2138,6 +2860,16 @@ union igu_consprod_reg { /* + * Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD +}; + + +/* * Control register for the IGU command register */ struct igu_ctrl_reg { @@ -2156,6 +2888,29 @@ struct igu_ctrl_reg { /* + * Igu interrupt command + */ +enum igu_int_cmd { + IGU_INT_ENABLE, + IGU_INT_DISABLE, + IGU_INT_NOP, + IGU_INT_NOP2, + MAX_IGU_INT_CMD +}; + + +/* + * Igu segments + */ +enum igu_seg_access { + 
IGU_SEG_ACCESS_NORM, + IGU_SEG_ACCESS_DEF, + IGU_SEG_ACCESS_ATTN, + MAX_IGU_SEG_ACCESS +}; + + +/* * Parser parsing flags field */ struct parsing_flags { @@ -2189,94 +2944,46 @@ struct parsing_flags { }; -struct regpair { - __le32 lo; - __le32 hi; +/* + * Parsing flags for TCP ACK type + */ +enum prs_flags_ack_type { + PRS_FLAG_PUREACK_PIGGY, + PRS_FLAG_PUREACK_PURE, + MAX_PRS_FLAGS_ACK_TYPE }; /* - * dmae command structure + * Parsing flags for Ethernet address type */ -struct dmae_command { - u32 opcode; -#define DMAE_COMMAND_SRC (0x1<<0) -#define DMAE_COMMAND_SRC_SHIFT 0 -#define DMAE_COMMAND_DST (0x3<<1) -#define DMAE_COMMAND_DST_SHIFT 1 -#define DMAE_COMMAND_C_DST (0x1<<3) -#define DMAE_COMMAND_C_DST_SHIFT 3 -#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4) -#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4 -#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5) -#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5 -#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6) -#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6 -#define DMAE_COMMAND_ENDIANITY (0x3<<9) -#define DMAE_COMMAND_ENDIANITY_SHIFT 9 -#define DMAE_COMMAND_PORT (0x1<<11) -#define DMAE_COMMAND_PORT_SHIFT 11 -#define DMAE_COMMAND_CRC_RESET (0x1<<12) -#define DMAE_COMMAND_CRC_RESET_SHIFT 12 -#define DMAE_COMMAND_SRC_RESET (0x1<<13) -#define DMAE_COMMAND_SRC_RESET_SHIFT 13 -#define DMAE_COMMAND_DST_RESET (0x1<<14) -#define DMAE_COMMAND_DST_RESET_SHIFT 14 -#define DMAE_COMMAND_E1HVN (0x3<<15) -#define DMAE_COMMAND_E1HVN_SHIFT 15 -#define DMAE_COMMAND_DST_VN (0x3<<17) -#define DMAE_COMMAND_DST_VN_SHIFT 17 -#define DMAE_COMMAND_C_FUNC (0x1<<19) -#define DMAE_COMMAND_C_FUNC_SHIFT 19 -#define DMAE_COMMAND_ERR_POLICY (0x3<<20) -#define DMAE_COMMAND_ERR_POLICY_SHIFT 20 -#define DMAE_COMMAND_RESERVED0 (0x3FF<<22) -#define DMAE_COMMAND_RESERVED0_SHIFT 22 - u32 src_addr_lo; - u32 src_addr_hi; - u32 dst_addr_lo; - u32 dst_addr_hi; -#if defined(__BIG_ENDIAN) - u16 reserved1; - u16 len; -#elif defined(__LITTLE_ENDIAN) - u16 len; - u16 reserved1; -#endif - u32 comp_addr_lo; - u32 comp_addr_hi; - u32 comp_val; - u32 crc32; - u32 crc32_c; -#if defined(__BIG_ENDIAN) - u16 crc16_c; - u16 crc16; -#elif defined(__LITTLE_ENDIAN) - u16 crc16; - u16 crc16_c; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved3; - u16 crc_t10; -#elif defined(__LITTLE_ENDIAN) - u16 crc_t10; - u16 reserved3; -#endif -#if defined(__BIG_ENDIAN) - u16 xsum8; - u16 xsum16; -#elif defined(__LITTLE_ENDIAN) - u16 xsum16; - u16 xsum8; -#endif +enum prs_flags_eth_addr_type { + PRS_FLAG_ETHTYPE_NON_UNICAST, + PRS_FLAG_ETHTYPE_UNICAST, + MAX_PRS_FLAGS_ETH_ADDR_TYPE }; -struct double_regpair { - u32 regpair0_lo; - u32 regpair0_hi; - u32 regpair1_lo; - u32 regpair1_hi; +/* + * Parsing flags for over-ethernet protocol + */ +enum prs_flags_over_eth { + PRS_FLAG_OVERETH_UNKNOWN, + PRS_FLAG_OVERETH_IPV4, + PRS_FLAG_OVERETH_IPV6, + PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN, + MAX_PRS_FLAGS_OVER_ETH +}; + + +/* + * Parsing flags for over-IP protocol + */ +enum prs_flags_over_ip { + PRS_FLAG_OVERIP_UNKNOWN, + PRS_FLAG_OVERIP_TCP, + PRS_FLAG_OVERIP_UDP, + MAX_PRS_FLAGS_OVER_IP }; @@ -2297,54 +3004,23 @@ struct sdm_op_gen { #define SDM_OP_GEN_RESERVED_SHIFT 17 }; -/* - * The eth Rx Buffer Descriptor - */ -struct eth_rx_bd { - __le32 addr_lo; - __le32 addr_hi; -}; /* - * The eth Rx SGE Descriptor - */ -struct eth_rx_sge { - __le32 addr_lo; - __le32 addr_hi; -}; - - - -/* - * The eth storm context of Ustorm - */ -struct ustorm_eth_st_context { - u32 reserved0[48]; -}; - -/* - * The eth storm context of Tstorm + * Timers connection 
context */ -struct tstorm_eth_st_context { - u32 __reserved0[28]; +struct timers_block_context { + u32 __reserved_0; + u32 __reserved_1; + u32 __reserved_2; + u32 flags; +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) +#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) +#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 +#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) +#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 }; -/* - * The eth aggregative context of Xstorm - */ -struct xstorm_eth_ag_context { - u32 reserved0; -#if defined(__BIG_ENDIAN) - u8 cdu_reserved; - u8 reserved2; - u16 reserved1; -#elif defined(__LITTLE_ENDIAN) - u16 reserved1; - u8 reserved2; - u8 cdu_reserved; -#endif - u32 reserved3[30]; -}; /* * The eth aggregative context of Tstorm @@ -2355,14 +3031,6 @@ struct tstorm_eth_ag_context { /* - * The eth aggregative context of Cstorm - */ -struct cstorm_eth_ag_context { - u32 __reserved0[10]; -}; - - -/* * The eth aggregative context of Ustorm */ struct ustorm_eth_ag_context { @@ -2379,229 +3047,81 @@ struct ustorm_eth_ag_context { u32 __reserved3[6]; }; -/* - * Timers connection context - */ -struct timers_block_context { - u32 __reserved_0; - u32 __reserved_1; - u32 __reserved_2; - u32 flags; -#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0) -#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0 -#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2) -#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2 -#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3) -#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3 -}; /* - * structure for easy accessibility to assembler - */ -struct eth_tx_bd_flags { - u8 as_bitfield; -#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) -#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 -#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) -#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 -#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) -#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 -#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) -#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 -#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) -#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 -#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) -#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 -#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) -#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 -}; - -/* - * The eth Tx Buffer Descriptor - */ -struct eth_tx_start_bd { - __le32 addr_lo; - __le32 addr_hi; - __le16 nbd; - __le16 nbytes; - __le16 vlan_or_ethertype; - struct eth_tx_bd_flags bd_flags; - u8 general_data; -#define ETH_TX_START_BD_HDR_NBDS (0x3F<<0) -#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 -#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) -#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 -}; - -/* - * Tx regular BD structure - */ -struct eth_tx_bd { - __le32 addr_lo; - __le32 addr_hi; - __le16 total_pkt_bytes; - __le16 nbytes; - u8 reserved[4]; -}; - -/* - * Tx parsing BD structure for ETH E1/E1h - */ -struct eth_tx_parse_bd_e1x { - u8 global_data; -#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0) -#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) -#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 -#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) -#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 -#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) -#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 -#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) -#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 - u8 tcp_flags; -#define 
ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) -#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 -#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) -#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 -#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) -#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 -#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) -#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 -#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) -#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 -#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) -#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 -#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) -#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 -#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) -#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 - u8 ip_hlen_w; - s8 reserved; - __le16 total_hlen_w; - __le16 tcp_pseudo_csum; - __le16 lso_mss; - __le16 ip_id; - __le32 tcp_send_seq; -}; - -/* - * Tx parsing BD structure for ETH E2 + * The eth aggregative context of Xstorm */ -struct eth_tx_parse_bd_e2 { - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; - __le32 parsing_data; -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) -#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) -#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 -#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) -#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 -#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) -#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 +struct xstorm_eth_ag_context { + u32 reserved0; +#if defined(__BIG_ENDIAN) + u8 cdu_reserved; + u8 reserved2; + u16 reserved1; +#elif defined(__LITTLE_ENDIAN) + u16 reserved1; + u8 reserved2; + u8 cdu_reserved; +#endif + u32 reserved3[30]; }; -/* - * The last BD in the BD memory will hold a pointer to the next BD memory - */ -struct eth_tx_next_bd { - __le32 addr_lo; - __le32 addr_hi; - u8 reserved[8]; -}; /* - * union for 4 Bd types + * doorbell message sent to the chip */ -union eth_tx_bd_types { - struct eth_tx_start_bd start_bd; - struct eth_tx_bd reg_bd; - struct eth_tx_parse_bd_e1x parse_bd_e1x; - struct eth_tx_parse_bd_e2 parse_bd_e2; - struct eth_tx_next_bd next_bd; +struct doorbell { +#if defined(__BIG_ENDIAN) + u16 zero_fill2; + u8 zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + u8 zero_fill1; + u16 zero_fill2; +#endif }; /* - * The eth storm context of Xstorm + * doorbell message sent to the chip */ -struct xstorm_eth_st_context { - u32 reserved0[60]; +struct doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif }; -/* - * The eth storm context of Cstorm - */ -struct cstorm_eth_st_context { - u32 __reserved0[4]; -}; -/* - * Ethernet connection context - */ -struct eth_context { - struct ustorm_eth_st_context ustorm_st_context; - struct tstorm_eth_st_context tstorm_st_context; - struct xstorm_eth_ag_context xstorm_ag_context; - struct tstorm_eth_ag_context tstorm_ag_context; - struct cstorm_eth_ag_context cstorm_ag_context; - struct ustorm_eth_ag_context ustorm_ag_context; - struct timers_block_context timers_context; - struct xstorm_eth_st_context xstorm_st_context; - struct cstorm_eth_st_context cstorm_st_context; +struct regpair { + __le32 lo; + __le32 hi; }; /* - * Ethernet doorbell + * Classify rule 
opcodes in E2/E3 */ -struct eth_tx_doorbell { -#if defined(__BIG_ENDIAN) - u16 npackets; - u8 params; -#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) -#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 -#define ETH_TX_DOORBELL_SPARE (0x1<<7) -#define ETH_TX_DOORBELL_SPARE_SHIFT 7 - struct doorbell_hdr hdr; -#elif defined(__LITTLE_ENDIAN) - struct doorbell_hdr hdr; - u8 params; -#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0) -#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0 -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6) -#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6 -#define ETH_TX_DOORBELL_SPARE (0x1<<7) -#define ETH_TX_DOORBELL_SPARE_SHIFT 7 - u16 npackets; -#endif +enum classify_rule { + CLASSIFY_RULE_OPCODE_MAC, + CLASSIFY_RULE_OPCODE_VLAN, + CLASSIFY_RULE_OPCODE_PAIR, + MAX_CLASSIFY_RULE }; /* - * client init fc data + * Classify rule types in E2/E3 */ -struct client_init_fc_data { - __le16 cqe_pause_thr_low; - __le16 cqe_pause_thr_high; - __le16 bd_pause_thr_low; - __le16 bd_pause_thr_high; - __le16 sge_pause_thr_low; - __le16 sge_pause_thr_high; - __le16 rx_cos_mask; - u8 safc_group_num; - u8 safc_group_en_flg; - u8 traffic_type; - u8 reserved0; - __le16 reserved1; - __le32 reserved2; +enum classify_rule_action_type { + CLASSIFY_RULE_REMOVE, + CLASSIFY_RULE_ADD, + MAX_CLASSIFY_RULE_ACTION_TYPE }; @@ -2615,8 +3135,12 @@ struct client_init_general_data { u8 is_fcoe_flg; u8 activate_flg; u8 sp_client_id; - __le16 reserved0; - __le32 reserved1[2]; + __le16 mtu; + u8 statistics_zero_flg; + u8 func_id; + u8 cos; + u8 traffic_type; + u32 reserved0; }; @@ -2624,7 +3148,13 @@ struct client_init_general_data { * client init rx data */ struct client_init_rx_data { - u8 tpa_en_flg; + u8 tpa_en; +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) +#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 +#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) +#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 u8 vmqueue_mode_en_flg; u8 extra_data_over_sgl_en_flg; u8 cache_line_alignment_log_size; @@ -2639,17 +3169,46 @@ struct client_init_rx_data { u8 outer_vlan_removal_enable_flg; u8 status_block_id; u8 rx_sb_index_number; - u8 reserved0[3]; - __le16 bd_buff_size; + u8 reserved0; + u8 max_tpa_queues; + u8 silent_vlan_removal_flg; + __le16 max_bytes_on_bd; __le16 sge_buff_size; - __le16 mtu; + u8 approx_mcast_engine_id; + u8 rss_engine_id; struct regpair bd_page_base; struct regpair sge_page_base; struct regpair cqe_page_base; u8 is_leading_rss; u8 is_approx_mcast; __le16 max_agg_size; - __le32 reserved2[3]; + __le16 state; +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0) +#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2) +#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3) +#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3 +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4) +#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4 +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5) +#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5 +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6) +#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6 +#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7) 
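
(An aside on the new rx filtering interface, not part of this patch: the accept/drop flags defined above for client_init_rx_data.state are plain bit masks, so a driver composes the field by OR-ing them and converting to little-endian, since state is __le16. A minimal sketch follows; the helper name is hypothetical, cpu_to_le16() is the standard kernel byte-order macro.)

/* sketch: configure a client's rx state for promiscuous-style reception,
 * accepting all unicast, multicast and broadcast frames on any VLAN
 */
static void bnx2x_set_promisc_rx_state(struct client_init_rx_data *rx)
{
	rx->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL |
				CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL |
				CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL |
				CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN);
}
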
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7 + __le16 cqe_pause_thr_low; + __le16 cqe_pause_thr_high; + __le16 bd_pause_thr_low; + __le16 bd_pause_thr_high; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; + __le16 rx_cos_mask; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + __le32 reserved6[2]; }; /* @@ -2659,11 +3218,25 @@ struct client_init_tx_data { u8 enforce_security_flg; u8 tx_status_block_id; u8 tx_sb_index_number; - u8 reserved0; - __le16 mtu; - __le16 reserved1; + u8 tss_leading_client_id; + u8 tx_switching_flg; + u8 anti_spoofing_flg; + __le16 default_vlan; struct regpair tx_bd_page_base; - __le32 reserved2[2]; + __le16 state; +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0) +#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0 +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1) +#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1 +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2) +#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2 +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3) +#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3 +#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) +#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 + u8 default_vlan_flg; + u8 reserved2; + __le32 reserved3; }; /* @@ -2673,7 +3246,146 @@ struct client_init_ramrod_data { struct client_init_general_data general; struct client_init_rx_data rx; struct client_init_tx_data tx; - struct client_init_fc_data fc; +}; + + +/* + * client update ramrod data + */ +struct client_update_ramrod_data { + u8 client_id; + u8 func_id; + u8 inner_vlan_removal_enable_flg; + u8 inner_vlan_removal_change_flg; + u8 outer_vlan_removal_enable_flg; + u8 outer_vlan_removal_change_flg; + u8 anti_spoofing_enable_flg; + u8 anti_spoofing_change_flg; + u8 activate_flg; + u8 activate_change_flg; + __le16 default_vlan; + u8 default_vlan_enable_flg; + u8 default_vlan_change_flg; + __le16 silent_vlan_value; + __le16 silent_vlan_mask; + u8 silent_vlan_removal_flg; + u8 silent_vlan_change_flg; + __le32 echo; +}; + + +/* + * The eth storm context of Cstorm + */ +struct cstorm_eth_st_context { + u32 __reserved0[4]; +}; + + +struct double_regpair { + u32 regpair0_lo; + u32 regpair0_hi; + u32 regpair1_lo; + u32 regpair1_hi; +}; + + +/* + * Ethernet address types used in ethernet tx BDs + */ +enum eth_addr_type { + UNKNOWN_ADDRESS, + UNICAST_ADDRESS, + MULTICAST_ADDRESS, + BROADCAST_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + + +/* + * Common header for E2/E3 classification rule commands + */ +struct eth_classify_cmd_header { + u8 cmd_general_data; +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0) +#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0 +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1) +#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1 +#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2) +#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2 +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4) +#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4 +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5) +#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5 + u8 func_id; + u8 client_id; + u8 reserved1; +}; + + +/* + * header for eth classification config ramrod + */ +struct eth_classify_header { + u8 rule_cnt; + u8 reserved0; + __le16 reserved1; + __le32 echo; +}; + + +/* + * Command for adding/removing a MAC classification rule + */ +struct eth_classify_mac_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 reserved1; +}; + + +/* + * Command for adding/removing a MAC-VLAN pair classification rule + */ +struct
eth_classify_pair_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan; +}; + + +/* + * Command for adding/removing a VLAN classification rule + */ +struct eth_classify_vlan_cmd { + struct eth_classify_cmd_header header; + __le32 reserved0; + __le32 reserved1; + __le16 reserved2; + __le16 vlan; +}; + +/* + * union for eth classification rule + */ +union eth_classify_rule_cmd { + struct eth_classify_mac_cmd mac; + struct eth_classify_vlan_cmd vlan; + struct eth_classify_pair_cmd pair; +}; + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_classify_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; }; @@ -2681,8 +3393,45 @@ struct client_init_ramrod_data { * The data contains the client ID needed for the ramrod */ struct eth_common_ramrod_data { - u32 client_id; - u32 reserved1; + __le32 client_id; + __le32 reserved1; +}; + + +/* + * The eth storm context of Ustorm + */ +struct ustorm_eth_st_context { + u32 reserved0[52]; +}; + +/* + * The eth storm context of Tstorm + */ +struct tstorm_eth_st_context { + u32 __reserved0[28]; +}; + +/* + * The eth storm context of Xstorm + */ +struct xstorm_eth_st_context { + u32 reserved0[60]; +}; + +/* + * Ethernet connection context + */ +struct eth_context { + struct ustorm_eth_st_context ustorm_st_context; + struct tstorm_eth_st_context tstorm_st_context; + struct xstorm_eth_ag_context xstorm_ag_context; + struct tstorm_eth_ag_context tstorm_ag_context; + struct cstorm_eth_ag_context cstorm_ag_context; + struct ustorm_eth_ag_context ustorm_ag_context; + struct timers_block_context timers_context; + struct xstorm_eth_st_context xstorm_st_context; + struct cstorm_eth_st_context cstorm_st_context; }; @@ -2695,24 +3444,47 @@ union eth_sgl_or_raw_data { }; /* + * eth FP end aggregation CQE parameters struct + */ +struct eth_end_agg_rx_cqe { + u8 type_error_flags; +#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0) +#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0 +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2) +#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3) +#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3 + u8 reserved1; + u8 queue_index; + u8 reserved2; + __le32 timestamp_delta; + __le16 num_of_coalesced_segs; + __le16 pkt_len; + u8 pure_ack_count; + u8 reserved3; + __le16 reserved4; + union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 reserved5[8]; +}; + + +/* * regular eth FP CQE parameters struct */ struct eth_fast_path_rx_cqe { u8 type_error_flags; -#define ETH_FAST_PATH_RX_CQE_TYPE (0x1<<0) +#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0) #define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<1) -#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 1 -#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<2) -#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 2 -#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<3) -#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 3 -#define ETH_FAST_PATH_RX_CQE_START_FLG (0x1<<4) -#define ETH_FAST_PATH_RX_CQE_START_FLG_SHIFT 4 -#define ETH_FAST_PATH_RX_CQE_END_FLG (0x1<<5) -#define ETH_FAST_PATH_RX_CQE_END_FLG_SHIFT 5 -#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x3<<6) -#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2) +#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2 +#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3) +#define
ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3 +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4) +#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) +#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 +#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 u8 status_flags; #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 @@ -2726,39 +3498,108 @@ struct eth_fast_path_rx_cqe { #define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6 #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7) #define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7 - u8 placement_offset; u8 queue_index; + u8 placement_offset; __le32 rss_hash_result; __le16 vlan_tag; __le16 pkt_len; __le16 len_on_bd; struct parsing_flags pars_flags; union eth_sgl_or_raw_data sgl_or_raw_data; + __le32 reserved1[8]; }; /* - * The data for RSS setup ramrod + * Command for setting classification flags for a client + */ +struct eth_filter_rules_cmd { + u8 cmd_general_data; +#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0) +#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1) +#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1 +#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2) +#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2 + u8 func_id; + u8 client_id; + u8 reserved1; + __le16 state; +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0) +#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2) +#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3) +#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3 +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4) +#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5) +#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6) +#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6 +#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7) +#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7 + __le16 reserved3; + struct regpair reserved4; +}; + + +/* + * parameters for eth classification filters ramrod + */ +struct eth_filter_rules_ramrod_data { + struct eth_classify_header header; + struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; +}; + + +/* + * parameters for eth classification configuration ramrod + */ +struct eth_general_rules_ramrod_data { + struct eth_classify_header header; + union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT]; +}; + + +/* + * The data for Halt ramrod */ struct eth_halt_ramrod_data { - u32 client_id; - u32 reserved0; + __le32 client_id; + __le32 reserved0; }; + /* - * The data for statistics query ramrod + * Command for setting multicast classification for a client */ -struct common_query_ramrod_data { -#if defined(__BIG_ENDIAN) - u8 reserved0; - u8 collect_port; - u16 drv_counter; -#elif defined(__LITTLE_ENDIAN) - u16 drv_counter; - u8 collect_port; - u8 reserved0; -#endif - u32 ctr_id_vector; +struct eth_multicast_rules_cmd { + u8 cmd_general_data; +#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0) +#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0 +#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1) +#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1 
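
(Another aside, not part of this patch: the hunk above widens ETH_FAST_PATH_RX_CQE_TYPE from a one-bit flag to a two-bit field, so the CQE kind is now a value decoded against enum eth_rx_cqe_type, defined further below, rather than tested as a flag. A minimal sketch of the decode, with a hypothetical helper name:)

static inline u8 bnx2x_cqe_type(u8 type_error_flags)
{
	/* isolate the 2-bit CQE type held in bits 1:0 of type_error_flags */
	return (type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE) >>
	       ETH_FAST_PATH_RX_CQE_TYPE_SHIFT;
}

/* e.g. a result of RX_ETH_CQE_TYPE_ETH_START_AGG would mark the CQE that
 * opens a TPA aggregation, paired with the new eth_end_agg_rx_cqe that
 * closes one
 */
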
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2) +#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2 +#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3) +#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3 + u8 func_id; + u8 bin_id; + u8 engine_id; + __le32 reserved2; + struct regpair reserved3; +}; + + +/* + * parameters for multicast classification ramrod + */ +struct eth_multicast_rules_ramrod_data { + struct eth_classify_header header; + struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT]; }; @@ -2779,16 +3620,86 @@ union eth_ramrod_data { /* + * RSS toeplitz hash type, as reported in CQE + */ +enum eth_rss_hash_type { + DEFAULT_HASH_TYPE, + IPV4_HASH_TYPE, + TCP_IPV4_HASH_TYPE, + IPV6_HASH_TYPE, + TCP_IPV6_HASH_TYPE, + VLAN_PRI_HASH_TYPE, + E1HOV_PRI_HASH_TYPE, + DSCP_HASH_TYPE, + MAX_ETH_RSS_HASH_TYPE +}; + + +/* + * Ethernet RSS mode + */ +enum eth_rss_mode { + ETH_RSS_MODE_DISABLED, + ETH_RSS_MODE_REGULAR, + ETH_RSS_MODE_VLAN_PRI, + ETH_RSS_MODE_E1HOV_PRI, + ETH_RSS_MODE_IP_DSCP, + MAX_ETH_RSS_MODE +}; + + +/* + * parameters for RSS update ramrod (E2) + */ +struct eth_rss_update_ramrod_data { + u8 rss_engine_id; + u8 capabilities; +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6 +#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7) +#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7 + u8 rss_result_mask; + u8 rss_mode; + __le32 __reserved2; + u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; + __le32 rss_key[T_ETH_RSS_KEY]; + __le32 echo; + __le32 reserved3; +}; + + +/* + * The eth Rx Buffer Descriptor + */ +struct eth_rx_bd { + __le32 addr_lo; + __le32 addr_hi; +}; + + +/* * Eth Rx Cqe structure- general structure for ramrods */ struct common_ramrod_eth_rx_cqe { u8 ramrod_type; -#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0) +#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0) #define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0 -#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1) -#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1 -#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2) -#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2 +#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2) +#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2 +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3) +#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3 u8 conn_type; __le16 reserved1; __le32 conn_and_cmd_data; @@ -2797,7 +3708,8 @@ struct common_ramrod_eth_rx_cqe { #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24) #define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24 struct ramrod_data protocol_data; - __le32 reserved2[4]; + __le32 echo; + __le32 reserved2[11]; }; /* @@ -2806,7 +3718,7 @@ struct common_ramrod_eth_rx_cqe { struct eth_rx_cqe_next_page { __le32 addr_lo; __le32 addr_hi; - __le32 
reserved[6]; + __le32 reserved[14]; }; /* @@ -2816,6 +3728,38 @@ union eth_rx_cqe { struct eth_fast_path_rx_cqe fast_path_cqe; struct common_ramrod_eth_rx_cqe ramrod_cqe; struct eth_rx_cqe_next_page next_page_cqe; + struct eth_end_agg_rx_cqe end_agg_cqe; +}; + + +/* + * Values for RX ETH CQE type field + */ +enum eth_rx_cqe_type { + RX_ETH_CQE_TYPE_ETH_FASTPATH, + RX_ETH_CQE_TYPE_ETH_RAMROD, + RX_ETH_CQE_TYPE_ETH_START_AGG, + RX_ETH_CQE_TYPE_ETH_STOP_AGG, + MAX_ETH_RX_CQE_TYPE +}; + + +/* + * Type of SGL/Raw field in ETH RX fast path CQE + */ +enum eth_rx_fp_sel { + ETH_FP_CQE_REGULAR, + ETH_FP_CQE_RAW, + MAX_ETH_RX_FP_SEL +}; + + +/* + * The eth Rx SGE Descriptor + */ +struct eth_rx_sge { + __le32 addr_lo; + __le32 addr_hi; }; @@ -2837,14 +3781,18 @@ struct spe_hdr { }; /* - * Ethernet slow path element + * specific data for ethernet slow path element */ union eth_specific_data { u8 protocol_data[8]; + struct regpair client_update_ramrod_data; struct regpair client_init_ramrod_init_data; struct eth_halt_ramrod_data halt_ramrod_data; struct regpair update_data_addr; struct eth_common_ramrod_data common_ramrod_data; + struct regpair classify_cfg_addr; + struct regpair filter_cfg_addr; + struct regpair mcast_cfg_addr; }; /* @@ -2857,94 +3805,202 @@ struct eth_spe { /* - * array of 13 bds as appears in the eth xstorm context + * Ethernet command ID for slow path elements */ -struct eth_tx_bds_array { - union eth_tx_bd_types bds[13]; +enum eth_spqe_cmd_id { + RAMROD_CMD_ID_ETH_UNUSED, + RAMROD_CMD_ID_ETH_CLIENT_SETUP, + RAMROD_CMD_ID_ETH_HALT, + RAMROD_CMD_ID_ETH_FORWARD_SETUP, + RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP, + RAMROD_CMD_ID_ETH_CLIENT_UPDATE, + RAMROD_CMD_ID_ETH_EMPTY, + RAMROD_CMD_ID_ETH_TERMINATE, + RAMROD_CMD_ID_ETH_TPA_UPDATE, + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES, + RAMROD_CMD_ID_ETH_FILTER_RULES, + RAMROD_CMD_ID_ETH_MULTICAST_RULES, + RAMROD_CMD_ID_ETH_RSS_UPDATE, + RAMROD_CMD_ID_ETH_SET_MAC, + MAX_ETH_SPQE_CMD_ID }; /* - * Common configuration parameters per function in Tstorm + * eth tpa update command */ -struct tstorm_eth_function_common_config { -#if defined(__BIG_ENDIAN) - u8 reserved1; - u8 rss_result_mask; - u16 config_flags; -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8 -#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9) -#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9 -#elif defined(__LITTLE_ENDIAN) - u16 config_flags; -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY 
(0x1<<1) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA (0x1<<7) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA_SHIFT 7 -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<8) -#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 8 -#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0x7F<<9) -#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 9 - u8 rss_result_mask; - u8 reserved1; -#endif - u16 vlan_id[2]; +enum eth_tpa_update_command { + TPA_UPDATE_NONE_COMMAND, + TPA_UPDATE_ENABLE_COMMAND, + TPA_UPDATE_DISABLE_COMMAND, + MAX_ETH_TPA_UPDATE_COMMAND }; + /* - * RSS idirection table update configuration + * Tx regular BD structure */ -struct rss_update_config { -#if defined(__BIG_ENDIAN) - u16 toe_rss_bitmap; - u16 flags; -#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0) -#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0 -#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1) -#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1 -#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2) -#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2 -#elif defined(__LITTLE_ENDIAN) - u16 flags; -#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE (0x1<<0) -#define RSS_UPDATE_CONFIG_ETH_UPDATE_ENABLE_SHIFT 0 -#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE (0x1<<1) -#define RSS_UPDATE_CONFIG_TOE_UPDATE_ENABLE_SHIFT 1 -#define __RSS_UPDATE_CONFIG_RESERVED0 (0x3FFF<<2) -#define __RSS_UPDATE_CONFIG_RESERVED0_SHIFT 2 - u16 toe_rss_bitmap; -#endif - u32 reserved1; +struct eth_tx_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 total_pkt_bytes; + __le16 nbytes; + u8 reserved[4]; +}; + + +/* + * structure for easy accessibility to assembler + */ +struct eth_tx_bd_flags { + u8 as_bitfield; +#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0) +#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0 +#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1) +#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1 +#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2) +#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2 +#define ETH_TX_BD_FLAGS_START_BD (0x1<<4) +#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4 +#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5) +#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5 +#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6) +#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6 +#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7) +#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7 +}; + +/* + * The eth Tx Buffer Descriptor + */ +struct eth_tx_start_bd { + __le32 addr_lo; + __le32 addr_hi; + __le16 nbd; + __le16 nbytes; + __le16 vlan_or_ethertype; + struct eth_tx_bd_flags bd_flags; + u8 general_data; +#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) +#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) +#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 +#define ETH_TX_START_BD_RESREVED (0x1<<5) +#define ETH_TX_START_BD_RESREVED_SHIFT 5 +#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6) +#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6 +}; + +/* + * Tx parsing BD structure for ETH E1/E1h + */ +struct eth_tx_parse_bd_e1x { + u8 global_data; +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W 
(0xF<<0) +#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4) +#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5) +#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5 +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7 + u8 tcp_flags; +#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0) +#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0 +#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1) +#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1 +#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2) +#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2 +#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3) +#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3 +#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4) +#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4 +#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5) +#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5 +#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6) +#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6 +#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7) +#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7 + u8 ip_hlen_w; + s8 reserved; + __le16 total_hlen_w; + __le16 tcp_pseudo_csum; + __le16 lso_mss; + __le16 ip_id; + __le32 tcp_send_seq; +}; + +/* + * Tx parsing BD structure for ETH E2 + */ +struct eth_tx_parse_bd_e2 { + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le32 parsing_data; +#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0) +#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13) +#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13 +#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17) +#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17 +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31) +#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31 +}; + +/* + * The last BD in the BD memory will hold a pointer to the next BD memory + */ +struct eth_tx_next_bd { + __le32 addr_lo; + __le32 addr_hi; + u8 reserved[8]; +}; + +/* + * union for 4 Bd types + */ +union eth_tx_bd_types { + struct eth_tx_start_bd start_bd; + struct eth_tx_bd reg_bd; + struct eth_tx_parse_bd_e1x parse_bd_e1x; + struct eth_tx_parse_bd_e2 parse_bd_e2; + struct eth_tx_next_bd next_bd; +}; + +/* + * array of 13 bds as appears in the eth xstorm context + */ +struct eth_tx_bds_array { + union eth_tx_bd_types bds[13]; }; + /* - * parameters for eth update ramrod + * VLAN mode on TX BDs */ -struct eth_update_ramrod_data { - struct tstorm_eth_function_common_config func_config; - u8 indirectionTable[128]; - struct rss_update_config rss_config; +enum eth_tx_vlan_type { + X_ETH_NO_VLAN, + X_ETH_OUTBAND_VLAN, + X_ETH_INBAND_VLAN, + X_ETH_FW_ADDED_VLAN, + MAX_ETH_TX_VLAN_TYPE +}; + + +/* + * Ethernet VLAN filtering mode in E1x + */ +enum eth_vlan_filter_mode { + ETH_VLAN_FILTER_ANY_VLAN, + ETH_VLAN_FILTER_SPECIFIC_VLAN, + ETH_VLAN_FILTER_CLASSIFY, + MAX_ETH_VLAN_FILTER_MODE }; @@ -2954,9 +4010,8 @@ struct eth_update_ramrod_data { struct mac_configuration_hdr { u8 length; u8 offset; - u16 client_id; - u16 echo; - u16 reserved1; + __le16 client_id; + __le32 echo; }; /* @@ -2981,8 +4036,8 @@ struct mac_configuration_entry { #define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5 #define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6) #define 
MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6 - u16 reserved0; - u32 clients_bit_vector; + __le16 reserved0; + __le32 clients_bit_vector; }; /* @@ -2995,6 +4050,36 @@ struct mac_configuration_cmd { /* + * Set-MAC command type (in E1x) + */ +enum set_mac_action_type { + T_ETH_MAC_COMMAND_INVALIDATE, + T_ETH_MAC_COMMAND_SET, + MAX_SET_MAC_ACTION_TYPE +}; + + +/* + * tpa update ramrod data + */ +struct tpa_update_ramrod_data { + u8 update_ipv4; + u8 update_ipv6; + u8 client_id; + u8 max_tpa_queues; + u8 max_sges_for_packet; + u8 complete_on_both_clients; + __le16 reserved1; + __le16 sge_buff_size; + __le16 max_agg_size; + __le32 sge_page_base_lo; + __le32 sge_page_base_hi; + __le16 sge_pause_thr_low; + __le16 sge_pause_thr_high; +}; + + +/* * approximate-match multicast filtering for E1H per function in Tstorm */ struct tstorm_eth_approximate_match_multicast_filtering { @@ -3003,35 +4088,50 @@ struct tstorm_eth_approximate_match_multicast_filtering { /* + * Common configuration parameters per function in Tstorm + */ +struct tstorm_eth_function_common_config { + __le16 config_flags; +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4 +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7) +#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7 +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8) +#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8 + u8 rss_result_mask; + u8 reserved1; + __le16 vlan_id[2]; +}; + + +/* * MAC filtering configuration parameters per port in Tstorm */ struct tstorm_eth_mac_filter_config { - u32 ucast_drop_all; - u32 ucast_accept_all; - u32 mcast_drop_all; - u32 mcast_accept_all; - u32 bcast_drop_all; - u32 bcast_accept_all; - u32 vlan_filter[2]; - u32 unmatched_unicast; - u32 reserved; + __le32 ucast_drop_all; + __le32 ucast_accept_all; + __le32 mcast_drop_all; + __le32 mcast_accept_all; + __le32 bcast_accept_all; + __le32 vlan_filter[2]; + __le32 unmatched_unicast; }; /* - * common flag to indicate existence of TPA. 
+ * tx only queue init ramrod data */ -struct tstorm_eth_tpa_exist { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 reserved0; - u8 tpa_exist; -#elif defined(__LITTLE_ENDIAN) - u8 tpa_exist; - u8 reserved0; - u16 reserved1; -#endif - u32 reserved2; +struct tx_queue_init_ramrod_data { + struct client_init_general_data general; + struct client_init_tx_data tx; }; @@ -3061,10 +4161,8 @@ struct ustorm_eth_rx_producers { */ struct cfc_del_event_data { u32 cid; - u8 error; - u8 reserved0; - u16 reserved1; - u32 reserved2; + u32 reserved0; + u32 reserved1; }; @@ -3072,22 +4170,18 @@ struct cfc_del_event_data { * per-port SAFC demo variables */ struct cmng_flags_per_port { - u8 con_number[NUM_OF_PROTOCOLS]; u32 cmng_enables; #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0) #define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0 #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1) #define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1 -#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL (0x1<<2) -#define CMNG_FLAGS_PER_PORT_FAIRNESS_PROTOCOL_SHIFT 2 -#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL (0x1<<3) -#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_PROTOCOL_SHIFT 3 -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<4) -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 4 -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<5) -#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 5 -#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0x3FFFFFF<<6) -#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 6 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2 +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3) +#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3 +#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4) +#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4 + u32 __reserved1; }; @@ -3106,6 +4200,7 @@ struct fairness_vars_per_port { u32 upper_bound; u32 fair_threshold; u32 fairness_timeout; + u32 reserved0; }; /* @@ -3122,65 +4217,65 @@ struct safc_struct_per_port { u16 __reserved1; #endif u8 cos_to_traffic_types[MAX_COS_NUMBER]; - u32 __reserved2; u16 cos_to_pause_mask[NUM_OF_SAFC_BITS]; }; /* - * per-port PFC variables + * Per-port congestion management variables */ -struct pfc_struct_per_port { - u8 priority_to_traffic_types[MAX_PFC_PRIORITIES]; -#if defined(__BIG_ENDIAN) - u16 pfc_pause_quanta_in_nanosec; - u8 __reserved0; - u8 priority_non_pausable_mask; -#elif defined(__LITTLE_ENDIAN) - u8 priority_non_pausable_mask; - u8 __reserved0; - u16 pfc_pause_quanta_in_nanosec; -#endif +struct cmng_struct_per_port { + struct rate_shaping_vars_per_port rs_vars; + struct fairness_vars_per_port fair_vars; + struct safc_struct_per_port safc_vars; + struct cmng_flags_per_port flags; }; + /* - * Priority and cos + * Protocol-common command ID for slow path elements */ -struct priority_cos { -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 cos; - u8 priority; -#elif defined(__LITTLE_ENDIAN) - u8 priority; - u8 cos; - u16 reserved1; -#endif - u32 reserved2; +enum common_spqe_cmd_id { + RAMROD_CMD_ID_COMMON_UNUSED, + RAMROD_CMD_ID_COMMON_FUNCTION_START, + RAMROD_CMD_ID_COMMON_FUNCTION_STOP, + RAMROD_CMD_ID_COMMON_CFC_DEL, + RAMROD_CMD_ID_COMMON_CFC_DEL_WB, + RAMROD_CMD_ID_COMMON_STAT_QUERY, + RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, + RAMROD_CMD_ID_COMMON_START_TRAFFIC, + RAMROD_CMD_ID_COMMON_RESERVED1, + RAMROD_CMD_ID_COMMON_RESERVED2, + MAX_COMMON_SPQE_CMD_ID }; + /* - * Per-port congestion management variables + * Per-protocol connection types */ -struct cmng_struct_per_port { - struct 
rate_shaping_vars_per_port rs_vars; - struct fairness_vars_per_port fair_vars; - struct safc_struct_per_port safc_vars; - struct pfc_struct_per_port pfc_vars; -#if defined(__BIG_ENDIAN) - u16 __reserved1; - u8 dcb_enabled; - u8 llfc_mode; -#elif defined(__LITTLE_ENDIAN) - u8 llfc_mode; - u8 dcb_enabled; - u16 __reserved1; -#endif - struct priority_cos - traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES]; - struct cmng_flags_per_port flags; +enum connection_type { + ETH_CONNECTION_TYPE, + TOE_CONNECTION_TYPE, + RDMA_CONNECTION_TYPE, + ISCSI_CONNECTION_TYPE, + FCOE_CONNECTION_TYPE, + RESERVED_CONNECTION_TYPE_0, + RESERVED_CONNECTION_TYPE_1, + RESERVED_CONNECTION_TYPE_2, + NONE_CONNECTION_TYPE, + MAX_CONNECTION_TYPE }; +/* + * Cos modes + */ +enum cos_mode { + OVERRIDE_COS, + STATIC_COS, + FW_WRR, + MAX_COS_MODE +}; + /* * Dynamic HC counters set by the driver @@ -3197,126 +4292,174 @@ struct cstorm_queue_zone_data { struct regpair reserved[2]; }; + /* - * Dynamic host coalescing init parameters + * Vf-PF channel data in cstorm ram (non-triggered zone) */ -struct dynamic_hc_config { - u32 threshold[3]; - u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; - u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; +struct vf_pf_channel_zone_data { + u32 msg_addr_lo; + u32 msg_addr_hi; }; - /* - * Protocol-common statistics collected by the Xstorm (per client) + * zone for VF non-triggered data */ -struct xstorm_per_client_stats { - __le32 reserved0; - __le32 unicast_pkts_sent; - struct regpair unicast_bytes_sent; - struct regpair multicast_bytes_sent; - __le32 multicast_pkts_sent; - __le32 broadcast_pkts_sent; - struct regpair broadcast_bytes_sent; - __le16 stats_counter; - __le16 reserved1; - __le32 reserved2; +struct non_trigger_vf_zone { + struct vf_pf_channel_zone_data vf_pf_channel; }; /* - * Common statistics collected by the Xstorm (per port) + * Vf-PF channel trigger zone in cstorm ram */ -struct xstorm_common_stats { - struct xstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; +struct vf_pf_channel_zone_trigger { + u8 addr_valid; }; /* - * Protocol-common statistics collected by the Tstorm (per port) + * zone that triggers the in-bound interrupt */ -struct tstorm_per_port_stats { - __le32 mac_filter_discard; - __le32 xxoverflow_discard; - __le32 brb_truncate_discard; - __le32 mac_discard; +struct trigger_vf_zone { +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 reserved0; + struct vf_pf_channel_zone_trigger vf_pf_channel; +#elif defined(__LITTLE_ENDIAN) + struct vf_pf_channel_zone_trigger vf_pf_channel; + u8 reserved0; + u16 reserved1; +#endif + u32 reserved2; }; /* - * Protocol-common statistics collected by the Tstorm (per client) + * zone B per-VF data */ -struct tstorm_per_client_stats { - struct regpair rcv_unicast_bytes; - struct regpair rcv_broadcast_bytes; - struct regpair rcv_multicast_bytes; - struct regpair rcv_error_bytes; - __le32 checksum_discard; - __le32 packets_too_big_discard; - __le32 rcv_unicast_pkts; - __le32 rcv_broadcast_pkts; - __le32 rcv_multicast_pkts; - __le32 no_buff_discard; - __le32 ttl0_discard; - __le16 stats_counter; - __le16 reserved0; +struct cstorm_vf_zone_data { + struct non_trigger_vf_zone non_trigger; + struct trigger_vf_zone trigger; }; + /* - * Protocol-common statistics collected by the Tstorm + * Dynamic host coalescing init parameters, per state machine */ -struct tstorm_common_stats { - struct 
tstorm_per_port_stats port_statistics; - struct tstorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; +struct dynamic_hc_sm_config { + u32 threshold[3]; + u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES]; + u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES]; }; /* - * Protocol-common statistics collected by the Ustorm (per client) + * Dynamic host coalescing init parameters */ -struct ustorm_per_client_stats { - struct regpair ucast_no_buff_bytes; - struct regpair mcast_no_buff_bytes; - struct regpair bcast_no_buff_bytes; - __le32 ucast_no_buff_pkts; - __le32 mcast_no_buff_pkts; - __le32 bcast_no_buff_pkts; - __le16 stats_counter; - __le16 reserved0; +struct dynamic_hc_config { + struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM]; +}; + + +struct e2_integ_data { +#if defined(__BIG_ENDIAN) + u8 flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 + u8 cos; + u8 voq; + u8 pbf_queue; +#elif defined(__LITTLE_ENDIAN) + u8 pbf_queue; + u8 voq; + u8 cos; + u8 flags; +#define E2_INTEG_DATA_TESTING_EN (0x1<<0) +#define E2_INTEG_DATA_TESTING_EN_SHIFT 0 +#define E2_INTEG_DATA_LB_TX (0x1<<1) +#define E2_INTEG_DATA_LB_TX_SHIFT 1 +#define E2_INTEG_DATA_COS_TX (0x1<<2) +#define E2_INTEG_DATA_COS_TX_SHIFT 2 +#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3) +#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3 +#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4) +#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4 +#define E2_INTEG_DATA_RESERVED (0x7<<5) +#define E2_INTEG_DATA_RESERVED_SHIFT 5 +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 reserved2; + u8 ramEn; +#elif defined(__LITTLE_ENDIAN) + u8 ramEn; + u8 reserved2; + u16 reserved3; +#endif }; + /* - * Protocol-common statistics collected by the Ustorm + * set mac event data */ -struct ustorm_common_stats { - struct ustorm_per_client_stats client_statistics[MAX_STAT_COUNTER_ID]; +struct eth_event_data { + u32 echo; + u32 reserved0; + u32 reserved1; }; + /* - * Eth statistics query structure for the eth_stats_query ramrod + * pf-vf event data */ -struct eth_stats_query { - struct xstorm_common_stats xstorm_common; - struct tstorm_common_stats tstorm_common; - struct ustorm_common_stats ustorm_common; +struct vf_pf_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 msg_addr_lo; + u32 msg_addr_hi; }; +/* + * VF FLR event data + */ +struct vf_flr_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; + u32 reserved2; + u32 reserved3; +}; /* - * set mac event data + * malicious VF event data */ -struct set_mac_event_data { - u16 echo; - u16 reserved0; - u32 reserved1; +struct malicious_vf_event_data { + u8 vf_id; + u8 reserved0; + u16 reserved1; u32 reserved2; + u32 reserved3; }; /* * union for all event ring message types */ union event_data { - struct set_mac_event_data set_mac_event; + struct vf_pf_event_data vf_pf_event; + struct eth_event_data eth_event; struct cfc_del_event_data cfc_del_event; + struct vf_flr_event_data vf_flr_event; + struct 
malicious_vf_event_data malicious_vf_event; }; @@ -3343,7 +4486,7 @@ struct event_ring_data { */ struct event_ring_msg { u8 opcode; - u8 reserved0; + u8 error; u16 reserved1; union event_data data; }; @@ -3366,32 +4509,82 @@ union event_ring_elem { /* + * Common event ring opcodes + */ +enum event_ring_opcode { + EVENT_RING_OPCODE_VF_PF_CHANNEL, + EVENT_RING_OPCODE_FUNCTION_START, + EVENT_RING_OPCODE_FUNCTION_STOP, + EVENT_RING_OPCODE_CFC_DEL, + EVENT_RING_OPCODE_CFC_DEL_WB, + EVENT_RING_OPCODE_STAT_QUERY, + EVENT_RING_OPCODE_STOP_TRAFFIC, + EVENT_RING_OPCODE_START_TRAFFIC, + EVENT_RING_OPCODE_VF_FLR, + EVENT_RING_OPCODE_MALICIOUS_VF, + EVENT_RING_OPCODE_FORWARD_SETUP, + EVENT_RING_OPCODE_RSS_UPDATE_RULES, + EVENT_RING_OPCODE_RESERVED1, + EVENT_RING_OPCODE_RESERVED2, + EVENT_RING_OPCODE_SET_MAC, + EVENT_RING_OPCODE_CLASSIFICATION_RULES, + EVENT_RING_OPCODE_FILTERS_RULES, + EVENT_RING_OPCODE_MULTICAST_RULES, + MAX_EVENT_RING_OPCODE +}; + + +/* + * Modes for fairness algorithm + */ +enum fairness_mode { + FAIRNESS_COS_WRR_MODE, + FAIRNESS_COS_ETS_MODE, + MAX_FAIRNESS_MODE +}; + + +/* * per-vnic fairness variables */ struct fairness_vars_per_vn { u32 cos_credit_delta[MAX_COS_NUMBER]; - u32 protocol_credit_delta[NUM_OF_PROTOCOLS]; u32 vn_credit_delta; u32 __reserved0; }; /* + * Priority and cos + */ +struct priority_cos { + u8 priority; + u8 cos; + __le16 reserved1; +}; + +/* * The data for flow control configuration */ struct flow_control_configuration { - struct priority_cos - traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES]; -#if defined(__BIG_ENDIAN) - u16 reserved1; - u8 dcb_version; - u8 dcb_enabled; -#elif defined(__LITTLE_ENDIAN) + struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; u8 dcb_enabled; u8 dcb_version; - u16 reserved1; -#endif - u32 reserved2; + u8 dont_add_pri_0_en; + u8 reserved1; + __le32 reserved2; +}; + + +/* + * + */ +struct function_start_data { + __le16 function_mode; + __le16 sd_vlan_tag; + u16 reserved; + u8 path_id; + u8 network_cos_mode; }; @@ -3504,13 +4697,13 @@ struct hc_sb_data { struct pci_entity p_func; #if defined(__BIG_ENDIAN) u8 rsrv0; + u8 state; u8 dhc_qzone_id; - u8 __dynamic_hc_level; u8 same_igu_sb_1b; #elif defined(__LITTLE_ENDIAN) u8 same_igu_sb_1b; - u8 __dynamic_hc_level; u8 dhc_qzone_id; + u8 state; u8 rsrv0; #endif struct regpair rsrv1[2]; @@ -3518,18 +4711,30 @@ struct hc_sb_data { /* + * Segment types for host coalescing + */ +enum hc_segment { + HC_REGULAR_SEGMENT, + HC_DEFAULT_SEGMENT, + MAX_HC_SEGMENT +}; + + +/* * The fast-path status block meta-data */ struct hc_sp_status_block_data { struct regpair host_sb_addr; #if defined(__BIG_ENDIAN) - u16 rsrv; + u8 rsrv1; + u8 state; u8 igu_seg_id; u8 igu_sb_id; #elif defined(__LITTLE_ENDIAN) u8 igu_sb_id; u8 igu_seg_id; - u16 rsrv; + u8 state; + u8 rsrv1; #endif struct pci_entity p_func; }; @@ -3554,6 +4759,129 @@ struct hc_status_block_data_e2 { /* + * IGU block operation modes (in Everest2) + */ +enum igu_mode { + HC_IGU_BC_MODE, + HC_IGU_NBC_MODE, + MAX_IGU_MODE +}; + + +/* + * IP versions + */ +enum ip_ver { + IP_V4, + IP_V6, + MAX_IP_VER +}; + + +/* + * Multi-function modes + */ +enum mf_mode { + SINGLE_FUNCTION, + MULTI_FUNCTION_SD, + MULTI_FUNCTION_SI, + MULTI_FUNCTION_RESERVED, + MAX_MF_MODE +}; + +/* + * Protocol-common statistics collected by the Tstorm (per pf) + */ +struct tstorm_per_pf_stats { + struct regpair rcv_error_bytes; +}; + +/* + * + */ +struct per_pf_stats { + struct tstorm_per_pf_stats tstorm_pf_statistics; +}; + + +/* + * Protocol-common statistics 
collected by the Tstorm (per port) + */ +struct tstorm_per_port_stats { + __le32 mac_discard; + __le32 mac_filter_discard; + __le32 brb_truncate_discard; + __le32 mf_tag_discard; + __le32 packet_drop; + __le32 reserved; +}; + +/* + * + */ +struct per_port_stats { + struct tstorm_per_port_stats tstorm_port_statistics; +}; + + +/* + * Protocol-common statistics collected by the Tstorm (per client) + */ +struct tstorm_per_queue_stats { + struct regpair rcv_ucast_bytes; + __le32 rcv_ucast_pkts; + __le32 checksum_discard; + struct regpair rcv_bcast_bytes; + __le32 rcv_bcast_pkts; + __le32 pkts_too_big_discard; + struct regpair rcv_mcast_bytes; + __le32 rcv_mcast_pkts; + __le32 ttl0_discard; + __le16 no_buff_discard; + __le16 reserved0; + __le32 reserved1; +}; + +/* + * Protocol-common statistics collected by the Ustorm (per client) + */ +struct ustorm_per_queue_stats { + struct regpair ucast_no_buff_bytes; + struct regpair mcast_no_buff_bytes; + struct regpair bcast_no_buff_bytes; + __le32 ucast_no_buff_pkts; + __le32 mcast_no_buff_pkts; + __le32 bcast_no_buff_pkts; + __le32 coalesced_pkts; + struct regpair coalesced_bytes; + __le32 coalesced_events; + __le32 coalesced_aborts; +}; + +/* + * Protocol-common statistics collected by the Xstorm (per client) + */ +struct xstorm_per_queue_stats { + struct regpair ucast_bytes_sent; + struct regpair mcast_bytes_sent; + struct regpair bcast_bytes_sent; + __le32 ucast_pkts_sent; + __le32 mcast_pkts_sent; + __le32 bcast_pkts_sent; + __le32 error_drop_pkts; +}; + +/* + * + */ +struct per_queue_stats { + struct tstorm_per_queue_stats tstorm_queue_statistics; + struct ustorm_per_queue_stats ustorm_queue_statistics; + struct xstorm_per_queue_stats xstorm_queue_statistics; +}; + + +/* * FW version stored in first line of pram */ struct pram_fw_version { @@ -3582,7 +4910,6 @@ union protocol_common_specific_data { u8 protocol_data[8]; struct regpair phy_address; struct regpair mac_config_addr; - struct common_query_ramrod_data query_ramrod_data; }; /* @@ -3613,7 +4940,6 @@ struct rate_shaping_counter { * per-vnic rate shaping variables */ struct rate_shaping_vars_per_vn { - struct rate_shaping_counter protocol_counters[NUM_OF_PROTOCOLS]; struct rate_shaping_counter vn_counter; }; @@ -3628,39 +4954,100 @@ struct slow_path_element { /* - * eth/toe flags that indicate if to query + * Protocol-common statistics counter */ -struct stats_indication_flags { - u32 collect_eth; - u32 collect_toe; +struct stats_counter { + __le16 xstats_counter; + __le16 reserved0; + __le32 reserved1; + __le16 tstats_counter; + __le16 reserved2; + __le32 reserved3; + __le16 ustats_counter; + __le16 reserved4; + __le32 reserved5; + __le16 cstats_counter; + __le16 reserved6; + __le32 reserved7; }; /* - * per-port PFC variables + * */ -struct storm_pfc_struct_per_port { -#if defined(__BIG_ENDIAN) - u16 mid_mac_addr; - u16 msb_mac_addr; -#elif defined(__LITTLE_ENDIAN) - u16 msb_mac_addr; - u16 mid_mac_addr; -#endif -#if defined(__BIG_ENDIAN) - u16 pfc_pause_quanta_in_nanosec; - u16 lsb_mac_addr; -#elif defined(__LITTLE_ENDIAN) - u16 lsb_mac_addr; - u16 pfc_pause_quanta_in_nanosec; -#endif +struct stats_query_entry { + u8 kind; + u8 index; + __le16 funcID; + __le32 reserved; + struct regpair address; }; /* - * Per-port congestion management variables + * statistic command */ -struct storm_cmng_struct_per_port { - struct storm_pfc_struct_per_port pfc_vars; +struct stats_query_cmd_group { + struct stats_query_entry query[STATS_QUERY_CMD_COUNT]; +}; + + +/* + * statistic command header + */ 
+struct stats_query_header { + u8 cmd_num; + u8 reserved0; + __le16 drv_stats_counter; + __le32 reserved1; + struct regpair stats_counters_addrs; +}; + + +/* + * Types of statistics query entry + */ +enum stats_query_type { + STATS_TYPE_QUEUE, + STATS_TYPE_PORT, + STATS_TYPE_PF, + STATS_TYPE_TOE, + STATS_TYPE_FCOE, + MAX_STATS_QUERY_TYPE +}; + + +/* + * Indicates the function status block state + */ +enum status_block_state { + SB_DISABLED, + SB_ENABLED, + SB_CLEANED, + MAX_STATUS_BLOCK_STATE +}; + + +/* + * Storm IDs (including attentions for IGU related enums) + */ +enum storm_id { + USTORM_ID, + CSTORM_ID, + XSTORM_ID, + TSTORM_ID, + ATTENTION_ID, + MAX_STORM_ID +}; + + +/* + * Traffic types used in ETS and flow control algorithms + */ +enum traffic_type { + LLFC_TRAFFIC_TYPE_NW, + LLFC_TRAFFIC_TYPE_FCOE, + LLFC_TRAFFIC_TYPE_ISCSI, + MAX_TRAFFIC_TYPE }; @@ -3715,6 +5102,16 @@ struct vf_pf_channel_data { /* + * State of VF-PF channel + */ +enum vf_pf_channel_state { + VF_PF_CHANNEL_STATE_READY, + VF_PF_CHANNEL_STATE_WAITING_FOR_ACK, + MAX_VF_PF_CHANNEL_STATE +}; + + +/* * zone A per-queue data */ struct xstorm_queue_zone_data { diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index d5399206f66..4d748e77d1a 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h @@ -15,98 +15,34 @@ #ifndef BNX2X_INIT_H #define BNX2X_INIT_H -/* RAM0 size in bytes */ -#define STORM_INTMEM_SIZE_E1 0x5800 -#define STORM_INTMEM_SIZE_E1H 0x10000 -#define STORM_INTMEM_SIZE(bp) ((CHIP_IS_E1(bp) ? STORM_INTMEM_SIZE_E1 : \ - STORM_INTMEM_SIZE_E1H) / 4) - - /* Init operation types and structures */ -/* Common for both E1 and E1H */ -#define OP_RD 0x1 /* read single register */ -#define OP_WR 0x2 /* write single register */ -#define OP_IW 0x3 /* write single register using mailbox */ -#define OP_SW 0x4 /* copy a string to the device */ -#define OP_SI 0x5 /* copy a string using mailbox */ -#define OP_ZR 0x6 /* clear memory */ -#define OP_ZP 0x7 /* unzip then copy with DMAE */ -#define OP_WR_64 0x8 /* write 64 bit pattern */ -#define OP_WB 0x9 /* copy a string using DMAE */ - -/* FPGA and EMUL specific operations */ -#define OP_WR_EMUL 0xa /* write single register on Emulation */ -#define OP_WR_FPGA 0xb /* write single register on FPGA */ -#define OP_WR_ASIC 0xc /* write single register on ASIC */ - -/* Init stages */ -/* Never reorder stages !!! 
*/ -#define COMMON_STAGE 0 -#define PORT0_STAGE 1 -#define PORT1_STAGE 2 -#define FUNC0_STAGE 3 -#define FUNC1_STAGE 4 -#define FUNC2_STAGE 5 -#define FUNC3_STAGE 6 -#define FUNC4_STAGE 7 -#define FUNC5_STAGE 8 -#define FUNC6_STAGE 9 -#define FUNC7_STAGE 10 -#define STAGE_IDX_MAX 11 - -#define STAGE_START 0 -#define STAGE_END 1 - - -/* Indices of blocks */ -#define PRS_BLOCK 0 -#define SRCH_BLOCK 1 -#define TSDM_BLOCK 2 -#define TCM_BLOCK 3 -#define BRB1_BLOCK 4 -#define TSEM_BLOCK 5 -#define PXPCS_BLOCK 6 -#define EMAC0_BLOCK 7 -#define EMAC1_BLOCK 8 -#define DBU_BLOCK 9 -#define MISC_BLOCK 10 -#define DBG_BLOCK 11 -#define NIG_BLOCK 12 -#define MCP_BLOCK 13 -#define UPB_BLOCK 14 -#define CSDM_BLOCK 15 -#define USDM_BLOCK 16 -#define CCM_BLOCK 17 -#define UCM_BLOCK 18 -#define USEM_BLOCK 19 -#define CSEM_BLOCK 20 -#define XPB_BLOCK 21 -#define DQ_BLOCK 22 -#define TIMERS_BLOCK 23 -#define XSDM_BLOCK 24 -#define QM_BLOCK 25 -#define PBF_BLOCK 26 -#define XCM_BLOCK 27 -#define XSEM_BLOCK 28 -#define CDU_BLOCK 29 -#define DMAE_BLOCK 30 -#define PXP_BLOCK 31 -#define CFC_BLOCK 32 -#define HC_BLOCK 33 -#define PXP2_BLOCK 34 -#define MISC_AEU_BLOCK 35 -#define PGLUE_B_BLOCK 36 -#define IGU_BLOCK 37 -#define ATC_BLOCK 38 -#define QM_4PORT_BLOCK 39 -#define XSEM_4PORT_BLOCK 40 +enum { + OP_RD = 0x1, /* read a single register */ + OP_WR, /* write a single register */ + OP_SW, /* copy a string to the device */ + OP_ZR, /* clear memory */ + OP_ZP, /* unzip then copy with DMAE */ + OP_WR_64, /* write 64 bit pattern */ + OP_WB, /* copy a string using DMAE */ + OP_WB_ZR, /* Clear a string using DMAE or indirect-wr */ + /* Skip the following ops if all of the init modes don't match */ + OP_IF_MODE_OR, + /* Skip the following ops if any of the init modes don't match */ + OP_IF_MODE_AND, + OP_MAX +}; +enum { + STAGE_START, + STAGE_END, +}; /* Returns the index of start or end of a specific block stage in ops array*/ #define BLOCK_OPS_IDX(block, stage, end) \ - (2*(((block)*STAGE_IDX_MAX) + (stage)) + (end)) + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) +/* structs for the various opcodes */ struct raw_op { u32 op:8; u32 offset:24; @@ -116,7 +52,7 @@ struct raw_op { struct op_read { u32 op:8; u32 offset:24; - u32 pad; + u32 val; }; struct op_write { @@ -125,15 +61,15 @@ struct op_write { u32 val; }; -struct op_string_write { +struct op_arr_write { u32 op:8; u32 offset:24; -#ifdef __LITTLE_ENDIAN - u16 data_off; - u16 data_len; -#else /* __BIG_ENDIAN */ +#ifdef __BIG_ENDIAN u16 data_len; u16 data_off; +#else /* __LITTLE_ENDIAN */ + u16 data_off; + u16 data_len; #endif }; @@ -143,14 +79,209 @@ struct op_zero { u32 len; }; +struct op_if_mode { + u32 op:8; + u32 cmd_offset:24; + u32 mode_bit_map; +}; + + union init_op { struct op_read read; struct op_write write; - struct op_string_write str_wr; + struct op_arr_write arr_wr; struct op_zero zero; struct raw_op raw; + struct op_if_mode if_mode; +}; + + +/* Init Phases */ +enum { + PHASE_COMMON, + PHASE_PORT0, + PHASE_PORT1, + PHASE_PF0, + PHASE_PF1, + PHASE_PF2, + PHASE_PF3, + PHASE_PF4, + PHASE_PF5, + PHASE_PF6, + PHASE_PF7, + NUM_OF_INIT_PHASES }; +/* Init Modes */ +enum { + MODE_ASIC = 0x00000001, + MODE_FPGA = 0x00000002, + MODE_EMUL = 0x00000004, + MODE_E2 = 0x00000008, + MODE_E3 = 0x00000010, + MODE_PORT2 = 0x00000020, + MODE_PORT4 = 0x00000040, + MODE_SF = 0x00000080, + MODE_MF = 0x00000100, + MODE_MF_SD = 0x00000200, + MODE_MF_SI = 0x00000400, + MODE_MF_NIV = 0x00000800, + MODE_E3_A0 = 0x00001000, + MODE_E3_B0 = 0x00002000, + MODE_COS3 = 
0x00004000, + MODE_COS6 = 0x00008000, + MODE_LITTLE_ENDIAN = 0x00010000, + MODE_BIG_ENDIAN = 0x00020000, +}; + +/* Init Blocks */ +enum { + BLOCK_ATC, + BLOCK_BRB1, + BLOCK_CCM, + BLOCK_CDU, + BLOCK_CFC, + BLOCK_CSDM, + BLOCK_CSEM, + BLOCK_DBG, + BLOCK_DMAE, + BLOCK_DORQ, + BLOCK_HC, + BLOCK_IGU, + BLOCK_MISC, + BLOCK_NIG, + BLOCK_PBF, + BLOCK_PGLUE_B, + BLOCK_PRS, + BLOCK_PXP2, + BLOCK_PXP, + BLOCK_QM, + BLOCK_SRC, + BLOCK_TCM, + BLOCK_TM, + BLOCK_TSDM, + BLOCK_TSEM, + BLOCK_UCM, + BLOCK_UPB, + BLOCK_USDM, + BLOCK_USEM, + BLOCK_XCM, + BLOCK_XPB, + BLOCK_XSDM, + BLOCK_XSEM, + BLOCK_MISC_AEU, + NUM_OF_INIT_BLOCKS +}; + +/* QM queue numbers */ +#define BNX2X_ETH_Q 0 +#define BNX2X_TOE_Q 3 +#define BNX2X_TOE_ACK_Q 6 +#define BNX2X_ISCSI_Q 9 +#define BNX2X_ISCSI_ACK_Q 11 +#define BNX2X_FCOE_Q 10 + +/* Vnics per mode */ +#define BNX2X_PORT2_MODE_NUM_VNICS 4 +#define BNX2X_PORT4_MODE_NUM_VNICS 2 + +/* COS offset for port1 in E3 B0 4port mode */ +#define BNX2X_E3B0_PORT1_COS_OFFSET 3 + +/* QM Register addresses */ +#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\ + (QM_REG_QVOQIDX_0 + 4 * (pf_q_num)) +#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\ + (QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5))) +#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\ + (QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4)) + +/* extracts the QM queue number for the specified port and vnic */ +#define BNX2X_PF_Q_NUM(q_num, port, vnic)\ + ((((port) << 1) | (vnic)) * 16 + (q_num)) + + +/* Maps the specified queue to the specified COS */ +static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos) +{ + /* find current COS mapping */ + u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4); + + /* check if queue->COS mapping has changed */ + if (curr_cos != new_cos) { + u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS; + u32 reg_addr, reg_bit_map, vnic; + + /* update parameters for 4port mode */ + if (INIT_MODE_FLAGS(bp) & MODE_PORT4) { + num_vnics = BNX2X_PORT4_MODE_NUM_VNICS; + if (BP_PORT(bp)) { + curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET; + new_cos += BNX2X_E3B0_PORT1_COS_OFFSET; + } + } + + /* change queue mapping for each VNIC */ + for (vnic = 0; vnic < num_vnics; vnic++) { + u32 pf_q_num = + BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic); + u32 q_bit_map = 1 << (pf_q_num & 0x1f); + + /* overwrite queue->VOQ mapping */ + REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos); + + /* clear queue bit from current COS bit map */ + reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map)); + + /* set queue bit in new COS bit map */ + reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); + + /* set/clear queue bit in command-queue bit map + (E2/E3A0 only, valid COS values are 0/1) */ + if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { + reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); + reg_bit_map = REG_RD(bp, reg_addr); + q_bit_map = 1 << (2 * (pf_q_num & 0xf)); + reg_bit_map = new_cos ? 
+ (reg_bit_map | q_bit_map) : + (reg_bit_map & (~q_bit_map)); + REG_WR(bp, reg_addr, reg_bit_map); + } + } + } +} + +/* Configures the QM according to the specified per-traffic-type COSes */ +static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode, + struct priority_cos *traffic_cos) +{ + bnx2x_map_q_cos(bp, BNX2X_FCOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos); + bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos); + if (mode != STATIC_COS) { + /* required only in backward compatible COS mode */ + bnx2x_map_q_cos(bp, BNX2X_ETH_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + bnx2x_map_q_cos(bp, BNX2X_TOE_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q, + traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos); + } +} + + +/* Returns the index of start or end of a specific block stage in ops array*/ +#define BLOCK_OPS_IDX(block, stage, end) \ + (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) + + #define INITOP_SET 0 /* set the HW directly */ #define INITOP_CLEAR 1 /* clear the HW directly */ #define INITOP_INIT 2 /* set the init-value array */ @@ -195,25 +326,25 @@ struct src_ent { /**************************************************************************** * Parity configuration ****************************************************************************/ -#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \ +#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \ { \ block##_REG_##block##_PRTY_MASK, \ block##_REG_##block##_PRTY_STS_CLR, \ - en_mask, {m1, m1h, m2}, #block \ + en_mask, {m1, m1h, m2, m3}, #block \ } -#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \ +#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \ { \ block##_REG_##block##_PRTY_MASK_0, \ block##_REG_##block##_PRTY_STS_CLR_0, \ - en_mask, {m1, m1h, m2}, #block"_0" \ + en_mask, {m1, m1h, m2, m3}, #block"_0" \ } -#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \ +#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \ { \ block##_REG_##block##_PRTY_MASK_1, \ block##_REG_##block##_PRTY_STS_CLR_1, \ - en_mask, {m1, m1h, m2}, #block"_1" \ + en_mask, {m1, m1h, m2, m3}, #block"_1" \ } static const struct { @@ -224,6 +355,7 @@ static const struct { u32 e1; /* 57710 */ u32 e1h; /* 57711 */ u32 e2; /* 57712 */ + u32 e3; /* 578xx */ } reg_mask; /* Register mask (all valid bits) */ char name[7]; /* Block's longest name is 6 characters long * (name + suffix) @@ -241,39 +373,56 @@ static const struct { /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't * want to handle "system kill" flow at the moment. 
*/ - BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), - BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), - BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), - BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), - BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff), - BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1), - BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff), - BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3), + BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff, + 0x7ffffff), + BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(PXP2, 0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff), + BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0), + BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0), + BLOCK_PRTY_INFO_0(NIG, 0xffffffff, 0, 0, 0xffffffff, 0xffffffff), + BLOCK_PRTY_INFO_1(NIG, 0xffff, 0, 0, 0xff, 0xffff), + BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff), + BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f), + BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3), + BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3), {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, - GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0, - {0xf, 0xf, 0xf}, "UPB"}, + GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf, + {0xf, 0xf, 0xf, 0xf}, "UPB"}, {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0, - {0xf, 0xf, 0xf}, "XPB"}, - BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7), - BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f), - BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf), - BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1), - BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf), - BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf), - BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff), - BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff), - BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), - BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f), - BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), - BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f), - BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), - BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f), - BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff), - BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f), + {0xf, 0xf, 0xf, 0xf}, "XPB"}, + BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7), + BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f), + BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1), + BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf), + BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff), + BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff), + BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f), + BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff), + BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff), + BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff), + BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), + 
BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f), + BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff, + 0xffffffff), + BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f), }; @@ -324,8 +473,10 @@ static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx) return bnx2x_blocks_parity_data[idx].reg_mask.e1; else if (CHIP_IS_E1H(bp)) return bnx2x_blocks_parity_data[idx].reg_mask.e1h; - else + else if (CHIP_IS_E2(bp)) return bnx2x_blocks_parity_data[idx].reg_mask.e2; + else /* CHIP_IS_E3 */ + return bnx2x_blocks_parity_data[idx].reg_mask.e3; } static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp) diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h index aafd0232393..7ec1724753a 100644 --- a/drivers/net/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/bnx2x/bnx2x_init_ops.h @@ -15,13 +15,39 @@ #ifndef BNX2X_INIT_OPS_H #define BNX2X_INIT_OPS_H + +#ifndef BP_ILT +#define BP_ILT(bp) NULL +#endif + +#ifndef BP_FUNC +#define BP_FUNC(bp) 0 +#endif + +#ifndef BP_PORT +#define BP_PORT(bp) 0 +#endif + +#ifndef BNX2X_ILT_FREE +#define BNX2X_ILT_FREE(x, y, sz) +#endif + +#ifndef BNX2X_ILT_ZALLOC +#define BNX2X_ILT_ZALLOC(x, y, sz) +#endif + +#ifndef ILOG2 +#define ILOG2(x) x +#endif + static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len); static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val); -static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, - u32 addr, u32 len); +static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, + dma_addr_t phys_addr, u32 addr, + u32 len); -static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, - u32 len) +static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) { u32 i; @@ -29,24 +55,32 @@ static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data, REG_WR(bp, addr + i*4, data[i]); } -static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data, - u32 len) +static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) { u32 i; for (i = 0; i < len; i++) - REG_WR_IND(bp, addr + i*4, data[i]); + bnx2x_reg_wr_ind(bp, addr + i*4, data[i]); } -static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len) +static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len, + u8 wb) { if (bp->dmae_ready) bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + else if (wb) + /* + * Wide bus registers with no dmae need to be written + * using indirect write. + */ + bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); else bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len); } -static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) +static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, + u32 len, u8 wb) { u32 buf_len = (((len*4) > FW_BUF_SIZE) ? 
FW_BUF_SIZE : (len*4)); u32 buf_len32 = buf_len/4; @@ -57,12 +91,20 @@ static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) for (i = 0; i < len; i += buf_len32) { u32 cur_len = min(buf_len32, len - i); - bnx2x_write_big_buf(bp, addr + i*4, cur_len); + bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb); } } -static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, - u32 len64) +static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) +{ + if (bp->dmae_ready) + bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + else + bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); +} + +static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len64) { u32 buf_len32 = FW_BUF_SIZE/4; u32 len = len64*2; @@ -82,7 +124,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, for (i = 0; i < len; i += buf_len32) { u32 cur_len = min(buf_len32, len - i); - bnx2x_write_big_buf(bp, addr + i*4, cur_len); + bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len); } } @@ -100,7 +142,8 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data, #define IF_IS_PRAM_ADDR(base, addr) \ if (((base) <= (addr)) && ((base) + 0x40000 >= (addr))) -static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data) +static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, + const u8 *data) { IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr) data = INIT_TSEM_INT_TABLE_DATA(bp); @@ -129,31 +172,17 @@ static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data) return data; } -static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len) +static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, + const u32 *data, u32 len) { if (bp->dmae_ready) - bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len); + VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); else - bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len); -} - -static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data, - u32 len) -{ - const u32 *old_data = data; - - data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data); - - if (bp->dmae_ready) { - if (old_data != data) - VIRT_WR_DMAE_LEN(bp, data, addr, len, 1); - else - VIRT_WR_DMAE_LEN(bp, data, addr, len, 0); - } else bnx2x_init_ind_wr(bp, addr, data, len); } -static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi) +static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, + u32 val_hi) { u32 wb_write[2]; @@ -161,8 +190,8 @@ static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi) wb_write[1] = val_hi; REG_WR_DMAE_LEN(bp, reg, wb_write, 2); } - -static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) +static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, + u32 blob_off) { const u8 *data = NULL; int rc; @@ -186,39 +215,33 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off) static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) { u16 op_start = - INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)]; + INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, + STAGE_START)]; u16 op_end = - INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)]; + INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, + STAGE_END)]; union init_op *op; - int hw_wr; - u32 i, op_type, addr, len; + u32 op_idx, op_type, addr, len; const u32 *data, *data_base; /* If empty block */ if (op_start == op_end) return; - if (CHIP_REV_IS_FPGA(bp)) - hw_wr = OP_WR_FPGA; - 
else if (CHIP_REV_IS_EMUL(bp)) - hw_wr = OP_WR_EMUL; - else - hw_wr = OP_WR_ASIC; - data_base = INIT_DATA(bp); - for (i = op_start; i < op_end; i++) { - - op = (union init_op *)&(INIT_OPS(bp)[i]); + for (op_idx = op_start; op_idx < op_end; op_idx++) { - op_type = op->str_wr.op; - addr = op->str_wr.offset; - len = op->str_wr.data_len; - data = data_base + op->str_wr.data_off; - - /* HW/EMUL specific */ - if ((op_type > OP_WB) && (op_type == hw_wr)) - op_type = OP_WR; + op = (union init_op *)&(INIT_OPS(bp)[op_idx]); + /* Get generic data */ + op_type = op->raw.op; + addr = op->raw.offset; + /* Get data that's used for OP_SW, OP_WB, OP_FW, OP_ZP and + * OP_WR64 (we assume that op_arr_write and op_write have the + * same structure). + */ + len = op->arr_wr.data_len; + data = data_base + op->arr_wr.data_off; switch (op_type) { case OP_RD: @@ -233,21 +256,39 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage) case OP_WB: bnx2x_init_wr_wb(bp, addr, data, len); break; - case OP_SI: - bnx2x_init_ind_wr(bp, addr, data, len); - break; case OP_ZR: - bnx2x_init_fill(bp, addr, 0, op->zero.len); + bnx2x_init_fill(bp, addr, 0, op->zero.len, 0); + break; + case OP_WB_ZR: + bnx2x_init_fill(bp, addr, 0, op->zero.len, 1); break; case OP_ZP: bnx2x_init_wr_zp(bp, addr, len, - op->str_wr.data_off); + op->arr_wr.data_off); break; case OP_WR_64: bnx2x_init_wr_64(bp, addr, data, len); break; + case OP_IF_MODE_AND: + /* if any of the flags doesn't match, skip the + * conditional block. + */ + if ((INIT_MODE_FLAGS(bp) & + op->if_mode.mode_bit_map) != + op->if_mode.mode_bit_map) + op_idx += op->if_mode.cmd_offset; + break; + case OP_IF_MODE_OR: + /* if all the flags don't match, skip the conditional + * block. + */ + if ((INIT_MODE_FLAGS(bp) & + op->if_mode.mode_bit_map) == 0) + op_idx += op->if_mode.cmd_offset; + break; default: - /* happens whenever an op is of a diff HW */ + /* Should never get here! 
*/ + break; } } @@ -417,7 +458,8 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = { PXP2_REG_RQ_BW_WR_UBOUND30} }; -static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) +static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, + int w_order) { u32 val, i; @@ -491,19 +533,21 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD)) REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); - if (CHIP_IS_E2(bp)) + if (CHIP_IS_E3(bp)) + REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order)); + else if (CHIP_IS_E2(bp)) REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order)); else REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); - if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1(bp)) { /* MPS w_order optimal TH presently TH * 128 0 0 2 * 256 1 1 3 * >=512 2 2 3 */ /* DMAE is special */ - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1H(bp)) { /* E2 can use optimal TH */ val = w_order; REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val); @@ -557,8 +601,8 @@ static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order) #define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44))) #define ILT_RANGE(f, l) (((l) << 10) | f) -static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line, - u32 size, u8 memop) +static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, + struct ilt_line *line, u32 size, u8 memop) { if (memop == ILT_MEMOP_FREE) { BNX2X_ILT_FREE(line->page, line->page_mapping, line->size); @@ -572,7 +616,8 @@ static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line, } -static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop) +static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, + u8 memop) { int i, rc; struct bnx2x_ilt *ilt = BP_ILT(bp); @@ -617,8 +662,8 @@ static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx, bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping)); } -static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt, - int idx, u8 initop) +static void bnx2x_ilt_line_init_op(struct bnx2x *bp, + struct bnx2x_ilt *ilt, int idx, u8 initop) { dma_addr_t null_mapping; int abs_idx = ilt->start_line + idx; @@ -733,7 +778,7 @@ static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) } static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, - u32 psz_reg, u8 initop) + u32 psz_reg, u8 initop) { struct bnx2x_ilt *ilt = BP_ILT(bp); struct ilt_client_info *ilt_cli = &ilt->clients[cli_num]; @@ -848,7 +893,8 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, /* Initialize T2 */ for (i = 0; i < src_cid_count-1; i++) - t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent)); + t2[i].next = (u64)(t2_mapping + + (i+1)*sizeof(struct src_ent)); /* tell the searcher where the T2 table is */ REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count); diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 076e11f5769..bcd8f003862 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -25,6 +25,8 @@ #include <linux/mutex.h> #include "bnx2x.h" +#include "bnx2x_cmn.h" + /********************************************************/ #define ETH_HLEN 14 @@ -35,6 +37,13 @@ #define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define MDIO_ACCESS_TIMEOUT 1000 #define BMAC_CONTROL_RX_ENABLE 2 +#define WC_LANE_MAX 4 +#define I2C_SWITCH_WIDTH 2 +#define I2C_BSC0 0 +#define I2C_BSC1 1 +#define I2C_WA_RETRY_CNT 3 +#define MCPR_IMC_COMMAND_READ_OP 1 
+#define MCPR_IMC_COMMAND_WRITE_OP 2 /***********************************************************/ /* Shortcut definitions */ @@ -103,16 +112,13 @@ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG #define GP_STATUS_10G_CX4 \ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4 -#define GP_STATUS_12G_HIG \ - MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG -#define GP_STATUS_12_5G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G -#define GP_STATUS_13G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G -#define GP_STATUS_15G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G -#define GP_STATUS_16G MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G #define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX #define GP_STATUS_10G_KX4 \ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 - +#define GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR +#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI +#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS +#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI #define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD #define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD @@ -126,20 +132,10 @@ #define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD #define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD #define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD -#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD -#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD -#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD -#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD -#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD -#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD -#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD -#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD -#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD -#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD - -#define PHY_XGXS_FLAG 0x1 -#define PHY_SGMII_FLAG 0x2 -#define PHY_SERDES_FLAG 0x4 +#define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD +#define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD + + /* */ #define SFP_EEPROM_CON_TYPE_ADDR 0x2 @@ -165,8 +161,104 @@ #define EDC_MODE_PASSIVE_DAC 0x0055 +/* BRB thresholds for E2*/ +#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170 +#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE 250 +#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 90 + +#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250 + +/* BRB thresholds for E3A0 */ +#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290 +#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE 410 +#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 170 + +#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410 + + +/* BRB thresholds for E3B0 2 port mode*/ +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE 1025 +#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 1025 + +#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE 1025 + +/* only for E3B0 */ +#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR 1025 +#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR 1025 + +/* Lossy + Lossless GUARANTEED == GUART */ +#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART 284 +/* Lossless + Lossless */ +#define PFC_E3B0_2P_PAUSE_LB_GUART 236 +/* Lossy + Lossy */ +#define PFC_E3B0_2P_NON_PAUSE_LB_GUART 342 + +/* Lossy + Lossless */ +#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART 284 +/* Lossless + Lossless */ +#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART 236 +/* Lossy + Lossy */ +#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART 336 +#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST 80 + +#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART 0 +#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST 0 + +/* BRB thresholds for E3B0 4 port mode */ +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 304 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0 + +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE 384 +#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE 0 + +#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE 10 +#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE 304 + +#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50 +#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384 + + +/* only for E3B0 */ +#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304 +#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384 +#define PFC_E3B0_4P_LB_GUART 120 + +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120 +#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80 + +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80 +#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120 + +#define DCBX_INVALID_COS (0xFF) + #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) #define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000) +#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360) +#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS (2720) +#define ETS_E3B0_PBF_MIN_W_VAL (10000) + +#define MAX_PACKET_SIZE (9700) +#define WC_UC_TIMEOUT 100 + /**********************************************************/ /* INTERFACE */ /**********************************************************/ @@ -202,14 +294,86 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) } /******************************************************************/ +/* EPIO/GPIO section */ +/******************************************************************/ +static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en) +{ + u32 epio_mask, gp_oenable; + *en = 0; + /* Sanity check */ + if (epio_pin > 31) { + DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin); + return; + } + + epio_mask = 1 << epio_pin; + /* Set this EPIO to input so its value can be read */ + gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE); + REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask); + + *en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin; +} +static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en) +{ + u32 epio_mask, gp_output, gp_oenable; + + /* Sanity check */ + if (epio_pin > 31) { + DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin); + return; + } + DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en); + epio_mask = 1 << epio_pin; + /* Set the output value for this EPIO */ + gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS); + if (en) + gp_output |= epio_mask; + else + gp_output &= ~epio_mask; + + REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output); + + /* Set this EPIO to output */ + gp_oenable = REG_RD(bp, 
MCP_REG_MCPR_GP_OENABLE); + REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask); +} + +static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val) +{ + if (pin_cfg == PIN_CFG_NA) + return; + if (pin_cfg >= PIN_CFG_EPIO0) { + bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); + } else { + u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port); + } +} + +static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val) +{ + if (pin_cfg == PIN_CFG_NA) + return -EINVAL; + if (pin_cfg >= PIN_CFG_EPIO0) { + bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val); + } else { + u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3; + u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2; + *val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + } + return 0; + +} +/******************************************************************/ /* ETS section */ /******************************************************************/ -void bnx2x_ets_disabled(struct link_params *params) +static void bnx2x_ets_e2e3a0_disabled(struct link_params *params) { /* ETS disabled configuration*/ struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); + DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); /* * mapping between entry priority to client number (0,1,2 -debug and @@ -262,7 +426,756 @@ void bnx2x_ets_disabled(struct link_params *params) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); } +/****************************************************************************** +* Description: +* min_w_val will be set according to the line speed. +*. +******************************************************************************/ +static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars) +{ + u32 min_w_val = 0; + /* Calculate min_w_val. */ + if (vars->link_up) { + if (SPEED_20000 == vars->line_speed) + min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; + else + min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; + } else + min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; + /** + * If the link isn't up (static configuration for example), the + * link is treated as 20Gbps. + */ + return min_w_val; +} +/****************************************************************************** +* Description: +* Getting credit upper bound from min_w_val. +*. +******************************************************************************/ +static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val) +{ + const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val), + MAX_PACKET_SIZE); + return credit_upper_bound; +} +/****************************************************************************** +* Description: +* Set credit upper bound for NIG. +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_set_credit_upper_bound_nig( + const struct link_params *params, + const u32 min_w_val) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 credit_upper_bound = + bnx2x_ets_get_credit_upper_bound(min_w_val); + + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound); + REG_WR(bp, (port) ? 
NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound); + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 : + NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound); + + if (0 == port) { + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6, + credit_upper_bound); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7, + credit_upper_bound); + REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8, + credit_upper_bound); + } +} +/****************************************************************************** +* Description: +* Will return the NIG ETS registers to init values. Except +* credit_upper_bound. +* That isn't used in this configuration (No WFQ is enabled) and will be +* configured according to spec +*. +******************************************************************************/ +static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, + const struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + const u8 port = params->port; + const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); + /** + * mapping between entry priority and client number (0,1,2 - debug and + * management clients, 3 - COS0 client, 4 - COS1, ... 8 - + * COS5)(HIGHEST) 4bits client num. TODO_ETS - Should be done by + * reset value or init tool + */ + if (port) { + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210); + REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0); + } else { + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); + REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); + } + /** + * For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ + /* TODO_ETS - Should be done by reset value or init tool */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : + NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + /** + * mapping between the CREDIT_WEIGHT registers and actual client + * numbers + */ + /* TODO_ETS - Should be done by reset value or init tool */ + if (port) { + /* Port 1 has 6 COS */ + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0); + } else { + /* Port 0 has 9 COS */ + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, + 0x43210876); + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); + } + + /** + * Bitmap of 5 bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - + * COS0 entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ + if (port) + REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f); + else + REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff); + /* defines which entries (clients) are subjected to WFQ arbitration */ + REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : + NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); + + /** + * Please notice the register addresses are not contiguous, so a + * plain for loop here is not appropriate. In 2 port mode port0 only + * COS0-5 can be used. DEBUG0, DEBUG1, MGMT are never used for WFQ. In 4 + * port mode port1 only COS0-2 can be used. DEBUG0, DEBUG1, MGMT + * are never used for WFQ + */ + REG_WR(bp, (port) ? 
NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+ if (0 == port) {
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+ }
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+* Set credit upper bound for PBF.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+ const struct link_params *params,
+ const u32 min_w_val)
+{
+ struct bnx2x *bp = params->bp;
+ const u32 credit_upper_bound =
+ bnx2x_ets_get_credit_upper_bound(min_w_val);
+ const u8 port = params->port;
+ u32 base_upper_bound = 0;
+ u8 max_cos = 0;
+ u8 i = 0;
+ /*
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+ * port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+* Return the PBF ETS registers to their init values, except
+* credit_upper_bound, which isn't used in this configuration (no WFQ
+* is enabled) and is configured according to spec.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 i = 0;
+ u32 base_weight = 0;
+ u8 max_cos = 0;
+
+ /*
+ * mapping between entry priority to client number (0 - COS0
+ * client, 1 - COS1, ... 5 - COS5) (HIGHEST)
+ * TODO_ETS - Should be done by reset value or init tool
+ */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+ /* TODO_ETS - Should be done by reset value or init tool */
+ if (port)
+ /* 0x688 (|011|0 10|00 1|000)*/
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+ else
+ /* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+ REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+ PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
+ /*
+ * In 2 port mode port0 has COS0-5 that can be used for WFQ.
+ * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+ */
+ if (0 == port) {
+ base_weight = PBF_REG_COS0_WEIGHT_P0;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+ } else {
+ base_weight = PBF_REG_COS0_WEIGHT_P1;
+ max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+ }
+
+ for (i = 0; i < max_cos; i++)
+ REG_WR(bp, base_weight + (0x4 * i), 0);
+
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+* E3B0 disable basically returns the ETS registers to their init values.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+ const struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_disabled the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+ bnx2x_ets_e3b0_pbf_disabled(params);
+
+ return 0;
+}
+/******************************************************************************
+* Description:
+* Disable basically returns the ETS registers to their init values.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+
+ if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+ bnx2x_ets_e2e3a0_disabled(params);
+ else if (CHIP_IS_E3B0(bp))
+ bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+ else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+ return -EINVAL;
+ }
+
+ return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+* Set the COS mapping to SP and BW; until this point none of the COS
+* has been set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ const u8 cos_sp_bitmap,
+ const u8 cos_bw_bitmap)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+ const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+ const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+ const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+ NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+ nig_cli_subject2wfq_bitmap);
+
+ REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+ PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+ pbf_cli_subject2wfq_bitmap);
+
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+* are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
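+*
+* Illustrative arithmetic (example values, not from the spec): with
+* total_bw = 100 and bw = 30, the weight written for a COS below is
+* (30 * min_w_val) / 100, i.e. the COS receives 30% of the minimal
+* weight unit that bnx2x_ets_get_min_w_val_nig() derived from the
+* line speed.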
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+ const u8 cos_entry,
+ const u32 min_w_val_nig,
+ const u32 min_w_val_pbf,
+ const u16 total_bw,
+ const u8 bw,
+ const u8 port)
+{
+ u32 nig_reg_address_crd_weight = 0;
+ u32 pbf_reg_address_crd_weight = 0;
+ /* Calculate and set BW for this COS */
+ const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
+ const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+
+ switch (cos_entry) {
+ case 0:
+ nig_reg_address_crd_weight =
+ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+ break;
+ case 1:
+ nig_reg_address_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+ break;
+ case 2:
+ nig_reg_address_crd_weight = (port) ?
+ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+ pbf_reg_address_crd_weight = (port) ?
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+ break;
+ case 3:
+ if (port)
+ return -EINVAL;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+ pbf_reg_address_crd_weight =
+ PBF_REG_COS3_WEIGHT_P0;
+ break;
+ case 4:
+ if (port)
+ return -EINVAL;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+ pbf_reg_address_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+ break;
+ case 5:
+ if (port)
+ return -EINVAL;
+ nig_reg_address_crd_weight =
+ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+ pbf_reg_address_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+ break;
+ default:
+ /* An out-of-range COS entry would otherwise write to
+ * register address 0; reject it.
+ */
+ return -EINVAL;
+ }
+
+ REG_WR(bp, nig_reg_address_crd_weight, cos_bw_nig);
+
+ REG_WR(bp, pbf_reg_address_crd_weight, cos_bw_pbf);
+
+ return 0;
+}
+/******************************************************************************
+* Description:
+* Calculate the total BW. A value of 0 isn't legal.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+ const struct link_params *params,
+ const struct bnx2x_ets_params *ets_params,
+ u16 *total_bw)
+{
+ struct bnx2x *bp = params->bp;
+ u8 cos_idx = 0;
+
+ *total_bw = 0;
+ /* Calculate total BW requested */
+ for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+
+ if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+ "was set to 0\n");
+ return -EINVAL;
+ }
+ *total_bw +=
+ ets_params->cos[cos_idx].params.bw_params.bw;
+ }
+ }
+
+ /* Check total BW is valid */
+ if ((100 != *total_bw) || (0 == *total_bw)) {
+ if (0 == *total_bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
+ "shouldn't be 0\n");
+ return -EINVAL;
+ }
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
+ "100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can
+ * happen if the TCs are joined.
+ */
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Invalidate all the sp_pri_to_cos entries.
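+*
+* Each entry is initialized to the DCBX_INVALID_COS sentinel so that
+* bnx2x_ets_e3b0_sp_pri_to_cos_set() below can detect an attempt to map
+* two COS entries to the same strict priority.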
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+ u8 pri = 0;
+ for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+ sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+ u8 *sp_pri_to_cos, const u8 pri,
+ const u8 cos_entry)
+{
+ struct bnx2x *bp = params->bp;
+ const u8 port = params->port;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ /* Validate pri before it is used as an array index */
+ if (pri >= max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter: illegal strict priority\n");
+ return -EINVAL;
+ }
+
+ if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter: there can't be two COS's with "
+ "the same strict pri\n");
+ return -EINVAL;
+ }
+
+ sp_pri_to_cos[pri] = cos_entry;
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in
+* the sp_pri_cli register.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+ const u8 pri_set,
+ const u8 pri_offset,
+ const u8 entry_size)
+{
+ u64 pri_cli_nig = 0;
+ pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+ (pri_set + pri_offset));
+
+ return pri_cli_nig;
+}
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for NIG.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ const u8 nig_cos_offset = 3;
+ const u8 nig_pri_offset = 3;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+ nig_pri_offset, 4);
+}
+/******************************************************************************
+* Description:
+* Returns the correct value according to COS and priority in the
+* sp_pri_cli register for PBF.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+ const u8 pbf_cos_offset = 0;
+ const u8 pbf_pri_offset = 0;
+
+ return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+ pbf_pri_offset, 3);
+}
+
+/******************************************************************************
+* Description:
+* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+* according to sp_pri_to_cos (i.e. which COS has higher priority).
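+*
+* A worked example of the packing, using the offsets from the helpers
+* above: in the NIG each entry is 4 bits wide and the three low entries
+* (MGMT/DEBUG0/DEBUG1) are pre-assigned by the 0x210 seed below, so
+* mapping COS2 to strict slot 0 contributes
+* (2 + 3) << (4 * (0 + 3)) = 0x5000. In the PBF the entries are 3 bits
+* wide with no offsets, so the same mapping contributes 2 << 0 = 0x2.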
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+ u8 *sp_pri_to_cos)
+{
+ struct bnx2x *bp = params->bp;
+ u8 i = 0;
+ const u8 port = params->port;
+ /* MCP Dbg0 and dbg1 are always with higher strict pri*/
+ u64 pri_cli_nig = 0x210;
+ u32 pri_cli_pbf = 0x0;
+ u8 pri_set = 0;
+ u8 pri_bitmask = 0;
+ const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+ u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+ /* Set all the strict priority first */
+ for (i = 0; i < max_num_of_cos; i++) {
+ if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
+ if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid cos entry\n");
+ return -EINVAL;
+ }
+
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ sp_pri_to_cos[i], pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ sp_pri_to_cos[i], pri_set);
+ pri_bitmask = 1 << sp_pri_to_cos[i];
+ /* COS is used - remove it from the bitmap */
+ if (0 == (pri_bitmask & cos_bit_to_set)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+ "invalid: there can't be two COS's "
+ "with the same strict pri\n");
+ return -EINVAL;
+ }
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ /* Set all the non-strict priorities; here i == COS */
+ for (i = 0; i < max_num_of_cos; i++) {
+ pri_bitmask = 1 << i;
+ /* Check if COS was already used for SP */
+ if (pri_bitmask & cos_bit_to_set) {
+ /* COS wasn't used for SP */
+ pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+ i, pri_set);
+
+ pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+ i, pri_set);
+ /* COS is used - remove it from the bitmap */
+ cos_bit_to_set &= ~pri_bitmask;
+ pri_set++;
+ }
+ }
+
+ if (pri_set != max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+ "entries were set\n");
+ return -EINVAL;
+ }
+
+ if (port) {
+ /* Only 6 usable clients*/
+ REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+ (u32)pri_cli_nig);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
+ } else {
+ /* Only 9 usable clients*/
+ const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+ const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+ pri_cli_nig_lsb);
+ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+ pri_cli_nig_msb);
+
+ REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
+ }
+ return 0;
+}
+
+/******************************************************************************
+* Description:
+* Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+ const struct link_vars *vars,
+ const struct bnx2x_ets_params *ets_params)
+{
+ struct bnx2x *bp = params->bp;
+ int bnx2x_status = 0;
+ const u8 port = params->port;
+ u16 total_bw = 0;
+ const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+ const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+ u8 cos_bw_bitmap = 0;
+ u8 cos_sp_bitmap = 0;
+ u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+ const u8 max_num_of_cos = (port) ?
DCBX_E3B0_MAX_NUM_COS_PORT1 :
+ DCBX_E3B0_MAX_NUM_COS_PORT0;
+ u8 cos_entry = 0;
+
+ if (!CHIP_IS_E3B0(bp)) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_e3b0_config the chip isn't E3B0\n");
+ return -EINVAL;
+ }
+
+ if (ets_params->num_of_cos > max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+ "isn't supported\n");
+ return -EINVAL;
+ }
+
+ /* Prepare sp strict priority parameters*/
+ bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+ /* Prepare BW parameters*/
+ bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+ &total_bw);
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK,
+ "bnx2x_ets_E3B0_config get_total_bw failed\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
+ */
+ bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+ bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+ for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+ if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+ cos_bw_bitmap |= (1 << cos_entry);
+ /*
+ * The function also sets the BW in HW (not the
+ * mapping yet).
+ */
+ bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+ bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+ total_bw,
+ ets_params->cos[cos_entry].params.bw_params.bw,
+ port);
+ } else if (bnx2x_cos_state_strict ==
+ ets_params->cos[cos_entry].state) {
+ cos_sp_bitmap |= (1 << cos_entry);
+
+ bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+ params,
+ sp_pri_to_cos,
+ ets_params->cos[cos_entry].params.sp_params.pri,
+ cos_entry);
+
+ } else {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state "
+ "not valid\n");
+ return -EINVAL;
+ }
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
+ "failed\n");
+ return bnx2x_status;
+ }
+ }
+
+ /* Set SP register (which COS has higher priority) */
+ bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+ sp_pri_to_cos);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
+ "failed\n");
+ return bnx2x_status;
+ }
+
+ /* Set client mapping of BW and strict */
+ bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+ cos_sp_bitmap,
+ cos_bw_bitmap);
+
+ if (0 != bnx2x_status) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+ return bnx2x_status;
+ }
+ return 0;
+}
 static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 {
 /* ETS disabled configuration */
@@ -342,7 +1255,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
 REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
 }
-u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 {
 /* ETS disabled configuration*/
 struct bnx2x *bp = params->bp;
@@ -388,24 +1301,64 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
 /* PFC section */
 /******************************************************************/
-static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
- u32 pfc_frames_sent[2],
- u32 pfc_frames_received[2])
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+ struct link_vars *vars,
+ u8 is_lb)
 {
- /* Read pfc statistic */
 struct bnx2x *bp = params->bp;
- u32 bmac_addr = params->port ?
NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ u32 xmac_base;
+ u32 pause_val, pfc0_val, pfc1_val;
- DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
+ /* XMAC base addr */
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
- REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
- pfc_frames_sent, 2);
+ /* Initialize pause and pfc registers */
+ pause_val = 0x18000;
+ pfc0_val = 0xFFFF8000;
+ pfc1_val = 0x2;
- REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
- pfc_frames_received, 2);
+ /* No PFC support */
+ if (!(params->feature_config_flags &
+ FEATURE_CONFIG_PFC_ENABLED)) {
+ /*
+ * RX flow control - Process pause frame in receive direction
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+ /*
+ * TX flow control - Send pause packet when buffer is full
+ */
+ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+ } else {/* PFC support */
+ pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+ XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+ XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+ XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+ }
+
+ /* Write pause and PFC registers */
+ REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+ REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+ /* Set MAC address for source TX Pause/PFC frames */
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+ ((params->mac_addr[2] << 24) |
+ (params->mac_addr[3] << 16) |
+ (params->mac_addr[4] << 8) |
+ (params->mac_addr[5])));
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+ ((params->mac_addr[0] << 8) |
+ (params->mac_addr[1])));
+
+ udelay(30);
 }
+
+
 static void bnx2x_emac_get_pfc_stat(struct link_params *params,
 u32 pfc_frames_sent[2],
 u32 pfc_frames_received[2])
@@ -437,33 +1390,54 @@ static void bnx2x_emac_get_pfc_stat(struct link_params *params,
 pfc_frames_sent[0] = val_xon + val_xoff;
 }
+/* Read pfc statistic*/
 void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
 u32 pfc_frames_sent[2],
 u32 pfc_frames_received[2])
 {
 /* Read pfc statistic */
 struct bnx2x *bp = params->bp;
- u32 val = 0;
+
 DP(NETIF_MSG_LINK, "pfc statistic\n");
 if (!vars->link_up)
 return;
- val = REG_RD(bp, MISC_REG_RESET_REG_2);
- if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
- == 0) {
- DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
+ if (MAC_TYPE_EMAC == vars->mac_type) {
+ DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
 bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
 pfc_frames_received);
- } else {
- DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
- bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
- pfc_frames_received);
 }
 }
 /******************************************************************/
 /* MAC/PBF section */
 /******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+{
+ u32 mode, emac_base;
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ * (a value of 49==0x31) and make sure that the AUTO poll is off
+ */
+
+ if (CHIP_IS_E2(bp))
+ emac_base = GRCBASE_EMAC0;
+ else
+ emac_base = (port) ?
GRCBASE_EMAC1 : GRCBASE_EMAC0; + mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); + mode &= ~(EMAC_MDIO_MODE_AUTO_POLL | + EMAC_MDIO_MODE_CLOCK_CNT); + if (USES_WARPCORE(bp)) + mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); + else + mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); + + mode |= (EMAC_MDIO_MODE_CLAUSE_45); + REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode); + + udelay(40); +} + static void bnx2x_emac_init(struct link_params *params, struct link_vars *vars) { @@ -495,7 +1469,7 @@ static void bnx2x_emac_init(struct link_params *params, } timeout--; } while (val & EMAC_MODE_RESET); - + bnx2x_set_mdio_clk(bp, params->chip_id, port); /* Set mac address */ val = ((params->mac_addr[0] << 8) | params->mac_addr[1]); @@ -508,9 +1482,246 @@ static void bnx2x_emac_init(struct link_params *params, EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val); } -static u8 bnx2x_emac_enable(struct link_params *params, +static void bnx2x_set_xumac_nig(struct link_params *params, + u16 tx_pause_en, + u8 enable) +{ + struct bnx2x *bp = params->bp; + + REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN, + enable); + REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN, + enable); + REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN : + NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); +} + +static void bnx2x_umac_enable(struct link_params *params, struct link_vars *vars, u8 lb) { + u32 val; + u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0; + struct bnx2x *bp = params->bp; + /* Reset UMAC */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + usleep_range(1000, 1000); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); + + DP(NETIF_MSG_LINK, "enabling UMAC\n"); + + /** + * This register determines on which events the MAC will assert + * error on the i/f to the NIG along w/ EOP. + */ + + /** + * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK + + * params->port*0x14, 0xfffff. + */ + /* This register opens the gate for the UMAC despite its name */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); + + val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN | + UMAC_COMMAND_CONFIG_REG_PAD_EN | + UMAC_COMMAND_CONFIG_REG_SW_RESET | + UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK; + switch (vars->line_speed) { + case SPEED_10: + val |= (0<<2); + break; + case SPEED_100: + val |= (1<<2); + break; + case SPEED_1000: + val |= (2<<2); + break; + case SPEED_2500: + val |= (3<<2); + break; + default: + DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n", + vars->line_speed); + break; + } + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + udelay(50); + + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ + REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, + ((params->mac_addr[2] << 24) | + (params->mac_addr[3] << 16) | + (params->mac_addr[4] << 8) | + (params->mac_addr[5]))); + REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, + ((params->mac_addr[0] << 8) | + (params->mac_addr[1]))); + + /* Enable RX and TX */ + val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN; + val |= UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + udelay(50); + + /* Remove SW Reset */ + val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET; + + /* Check loopback mode */ + if (lb) + val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); + + /* + * Maximum Frame Length (RW). 
Defines a 14-Bit maximum frame
+ * length used by the MAC receive logic to check frames.
+ */
+ REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+ bnx2x_set_xumac_nig(params,
+ ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+ vars->mac_type = MAC_TYPE_UMAC;
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+{
+ u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+ /*
+ * In 4-port mode, the mode needs to be set only once, so if the
+ * XMAC is already out of reset, it means the mode has already been
+ * set, and it must not reset the XMAC again, since it controls
+ * both ports of the path.
+ */
+
+ if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC)) {
+ DP(NETIF_MSG_LINK, "XMAC already out of reset"
+ " in 4-port mode\n");
+ return;
+ }
+
+ /* Hard reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC);
+ if (is_port4mode) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+ /* Set the number of ports on the system side to up to 2 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ /* Set the number of ports on the system side to 1 */
+ REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+ if (max_speed == SPEED_10000) {
+ DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1"
+ " port per path\n");
+ /* Set the number of ports on the Warp Core to 10G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ } else {
+ DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports"
+ " per path\n");
+ /* Set the number of ports on the Warp Core to 20G */
+ REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+ }
+ }
+ /* Soft reset */
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+ usleep_range(1000, 1000);
+
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+ MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+}
+
+static void bnx2x_xmac_disable(struct link_params *params)
+{
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+ MISC_REGISTERS_RESET_REG_2_XMAC) {
+ DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+ usleep_range(1000, 1000);
+ bnx2x_set_xumac_nig(params, 0, 0);
+ REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+ XMAC_CTRL_REG_SOFT_RESET);
+ }
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+ struct link_vars *vars, u8 lb)
+{
+ u32 val, xmac_base;
+ struct bnx2x *bp = params->bp;
+ DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+ xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+ bnx2x_xmac_init(bp, vars->line_speed);
+
+ /*
+ * This register determines on which events the MAC will assert
+ * error on the i/f to the NIG along w/ EOP.
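+ * (As in bnx2x_umac_enable() above, where the corresponding
+ * NIG_REG_P0_MAC_RSV_ERR_MASK write is left commented out, the
+ * actual REG_WR for this mask appears to be intentionally
+ * omitted here as well.)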
+ */ + + /* + * This register tells the NIG whether to send traffic to UMAC + * or XMAC + */ + REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); + + /* Set Max packet size */ + REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); + + /* CRC append for Tx packets */ + REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800); + + /* update PFC */ + bnx2x_update_pfc_xmac(params, vars, 0); + + /* Enable TX and RX */ + val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; + + /* Check loopback mode */ + if (lb) + val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK; + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); + bnx2x_set_xumac_nig(params, + ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1); + + vars->mac_type = MAC_TYPE_XMAC; + + return 0; +} +static int bnx2x_emac_enable(struct link_params *params, + struct link_vars *vars, u8 lb) +{ struct bnx2x *bp = params->bp; u8 port = params->port; u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; @@ -760,95 +1971,398 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); } -static void bnx2x_update_pfc_brb(struct link_params *params, - struct link_vars *vars, - struct bnx2x_nig_brb_pfc_port_params *pfc_params) + +/* PFC BRB internal port configuration params */ +struct bnx2x_pfc_brb_threshold_val { + u32 pause_xoff; + u32 pause_xon; + u32 full_xoff; + u32 full_xon; +}; + +struct bnx2x_pfc_brb_e3b0_val { + u32 full_lb_xoff_th; + u32 full_lb_xon_threshold; + u32 lb_guarantied; + u32 mac_0_class_t_guarantied; + u32 mac_0_class_t_guarantied_hyst; + u32 mac_1_class_t_guarantied; + u32 mac_1_class_t_guarantied_hyst; +}; + +struct bnx2x_pfc_brb_th_val { + struct bnx2x_pfc_brb_threshold_val pauseable_th; + struct bnx2x_pfc_brb_threshold_val non_pauseable_th; +}; +static int bnx2x_pfc_brb_get_config_params( + struct link_params *params, + struct bnx2x_pfc_brb_th_val *config_val) { struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n"); + if (CHIP_IS_E2(bp)) { + config_val->pauseable_th.pause_xoff = + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else if (CHIP_IS_E3A0(bp)) { + config_val->pauseable_th.pause_xoff = + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else if (CHIP_IS_E3B0(bp)) { + if (params->phy[INT_PHY].flags & + FLAGS_4_PORT_MODE) { + config_val->pauseable_th.pause_xoff = + 
PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } else { + config_val->pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE; + config_val->pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE; + config_val->pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; + config_val->pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; + /* non pause able*/ + config_val->non_pauseable_th.pause_xoff = + PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.pause_xon = + PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xoff = + PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE; + config_val->non_pauseable_th.full_xon = + PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE; + } + } else + return -EINVAL; + + return 0; +} + + +static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params, + struct bnx2x_pfc_brb_e3b0_val + *e3b0_val, + u32 cos0_pauseable, + u32 cos1_pauseable) +{ + if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_4P_BRB_FULL_LB_XON_THR; + e3b0_val->lb_guarantied = + PFC_E3B0_4P_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST; + } else { + e3b0_val->full_lb_xoff_th = + PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR; + e3b0_val->full_lb_xon_threshold = + PFC_E3B0_2P_BRB_FULL_LB_XON_THR; + e3b0_val->mac_0_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST; + e3b0_val->mac_1_class_t_guarantied = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART; + e3b0_val->mac_1_class_t_guarantied_hyst = + PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST; + + if (cos0_pauseable != cos1_pauseable) { + /* nonpauseable= Lossy + pauseable = Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_MIX_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART; + } else if (cos0_pauseable) { + /* Lossless +Lossless*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART; + } else { + /* Lossy +Lossy*/ + e3b0_val->lb_guarantied = + PFC_E3B0_2P_NON_PAUSE_LB_GUART; + e3b0_val->mac_0_class_t_guarantied = + PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART; + } + } +} +static int bnx2x_update_pfc_brb(struct link_params *params, + struct link_vars *vars, + struct bnx2x_nig_brb_pfc_port_params + *pfc_params) +{ + struct bnx2x *bp = params->bp; + struct bnx2x_pfc_brb_th_val config_val = { {0} }; + struct bnx2x_pfc_brb_threshold_val *reg_th_config = + &config_val.pauseable_th; + struct 
bnx2x_pfc_brb_e3b0_val e3b0_val = {0}; int set_pfc = params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED; + int bnx2x_status = 0; + u8 port = params->port; /* default - pause configuration */ - u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE; - u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE; - u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE; - u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE; + reg_th_config = &config_val.pauseable_th; + bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val); + if (0 != bnx2x_status) + return bnx2x_status; if (set_pfc && pfc_params) /* First COS */ - if (!pfc_params->cos0_pauseable) { - pause_xoff_th = - PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE; - pause_xon_th = - PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE; - full_xoff_th = - PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE; - full_xon_th = - PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; - } + if (!pfc_params->cos0_pauseable) + reg_th_config = &config_val.non_pauseable_th; /* * The number of free blocks below which the pause signal to class 0 * of MAC #n is asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); + REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : + BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , + reg_th_config->pause_xoff); /* * The number of free blocks above which the pause signal to class 0 * of MAC #n is de-asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); + REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : + BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); /* * The number of free blocks below which the full signal to class 0 * of MAC #n is asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); + REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : + BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); /* * The number of free blocks above which the full signal to class 0 * of MAC #n is de-asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); + REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : + BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon); if (set_pfc && pfc_params) { /* Second COS */ - if (pfc_params->cos1_pauseable) { - pause_xoff_th = - PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE; - pause_xon_th = - PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE; - full_xoff_th = - PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE; - full_xon_th = - PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE; - } else { - pause_xoff_th = - PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE; - pause_xon_th = - PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE; - full_xoff_th = - PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE; - full_xon_th = - PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; - } + if (pfc_params->cos1_pauseable) + reg_th_config = &config_val.pauseable_th; + else + reg_th_config = &config_val.non_pauseable_th; /* * The number of free blocks below which the pause signal to * class 1 of MAC #n is asserted. n=0,1 - */ - REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); + **/ + REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, + reg_th_config->pause_xoff); /* * The number of free blocks above which the pause signal to * class 1 of MAC #n is de-asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); + REG_WR(bp, (port) ? 
BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : + BRB1_REG_PAUSE_1_XON_THRESHOLD_0, + reg_th_config->pause_xon); /* * The number of free blocks below which the full signal to * class 1 of MAC #n is asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : + BRB1_REG_FULL_1_XOFF_THRESHOLD_0, + reg_th_config->full_xoff); /* * The number of free blocks above which the full signal to * class 1 of MAC #n is de-asserted. n=0,1 */ - REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); + REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : + BRB1_REG_FULL_1_XON_THRESHOLD_0, + reg_th_config->full_xon); + + + if (CHIP_IS_E3B0(bp)) { + /*Should be done by init tool */ + /* + * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD + * reset value + * 944 + */ + + /** + * The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. + **/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80); + + bnx2x_pfc_brb_get_e3b0_config_params( + params, + &e3b0_val, + pfc_params->cos0_pauseable, + pfc_params->cos1_pauseable); + /** + * The number of free blocks below which the full signal to the + * LB port is asserted. + */ + REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, + e3b0_val.full_lb_xoff_th); + /** + * The number of free blocks above which the full signal to the + * LB port is de-asserted. + */ + REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, + e3b0_val.full_lb_xon_threshold); + /** + * The number of blocks guarantied for the MAC #n port. n=0,1 + */ + + /*The number of blocks guarantied for the LB port.*/ + REG_WR(bp, BRB1_REG_LB_GUARANTIED, + e3b0_val.lb_guarantied); + + /** + * The number of blocks guarantied for the MAC #n port. + */ + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, + 2 * e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, + 2 * e3b0_val.mac_1_class_t_guarantied); + /** + * The number of blocks guarantied for class #t in MAC0. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, + e3b0_val.mac_0_class_t_guarantied); + /** + * The hysteresis on the guarantied buffer space for class in + * MAC0. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_0_class_t_guarantied_hyst); + + /** + * The number of blocks guarantied for class #t in MAC1.t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, + e3b0_val.mac_1_class_t_guarantied); + /** + * The hysteresis on the guarantied buffer space for class #t + * in MAC1. t=0,1 + */ + REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST, + e3b0_val.mac_1_class_t_guarantied_hyst); + + } + + } + + return bnx2x_status; +} + +/****************************************************************************** +* Description: +* This function is needed because NIG ARB_CREDIT_WEIGHT_X are +* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. +******************************************************************************/ +int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, + u8 cos_entry, + u32 priority_mask, u8 port) +{ + u32 nig_reg_rx_priority_mask_add = 0; + + switch (cos_entry) { + case 0: + nig_reg_rx_priority_mask_add = (port) ? 
+ NIG_REG_P1_RX_COS0_PRIORITY_MASK : + NIG_REG_P0_RX_COS0_PRIORITY_MASK; + break; + case 1: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS1_PRIORITY_MASK : + NIG_REG_P0_RX_COS1_PRIORITY_MASK; + break; + case 2: + nig_reg_rx_priority_mask_add = (port) ? + NIG_REG_P1_RX_COS2_PRIORITY_MASK : + NIG_REG_P0_RX_COS2_PRIORITY_MASK; + break; + case 3: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK; + break; + case 4: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK; + break; + case 5: + if (port) + return -EINVAL; + nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK; + break; } + + REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask); + + return 0; +} +static void bnx2x_update_mng(struct link_params *params, u32 link_status) +{ + struct bnx2x *bp = params->bp; + + REG_WR(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status), link_status); } static void bnx2x_update_pfc_nig(struct link_params *params, @@ -858,9 +2372,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params, u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0; u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0; u32 pkt_priority_to_cos = 0; - u32 val; struct bnx2x *bp = params->bp; - int port = params->port; + u8 port = params->port; + int set_pfc = params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED; DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); @@ -881,6 +2395,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params, pause_enable = 0; llfc_out_en = 0; llfc_enable = 0; + if (CHIP_IS_E3(bp)) + ppp_enable = 0; + else ppp_enable = 1; xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); @@ -899,6 +2416,9 @@ static void bnx2x_update_pfc_nig(struct link_params *params, xcm0_out_en = 1; } + if (CHIP_IS_E3(bp)) + REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN : + NIG_REG_BRB0_PAUSE_IN_EN, pause_enable); REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 : NIG_REG_LLFC_OUT_EN_0, llfc_out_en); REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 : @@ -920,30 +2440,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params, /* HW PFC TX enable */ REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable); - /* 0x2 = BMAC, 0x1= EMAC */ - switch (vars->mac_type) { - case MAC_TYPE_EMAC: - val = 1; - break; - case MAC_TYPE_BMAC: - val = 0; - break; - default: - val = 0; - break; - } - REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val); - if (nig_params) { + u8 i = 0; pkt_priority_to_cos = nig_params->pkt_priority_to_cos; - REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK : - NIG_REG_P0_RX_COS0_PRIORITY_MASK, - nig_params->rx_cos0_priority_mask); - - REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK : - NIG_REG_P0_RX_COS1_PRIORITY_MASK, - nig_params->rx_cos1_priority_mask); + for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++) + bnx2x_pfc_nig_rx_priority_mask(bp, i, + nig_params->rx_cos_priority_mask[i], port); REG_WR(bp, port ? 
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 : NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0, @@ -958,8 +2461,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, pkt_priority_to_cos); } - -void bnx2x_update_pfc(struct link_params *params, +int bnx2x_update_pfc(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *pfc_params) { @@ -970,41 +2472,59 @@ void bnx2x_update_pfc(struct link_params *params, */ u32 val; struct bnx2x *bp = params->bp; + int bnx2x_status = 0; + u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC); + + if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) + vars->link_status |= LINK_STATUS_PFC_ENABLED; + else + vars->link_status &= ~LINK_STATUS_PFC_ENABLED; + + bnx2x_update_mng(params, vars->link_status); /* update NIG params */ bnx2x_update_pfc_nig(params, vars, pfc_params); /* update BRB params */ - bnx2x_update_pfc_brb(params, vars, pfc_params); + bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); + if (0 != bnx2x_status) + return bnx2x_status; if (!vars->link_up) - return; - - val = REG_RD(bp, MISC_REG_RESET_REG_2); - if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) - == 0) { - DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); - bnx2x_emac_enable(params, vars, 0); - return; - } + return bnx2x_status; DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); - if (CHIP_IS_E2(bp)) - bnx2x_update_pfc_bmac2(params, vars, 0); - else - bnx2x_update_pfc_bmac1(params, vars); + if (CHIP_IS_E3(bp)) + bnx2x_update_pfc_xmac(params, vars, 0); + else { + val = REG_RD(bp, MISC_REG_RESET_REG_2); + if ((val & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) + == 0) { + DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n"); + bnx2x_emac_enable(params, vars, 0); + return bnx2x_status; + } - val = 0; - if ((params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED) || - (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) - val = 1; - REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); + if (CHIP_IS_E2(bp)) + bnx2x_update_pfc_bmac2(params, vars, bmac_loopback); + else + bnx2x_update_pfc_bmac1(params, vars); + + val = 0; + if ((params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED) || + (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val = 1; + REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val); + } + return bnx2x_status; } -static u8 bnx2x_bmac1_enable(struct link_params *params, - struct link_vars *vars, - u8 is_lb) + +static int bnx2x_bmac1_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1063,12 +2583,18 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, wb_data, 2); + if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { + REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS, + wb_data, 2); + if (wb_data[0] > 0) + return -ESRCH; + } return 0; } -static u8 bnx2x_bmac2_enable(struct link_params *params, - struct link_vars *vars, - u8 is_lb) +static int bnx2x_bmac2_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1128,14 +2654,25 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, udelay(30); bnx2x_update_pfc_bmac2(params, vars, is_lb); + if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { + REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT, + wb_data, 2); + if (wb_data[0] > 0) { + DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n", + wb_data[0]); 
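+ /* RX_LSS_STAT presumably counts link-fault events seen by
+ * the BigMAC; a non-zero count during bring-up is treated
+ * as a failed MAC enable.
+ */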
+ return -ESRCH; + } + } + return 0; } -static u8 bnx2x_bmac_enable(struct link_params *params, - struct link_vars *vars, - u8 is_lb) +static int bnx2x_bmac_enable(struct link_params *params, + struct link_vars *vars, + u8 is_lb) { - u8 rc, port = params->port; + int rc = 0; + u8 port = params->port; struct bnx2x *bp = params->bp; u32 val; /* reset and unreset the BigMac */ @@ -1173,16 +2710,6 @@ static u8 bnx2x_bmac_enable(struct link_params *params, return rc; } - -static void bnx2x_update_mng(struct link_params *params, u32 link_status) -{ - struct bnx2x *bp = params->bp; - - REG_WR(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[params->port].link_status), link_status); -} - static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) { u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : @@ -1218,8 +2745,8 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) } } -static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, - u32 line_speed) +static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, + u32 line_speed) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1269,18 +2796,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, case SPEED_10000: init_crd = thresh + 553 - 22; break; - - case SPEED_12000: - init_crd = thresh + 664 - 22; - break; - - case SPEED_13000: - init_crd = thresh + 742 - 22; - break; - - case SPEED_16000: - init_crd = thresh + 778 - 22; - break; default: DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", line_speed); @@ -1349,31 +2864,23 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp, } /******************************************************************/ -/* CL45 access functions */ +/* CL22 access functions */ /******************************************************************/ -static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 val) +static int bnx2x_cl22_write(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 reg, u16 val) { - u32 tmp, saved_mode; - u8 i, rc = 0; - /* - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz - * (a value of 49==0x31) and make sure that the AUTO poll is off - */ - - saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); - tmp = saved_mode & ~(EMAC_MDIO_MODE_AUTO_POLL | - EMAC_MDIO_MODE_CLOCK_CNT); - tmp |= (EMAC_MDIO_MODE_CLAUSE_45 | - (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); - REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); - udelay(40); + u32 tmp, mode; + u8 i; + int rc = 0; + /* Switch to CL22 */ + mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); /* address */ - - tmp = ((phy->addr << 21) | (devad << 16) | reg | - EMAC_MDIO_COMM_COMMAND_ADDRESS | + tmp = ((phy->addr << 21) | (reg << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY); REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); @@ -1388,57 +2895,60 @@ static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, } if (tmp & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "write phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; - } else { - /* data */ - tmp = ((phy->addr << 21) | (devad << 16) | val | - EMAC_MDIO_COMM_COMMAND_WRITE_45 | - EMAC_MDIO_COMM_START_BUSY); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + } + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, 
mode); + return rc; +} - for (i = 0; i < 50; i++) { - udelay(10); +static int bnx2x_cl22_read(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 reg, u16 *ret_val) +{ + u32 val, mode; + u16 i; + int rc = 0; - tmp = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); - if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { - udelay(5); - break; - } - } - if (tmp & EMAC_MDIO_COMM_START_BUSY) { - DP(NETIF_MSG_LINK, "write phy register failed\n"); - netdev_err(bp->dev, "MDC/MDIO access timeout\n"); - rc = -EFAULT; + /* Switch to CL22 */ + mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, + mode & ~EMAC_MDIO_MODE_CLAUSE_45); + + /* address */ + val = ((phy->addr << 21) | (reg << 16) | + EMAC_MDIO_COMM_COMMAND_READ_22 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); + + for (i = 0; i < 50; i++) { + udelay(10); + + val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(val & EMAC_MDIO_COMM_START_BUSY)) { + *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); + udelay(5); + break; } } + if (val & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "read phy register failed\n"); - /* Restore the saved mode */ - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); - + *ret_val = 0; + rc = -EFAULT; + } + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode); return rc; } -static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 *ret_val) +/******************************************************************/ +/* CL45 access functions */ +/******************************************************************/ +static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 *ret_val) { - u32 val, saved_mode; + u32 val; u16 i; - u8 rc = 0; - /* - * Set clause 45 mode, slow down the MDIO clock to 2.5MHz - * (a value of 49==0x31) and make sure that the AUTO poll is off - */ - - saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); - val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | - EMAC_MDIO_MODE_CLOCK_CNT)); - val |= (EMAC_MDIO_MODE_CLAUSE_45 | - (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); - REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); - udelay(40); + int rc = 0; /* address */ val = ((phy->addr << 21) | (devad << 16) | reg | @@ -1460,7 +2970,6 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, netdev_err(bp->dev, "MDC/MDIO access timeout\n"); *ret_val = 0; rc = -EFAULT; - } else { /* data */ val = ((phy->addr << 21) | (devad << 16) | @@ -1485,15 +2994,214 @@ static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, rc = -EFAULT; } } + /* Work around for E3 A0 */ + if (phy->flags & FLAGS_MDC_MDIO_WA) { + phy->flags ^= FLAGS_DUMMY_READ; + if (phy->flags & FLAGS_DUMMY_READ) { + u16 temp_val; + bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); + } + } + + return rc; +} + +static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 val) +{ + u32 tmp; + u8 i; + int rc = 0; + + /* address */ + + tmp = ((phy->addr << 21) | (devad << 16) | reg | + EMAC_MDIO_COMM_COMMAND_ADDRESS | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + 
DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + rc = -EFAULT; + + } else { + /* data */ + tmp = ((phy->addr << 21) | (devad << 16) | val | + EMAC_MDIO_COMM_COMMAND_WRITE_45 | + EMAC_MDIO_COMM_START_BUSY); + REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); + + for (i = 0; i < 50; i++) { + udelay(10); + + tmp = REG_RD(bp, phy->mdio_ctrl + + EMAC_REG_EMAC_MDIO_COMM); + if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { + udelay(5); + break; + } + } + if (tmp & EMAC_MDIO_COMM_START_BUSY) { + DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); + rc = -EFAULT; + } + } + /* Work around for E3 A0 */ + if (phy->flags & FLAGS_MDC_MDIO_WA) { + phy->flags ^= FLAGS_DUMMY_READ; + if (phy->flags & FLAGS_DUMMY_READ) { + u16 temp_val; + bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); + } + } + + return rc; +} + - /* Restore the saved mode */ - REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, saved_mode); +/******************************************************************/ +/* BSC access functions from E3 */ +/******************************************************************/ +static void bnx2x_bsc_module_sel(struct link_params *params) +{ + int idx; + u32 board_cfg, sfp_ctrl; + u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH]; + struct bnx2x *bp = params->bp; + u8 port = params->port; + /* Read I2C output PINs */ + board_cfg = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.shared_hw_config.board)); + i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK; + i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >> + SHARED_HW_CFG_E3_I2C_MUX1_SHIFT; + + /* Read I2C output value */ + sfp_ctrl = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)); + i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0; + i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0; + DP(NETIF_MSG_LINK, "Setting BSC switch\n"); + for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++) + bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]); +} + +static int bnx2x_bsc_read(struct link_params *params, + struct bnx2x_phy *phy, + u8 sl_devid, + u16 sl_addr, + u8 lc_addr, + u8 xfer_cnt, + u32 *data_array) +{ + u32 val, i; + int rc = 0; + struct bnx2x *bp = params->bp; + + if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) { + DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid); + return -EINVAL; + } + + if (xfer_cnt > 16) { + DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. 
Max is 16 bytes\n", + xfer_cnt); + return -EINVAL; + } + bnx2x_bsc_module_sel(params); + + xfer_cnt = 16 - lc_addr; + + /* enable the engine */ + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + val |= MCPR_IMC_COMMAND_ENABLE; + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* program slave device ID */ + val = (sl_devid << 16) | sl_addr; + REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); + + /* start xfer with 0 byte to update the address pointer ???*/ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_WRITE_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* poll for completion */ + i = 0; + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + udelay(10); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + DP(NETIF_MSG_LINK, "wr 0 byte timed out after %d try\n", + i); + rc = -EFAULT; + break; + } + } + if (rc == -EFAULT) + return rc; + + /* start xfer with read op */ + val = (MCPR_IMC_COMMAND_ENABLE) | + (MCPR_IMC_COMMAND_READ_OP << + MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | + (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | + (xfer_cnt); + REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); + + /* poll for completion */ + i = 0; + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { + udelay(10); + val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); + if (i++ > 1000) { + DP(NETIF_MSG_LINK, "rd op timed out after %d try\n", i); + rc = -EFAULT; + break; + } + } + if (rc == -EFAULT) + return rc; + for (i = (lc_addr >> 2); i < 4; i++) { + data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4)); +#ifdef __BIG_ENDIAN + data_array[i] = ((data_array[i] & 0x000000ff) << 24) | + ((data_array[i] & 0x0000ff00) << 8) | + ((data_array[i] & 0x00ff0000) >> 8) | + ((data_array[i] & 0xff000000) >> 24); +#endif + } return rc; } -u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 *ret_val) +static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 or_val) +{ + u16 val; + bnx2x_cl45_read(bp, phy, devad, reg, &val); + bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); +} + +int bnx2x_phy_read(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 *ret_val) { u8 phy_index; /* @@ -1510,8 +3218,8 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, return -EINVAL; } -u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 val) +int bnx2x_phy_write(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 val) { u8 phy_index; /* @@ -1527,9 +3235,62 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, } return -EINVAL; } +static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, + struct link_params *params) +{ + u8 lane = 0; + struct bnx2x *bp = params->bp; + u32 path_swap, path_swap_ovr; + u8 path, port; + + path = BP_PATH(bp); + port = params->port; + + if (bnx2x_is_4_port_mode(bp)) { + u32 port_swap, port_swap_ovr; + + /*figure out path swap value */ + path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) + path_swap = (path_swap_ovr & 0x2); + else + path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP); + + if (path_swap) + path = path ^ 1; -static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, - struct bnx2x_phy *phy) + /*figure out port swap value */ + port_swap_ovr = 
REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); + if (port_swap_ovr & 0x1) + port_swap = (port_swap_ovr & 0x2); + else + port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP); + + if (port_swap) + port = port ^ 1; + + lane = (port<<1) + path; + } else { /* two port mode - no port swap */ + + /*figure out path swap value */ + path_swap_ovr = + REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); + if (path_swap_ovr & 0x1) { + path_swap = (path_swap_ovr & 0x2); + } else { + path_swap = + REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP); + } + if (path_swap) + path = path ^ 1; + + lane = path << 1 ; + } + return lane; +} + +static void bnx2x_set_aer_mmd(struct link_params *params, + struct bnx2x_phy *phy) { u32 ser_lane; u16 offset, aer_val; @@ -1538,20 +3299,28 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - offset = phy->addr + ser_lane; - if (CHIP_IS_E2(bp)) + offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ? + (phy->addr + ser_lane) : 0; + + if (USES_WARPCORE(bp)) { + aer_val = bnx2x_get_warpcore_lane(phy, params); + /* + * In Dual-lane mode, two lanes are joined together, + * so in order to configure them, the AER broadcast method is + * used here. + * 0x200 is the broadcast address for lanes 0,1 + * 0x201 is the broadcast address for lanes 2,3 + */ + if (phy->flags & FLAGS_WC_DUAL_MODE) + aer_val = (aer_val >> 1) | 0x200; + } else if (CHIP_IS_E2(bp)) aer_val = 0x3800 + offset - 1; else aer_val = 0x3800 + offset; + DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val); CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, aer_val); -} -static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, - struct bnx2x_phy *phy) -{ - CL22_WR_OVER_CL45(bp, phy, - MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, 0x3800); + } /******************************************************************/ @@ -1611,20 +3380,979 @@ static void bnx2x_xgxs_deassert(struct link_params *params) params->phy[INT_PHY].def_md_devad); } +static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, + struct link_params *params, u16 *ieee_fc) +{ + struct bnx2x *bp = params->bp; + *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; + /** + * resolve pause mode and advertisement Please refer to Table + * 28B-3 of the 802.3ab-1999 spec + */ + + switch (phy->req_flow_ctrl) { + case BNX2X_FLOW_CTRL_AUTO: + if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + else + *ieee_fc |= + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case BNX2X_FLOW_CTRL_TX: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + break; + + case BNX2X_FLOW_CTRL_RX: + case BNX2X_FLOW_CTRL_BOTH: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + break; + + case BNX2X_FLOW_CTRL_NONE: + default: + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; + break; + } + DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); +} + +static void set_phy_vars(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 actual_phy_idx, phy_index, link_cfg_idx; + u8 phy_config_swapped = params->multi_phy_config & + PORT_HW_CFG_PHY_SWAPPED_ENABLED; + for (phy_index = INT_PHY; phy_index < params->num_phys; + phy_index++) { + link_cfg_idx = LINK_CONFIG_IDX(phy_index); + actual_phy_idx = phy_index; + if (phy_config_swapped) { + if (phy_index == EXT_PHY1) + actual_phy_idx = EXT_PHY2; + else if (phy_index == EXT_PHY2) + actual_phy_idx = EXT_PHY1; + } + 
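With all swap overrides inactive, the lane selection in bnx2x_get_warpcore_lane() reduces to a two-line rule; a condensed sketch (hypothetical helper, mirroring the logic above):

/* 4-port mode: each port/path pair owns one lane (lane = port*2 + path).
 * 2-port mode: each path owns a dual-lane pair starting at path*2.
 */
static u8 warpcore_lane_no_swaps(u8 path, u8 port, u8 four_port_mode)
{
	return four_port_mode ? (u8)((port << 1) + path) : (u8)(path << 1);
}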
params->phy[actual_phy_idx].req_flow_ctrl = + params->req_flow_ctrl[link_cfg_idx]; + + params->phy[actual_phy_idx].req_line_speed = + params->req_line_speed[link_cfg_idx]; + + params->phy[actual_phy_idx].speed_cap_mask = + params->speed_cap_mask[link_cfg_idx]; + + params->phy[actual_phy_idx].req_duplex = + params->req_duplex[link_cfg_idx]; + + if (params->req_line_speed[link_cfg_idx] == + SPEED_AUTO_NEG) + vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; + + DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," + " speed_cap_mask %x\n", + params->phy[actual_phy_idx].req_flow_ctrl, + params->phy[actual_phy_idx].req_line_speed, + params->phy[actual_phy_idx].speed_cap_mask); + } +} + +static void bnx2x_ext_phy_set_pause(struct link_params *params, + struct bnx2x_phy *phy, + struct link_vars *vars) +{ + u16 val; + struct bnx2x *bp = params->bp; + /* read modify write pause advertizing */ + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); + + val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; + + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + } + if ((vars->ieee_fc & + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { + val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + } + DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); +} + +static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) +{ /* LD LP */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; + break; + + case 0xe: /* 1 1 1 0 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; + break; + + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ + vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; + break; + + default: + break; + } + if (pause_result & (1<<0)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; + if (pause_result & (1<<1)) + vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; +} + +static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 ld_pause; /* local */ + u16 lp_pause; /* link partner */ + u16 pause_result; + u8 ret = 0; + /* read twice */ + + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + + if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) + vars->flow_ctrl = phy->req_flow_ctrl; + else if (phy->req_line_speed != SPEED_AUTO_NEG) + vars->flow_ctrl = params->req_fc_auto_adv; + else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { + ret = 1; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) { + bnx2x_cl22_read(bp, phy, + 0x4, &ld_pause); + bnx2x_cl22_read(bp, phy, + 0x5, &lp_pause); + } else { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + } + pause_result = (ld_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; + pause_result |= (lp_pause & + MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; + DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", + pause_result); + bnx2x_pause_resolve(vars, pause_result); + } + return ret; +} +/******************************************************************/ +/* Warpcore section */ 
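bnx2x_pause_resolve() above walks IEEE 802.3 Table 28B-3 using a 4-bit key: the local ASM_DIR/PAUSE bits land in bits 3:2 and the partner's in bits 1:0, exactly as bnx2x_ext_phy_resolve_fc() packs them. A sketch of the key construction, with one worked row:

/* Key = LD[asym, pause] : LP[asym, pause], as packed by the caller above */
static u16 pause_result_key(u16 ld_adv, u16 lp_adv)
{
	return ((ld_adv & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8) |
	       ((lp_adv & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10);
}

/* Example: local advertises asymmetric only (10), partner both (11):
 * key = 0b1011 = 0xb, which the switch above resolves to TX-only pause.
 */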
+/******************************************************************/ +/* The init_internal_warpcore should mirror the xgxs, + * i.e. reset the lane (if needed), set aer for the + * init configuration, and set/clear SGMII flag. Internal + * phy init is done purely in phy_init stage. + */ +static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { + u16 val16 = 0, lane, bam37 = 0; + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); + /* Check adding advertisement for 1G KX */ + if (((vars->line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (vars->line_speed == SPEED_1000)) { + u16 sd_digital; + val16 |= (1<<5); + + /* Enable CL37 1G Parallel Detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (sd_digital | 0x1)); + + DP(NETIF_MSG_LINK, "Advertize 1G\n"); + } + if (((vars->line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || + (vars->line_speed == SPEED_10000)) { + /* Check adding advertisement for 10G KR */ + val16 |= (1<<7); + /* Enable 10G Parallel Detect */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + + DP(NETIF_MSG_LINK, "Advertize 10G\n"); + } + + /* Set Transmit PMD settings */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, + 0x03f0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL, + 0x03f0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + 0x383f); + + /* Advertised speeds */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16); + + /* Enable CL37 BAM */ + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } + + /* Advertise pause */ + bnx2x_ext_phy_set_pause(params, phy, vars); + + /* Enable Autoneg */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000); + + /* Over 1G - AN local device user page 1 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1f); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, &val16); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); +} + +static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val; + + /* Disable Autoneg */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0xa); + + /* Disable CL36 PCS Tx */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); + + /* Double Wide Single Data Rate @ pll rate */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); + + /* Leave cl72 training enable, needed for KR */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, + 0x2); + + /* Leave CL72 enabled */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + val | 0x3800); + + /* Set speed via PMA/PMD register */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); + + /*Enable encoded forced speed */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); + + /* Turn TX scramble payload only the 64/66 scrambler */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX66_CONTROL, 0x9); + + /* Turn RX scramble payload only the 64/66 scrambler */ + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, 0xF9); + + /* set and clear loopback to cause a reset to 64/66 decoder */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + +} + +static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, + struct link_params *params, + u8 is_xfi) +{ + struct bnx2x *bp = params->bp; + u16 misc1_val, tap_val, tx_driver_val, lane, val; + /* Hold rxSeqStart */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); + + /* Hold tx_fifo_reset */ + 
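bnx2x_warpcore_set_10G_XFI() brackets its reprogramming: it parks the RX descrambler sequencer and the TX FIFO first, rewrites the lane configuration, and releases both at the end. A condensed sketch of that bracket as a hypothetical helper built on the driver's own accessors (the tx_fifo_reset hold itself continues below):

static void wc_xfi_hold(struct bnx2x *bp, struct bnx2x_phy *phy, int hold)
{
	u16 val;

	/* rxSeqStart: bit 15 of DSC2B0_DSC_MISC_CTRL0 parks RX sequencing */
	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0,
			hold ? (val | 0x8000) : (val & 0x7fff));

	/* tx_fifo_reset: bit 0 of SERDESDIGITAL_CONTROL1000X3 */
	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
			hold ? (val | 0x1) : (val & 0xfffe));
}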
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); + + /* Disable CL73 AN */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + + /* Disable 100FX Enable and Auto-Detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); + + /* Disable 100FX Idle detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); + + /* Set Block address to Remote PHY & Clear forced_speed[5] */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F)); + + /* Turn off auto-detect & fiber mode */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (val & 0xFFEE)); + + /* Set filter_force_link, disable_false_link and parallel_detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + ((val | 0x0006) & 0xFFFE)); + + /* Set XFI / SFI */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val); + + misc1_val &= ~(0x1f); + + if (is_xfi) { + misc1_val |= 0x5; + tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | + (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | + (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); + tx_driver_val = + ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); + + } else { + misc1_val |= 0x9; + tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | + (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | + (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET)); + tx_driver_val = + ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)); + } + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val); + + /* Set Transmit PMD settings */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + tx_driver_val); + + /* Enable fiber mode, enable and invert sig_det */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); + + /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); + + /* 10G XFI Full Duplex */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); + + /* Release tx_fifo_reset */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + 
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE); + + /* Release rxSeqStart */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF)); +} + +static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp, + struct bnx2x_phy *phy) +{ + DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n"); +} + +static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 lane) +{ + /* Rx0 anaRxControl1G */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90); + + /* Rx2 anaRxControl1G */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0, 0xE070); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1, 0xC0D0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2, 0xA0B0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3, 0x8090); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0); + + /* Serdes Digital Misc1 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008); + + /* Serdes Digital4 Misc3 */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8088); + + /* Set Transmit PMD settings */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, + ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | + (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | + (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) | + MDIO_WC_REG_TX_FIR_TAP_ENABLE)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, + ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | + (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); +} + +static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, + struct link_params *params, + u8 fiber_mode) +{ + struct bnx2x *bp = params->bp; + u16 val16, digctrl_kx1, digctrl_kx2; + u8 lane; + + lane = bnx2x_get_warpcore_lane(phy, params); + + /* Clear XFI clock comp in non-10G single lane mode. 
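Per-lane TX driver writes throughout this section address the lane by a fixed 0x10 register stride from the lane-0 register. A hedged sketch of a per-lane writer built on that layout (hypothetical helper):

static void wc_set_tx_driver(struct bnx2x *bp, struct bnx2x_phy *phy,
			     u8 lane, u16 post2, u16 idriver, u16 ipre)
{
	/* Lane N register = TX0_TX_DRIVER + N * 0x10, as used above */
	u16 val = (post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
		  (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
		  (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET);

	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10 * lane, val);
}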
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); + + if (phy->req_line_speed == SPEED_AUTO_NEG) { + /* SGMII Autoneg */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, + val16 | 0x1000); + DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + val16 &= 0xcfbf; + switch (phy->req_line_speed) { + case SPEED_10: + break; + case SPEED_100: + val16 |= 0x2000; + break; + case SPEED_1000: + val16 |= 0x0040; + break; + default: + DP(NETIF_MSG_LINK, "Speed not supported: 0x%x" + "\n", phy->req_line_speed); + return; + } + + if (phy->req_duplex == DUPLEX_FULL) + val16 |= 0x0100; + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16); + + DP(NETIF_MSG_LINK, "set SGMII force speed %d\n", + phy->req_line_speed); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + DP(NETIF_MSG_LINK, " (readback) %x\n", val16); + } + + /* SGMII Slave mode and disable signal detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1); + if (fiber_mode) + digctrl_kx1 = 1; + else + digctrl_kx1 &= 0xff4a; + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + digctrl_kx1); + + /* Turn off parallel detect */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 & ~(1<<2))); + + /* Re-enable parallel detect */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (digctrl_kx2 | (1<<2))); + + /* Enable autodet */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, + (digctrl_kx1 | 0x10)); +} + +static void bnx2x_warpcore_reset_lane(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 reset) +{ + u16 val; + /* Take lane out of reset after configuration is finished */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); + if (reset) + val |= 0xC000; + else + val &= 0x3FFF; + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, val); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC6, &val); +} + + + /* Clear SFI/XFI link settings registers */ +static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, + struct link_params *params, + u16 lane) +{ + struct bnx2x *bp = params->bp; + u16 val16; + + /* Set XFI clock comp as default. 
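The forced-speed branch in bnx2x_warpcore_set_sgmii_speed() composes a standard MII control word: speed select is split across bits 13 (LSB) and 6 (MSB), duplex is bit 8, autoneg enable is bit 12; hence 100M ORs in 0x2000, 1000M ORs in 0x0040, and full duplex ORs in 0x0100. A quick sketch of the same composition:

/* Illustrative MII control composition for forced SGMII speeds */
static u16 sgmii_forced_miictrl(u32 speed, u8 full_duplex)
{
	u16 mii = 0;		/* 10M half duplex is the all-zero case */

	if (speed == SPEED_100)
		mii |= 0x2000;	/* speed select LSB (bit 13) */
	else if (speed == SPEED_1000)
		mii |= 0x0040;	/* speed select MSB (bit 6)  */
	if (full_duplex)
		mii |= 0x0100;	/* duplex, bit 8 */
	return mii;
}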
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); + + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0x014a); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0800); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); + bnx2x_warpcore_reset_lane(bp, phy, 0); +} + +static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, + u32 chip_id, + u32 shmem_base, u8 port, + u8 *gpio_num, u8 *gpio_port) +{ + u32 cfg_pin; + *gpio_num = 0; + *gpio_port = 0; + if (CHIP_IS_E3(bp)) { + cfg_pin = (REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_MOD_ABS_MASK) >> + PORT_HW_CFG_E3_MOD_ABS_SHIFT; + + /* + * Should not happen. This function called upon interrupt + * triggered by GPIO ( since EPIO can only generate interrupts + * to MCP). + * So if this function was called and none of the GPIOs was set, + * it means the shit hit the fan. + */ + if ((cfg_pin < PIN_CFG_GPIO0_P0) || + (cfg_pin > PIN_CFG_GPIO3_P1)) { + DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for " + "module detect indication\n", + cfg_pin); + return -EINVAL; + } + + *gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3; + *gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2; + } else { + *gpio_num = MISC_REGISTERS_GPIO_3; + *gpio_port = port; + } + DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port); + return 0; +} + +static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 gpio_num, gpio_port; + u32 gpio_val; + if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, + params->shmem_base, params->port, + &gpio_num, &gpio_port) != 0) + return 0; + gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); + + /* Call the handling function in case module is detected */ + if (gpio_val == 0) + return 1; + else + return 0; +} + +static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 serdes_net_if; + u8 fiber_mode; + u16 lane = bnx2x_get_warpcore_lane(phy, params); + serdes_net_if = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, " + "serdes_net_if = 0x%x\n", + vars->line_speed, serdes_net_if); + bnx2x_set_aer_mmd(params, phy); + + vars->phy_flags |= PHY_XGXS_FLAG; + if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || + (phy->req_line_speed && + ((phy->req_line_speed == SPEED_100) || + (phy->req_line_speed == SPEED_10)))) { + vars->phy_flags |= PHY_SGMII_FLAG; + DP(NETIF_MSG_LINK, "Setting SGMII mode\n"); + bnx2x_warpcore_clear_regs(phy, params, lane); + bnx2x_warpcore_set_sgmii_speed(phy, params, 0); + } else { + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_KR: + /* Enable KR Auto Neg */ + if (params->loopback_mode == LOOPBACK_NONE) + bnx2x_warpcore_enable_AN_KR(phy, params, vars); + else { + DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n"); + bnx2x_warpcore_set_10G_KR(phy, params, vars); + } + break; + + case PORT_HW_CFG_NET_SERDES_IF_XFI: + bnx2x_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Setting 10G XFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 1); + } else { + if (SINGLE_MEDIA_DIRECT(params)) { + DP(NETIF_MSG_LINK, "1G Fiber\n"); + fiber_mode = 1; + } else { + DP(NETIF_MSG_LINK, "10/100/1G SGMII\n"); + fiber_mode = 0; + } + bnx2x_warpcore_set_sgmii_speed(phy, + params, + fiber_mode); + } + + break; + + case PORT_HW_CFG_NET_SERDES_IF_SFI: + + bnx2x_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 0); + } else if (vars->line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); + bnx2x_warpcore_set_sgmii_speed(phy, params, 1); + } + /* Issue Module detection */ + if (bnx2x_is_sfp_module_plugged(phy, params)) + bnx2x_sfp_module_detection(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + if (vars->line_speed != SPEED_20000) { + DP(NETIF_MSG_LINK, "Speed not supported yet\n"); + return; + } + DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n"); + bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane); + /* Issue Module detection */ + + bnx2x_sfp_module_detection(phy, params); + break; + + case PORT_HW_CFG_NET_SERDES_IF_KR2: + if (vars->line_speed != SPEED_20000) { + DP(NETIF_MSG_LINK, "Speed not supported yet\n"); + return; + } + DP(NETIF_MSG_LINK, "Setting 20G KR2\n"); + bnx2x_warpcore_set_20G_KR2(bp, phy); + break; + + default: + DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface " + "0x%x\n", serdes_net_if); + return; + } + } + + /* Take lane out of reset after configuration is finished */ + bnx2x_warpcore_reset_lane(bp, phy, 0); + DP(NETIF_MSG_LINK, "Exit config init\n"); +} + +static void bnx2x_sfp_e3_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port = params->port; + + cfg_pin = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + /* Set the !tx_en since this pin is DISABLE_TX_LASER */ + DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en); + /* For 20G, the expected pin to be used is 3 pins after the current */ + + bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1); + if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) + bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1); +} + +static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = 
params->bp; + u16 val16; + bnx2x_sfp_e3_set_transmitter(params, phy, 0); + bnx2x_set_mdio_clk(bp, params->chip_id, params->port); + bnx2x_set_aer_mmd(params, phy); + /* Global register */ + bnx2x_warpcore_reset_lane(bp, phy, 1); + + /* Clear loopback settings (if any) */ + /* 10G & 20G */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 & + 0xBFFF); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe); + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + val16 & ~0x10); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, + val16 & 0xff00); + +} + +static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val16; + u32 lane; + DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", + params->loopback_mode, phy->req_line_speed); + + if (phy->req_line_speed < SPEED_10000) { + /* 10/100/1000 */ + + /* Update those 1-copy registers */ + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0); + /* Enable 1G MDIO (1-copy) */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + val16 | 0x10); + /* Set 1G loopback based on lane (1-copy) */ + lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL2, + val16 | (1<<lane)); + + /* Switch back to 4-copy registers */ + bnx2x_set_aer_mmd(params, phy); + /* Global loopback, not recommended. 
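bnx2x_sfp_e3_set_transmitter() above drives a pin defined as DISABLE_TX_LASER, so the logical enable is inverted on write, and 20G-capable boards mirror the write to the pin three slots up. A sketch of that inversion as a hypothetical wrapper:

/* Pin semantics are DISABLE_TX_LASER, i.e. an active-low enable */
static void sfp_e3_laser_enable(struct bnx2x *bp, u32 cfg_pin, u8 enable,
				u8 has_20g)
{
	bnx2x_set_cfg_pin(bp, cfg_pin, enable ^ 1);
	if (has_20g)	/* 20G uses the pin 3 positions after this one */
		bnx2x_set_cfg_pin(bp, cfg_pin + 3, enable ^ 1);
}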
*/ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | + 0x4000); + } else { + /* 10G & 20G */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | + 0x4000); + + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1); + } +} + void bnx2x_link_status_update(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u8 link_10g; + u8 link_10g_plus; u8 port = params->port; + u32 sync_offset, media_types; + /* Update PHY configuration */ + set_phy_vars(params, vars); vars->link_status = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, port_mb[port].link_status)); vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); - + vars->phy_flags = PHY_XGXS_FLAG; if (vars->link_up) { DP(NETIF_MSG_LINK, "phy link up\n"); @@ -1664,27 +4392,9 @@ void bnx2x_link_status_update(struct link_params *params, case LINK_10GTFD: vars->line_speed = SPEED_10000; break; - - case LINK_12GTFD: - vars->line_speed = SPEED_12000; - break; - - case LINK_12_5GTFD: - vars->line_speed = SPEED_12500; - break; - - case LINK_13GTFD: - vars->line_speed = SPEED_13000; + case LINK_20GTFD: + vars->line_speed = SPEED_20000; break; - - case LINK_15GTFD: - vars->line_speed = SPEED_15000; - break; - - case LINK_16GTFD: - vars->line_speed = SPEED_16000; - break; - default: break; } @@ -1705,19 +4415,24 @@ void bnx2x_link_status_update(struct link_params *params, } else { vars->phy_flags &= ~PHY_SGMII_FLAG; } - + if (vars->line_speed && + USES_WARPCORE(bp) && + (vars->line_speed == SPEED_1000)) + vars->phy_flags |= PHY_SGMII_FLAG; /* anything 10 and over uses the bmac */ - link_10g = ((vars->line_speed == SPEED_10000) || - (vars->line_speed == SPEED_12000) || - (vars->line_speed == SPEED_12500) || - (vars->line_speed == SPEED_13000) || - (vars->line_speed == SPEED_15000) || - (vars->line_speed == SPEED_16000)); - if (link_10g) - vars->mac_type = MAC_TYPE_BMAC; - else - vars->mac_type = MAC_TYPE_EMAC; + link_10g_plus = (vars->line_speed >= SPEED_10000); + if (link_10g_plus) { + if (USES_WARPCORE(bp)) + vars->mac_type = MAC_TYPE_XMAC; + else + vars->mac_type = MAC_TYPE_BMAC; + } else { + if (USES_WARPCORE(bp)) + vars->mac_type = MAC_TYPE_UMAC; + else + vars->mac_type = MAC_TYPE_EMAC; + } } else { /* link down */ DP(NETIF_MSG_LINK, "phy link down\n"); @@ -1731,8 +4446,40 @@ void bnx2x_link_status_update(struct link_params *params, vars->mac_type = MAC_TYPE_NONE; } - DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x\n", - vars->link_status, vars->phy_link_up); + /* Sync media type */ + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].media_type); + media_types = REG_RD(bp, sync_offset); + + params->phy[INT_PHY].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT; + params->phy[EXT_PHY1].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT; + params->phy[EXT_PHY2].media_type = + (media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >> + PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT; + DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types); + + /* Sync AEU offset */ + sync_offset = params->shmem_base + + offsetof(struct 
shmem_region, + dev_info.port_hw_config[port].aeu_int_mask); + + vars->aeu_int_mask = REG_RD(bp, sync_offset); + + /* Sync PFC status */ + if (vars->link_status & LINK_STATUS_PFC_ENABLED) + params->feature_config_flags |= + FEATURE_CONFIG_PFC_ENABLED; + else + params->feature_config_flags &= + ~FEATURE_CONFIG_PFC_ENABLED; + + DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", + vars->link_status, vars->phy_link_up, vars->aeu_int_mask); DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", vars->line_speed, vars->duplex, vars->flow_ctrl); } @@ -1759,9 +4506,9 @@ static void bnx2x_set_master_ln(struct link_params *params, (new_master_ln | ser_lane)); } -static u8 bnx2x_reset_unicore(struct link_params *params, - struct bnx2x_phy *phy, - u8 set_serdes) +static int bnx2x_reset_unicore(struct link_params *params, + struct bnx2x_phy *phy, + u8 set_serdes) { struct bnx2x *bp = params->bp; u16 mii_control; @@ -2048,9 +4795,6 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, if (vars->line_speed == SPEED_10000) reg_val |= MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; - if (vars->line_speed == SPEED_13000) - reg_val |= - MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; } CL22_WR_OVER_CL45(bp, phy, @@ -2059,8 +4803,8 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, } -static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy, - struct link_params *params) +static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u16 val = 0; @@ -2081,44 +4825,9 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy, MDIO_OVER_1G_UP3, 0x400); } -static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, - struct link_params *params, u16 *ieee_fc) -{ - struct bnx2x *bp = params->bp; - *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; - /* - * Resolve pause mode and advertisement. 
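bnx2x_link_status_update() now resyncs the per-PHY media type from one shmem word with a mask/shift pair per PHY. Assuming INT_PHY, EXT_PHY1 and EXT_PHY2 are consecutive array indices (an assumption of this sketch, suggested but not guaranteed by the accesses above), the extraction can be written table-driven:

static const struct {
	u32 mask;
	u8 shift;
} media_fld[] = {
	{ PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK, PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT },
	{ PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK, PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT },
	{ PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK, PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT },
};

static void sync_media_types(struct link_params *params, u32 media_types)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(media_fld); i++)
		params->phy[INT_PHY + i].media_type =
			(media_types & media_fld[i].mask) >> media_fld[i].shift;
}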
- * Please refer to Table 28B-3 of the 802.3ab-1999 spec - */ - - switch (phy->req_flow_ctrl) { - case BNX2X_FLOW_CTRL_AUTO: - if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - else - *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - break; - case BNX2X_FLOW_CTRL_TX: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - break; - - case BNX2X_FLOW_CTRL_RX: - case BNX2X_FLOW_CTRL_BOTH: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - break; - - case BNX2X_FLOW_CTRL_NONE: - default: - *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; - break; - } - DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc); -} - -static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, - struct link_params *params, - u16 ieee_fc) +static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, + struct link_params *params, + u16 ieee_fc) { struct bnx2x *bp = params->bp; u16 val; @@ -2252,35 +4961,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, * link management */ -static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) -{ /* LD LP */ - switch (pause_result) { /* ASYM P ASYM P */ - case 0xb: /* 1 0 1 1 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; - break; - - case 0xe: /* 1 1 1 0 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; - break; - - case 0x5: /* 0 1 0 1 */ - case 0x7: /* 0 1 1 1 */ - case 0xd: /* 1 1 0 1 */ - case 0xf: /* 1 1 1 1 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; - break; - - default: - break; - } - if (pause_result & (1<<0)) - vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; - if (pause_result & (1<<1)) - vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; -} - -static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, - struct link_params *params) +static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u16 pd_10g, status2_1000x; @@ -2383,7 +5065,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - u16 rx_status, ustat_val, cl37_fsm_recieved; + u16 rx_status, ustat_val, cl37_fsm_received; DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); /* Step 1: Make sure signal is detected */ CL22_RD_OVER_CL45(bp, phy, @@ -2421,15 +5103,15 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_REMOTE_PHY, MDIO_REMOTE_PHY_MISC_RX_STATUS, - &cl37_fsm_recieved); - if ((cl37_fsm_recieved & + &cl37_fsm_received); + if ((cl37_fsm_received & (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) { DP(NETIF_MSG_LINK, "No CL37 FSM were received. 
" "misc_rx_status(0x8330) = 0x%x\n", - cl37_fsm_recieved); + cl37_fsm_received); return; } /* @@ -2462,45 +5144,25 @@ static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy, vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED; } - -static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, +static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars) + struct link_vars *vars, + u16 is_link_up, + u16 speed_mask, + u16 is_duplex) { struct bnx2x *bp = params->bp; - u16 new_line_speed, gp_status; - u8 rc = 0; - - /* Read gp_status */ - CL22_RD_OVER_CL45(bp, phy, - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); - if (phy->req_line_speed == SPEED_AUTO_NEG) vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; - if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { - DP(NETIF_MSG_LINK, "phy link up gp_status=0x%x\n", - gp_status); + if (is_link_up) { + DP(NETIF_MSG_LINK, "phy link up\n"); vars->phy_link_up = 1; vars->link_status |= LINK_STATUS_LINK_UP; - if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) - vars->duplex = DUPLEX_FULL; - else - vars->duplex = DUPLEX_HALF; - - if (SINGLE_MEDIA_DIRECT(params)) { - bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); - if (phy->req_line_speed == SPEED_AUTO_NEG) - bnx2x_xgxs_an_resolve(phy, params, vars, - gp_status); - } - - switch (gp_status & GP_STATUS_SPEED_MASK) { + switch (speed_mask) { case GP_STATUS_10M: - new_line_speed = SPEED_10; + vars->line_speed = SPEED_10; if (vars->duplex == DUPLEX_FULL) vars->link_status |= LINK_10TFD; else @@ -2508,7 +5170,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, break; case GP_STATUS_100M: - new_line_speed = SPEED_100; + vars->line_speed = SPEED_100; if (vars->duplex == DUPLEX_FULL) vars->link_status |= LINK_100TXFD; else @@ -2517,7 +5179,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, case GP_STATUS_1G: case GP_STATUS_1G_KX: - new_line_speed = SPEED_1000; + vars->line_speed = SPEED_1000; if (vars->duplex == DUPLEX_FULL) vars->link_status |= LINK_1000TFD; else @@ -2525,7 +5187,7 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, break; case GP_STATUS_2_5G: - new_line_speed = SPEED_2500; + vars->line_speed = SPEED_2500; if (vars->duplex == DUPLEX_FULL) vars->link_status |= LINK_2500TFD; else @@ -2536,50 +5198,28 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, case GP_STATUS_6G: DP(NETIF_MSG_LINK, "link speed unsupported gp_status 0x%x\n", - gp_status); + speed_mask); return -EINVAL; case GP_STATUS_10G_KX4: case GP_STATUS_10G_HIG: case GP_STATUS_10G_CX4: - new_line_speed = SPEED_10000; + case GP_STATUS_10G_KR: + case GP_STATUS_10G_SFI: + case GP_STATUS_10G_XFI: + vars->line_speed = SPEED_10000; vars->link_status |= LINK_10GTFD; break; - - case GP_STATUS_12G_HIG: - new_line_speed = SPEED_12000; - vars->link_status |= LINK_12GTFD; - break; - - case GP_STATUS_12_5G: - new_line_speed = SPEED_12500; - vars->link_status |= LINK_12_5GTFD; - break; - - case GP_STATUS_13G: - new_line_speed = SPEED_13000; - vars->link_status |= LINK_13GTFD; + case GP_STATUS_20G_DXGXS: + vars->line_speed = SPEED_20000; + vars->link_status |= LINK_20GTFD; break; - - case GP_STATUS_15G: - new_line_speed = SPEED_15000; - vars->link_status |= LINK_15GTFD; - break; - - case GP_STATUS_16G: - new_line_speed = SPEED_16000; - vars->link_status |= LINK_16GTFD; - break; - default: DP(NETIF_MSG_LINK, "link speed unsupported gp_status 0x%x\n", - gp_status); + speed_mask); return 
-EINVAL; } - - vars->line_speed = new_line_speed; - } else { /* link_down */ DP(NETIF_MSG_LINK, "phy link down\n"); @@ -2588,7 +5228,47 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, vars->duplex = DUPLEX_FULL; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; + } + DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n", + vars->phy_link_up, vars->line_speed); + return 0; +} + +static int bnx2x_link_settings_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + + struct bnx2x *bp = params->bp; + + u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask; + int rc = 0; + /* Read gp_status */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) + duplex = DUPLEX_FULL; + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) + link_up = 1; + speed_mask = gp_status & GP_STATUS_SPEED_MASK; + DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n", + gp_status, link_up, speed_mask); + rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask, + duplex); + if (rc == -EINVAL) + return rc; + + if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { + if (SINGLE_MEDIA_DIRECT(params)) { + bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); + if (phy->req_line_speed == SPEED_AUTO_NEG) + bnx2x_xgxs_an_resolve(phy, params, vars, + gp_status); + } + } else { /* link_down */ if ((phy->req_line_speed == SPEED_AUTO_NEG) && SINGLE_MEDIA_DIRECT(params)) { /* Check signal is detected */ @@ -2596,13 +5276,86 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, } } - DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n", - gp_status, vars->phy_link_up, vars->line_speed); DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", vars->duplex, vars->flow_ctrl, vars->link_status); return rc; } +static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + + struct bnx2x *bp = params->bp; + + u8 lane; + u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL; + int rc = 0; + lane = bnx2x_get_warpcore_lane(phy, params); + /* Read gp_status */ + if (phy->req_line_speed > SPEED_10000) { + u16 temp_link_up; + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + 1, &temp_link_up); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + 1, &link_up); + DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n", + temp_link_up, link_up); + link_up &= (1<<2); + if (link_up) + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); + DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); + /* Check for either KR or generic link up. 
*/ + gp_status1 = ((gp_status1 >> 8) & 0xf) | + ((gp_status1 >> 12) & 0xf); + link_up = gp_status1 & (1 << lane); + if (link_up && SINGLE_MEDIA_DIRECT(params)) { + u16 pd, gp_status4; + if (phy->req_line_speed == SPEED_AUTO_NEG) { + /* Check Autoneg complete */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_4, + &gp_status4); + if (gp_status4 & ((1<<12)<<lane)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + + /* Check parallel detect used */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_PAR_DET_10G_STATUS, + &pd); + if (pd & (1<<15)) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + } + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + } + + if (lane < 2) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed); + } else { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed); + } + DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed); + + if ((lane & 1) == 0) + gp_speed <<= 8; + gp_speed &= 0x3f00; + + + rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, + duplex); + + DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n", + vars->duplex, vars->flow_ctrl, vars->link_status); + return rc; +} static void bnx2x_set_gmii_tx_driver(struct link_params *params) { struct bnx2x *bp = params->bp; @@ -2642,8 +5395,8 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) } } -static u8 bnx2x_emac_program(struct link_params *params, - struct link_vars *vars) +static int bnx2x_emac_program(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -2713,9 +5466,9 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, } } -static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) || @@ -2742,11 +5495,11 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "not SGMII, AN\n"); /* AN enabled */ - bnx2x_set_brcm_cl37_advertisment(phy, params); + bnx2x_set_brcm_cl37_advertisement(phy, params); /* program duplex & pause advertisement (for aneg) */ - bnx2x_set_ieee_aneg_advertisment(phy, params, - vars->ieee_fc); + bnx2x_set_ieee_aneg_advertisement(phy, params, + vars->ieee_fc); /* enable autoneg */ bnx2x_set_autoneg(phy, params, vars, enable_cl73); @@ -2762,29 +5515,12 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, } } -static u8 bnx2x_init_serdes(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - u8 rc; - vars->phy_flags |= PHY_SGMII_FLAG; - bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - bnx2x_set_aer_mmd_serdes(params->bp, phy); - rc = bnx2x_reset_unicore(params, phy, 1); - /* reset the SerDes and wait for reset bit return low */ - if (rc != 0) - return rc; - bnx2x_set_aer_mmd_serdes(params->bp, phy); - - return rc; -} - -static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy, +static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u8 rc; - vars->phy_flags = PHY_XGXS_FLAG; + int rc; + vars->phy_flags |= PHY_XGXS_FLAG; if ((phy->req_line_speed && ((phy->req_line_speed == SPEED_100) || (phy->req_line_speed == SPEED_10))) || @@ -2792,26 +5528,28 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy 
*phy, (phy->speed_cap_mask >= PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) && (phy->speed_cap_mask < - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) - )) + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD)) vars->phy_flags |= PHY_SGMII_FLAG; else vars->phy_flags &= ~PHY_SGMII_FLAG; bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - bnx2x_set_aer_mmd_xgxs(params, phy); - bnx2x_set_master_ln(params, phy); + bnx2x_set_aer_mmd(params, phy); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) + bnx2x_set_master_ln(params, phy); rc = bnx2x_reset_unicore(params, phy, 0); /* reset the SerDes and wait for reset bit return low */ if (rc != 0) return rc; - bnx2x_set_aer_mmd_xgxs(params, phy); - + bnx2x_set_aer_mmd(params, phy); /* setting the masterLn_def again after the reset */ - bnx2x_set_master_ln(params, phy); - bnx2x_set_swap_lanes(params, phy); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { + bnx2x_set_master_ln(params, phy); + bnx2x_set_swap_lanes(params, phy); + } return rc; } @@ -2823,8 +5561,13 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, u16 cnt, ctrl; /* Wait for soft reset to get cleared up to 1 sec */ for (cnt = 0; cnt < 1000; cnt++) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl); + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + bnx2x_cl22_read(bp, phy, + MDIO_PMA_REG_CTRL, &ctrl); + else + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, &ctrl); if (!(ctrl & (1<<15))) break; msleep(1); @@ -2845,7 +5588,11 @@ static void bnx2x_link_int_enable(struct link_params *params) struct bnx2x *bp = params->bp; /* Setting the status to report on link up for either XGXS or SerDes */ - if (params->switch_cfg == SWITCH_CFG_10G) { + if (CHIP_IS_E3(bp)) { + mask = NIG_MASK_XGXS0_LINK_STATUS; + if (!(SINGLE_MEDIA_DIRECT(params))) + mask |= NIG_MASK_MI_INT; + } else if (params->switch_cfg == SWITCH_CFG_10G) { mask = (NIG_MASK_XGXS0_LINK10G | NIG_MASK_XGXS0_LINK_STATUS); DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); @@ -2918,11 +5665,11 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, } static void bnx2x_link_int_ack(struct link_params *params, - struct link_vars *vars, u8 is_10g) + struct link_vars *vars, u8 is_10g_plus) { struct bnx2x *bp = params->bp; u8 port = params->port; - + u32 mask; /* * First reset all status we assume only one line will be * change at a time @@ -2932,47 +5679,34 @@ static void bnx2x_link_int_ack(struct link_params *params, NIG_STATUS_XGXS0_LINK_STATUS | NIG_STATUS_SERDES0_LINK_STATUS)); if (vars->phy_link_up) { - if (is_10g) { - /* - * Disable the 10G link interrupt by writing 1 to the - * status register - */ - DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); - bnx2x_bits_en(bp, - NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - NIG_STATUS_XGXS0_LINK10G); - - } else if (params->switch_cfg == SWITCH_CFG_10G) { - /* - * Disable the link interrupt by writing 1 to the - * relevant lane in the status register - */ - u32 ser_lane = ((params->lane_config & + if (USES_WARPCORE(bp)) + mask = NIG_STATUS_XGXS0_LINK_STATUS; + else { + if (is_10g_plus) + mask = NIG_STATUS_XGXS0_LINK10G; + else if (params->switch_cfg == SWITCH_CFG_10G) { + /* + * Disable the link interrupt by writing 1 to + * the relevant lane in the status register + */ + u32 ser_lane = + ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - - DP(NETIF_MSG_LINK, "%d speed XGXS phy link up\n", - vars->line_speed); - bnx2x_bits_en(bp, 
- NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - ((1 << ser_lane) << - NIG_STATUS_XGXS0_LINK_STATUS_SIZE)); - - } else { /* SerDes */ - DP(NETIF_MSG_LINK, "SerDes phy link up\n"); - /* - * Disable the link interrupt by writing 1 to the status - * register - */ - bnx2x_bits_en(bp, - NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - NIG_STATUS_SERDES0_LINK_STATUS); + mask = ((1 << ser_lane) << + NIG_STATUS_XGXS0_LINK_STATUS_SIZE); + } else + mask = NIG_STATUS_SERDES0_LINK_STATUS; } - + DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n", + mask); + bnx2x_bits_en(bp, + NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, + mask); } } -static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len) +static int bnx2x_format_ver(u32 num, u8 *str, u16 *len) { u8 *str_ptr = str; u32 mask = 0xf0000000; @@ -3011,19 +5745,19 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 *len) } -static u8 bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) +static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) { str[0] = '\0'; (*len)--; return 0; } -u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, - u8 *version, u16 len) +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, + u8 *version, u16 len) { struct bnx2x *bp; u32 spirom_ver = 0; - u8 status = 0; + int status = 0; u8 *ver_p = version; u16 remain_len = len; if (version == NULL || params == NULL) @@ -3065,15 +5799,18 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; if (phy->req_line_speed != SPEED_1000) { - u32 md_devad; + u32 md_devad = 0; DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); - /* change the uni_phy_addr in the nig */ - md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + - port*0x18)); + if (!CHIP_IS_E3(bp)) { + /* change the uni_phy_addr in the nig */ + md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + + port*0x18)); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + 0x5); + } bnx2x_cl45_write(bp, phy, 5, @@ -3088,10 +5825,13 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, 0x6041); msleep(200); /* set aer mmd back */ - bnx2x_set_aer_mmd_xgxs(params, phy); + bnx2x_set_aer_mmd(params, phy); - /* and md_devad */ - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); + if (!CHIP_IS_E3(bp)) { + /* and md_devad */ + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, + md_devad); + } } else { u16 mii_ctrl; DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); @@ -3107,12 +5847,13 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, } } -u8 bnx2x_set_led(struct link_params *params, - struct link_vars *vars, u8 mode, u32 speed) +int bnx2x_set_led(struct link_params *params, + struct link_vars *vars, u8 mode, u32 speed) { u8 port = params->port; u16 hw_led_mode = params->hw_led_mode; - u8 rc = 0, phy_idx; + int rc = 0; + u8 phy_idx; u32 tmp; u32 emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; struct bnx2x *bp = params->bp; @@ -3146,8 +5887,10 @@ u8 bnx2x_set_led(struct link_params *params, if (!vars->link_up) break; case LED_MODE_ON: - if (params->phy[EXT_PHY1].type == - PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && + if (((params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || + (params->phy[EXT_PHY1].type == + PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && CHIP_IS_E2(bp) && params->num_phys == 2) { /* * This is a work-around for E2+8727 Configurations @@ -3162,7 +5905,9 @@ u8 bnx2x_set_led(struct link_params *params, (tmp | EMAC_LED_OVERRIDE)); return rc; } - } else if (SINGLE_MEDIA_DIRECT(params)) { + } else if (SINGLE_MEDIA_DIRECT(params) && + (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) { /* * This is a work-around for HW issue found when link * is up in CL73 @@ -3214,21 +5959,49 @@ u8 bnx2x_set_led(struct link_params *params, * This function comes to reflect the actual link state read DIRECTLY from the * HW */ -u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, - u8 is_serdes) +int bnx2x_test_link(struct link_params *params, struct link_vars *vars, + u8 is_serdes) { struct bnx2x *bp = params->bp; u16 gp_status = 0, phy_index = 0; u8 ext_phy_link_up = 0, serdes_phy_type; struct link_vars temp_vars; - - CL22_RD_OVER_CL45(bp, ¶ms->phy[INT_PHY], + struct bnx2x_phy *int_phy = ¶ms->phy[INT_PHY]; + + if (CHIP_IS_E3(bp)) { + u16 link_up; + if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] + > SPEED_10000) { + /* Check 20G link */ + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + 1, &link_up); + link_up &= (1<<2); + } else { + /* Check 10G link and below*/ + u8 lane = bnx2x_get_warpcore_lane(int_phy, params); + bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, + &gp_status); + gp_status = ((gp_status >> 8) & 0xf) | + ((gp_status >> 12) & 0xf); + link_up = gp_status & (1 << lane); + } + if (!link_up) + return -ESRCH; + } else { + CL22_RD_OVER_CL45(bp, int_phy, MDIO_REG_BANK_GP_STATUS, MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status); /* link is up only if both local phy and external phy are up */ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) return -ESRCH; + } + /* In XGXS loopback mode, do not check external PHY */ + if (params->loopback_mode == LOOPBACK_XGXS) + return 0; switch (params->num_phys) { case 1: @@ -3245,7 +6018,9 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, serdes_phy_type = ((params->phy[phy_index].media_type == ETH_PHY_SFP_FIBER) || (params->phy[phy_index].media_type == - ETH_PHY_XFP_FIBER)); + ETH_PHY_XFP_FIBER) || + (params->phy[phy_index].media_type == + ETH_PHY_DA_TWINAX)); if (is_serdes != serdes_phy_type) continue; @@ -3263,10 +6038,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, return -ESRCH; } -static u8 bnx2x_link_initialize(struct link_params *params, - struct link_vars *vars) +static int bnx2x_link_initialize(struct link_params *params, + struct link_vars *vars) { - u8 rc = 0; + int rc = 0; u8 phy_index, non_ext_phy; struct bnx2x *bp = params->bp; /* @@ -3282,12 +6057,8 @@ static u8 bnx2x_link_initialize(struct link_params *params, * (no external phys), or this board has external phy which requires * to first. 
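As an aside on the E3 branch of bnx2x_test_link() above: for 10G and below, the Warpcore reports link per lane, and the two GP_STATUS nibbles are folded into a single 4-bit lane map before the lane's bit is tested. A minimal standalone sketch of that check (the assumption that bits 8-11 and 12-15 each carry four lanes' status follows from the shifts in the patch, not from documentation quoted here):

static int warpcore_lane_link_up(u16 gp_status, u8 lane)
{
	/* Merge the two per-lane status nibbles into one bitmap */
	u16 lane_map = ((gp_status >> 8) & 0xf) | ((gp_status >> 12) & 0xf);

	return !!(lane_map & (1 << lane));
}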
*/ - - if (params->phy[INT_PHY].config_init) - params->phy[INT_PHY].config_init( - ¶ms->phy[INT_PHY], - params, vars); - + if (!USES_WARPCORE(bp)) + bnx2x_prepare_xgxs(¶ms->phy[INT_PHY], params, vars); /* init ext phy and enable link state int */ non_ext_phy = (SINGLE_MEDIA_DIRECT(params) || (params->loopback_mode == LOOPBACK_XGXS)); @@ -3296,13 +6067,22 @@ static u8 bnx2x_link_initialize(struct link_params *params, (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) || (params->loopback_mode == LOOPBACK_EXT_PHY)) { struct bnx2x_phy *phy = ¶ms->phy[INT_PHY]; - if (vars->line_speed == SPEED_AUTO_NEG) + if (vars->line_speed == SPEED_AUTO_NEG && + (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) bnx2x_set_parallel_detection(phy, params); - bnx2x_init_internal_phy(phy, params, vars); + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init(phy, + params, + vars); } /* Init external phy*/ - if (!non_ext_phy) + if (non_ext_phy) { + if (params->phy[INT_PHY].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + } else { for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { /* @@ -3311,17 +6091,22 @@ static u8 bnx2x_link_initialize(struct link_params *params, * need to initialize the first phy, since they are * connected. */ + if (params->phy[phy_index].supported & + SUPPORTED_FIBRE) + vars->link_status |= LINK_STATUS_SERDES_LINK; + if (phy_index == EXT_PHY2 && (bnx2x_phy_selection(params) == PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { - DP(NETIF_MSG_LINK, "Ignoring second phy\n"); + DP(NETIF_MSG_LINK, "Not initializing" + " second phy\n"); continue; } params->phy[phy_index].config_init( ¶ms->phy[phy_index], params, vars); } - + } /* Reset the interrupt indication after phy was initialized */ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + params->port*4, @@ -3329,6 +6114,7 @@ static u8 bnx2x_link_initialize(struct link_params *params, NIG_STATUS_XGXS0_LINK_STATUS | NIG_STATUS_SERDES0_LINK_STATUS | NIG_MASK_MI_INT)); + bnx2x_update_mng(params, vars->link_status); return rc; } @@ -3359,20 +6145,25 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "reset external PHY\n"); } -static u8 bnx2x_update_link_down(struct link_params *params, - struct link_vars *vars) +static int bnx2x_update_link_down(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 port = params->port; DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); bnx2x_set_led(params, vars, LED_MODE_OFF, 0); - + vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; /* indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; /* update shared memory */ - vars->link_status = 0; + vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | + LINK_STATUS_LINK_UP | + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | + LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | + LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | + LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK); vars->line_speed = 0; bnx2x_update_mng(params, vars->link_status); @@ -3380,26 +6171,34 @@ static u8 bnx2x_update_link_down(struct link_params *params, REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); /* disable emac */ - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); msleep(10); - - /* reset BigMac */ - bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + /* reset BigMac/Xmac */ + if (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp)) { + bnx2x_bmac_rx_disable(bp, params->port); + REG_WR(bp, 
GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + } + if (CHIP_IS_E3(bp)) + bnx2x_xmac_disable(params); + return 0; } -static u8 bnx2x_update_link_up(struct link_params *params, - struct link_vars *vars, - u8 link_10g) +static int bnx2x_update_link_up(struct link_params *params, + struct link_vars *vars, + u8 link_10g) { struct bnx2x *bp = params->bp; u8 port = params->port; - u8 rc = 0; + int rc = 0; vars->link_status |= LINK_STATUS_LINK_UP; + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) vars->link_status |= @@ -3408,25 +6207,48 @@ static u8 bnx2x_update_link_up(struct link_params *params, if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) vars->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED; - - if (link_10g) { - bnx2x_bmac_enable(params, vars, 0); + if (USES_WARPCORE(bp)) { + if (link_10g) { + if (bnx2x_xmac_enable(params, vars, 0) == + -ESRCH) { + DP(NETIF_MSG_LINK, "Found errors on XMAC\n"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } + } else + bnx2x_umac_enable(params, vars, 0); bnx2x_set_led(params, vars, - LED_MODE_OPER, SPEED_10000); - } else { - rc = bnx2x_emac_program(params, vars); + LED_MODE_OPER, vars->line_speed); + } + if ((CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp))) { + if (link_10g) { + if (bnx2x_bmac_enable(params, vars, 0) == + -ESRCH) { + DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + vars->link_status &= ~LINK_STATUS_LINK_UP; + } - bnx2x_emac_enable(params, vars, 0); + bnx2x_set_led(params, vars, + LED_MODE_OPER, SPEED_10000); + } else { + rc = bnx2x_emac_program(params, vars); + bnx2x_emac_enable(params, vars, 0); - /* AN complete? */ - if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) - && (!(vars->phy_flags & PHY_SGMII_FLAG)) && - SINGLE_MEDIA_DIRECT(params)) - bnx2x_set_gmii_tx_driver(params); + /* AN complete? */ + if ((vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + && (!(vars->phy_flags & PHY_SGMII_FLAG)) && + SINGLE_MEDIA_DIRECT(params)) + bnx2x_set_gmii_tx_driver(params); + } } /* PBF - link up */ - if (!(CHIP_IS_E2(bp))) + if (CHIP_IS_E1x(bp)) rc |= bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); @@ -3451,17 +6273,18 @@ static u8 bnx2x_update_link_up(struct link_params *params, * external phy needs to be up, and at least one of the 2 * external phy link must be up. 
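bnx2x_update_link_up() now selects the MAC block by chip family and speed. A condensed sketch of that policy (the function names are the driver's; folding the logic into one helper is hypothetical, and the error handling and LED setup of the real code are trimmed):

static void mac_enable_for_link(struct link_params *params,
				struct link_vars *vars, u8 link_10g_plus)
{
	if (USES_WARPCORE(params->bp)) {
		/* E3: XMAC at 10G and above, UMAC below */
		if (link_10g_plus)
			bnx2x_xmac_enable(params, vars, 0);
		else
			bnx2x_umac_enable(params, vars, 0);
	} else {
		/* E1x/E2: BMAC at 10G and above, EMAC below */
		if (link_10g_plus) {
			bnx2x_bmac_enable(params, vars, 0);
		} else {
			bnx2x_emac_program(params, vars);
			bnx2x_emac_enable(params, vars, 0);
		}
	}
}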
*/ -u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) +int bnx2x_link_update(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; struct link_vars phy_vars[MAX_PHYS]; u8 port = params->port; - u8 link_10g, phy_index; - u8 ext_phy_link_up = 0, cur_link_up, rc = 0; + u8 link_10g_plus, phy_index; + u8 ext_phy_link_up = 0, cur_link_up; + int rc = 0; u8 is_mi_int = 0; u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed; u8 active_external_phy = INT_PHY; - vars->link_status = 0; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; for (phy_index = INT_PHY; phy_index < params->num_phys; phy_index++) { phy_vars[phy_index].flow_ctrl = 0; @@ -3470,8 +6293,12 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) phy_vars[phy_index].duplex = DUPLEX_FULL; phy_vars[phy_index].phy_link_up = 0; phy_vars[phy_index].link_up = 0; + phy_vars[phy_index].fault_detected = 0; } + if (USES_WARPCORE(bp)) + bnx2x_set_aer_mmd(params, ¶ms->phy[INT_PHY]); + DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", port, (vars->phy_flags & PHY_XGXS_FLAG), REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); @@ -3488,13 +6315,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); /* disable emac */ - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); /* * Step 1: * Check external link change only for external phys, and apply * priority selection between them in case the link on both phys - * is up. Note that the instead of the common vars, a temporary + * is up. Note that instead of the common vars, a temporary * vars argument is used since each phy may have different link/ * speed/duplex result */ @@ -3601,6 +6429,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) if (params->phy[active_external_phy].supported & SUPPORTED_FIBRE) vars->link_status |= LINK_STATUS_SERDES_LINK; + else + vars->link_status &= ~LINK_STATUS_SERDES_LINK; DP(NETIF_MSG_LINK, "Active external phy selected: %x\n", active_external_phy); } @@ -3640,14 +6470,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) } /* anything 10 and over uses the bmac */ - link_10g = ((vars->line_speed == SPEED_10000) || - (vars->line_speed == SPEED_12000) || - (vars->line_speed == SPEED_12500) || - (vars->line_speed == SPEED_13000) || - (vars->line_speed == SPEED_15000) || - (vars->line_speed == SPEED_16000)); + link_10g_plus = (vars->line_speed >= SPEED_10000); - bnx2x_link_int_ack(params, vars, link_10g); + bnx2x_link_int_ack(params, vars, link_10g_plus); /* * In case external phy link is up, and internal link is down @@ -3671,21 +6496,24 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) vars->phy_flags |= PHY_SGMII_FLAG; else vars->phy_flags &= ~PHY_SGMII_FLAG; - bnx2x_init_internal_phy(¶ms->phy[INT_PHY], - params, + + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init( + ¶ms->phy[INT_PHY], params, vars); } } /* * Link is up only if both local phy and external phy (in case of - * non-direct board) are up + * non-direct board) are up and no fault detected on active PHY. 
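One small simplification worth calling out in bnx2x_link_update(): deciding whether the 10G+ MAC path applies used to enumerate every supported over-10G speed; it is now a single range test, so any future speed value at or above 10G takes the right path without another edit at this site:

	/* was: line_speed == 10000 || 12000 || 12500 || 13000 || 15000 || 16000 */
	link_10g_plus = (vars->line_speed >= SPEED_10000);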
*/ vars->link_up = (vars->phy_link_up && (ext_phy_link_up || - SINGLE_MEDIA_DIRECT(params))); + SINGLE_MEDIA_DIRECT(params)) && + (phy_vars[active_external_phy].fault_detected == 0)); if (vars->link_up) - rc = bnx2x_update_link_up(params, vars, link_10g); + rc = bnx2x_update_link_up(params, vars, link_10g_plus); else rc = bnx2x_update_link_down(params, vars); @@ -3729,69 +6557,6 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, phy->ver_addr); } -static void bnx2x_ext_phy_set_pause(struct link_params *params, - struct bnx2x_phy *phy, - struct link_vars *vars) -{ - u16 val; - struct bnx2x *bp = params->bp; - /* read modify write pause advertizing */ - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); - - val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; - - /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ - bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { - val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; - } - if ((vars->ieee_fc & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { - val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; - } - DP(NETIF_MSG_LINK, "Ext phy AN advertize 0x%x\n", val); - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); -} - -static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u16 ld_pause; /* local */ - u16 lp_pause; /* link partner */ - u16 pause_result; - u8 ret = 0; - /* read twice */ - - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - - if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) - vars->flow_ctrl = phy->req_flow_ctrl; - else if (phy->req_line_speed != SPEED_AUTO_NEG) - vars->flow_ctrl = params->req_fc_auto_adv; - else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { - ret = 1; - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV_PAUSE, &ld_pause); - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); - pause_result = (ld_pause & - MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; - pause_result |= (lp_pause & - MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; - DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", - pause_result); - bnx2x_pause_resolve(vars, pause_result); - } - return ret; -} - static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp, struct bnx2x_phy *phy, struct link_vars *vars) @@ -3845,13 +6610,13 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, pause_result); } } -static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, - struct bnx2x_phy *phy, - u8 port) +static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, + struct bnx2x_phy *phy, + u8 port) { u32 count = 0; u16 fw_ver1, fw_msgout; - u8 rc = 0; + int rc = 0; /* Boot port from external ROM */ /* EDC grst */ @@ -3926,7 +6691,7 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, /******************************************************************/ /* BCM8073 PHY SECTION */ /******************************************************************/ -static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) +static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) { /* This is only required for 8073A1, version 102 only */ u16 val; @@ -3952,7 +6717,7 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) return 1; } -static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) +static 
int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) { u16 val, cnt, cnt1 ; @@ -4059,9 +6824,9 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params, msleep(500); } -static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_8073_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 val = 0, tmp1; @@ -4081,9 +6846,9 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, /* enable LASI */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<2)); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x0004); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); bnx2x_8073_set_pause_cl37(params, phy, vars); @@ -4091,7 +6856,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); @@ -4225,7 +6990,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, u16 an1000_status = 0; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); @@ -4241,7 +7006,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, /* Check the LASI */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2); @@ -4367,9 +7132,9 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, /******************************************************************/ /* BCM8705 PHY SECTION */ /******************************************************************/ -static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_8705_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "init 8705\n"); @@ -4430,6 +7195,30 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, /******************************************************************/ /* SFP+ module Section */ /******************************************************************/ +static void bnx2x_set_disable_pmd_transmit(struct link_params *params, + struct bnx2x_phy *phy, + u8 pmd_dis) +{ + struct bnx2x *bp = params->bp; + /* + * Disable transmitter only for bootcodes which can enable it afterwards + * (for D3 link) + */ + if (pmd_dis) { + if (params->feature_config_flags & + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) + DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n"); + else { + DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n"); + return; + } + } else + DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, pmd_dis); +} + static u8 bnx2x_get_gpio_port(struct link_params *params) { u8 gpio_port; @@ -4443,9 +7232,10 @@ static u8 bnx2x_get_gpio_port(struct link_params *params) swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); return gpio_port ^ (swap_val && swap_override); } -static void bnx2x_sfp_set_transmitter(struct link_params *params, - struct bnx2x_phy *phy, - u8 tx_en) + +static void bnx2x_sfp_e1e2_set_transmitter(struct 
link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) { u16 val; u8 port = params->port; @@ -4500,9 +7290,21 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params, } } -static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) +static void bnx2x_sfp_set_transmitter(struct link_params *params, + struct bnx2x_phy *phy, + u8 tx_en) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en); + if (CHIP_IS_E3(bp)) + bnx2x_sfp_e3_set_transmitter(params, phy, tx_en); + else + bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en); +} + +static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, + u16 addr, u8 byte_cnt, u8 *o_buf) { struct bnx2x *bp = params->bp; u16 val = 0; @@ -4566,9 +7368,45 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, return -EINVAL; } -static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) +static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, + u16 addr, u8 byte_cnt, + u8 *o_buf) +{ + int rc = 0; + u8 i, j = 0, cnt = 0; + u32 data_array[4]; + u16 addr32; + struct bnx2x *bp = params->bp; + /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:" + " addr %d, cnt %d\n", + addr, byte_cnt);*/ + if (byte_cnt > 16) { + DP(NETIF_MSG_LINK, "Reading from eeprom is" + " limited to 16 bytes\n"); + return -EINVAL; + } + + /* 4 byte aligned address */ + addr32 = addr & (~0x3); + do { + rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, + data_array); + } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT)); + + if (rc == 0) { + for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) { + o_buf[j] = *((u8 *)data_array + i); + j++; + } + } + + return rc; +} + +static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, + u16 addr, u8 byte_cnt, u8 *o_buf) { struct bnx2x *bp = params->bp; u16 val, i; @@ -4653,27 +7491,39 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, return -EINVAL; } -u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf) +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf) { - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) - return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) - return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); - return -EINVAL; + int rc = -EINVAL; + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, + byte_cnt, o_buf); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, + byte_cnt, o_buf); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr, + byte_cnt, o_buf); + break; + } + return rc; } -static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, - struct link_params *params, - u16 *edc_mode) +static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, + struct link_params *params, + u16 *edc_mode) { struct bnx2x *bp = params->bp; + u32 sync_offset = 0, phy_idx,
media_types; u8 val, check_limiting_mode = 0; *edc_mode = EDC_MODE_LIMITING; + phy->media_type = ETH_PHY_UNSPECIFIED; /* First check for copper cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, @@ -4688,7 +7538,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, case SFP_EEPROM_CON_TYPE_VAL_COPPER: { u8 copper_module_type; - + phy->media_type = ETH_PHY_DA_TWINAX; /* * Check if it's an active cable (includes SFP+ module) * or a passive cable */ if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_FC_TX_TECH_ADDR, 1, - &copper_module_type) != - 0) { + &copper_module_type) != 0) { DP(NETIF_MSG_LINK, "Failed to read copper-cable-type" " from SFP+ EEPROM\n"); @@ -4723,6 +7572,7 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, break; } case SFP_EEPROM_CON_TYPE_VAL_LC: + phy->media_type = ETH_PHY_SFP_FIBER; DP(NETIF_MSG_LINK, "Optic module detected\n"); check_limiting_mode = 1; break; @@ -4731,7 +7581,22 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, val); return -EINVAL; } - + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(bp, sync_offset); + /* Update media type for non-PMF sync */ + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (&(params->phy[phy_idx]) == phy) { + media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx)); + break; + } + } + REG_WR(bp, sync_offset, media_types); if (check_limiting_mode) { u8 options[SFP_EEPROM_OPTIONS_SIZE]; if (bnx2x_read_sfp_module_eeprom(phy, @@ -4755,8 +7620,8 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, * This function reads the relevant field from the module (SFP+), and verifies it * is compliant with this board */ -static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, - struct link_params *params) +static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u32 val, cmd; @@ -4825,8 +7690,8 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, return -EINVAL; } -static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, - struct link_params *params) +static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, - struct link_params *params) { u8 val; @@ -4858,8 +7723,8 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, * In the GPIO register, bit 4 is used to determine if the GPIOs are * operating as INPUT or as OUTPUT.
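The media-type sync added in bnx2x_get_edc_mode() above publishes each PHY's media type into one packed shared-memory word so that non-PMF functions can pick it up. A standalone sketch of the field update (mask and shift names are the patch's; note it reuses PHY0's mask for every index and PHY1's shift as the per-index field width):

static u32 set_media_type_field(u32 media_types, u32 phy_idx, u32 media_type)
{
	u32 shift = PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx;

	media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << shift);
	media_types |= (media_type & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << shift;
	return media_types;
}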
Bit 1 is for input, and 0 for output - * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 - * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 + * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 + * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1 * where the 1st bit is the over-current (only input), and 2nd bit is * for power (only output) * @@ -4868,15 +7733,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, */ if (phy->flags & FLAGS_NOC) return; - if (!(phy->flags & - FLAGS_NOC) && is_power_up) + if (is_power_up) val = (1<<4); else /* * Set GPIO control to OUTPUT, and set the power bit * according to is_power_up */ - val = ((!(is_power_up)) << 1); + val = (1<<1); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, @@ -4884,9 +7748,9 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, val); } -static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 edc_mode) +static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 edc_mode) { u16 cur_limiting_mode; @@ -4934,9 +7798,9 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, return 0; } -static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, - struct bnx2x_phy *phy, - u16 edc_mode) +static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 edc_mode) { u16 phy_identifier; u16 rom_ver2_val; @@ -4989,7 +7853,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, } } -static void bnx2x_set_sfp_module_fault_led(struct link_params *params, +static void bnx2x_set_e1e2_module_fault_led(struct link_params *params, u8 gpio_mode) { struct bnx2x *bp = params->bp; @@ -5021,12 +7885,146 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params, } } -static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, - struct link_params *params) +static void bnx2x_set_e3_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + u32 pin_cfg; + u8 port = params->port; + struct bnx2x *bp = params->bp; + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >> + PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT; + DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n", + gpio_mode, pin_cfg); + bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode); +} + +static void bnx2x_set_sfp_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); + if (CHIP_IS_E3(bp)) { + /* + * Low ==> SFP+ module is supported + * High ==> SFP+ module is not on the approved vendor list + */ + bnx2x_set_e3_module_fault_led(params, gpio_mode); + } else + bnx2x_set_e1e2_module_fault_led(params, gpio_mode); } + +static void bnx2x_warpcore_power_module(struct link_params *params, + struct bnx2x_phy *phy, + u8 power) +{ + u32 pin_cfg; + struct bnx2x *bp = params->bp; + + pin_cfg = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].e3_sfp_ctrl)) & + PORT_HW_CFG_E3_PWR_DIS_MASK) >> + PORT_HW_CFG_E3_PWR_DIS_SHIFT; + + if (pin_cfg == PIN_CFG_NA) + return; + DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", + power, pin_cfg); + /* + * Low ==> corresponding SFP+ module is powered + * high ==> the SFP+ module is powered down + */ + bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); +} + +static void
bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + bnx2x_warpcore_power_module(params, phy, 0); +} + +static void bnx2x_power_sfp_module(struct link_params *params, + struct bnx2x_phy *phy, + u8 power) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power); + + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + bnx2x_8727_power_module(params->bp, phy, power); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + bnx2x_warpcore_power_module(params, phy, power); + break; + default: + break; + } +} +static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + u16 val = 0; + u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + struct bnx2x *bp = params->bp; + + u8 lane = bnx2x_get_warpcore_lane(phy, params); + /* This is a global register which controls all lanes */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + val &= ~(0xf << (lane << 2)); + + switch (edc_mode) { + case EDC_MODE_LINEAR: + case EDC_MODE_LIMITING: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT; + break; + case EDC_MODE_PASSIVE_DAC: + mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC; + break; + default: + break; + } + + val |= (mode << (lane << 2)); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val); + /* A must read */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + + +} + +static void bnx2x_set_limiting_mode(struct link_params *params, + struct bnx2x_phy *phy, + u16 edc_mode) +{ + switch (phy->type) { + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: + bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: + bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode); + break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: + bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode); + break; + } +} + +int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params) { struct bnx2x *bp = params->bp; u16 edc_mode; - u8 rc = 0; + int rc = 0; u32 val = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. 
@@ -5034,7 +8032,8 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", params->port); - + /* Power up module */ + bnx2x_power_sfp_module(params, phy, 1); if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); return -EINVAL; @@ -5046,12 +8045,11 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); - if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && - ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { - /* Shutdown SFP+ module */ + /* Check if need to power down the SFP+ module */ + if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) { DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n"); - bnx2x_8727_power_module(bp, phy, 0); + bnx2x_power_sfp_module(params, phy, 0); return rc; } } else { @@ -5059,18 +8057,12 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); } - /* power up the SFP module */ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) - bnx2x_8727_power_module(bp, phy, 1); - /* * Check and set limiting mode / LRM mode on 8726. On 8727 it * is done automatically */ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) - bnx2x_8726_set_limiting_mode(bp, phy, edc_mode); - else - bnx2x_8727_set_limiting_mode(bp, phy, edc_mode); + bnx2x_set_limiting_mode(params, phy, edc_mode); + /* * Enable transmit for this module if the module is approved, or * if unapproved modules should also enable the Tx laser @@ -5088,23 +8080,33 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, void bnx2x_handle_module_detect_int(struct link_params *params) { struct bnx2x *bp = params->bp; - struct bnx2x_phy *phy = ¶ms->phy[EXT_PHY1]; + struct bnx2x_phy *phy; u32 gpio_val; - u8 port = params->port; + u8 gpio_num, gpio_port; + if (CHIP_IS_E3(bp)) + phy = ¶ms->phy[INT_PHY]; + else + phy = ¶ms->phy[EXT_PHY1]; + + if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, + params->port, &gpio_num, &gpio_port) == + -EINVAL) { + DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n"); + return; + } /* Set valid module led off */ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); /* Get current gpio val reflecting module plugged in / out*/ - gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); + gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port); /* Call the handling function in case module is detected */ if (gpio_val == 0) { - - bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, + bnx2x_power_sfp_module(params, phy, 1); + bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, - port); - + gpio_port); if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) bnx2x_sfp_module_detection(phy, params); else @@ -5115,13 +8117,14 @@ void bnx2x_handle_module_detect_int(struct link_params *params) port_feature_config[params->port]. config)); - bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, + bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_SET, - port); + gpio_port); /* * Module was plugged out. 
* Disable transmit for this module */ + phy->media_type = ETH_PHY_NOT_PRESENT; if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) bnx2x_sfp_set_transmitter(params, phy, 0); @@ -5129,6 +8132,29 @@ void bnx2x_handle_module_detect_int(struct link_params *params) } /******************************************************************/ +/* Used by 8706 and 8727 */ +/******************************************************************/ +static void bnx2x_sfp_mask_fault(struct bnx2x *bp, + struct bnx2x_phy *phy, + u16 alarm_status_offset, + u16 alarm_ctrl_offset) +{ + u16 alarm_status, val; + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, alarm_status_offset, + &alarm_status); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, alarm_status_offset, + &alarm_status); + /* Mask or enable the fault event. */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val); + if (alarm_status & (1<<0)) + val &= ~(1<<0); + else + val |= (1<<0); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val); +} +/******************************************************************/ /* common BCM8706/BCM8726 PHY SECTION */ /******************************************************************/ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, @@ -5141,12 +8167,16 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "XGXS 8706/8726\n"); /* Clear RX Alarm*/ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &val2); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2); + + bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + /* clear LASI indication*/ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2); DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2); bnx2x_cl45_read(bp, phy, @@ -5173,6 +8203,17 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, bnx2x_ext_phy_resolve_fc(phy, params, vars); vars->duplex = DUPLEX_FULL; } + + /* Capture 10G link fault. Read twice to clear stale value. */ + if (vars->line_speed == SPEED_10000) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + if (val1 & (1<<0)) + vars->fault_detected = 1; + } + return link_up; } @@ -5186,6 +8227,10 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, u32 tx_en_mode; u16 cnt, val, tmp1; struct bnx2x *bp = params->bp; + + /* SFP+ PHY: Set flag to check for Tx error */ + vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ @@ -5228,7 +8273,11 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, 0x400); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + /* Arm LASI for link and Tx fault.
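bnx2x_sfp_mask_fault() above, like the 10G fault capture that follows it, reads the LASI status register twice back to back. A plausible reading (my inference from the idiom, not stated in the patch) is that these are latched bits: the first read returns and clears the value latched since the last read, the second returns live state. As a generic helper it would look like:

static u16 read_latched_status(struct bnx2x *bp, struct bnx2x_phy *phy,
			       u16 reg)
{
	u16 val;

	/* First read flushes the latched value, second one is current */
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, reg, &val);
	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, reg, &val);
	return val;
}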
*/ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3); } else { /* Force 1Gbps using autoneg with 1G advertisement */ @@ -5251,10 +8300,10 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x0400); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); } bnx2x_save_bcm_spirom_ver(bp, phy, params->port); @@ -5281,9 +8330,9 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, return 0; } -static u8 bnx2x_8706_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_8706_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { return bnx2x_8706_8726_read_status(phy, params, vars); } @@ -5358,15 +8407,16 @@ static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy, } -static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_8726_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; - u32 val; - u32 swap_val, swap_override, aeu_gpio_mask, offset; DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); + /* SFP+ PHY: Set flag to check for Tx error */ + vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); bnx2x_wait_reset_complete(bp, phy, params); @@ -5387,9 +8437,9 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x5); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400); } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & @@ -5415,14 +8465,14 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, * change */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, 0x400); } else { /* Default 10G. Set only LASI control */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1); } /* Set TX PreEmphasis if needed */ @@ -5443,30 +8493,6 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, phy->tx_preemphasis[1]); } - /* Set GPIO3 to trigger SFP+ module insertion/removal */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); - - /* The GPIO should be swapped if the swap register is set and active */ - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); - - /* Select function upon port-swap configuration */ - if (params->port == 0) { - offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; - aeu_gpio_mask = (swap_val && swap_override) ? - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 : - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0; - } else { - offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; - aeu_gpio_mask = (swap_val && swap_override) ?
- AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 : - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1; - } - val = REG_RD(bp, offset); - /* add GPIO3 to group */ - val |= aeu_gpio_mask; - REG_WR(bp, offset, val); return 0; } @@ -5548,9 +8574,9 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } -static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_8727_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { u32 tx_en_mode; u16 tmp1, val, mod_abs, tmp2; @@ -5559,18 +8585,24 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ + /* SFP+ PHY: Set flag to check for Tx error */ + vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; + bnx2x_wait_reset_complete(bp, phy, params); rx_alarm_ctrl_val = (1<<2) | (1<<5); - lasi_ctrl_val = 0x0004; + /* Should be 0x6 to enable XS on Tx side. */ + lasi_ctrl_val = 0x0006; DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); /* enable LASI */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, rx_alarm_ctrl_val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); /* * Initially configure MOD_ABS to interrupt when module is @@ -5590,6 +8622,9 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 0); + /* Make MOD_ABS give interrupt on change */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, &val); @@ -5612,7 +8647,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &tmp1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); /* Set option 1G speed */ if (phy->req_line_speed == SPEED_1000) { @@ -5730,7 +8765,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, /* Module is absent */ DP(NETIF_MSG_LINK, "MOD_ABS indication " "shows module is absent\n"); - + phy->media_type = ETH_PHY_NOT_PRESENT; /* * 1.
Set mod_abs to detect next module * presence event @@ -5752,7 +8787,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); } else { /* Module is present */ @@ -5781,7 +8816,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == @@ -5805,26 +8840,29 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; - u8 link_up = 0; + u8 link_up = 0, oc_port = params->port; u16 link_status = 0; u16 rx_alarm_status, lasi_ctrl, val1; /* If PHY is not initialized, do not check link status */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, &lasi_ctrl); if (!lasi_ctrl) return 0; - /* Check the LASI */ + /* Check the LASI on Rx */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); vars->line_speed = 0; DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", rx_alarm_status); + bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, + MDIO_PMA_LASI_TXCTRL); + bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1); + MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1); @@ -5843,8 +8881,10 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, &val1); if ((val1 & (1<<8)) == 0) { + if (!CHIP_IS_E1x(bp)) + oc_port = BP_PATH(bp) + (params->port << 1); DP(NETIF_MSG_LINK, "8727 Power fault has been detected" - " on port %d\n", params->port); + " on port %d\n", oc_port); netdev_err(bp->dev, "Error: Power fault on Port %d has" " been detected and the power to " "that SFP+ module has been removed" @@ -5852,11 +8892,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, " Please remove the SFP+ module and" " restart the system to clear this" " error.\n", - params->port); + oc_port); /* Disable all RX_ALARMs except for mod_abs */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); + MDIO_PMA_LASI_RXCTRL, (1<<5)); bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -5869,7 +8909,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, /* Clear RX alarm */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); + MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); return 0; } } /* Over current check */ @@ -5879,7 +8919,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_8727_handle_mod_abs(phy, params); /* Enable all mod_abs and link detection bits */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, ((1<<5) | (1<<2))); } DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n"); @@ -5915,6 +8955,20 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "port %x: External link is down\n", params->port); } + + /* Capture 10G link fault. 
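The Tx-fault capture here is where the new fault_detected flag gets set; it is consumed back in bnx2x_link_update(), which now refuses to report link up when the active PHY has a latched transmit fault:

	vars->link_up = (vars->phy_link_up &&
			 (ext_phy_link_up ||
			  SINGLE_MEDIA_DIRECT(params)) &&
			 (phy_vars[active_external_phy].fault_detected == 0));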
*/ + if (vars->line_speed == SPEED_10000) { + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_LASI_TXSTAT, &val1); + + if (val1 & (1<<0)) { + vars->fault_detected = 1; + } + } + if (link_up) { bnx2x_ext_phy_resolve_fc(phy, params, vars); vars->duplex = DUPLEX_FULL; @@ -5945,10 +8999,14 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; + + /* Enable/Disable PHY transmitter output */ + bnx2x_set_disable_pmd_transmit(params, phy, 1); + /* Disable Transmitter */ bnx2x_sfp_set_transmitter(params, phy, 0); /* Clear LASI */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0); } @@ -5958,111 +9016,106 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct link_params *params) { - u16 val, fw_ver1, fw_ver2, cnt, adj; + u16 val, fw_ver1, fw_ver2, cnt; + u8 port; struct bnx2x *bp = params->bp; - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = -1; + port = params->port; /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); if (val & 1) break; udelay(5); } if (cnt == 100) { DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n"); - bnx2x_save_spirom_version(bp, params->port, 0, + bnx2x_save_spirom_version(bp, port, 0, phy->ver_addr); return; } /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); if (val & 1) break; udelay(5); } if (cnt == 100) { DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n"); - bnx2x_save_spirom_version(bp, params->port, 0, + bnx2x_save_spirom_version(bp, port, 0, phy->ver_addr); return; } /* lower 16 bits of the register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); /* upper 16 bits of register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 
&fw_ver2); - bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1, + bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1, phy->ver_addr); } static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val, adj; - - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = -1; + u16 val; /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val); + MDIO_PMA_REG_8481_LINK_SIGNAL, &val); val &= 0xFE00; val |= 0x0092; bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val); + MDIO_PMA_REG_8481_LINK_SIGNAL, val); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK + adj, + MDIO_PMA_REG_8481_LED1_MASK, 0x80); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK + adj, + MDIO_PMA_REG_8481_LED2_MASK, 0x18); /* Select activity source by Tx and Rx, as suggested by PHY AE */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK + adj, + MDIO_PMA_REG_8481_LED3_MASK, 0x0006); /* Select the closest activity blink rate to that in 10/100/1000 */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_BLINK + adj, + MDIO_PMA_REG_8481_LED3_BLINK, 0); bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val); + MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val); + MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); /* 'Interrupt Mask' */ bnx2x_cl45_write(bp, phy, @@ -6070,12 +9123,19 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, 0xFFFB, 0xFFFD); } -static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val; + u16 tmp_req_line_speed; + + tmp_req_line_speed = phy->req_line_speed; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + if (phy->req_line_speed == SPEED_10000) + phy->req_line_speed = SPEED_AUTO_NEG; + /* * This phy uses the NIG latch mechanism since link indication * arrives through its LED4 and not via its LASI signal, so we @@ -6122,11 +9182,14 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, an_1000_val); - /* set 10 speed advertisement */ + /* set 100 speed advertisement */ if (((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { + (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) && + (phy->supported & + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full)))) { an_10_100_val |= (1<<7); /* Enable autoneg and restart autoneg for legacy speeds */ autoneg_val |= (1<<9 | 1<<12); @@ -6137,9 +9200,12 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, } /* set 10 speed advertisement */ if (((phy->req_line_speed == SPEED_AUTO_NEG) && - (phy->speed_cap_mask & - (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | - PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) && + (phy->supported & + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full)))) { 
an_10_100_val |= (1<<5); autoneg_val |= (1<<9 | 1<<12); if (phy->req_duplex == DUPLEX_FULL) @@ -6148,7 +9214,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, } /* Only 10/100 are allowed to work in FORCE mode */ - if (phy->req_line_speed == SPEED_100) { + if ((phy->req_line_speed == SPEED_100) && + (phy->supported & + (SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full))) { autoneg_val |= (1<<13); /* Enabled AUTO-MDIX when autoneg is disabled */ bnx2x_cl45_write(bp, phy, @@ -6156,7 +9225,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, (1<<15 | 1<<9 | 7<<0)); DP(NETIF_MSG_LINK, "Setting 100M force\n"); } - if (phy->req_line_speed == SPEED_10) { + if ((phy->req_line_speed == SPEED_10) && + (phy->supported & + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full))) { /* Enabled AUTO-MDIX when autoneg is disabled */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL, @@ -6179,10 +9251,10 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) || (phy->req_line_speed == SPEED_10000)) { - DP(NETIF_MSG_LINK, "Advertising 10G\n"); - /* Restart autoneg for 10G*/ + DP(NETIF_MSG_LINK, "Advertising 10G\n"); + /* Restart autoneg for 10G*/ - bnx2x_cl45_write(bp, phy, + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200); } else if (phy->req_line_speed != SPEED_10 && @@ -6195,12 +9267,14 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, params); + phy->req_line_speed = tmp_req_line_speed; + return 0; } -static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) +static int bnx2x_8481_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; /* Restore normal power mode*/ @@ -6215,33 +9289,200 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, return bnx2x_848xx_cmn_config_init(phy, params, vars); } -static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) + +#define PHY84833_HDSHK_WAIT 300 +static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + u32 idx; + u32 pair_swap; + u16 val; + u16 data; + struct bnx2x *bp = params->bp; + /* Do pair swap */ + + /* Check for configuration. 
*/ + pair_swap = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].xgbt_phy_cfg)) & + PORT_HW_CFG_RJ45_PAIR_SWAP_MASK; + + if (pair_swap == 0) + return 0; + + data = (u16)pair_swap; + + /* Write CMD_OPEN_OVERRIDE to STATUS reg */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_OPEN_OVERRIDE); + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if (val == PHY84833_CMD_OPEN_FOR_CMDS) + break; + msleep(1); + } + if (idx >= PHY84833_HDSHK_WAIT) { + DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n"); + return -EINVAL; + } + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG4, + data); + /* Issue pair swap command */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG0, + PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE); + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if ((val == PHY84833_CMD_COMPLETE_PASS) || + (val == PHY84833_CMD_COMPLETE_ERROR)) + break; + msleep(1); + } + if ((idx >= PHY84833_HDSHK_WAIT) || + (val == PHY84833_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "Pairswap: override failed.\n"); + return -EINVAL; + } + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_CLEAR_COMPLETE); + DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data); + return 0; +} + + +static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp, + u32 shmem_base_path[], + u32 chip_id) +{ + u32 reset_pin[2]; + u32 idx; + u8 reset_gpios; + if (CHIP_IS_E3(bp)) { + /* Assume that these will be GPIOs, not EPIOs. */ + for (idx = 0; idx < 2; idx++) { + /* Map config param to register bit. */ + reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].e3_cmn_pin_cfg)); + reset_pin[idx] = (reset_pin[idx] & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + reset_pin[idx] -= PIN_CFG_GPIO0_P0; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); + } else { + /* E2, look from diff place of shmem. 
*/ + for (idx = 0; idx < 2; idx++) { + reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] + + offsetof(struct shmem_region, + dev_info.port_hw_config[0].default_cfg)); + reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK; + reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0; + reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT; + reset_pin[idx] = (1 << reset_pin[idx]); + } + reset_gpios = (u8)(reset_pin[0] | reset_pin[1]); + } + + return reset_gpios; +} + +static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u8 reset_gpios; + u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + other_shmem_base_addr)); + + u32 shmem_base_path[2]; + shmem_base_path[0] = params->shmem_base; + shmem_base_path[1] = other_shmem_base_addr; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, + params->chip_id); + + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n", + reset_gpios); + + return 0; +} + +static int bnx2x_84833_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 chip_id) +{ + u8 reset_gpios; + + reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id); + + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW); + udelay(10); + bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH); + msleep(800); + DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n", + reset_gpios); + + return 0; +} + +#define PHY84833_CONSTANT_LATENCY 1193 +static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 port, initialize = 1; - u16 val, adj; + u16 val; u16 temp; - u32 actual_phy_selection, cms_enable; - u8 rc = 0; - - /* This is just for MDIO_CTL_REG_84823_MEDIA register. 
*/ - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = 3; + u32 actual_phy_selection, cms_enable, idx; + int rc = 0; msleep(1); - if (CHIP_IS_E2(bp)) + + if (!(CHIP_IS_E1(bp))) port = BP_PATH(bp); else port = params->port; - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, - port); + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + } else { + /* MDIO reset */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x8000); + /* Bring PHY out of super isolate mode */ + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); + val &= ~MDIO_84833_SUPER_ISOLATE; + bnx2x_cl45_write(bp, phy, + MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); + } + bnx2x_wait_reset_complete(bp, phy, params); + /* Wait for GPHY to come out of reset */ msleep(50); + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bnx2x_84833_pair_swap_cfg(phy, params, vars); + /* * BCM84823 requires that XGXS links up first @ 10G for normal behavior */ @@ -6254,14 +9495,20 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Set dual-media configuration according to configuration */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA + adj, &val); + MDIO_CTL_REG_84823_MEDIA, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | MDIO_CTL_REG_84823_MEDIA_LINE_MASK | MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK | MDIO_CTL_REG_84823_MEDIA_FIBER_1G); - val |= MDIO_CTL_REG_84823_CTRL_MAC_XFI | - MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L; + + if (CHIP_IS_E3(bp)) { + val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | + MDIO_CTL_REG_84823_MEDIA_LINE_MASK); + } else { + val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI | + MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L); + } actual_phy_selection = bnx2x_phy_selection(params); @@ -6287,28 +9534,90 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA + adj, val); + MDIO_CTL_REG_84823_MEDIA, val); DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); + /* AutogrEEEn */ + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + /* Ensure that f/w is ready */ + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if (val == PHY84833_CMD_OPEN_FOR_CMDS) + break; + usleep_range(1000, 1000); + } + if (idx >= PHY84833_HDSHK_WAIT) { + DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n"); + return -EINVAL; + } + + /* Select EEE mode */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG3, + 0x2); + + /* Set Idle and Latency */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG4, + PHY84833_CONSTANT_LATENCY + 1); + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_DATA3_REG, + PHY84833_CONSTANT_LATENCY + 1); + + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_DATA4_REG, + PHY84833_CONSTANT_LATENCY); + + /* Send EEE instruction to command register */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG0, + PHY84833_DIAG_CMD_SET_EEE_MODE); + + /* Ensure that the command has completed */ + for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) { + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + 
MDIO_84833_TOP_CFG_SCRATCH_REG2, &val); + if ((val == PHY84833_CMD_COMPLETE_PASS) || + (val == PHY84833_CMD_COMPLETE_ERROR)) + break; + usleep_range(1000, 1000); + } + if ((idx >= PHY84833_HDSHK_WAIT) || + (val == PHY84833_CMD_COMPLETE_ERROR)) { + DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n"); + return -EINVAL; + } + + /* Reset command handler */ + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_84833_TOP_CFG_SCRATCH_REG2, + PHY84833_CMD_CLEAR_COMPLETE); + } + if (initialize) rc = bnx2x_848xx_cmn_config_init(phy, params, vars); else bnx2x_save_848xx_spirom_version(phy, params); - cms_enable = REG_RD(bp, params->shmem_base + + /* 84833 PHY has a better feature and doesn't need to support this. */ + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + cms_enable = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info.port_hw_config[params->port].default_cfg)) & PORT_HW_CFG_ENABLE_CMS_MASK; - bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_USER_CTRL_REG, &val); - if (cms_enable) - val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; - else - val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; - bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_USER_CTRL_REG, val); - + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, &val); + if (cms_enable) + val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; + else + val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, val); + } return rc; } @@ -6318,20 +9627,16 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 val, val1, val2, adj; + u16 val, val1, val2; u8 link_up = 0; - /* Reg offset adjustment for 84833 */ - adj = 0; - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) - adj = -1; /* Check 10G-BaseT link status */ /* Check PMD signal ok */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 0xFFFA, &val1); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, &val2); DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); @@ -6403,9 +9708,10 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, return link_up; } -static u8 bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) + +static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) { - u8 status = 0; + int status = 0; u32 spirom_ver; spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F); status = bnx2x_format_ver(spirom_ver, str, len); @@ -6435,13 +9741,27 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u8 port; - if (CHIP_IS_E2(bp)) + u16 val16; + + if (!(CHIP_IS_E1(bp))) port = BP_PATH(bp); else port = params->port; - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); + + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { + bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, + MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + } else { + bnx2x_cl45_read(bp, phy, + MDIO_CTL_DEVAD, + 0x400f, &val16); + /* Put to low power mode on newer FW */ + if ((val16 & 0x303f) > 0x1009) + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x800); + } } static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, @@ -6449,11 +9769,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 val; + u8 port; + + if (!(CHIP_IS_E1(bp))) + port = BP_PATH(bp); + else + port = params->port; switch (mode) { 
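+	/*
+	 * The traces below report 'port', which is resolved above to the
+	 * path number (BP_PATH) on non-E1 devices rather than params->port.
+	 */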
case LED_MODE_OFF: - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", params->port); + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port); if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == SHARED_HW_CFG_LED_EXTPHY1) { @@ -6489,7 +9815,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, case LED_MODE_FRONT_PANEL_OFF: DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n", - params->port); + port); if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == SHARED_HW_CFG_LED_EXTPHY1) { @@ -6524,7 +9850,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, break; case LED_MODE_ON: - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", params->port); + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port); if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == SHARED_HW_CFG_LED_EXTPHY1) { @@ -6571,7 +9897,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, case LED_MODE_OPER: - DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", params->port); + DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port); if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) == SHARED_HW_CFG_LED_EXTPHY1) { @@ -6633,7 +9959,388 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, } break; } + + /* + * This is a workaround for E3+84833 until autoneg + * restart is fixed in f/w + */ + if (CHIP_IS_E3(bp)) { + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_GP2_STATUS_GP_2_1, &val); + } +} + +/******************************************************************/ +/* 54618SE PHY SECTION */ +/******************************************************************/ +static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u8 port; + u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp; + u32 cfg_pin; + + DP(NETIF_MSG_LINK, "54618SE cfg init\n"); + usleep_range(1000, 1000); + + /* This works with E3 only, no need to check the chip + before determining the port. */ + port = params->port; + + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin high to bring the GPHY out of reset. */ + bnx2x_set_cfg_pin(bp, cfg_pin, 1); + + /* wait for GPHY to reset */ + msleep(50); + + /* reset phy */ + bnx2x_cl22_write(bp, phy, + MDIO_PMA_REG_CTRL, 0x8000); + bnx2x_wait_reset_complete(bp, phy, params); + + /*wait for GPHY to reset */ + msleep(50); + + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + + /* Flip the signal detect polarity (set 0x1c.0x1e[8]). */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_AUTO_DET_MED); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD; + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + + /* Set up fc */ + /* Please refer to Table 28B-3 of 802.3ab-1999 spec. 
*/ + bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc); + fc_val = 0; + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) + fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + + if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) + fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; + + /* read all advertisement */ + bnx2x_cl22_read(bp, phy, + 0x09, + &an_1000_val); + + bnx2x_cl22_read(bp, phy, + 0x04, + &an_10_100_val); + + bnx2x_cl22_read(bp, phy, + MDIO_PMA_REG_CTRL, + &autoneg_val); + + /* Disable forced speed */ + autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13)); + an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) | + (1<<11)); + + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || + (phy->req_line_speed == SPEED_1000)) { + an_1000_val |= (1<<8); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_1000_val |= (1<<9); + DP(NETIF_MSG_LINK, "Advertising 1G\n"); + } else + an_1000_val &= ~((1<<8) | (1<<9)); + + bnx2x_cl22_write(bp, phy, + 0x09, + an_1000_val); + bnx2x_cl22_read(bp, phy, + 0x09, + &an_1000_val); + + /* set 100 speed advertisement */ + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) { + an_10_100_val |= (1<<7); + /* Enable autoneg and restart autoneg for legacy speeds */ + autoneg_val |= (1<<9 | 1<<12); + + if (phy->req_duplex == DUPLEX_FULL) + an_10_100_val |= (1<<8); + DP(NETIF_MSG_LINK, "Advertising 100M\n"); + } + + /* set 10 speed advertisement */ + if (((phy->req_line_speed == SPEED_AUTO_NEG) && + (phy->speed_cap_mask & + (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | + PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) { + an_10_100_val |= (1<<5); + autoneg_val |= (1<<9 | 1<<12); + if (phy->req_duplex == DUPLEX_FULL) + an_10_100_val |= (1<<6); + DP(NETIF_MSG_LINK, "Advertising 10M\n"); + } + + /* Only 10/100 are allowed to work in FORCE mode */ + if (phy->req_line_speed == SPEED_100) { + autoneg_val |= (1<<13); + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl22_write(bp, phy, + 0x18, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 100M force\n"); + } + if (phy->req_line_speed == SPEED_10) { + /* Enabled AUTO-MDIX when autoneg is disabled */ + bnx2x_cl22_write(bp, phy, + 0x18, + (1<<15 | 1<<9 | 7<<0)); + DP(NETIF_MSG_LINK, "Setting 10M force\n"); + } + + /* Check if we should turn on Auto-GrEEEn */ + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); + if (temp == MDIO_REG_GPHY_ID_54618SE) { + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + } else { + temp = 0; + DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); + } + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + MDIO_REG_GPHY_EEE_ADV); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, + (0x1 << 14) | MDIO_AN_DEVAD); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + temp); + } + + bnx2x_cl22_write(bp, phy, + 0x04, + an_10_100_val | fc_val); + + if (phy->req_duplex == DUPLEX_FULL) + autoneg_val |= (1<<8); + + bnx2x_cl22_write(bp, phy, + MDIO_PMA_REG_CTRL, autoneg_val); + + return 0; +} + +static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy, + struct 
link_params *params, u8 mode) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode); + switch (mode) { + case LED_MODE_FRONT_PANEL_OFF: + case LED_MODE_OFF: + case LED_MODE_OPER: + case LED_MODE_ON: + default: + break; + } + return; +} + +static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port; + + /* This works with E3 only, no need to check the chip + before determining the port. */ + port = params->port; + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & + PORT_HW_CFG_E3_PHY_RESET_MASK) >> + PORT_HW_CFG_E3_PHY_RESET_SHIFT; + + /* Drive pin low to put GPHY in reset. */ + bnx2x_set_cfg_pin(bp, cfg_pin, 0); +} + +static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u16 val; + u8 link_up = 0; + u16 legacy_status, legacy_speed; + + /* Get speed operation status */ + bnx2x_cl22_read(bp, phy, + 0x19, + &legacy_status); + DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); + + /* Read status to clear the PHY interrupt. */ + bnx2x_cl22_read(bp, phy, + MDIO_REG_INTR_STATUS, + &val); + + link_up = ((legacy_status & (1<<2)) == (1<<2)); + + if (link_up) { + legacy_speed = (legacy_status & (7<<8)); + if (legacy_speed == (7<<8)) { + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (6<<8)) { + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (5<<8)) { + vars->line_speed = SPEED_100; + vars->duplex = DUPLEX_FULL; + } + /* Omitting 100Base-T4 for now */ + else if (legacy_speed == (3<<8)) { + vars->line_speed = SPEED_100; + vars->duplex = DUPLEX_HALF; + } else if (legacy_speed == (2<<8)) { + vars->line_speed = SPEED_10; + vars->duplex = DUPLEX_FULL; + } else if (legacy_speed == (1<<8)) { + vars->line_speed = SPEED_10; + vars->duplex = DUPLEX_HALF; + } else /* Should not happen */ + vars->line_speed = 0; + + DP(NETIF_MSG_LINK, "Link is up in %dMbps," + " is_duplex_full= %d\n", vars->line_speed, + (vars->duplex == DUPLEX_FULL)); + + /* Check legacy speed AN resolution */ + bnx2x_cl22_read(bp, phy, + 0x01, + &val); + if (val & (1<<5)) + vars->link_status |= + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE; + bnx2x_cl22_read(bp, phy, + 0x06, + &val); + if ((val & (1<<0)) == 0) + vars->link_status |= + LINK_STATUS_PARALLEL_DETECTION_USED; + + DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", + vars->line_speed); + + /* Report whether EEE is resolved. */ + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val); + if (val == MDIO_REG_GPHY_ID_54618SE) { + if (vars->link_status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) + val = 0; + else { + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, + MDIO_AN_DEVAD); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + MDIO_REG_GPHY_EEE_RESOLVED); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_CL45_ADDR_REG, + (0x1 << 14) | MDIO_AN_DEVAD); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_CL45_DATA_REG, + &val); + } + DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val); + } + + bnx2x_ext_phy_resolve_fc(phy, params, vars); + } + return link_up; } + +static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + u16 val; + u32 umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+	DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
+
+	/* Enable master/slave manual mode and set to master */
+	/* mii write 9 [bits set 11 12] */
+	bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+	/* forced 1G and disable autoneg */
+	/* set val [mii read 0] */
+	/* set val [expr $val & [bits clear 6 12 13]] */
+	/* set val [expr $val | [bits set 6 8]] */
+	/* mii write 0 $val */
+	bnx2x_cl22_read(bp, phy, 0x00, &val);
+	val &= ~((1<<6) | (1<<12) | (1<<13));
+	val |= (1<<6) | (1<<8);
+	bnx2x_cl22_write(bp, phy, 0x00, val);
+
+	/* Set external loopback and Tx using 6dB coding */
+	/* mii write 0x18 7 */
+	/* set val [mii read 0x18] */
+	/* mii write 0x18 [expr $val | [bits set 10 15]] */
+	bnx2x_cl22_write(bp, phy, 0x18, 7);
+	bnx2x_cl22_read(bp, phy, 0x18, &val);
+	bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	/*
+	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
 /******************************************************************/
 /*			SFX7101 PHY SECTION			  */
 /******************************************************************/
@@ -6646,9 +10353,9 @@ static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
 			MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
 }

-static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
-				 struct link_params *params,
-				 struct link_vars *vars)
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
	u16 fw_ver1, fw_ver2, val;
	struct bnx2x *bp = params->bp;
@@ -6662,7 +10369,7 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
	bnx2x_wait_reset_complete(bp, phy, params);

	bnx2x_cl45_write(bp, phy,
-			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
	DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
	bnx2x_cl45_write(bp, phy,
			 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
@@ -6694,9 +10401,9 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
	u8 link_up;
	u16 val1, val2;
	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val2);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_STATUS, &val1);
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
	DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
	   val2, val1);
	bnx2x_cl45_read(bp, phy,
@@ -6721,8 +10428,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
	return link_up;
 }

-
-static u8 bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
 {
	if (*len < 5)
		return -EINVAL;
@@ -6800,9 +10506,8 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
 static struct bnx2x_phy phy_null = {
	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
	.addr		= 0,
-	.flags		= FLAGS_INIT_XGXS_FIRST,
	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
	.mdio_ctrl	= 0,
@@ -6827,9 +10532,8 @@ static struct bnx2x_phy phy_null = {
 static struct bnx2x_phy phy_serdes = {
	.type		= PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
	.addr		= 0xff,
-	.flags		= 0,
	.def_md_devad	= 0,
-	.reserved	= 0,
+	.flags		= 0,
	.rx_preemphasis	= {0xffff, 0xffff,
0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -6843,14 +10547,14 @@ static struct bnx2x_phy phy_serdes = { SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_UNSPECIFIED, + .media_type = ETH_PHY_BASE_T, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_init_serdes, + .config_init = (config_init_t)bnx2x_xgxs_config_init, .read_status = (read_status_t)bnx2x_link_settings_status, .link_reset = (link_reset_t)bnx2x_int_link_reset, .config_loopback = (config_loopback_t)NULL, @@ -6863,9 +10567,8 @@ static struct bnx2x_phy phy_serdes = { static struct bnx2x_phy phy_xgxs = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, .addr = 0xff, - .flags = 0, .def_md_devad = 0, - .reserved = 0, + .flags = 0, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -6880,14 +10583,14 @@ static struct bnx2x_phy phy_xgxs = { SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_UNSPECIFIED, + .media_type = ETH_PHY_CX4, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t)bnx2x_init_xgxs, + .config_init = (config_init_t)bnx2x_xgxs_config_init, .read_status = (read_status_t)bnx2x_link_settings_status, .link_reset = (link_reset_t)bnx2x_int_link_reset, .config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback, @@ -6896,13 +10599,49 @@ static struct bnx2x_phy phy_xgxs = { .set_link_led = (set_link_led_t)NULL, .phy_specific_func = (phy_specific_func_t)NULL }; +static struct bnx2x_phy phy_warpcore = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_HW_LOCK_REQUIRED, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_UNSPECIFIED, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)bnx2x_warpcore_config_init, + .read_status = (read_status_t)bnx2x_warpcore_read_status, + .link_reset = (link_reset_t)bnx2x_warpcore_link_reset, + .config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)bnx2x_warpcore_hw_reset, + .set_link_led = (set_link_led_t)NULL, + .phy_specific_func = (phy_specific_func_t)NULL +}; + static struct bnx2x_phy phy_7101 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, .addr = 0xff, - .flags = FLAGS_FAN_FAILURE_DET_REQ, .def_md_devad = 0, - .reserved = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -6930,9 +10669,8 @@ static struct bnx2x_phy phy_7101 = { static struct bnx2x_phy phy_8073 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, .addr = 0xff, - .flags = FLAGS_HW_LOCK_REQUIRED, .def_md_devad = 0, - .reserved = 0, + .flags = FLAGS_HW_LOCK_REQUIRED, .rx_preemphasis = {0xffff, 0xffff, 
0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -6943,7 +10681,7 @@ static struct bnx2x_phy phy_8073 = { SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_UNSPECIFIED, + .media_type = ETH_PHY_KR, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, @@ -6962,9 +10700,8 @@ static struct bnx2x_phy phy_8073 = { static struct bnx2x_phy phy_8705 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, .addr = 0xff, - .flags = FLAGS_INIT_XGXS_FIRST, .def_md_devad = 0, - .reserved = 0, + .flags = FLAGS_INIT_XGXS_FIRST, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -6991,9 +10728,8 @@ static struct bnx2x_phy phy_8705 = { static struct bnx2x_phy phy_8706 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, .addr = 0xff, - .flags = FLAGS_INIT_XGXS_FIRST, .def_md_devad = 0, - .reserved = 0, + .flags = FLAGS_INIT_XGXS_FIRST, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -7022,10 +10758,9 @@ static struct bnx2x_phy phy_8706 = { static struct bnx2x_phy phy_8726 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, .addr = 0xff, + .def_md_devad = 0, .flags = (FLAGS_HW_LOCK_REQUIRED | FLAGS_INIT_XGXS_FIRST), - .def_md_devad = 0, - .reserved = 0, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -7035,7 +10770,7 @@ static struct bnx2x_phy phy_8726 = { SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_SFP_FIBER, + .media_type = ETH_PHY_NOT_PRESENT, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, @@ -7055,9 +10790,8 @@ static struct bnx2x_phy phy_8726 = { static struct bnx2x_phy phy_8727 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, .addr = 0xff, - .flags = FLAGS_FAN_FAILURE_DET_REQ, .def_md_devad = 0, - .reserved = 0, + .flags = FLAGS_FAN_FAILURE_DET_REQ, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -7066,7 +10800,7 @@ static struct bnx2x_phy phy_8727 = { SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_SFP_FIBER, + .media_type = ETH_PHY_NOT_PRESENT, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, @@ -7085,10 +10819,9 @@ static struct bnx2x_phy phy_8727 = { static struct bnx2x_phy phy_8481 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, .addr = 0xff, + .def_md_devad = 0, .flags = FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL, - .def_md_devad = 0, - .reserved = 0, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -7122,10 +10855,9 @@ static struct bnx2x_phy phy_8481 = { static struct bnx2x_phy phy_84823 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, .addr = 0xff, + .def_md_devad = 0, .flags = FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL, - .def_md_devad = 0, - .reserved = 0, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -7159,16 +10891,13 @@ static struct bnx2x_phy phy_84823 = { static struct bnx2x_phy phy_84833 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, .addr = 0xff, + .def_md_devad = 0, .flags = FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL, - .def_md_devad = 0, - .reserved = 0, .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 
0xffff, 0xffff}, .mdio_ctrl = 0, - .supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | + .supported = (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full | @@ -7188,11 +10917,44 @@ static struct bnx2x_phy phy_84833 = { .link_reset = (link_reset_t)bnx2x_848x3_link_reset, .config_loopback = (config_loopback_t)NULL, .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, - .hw_reset = (hw_reset_t)NULL, + .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, .phy_specific_func = (phy_specific_func_t)NULL }; +static struct bnx2x_phy phy_54618se = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, + .addr = 0xff, + .def_md_devad = 0, + .flags = FLAGS_INIT_XGXS_FIRST, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + /* req_duplex = */0, + /* rsrv = */0, + .config_init = (config_init_t)bnx2x_54618se_config_init, + .read_status = (read_status_t)bnx2x_54618se_read_status, + .link_reset = (link_reset_t)bnx2x_54618se_link_reset, + .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback, + .format_fw_ver = (format_fw_ver_t)NULL, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; /*****************************************************************/ /* */ /* Populate the phy according. Main function: bnx2x_populate_phy */ @@ -7259,8 +11021,8 @@ static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base, return ext_phy_config; } -static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, - struct bnx2x_phy *phy) +static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, + struct bnx2x_phy *phy) { u32 phy_addr; u32 chip_id; @@ -7269,22 +11031,105 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, dev_info.port_feature_config[port].link_config)) & PORT_FEATURE_CONNECTED_SWITCH_MASK); chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16; - switch (switch_cfg) { - case SWITCH_CFG_1G: + DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id); + if (USES_WARPCORE(bp)) { + u32 serdes_net_if; phy_addr = REG_RD(bp, - NIG_REG_SERDES0_CTRL_PHY_ADDR + - port * 0x10); - *phy = phy_serdes; - break; - case SWITCH_CFG_10G: - phy_addr = REG_RD(bp, - NIG_REG_XGXS0_CTRL_PHY_ADDR + - port * 0x18); - *phy = phy_xgxs; - break; - default: - DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); - return -EINVAL; + MISC_REG_WC0_CTRL_PHY_ADDR); + *phy = phy_warpcore; + if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3) + phy->flags |= FLAGS_4_PORT_MODE; + else + phy->flags &= ~FLAGS_4_PORT_MODE; + /* Check Dual mode */ + serdes_net_if = (REG_RD(bp, shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[port].default_cfg)) & + PORT_HW_CFG_NET_SERDES_IF_MASK); + /* + * Set the appropriate supported and flags indications per + * interface type of the chip + */ + switch (serdes_net_if) { + case PORT_HW_CFG_NET_SERDES_IF_SGMII: + phy->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_BASE_T; + break; + case PORT_HW_CFG_NET_SERDES_IF_XFI: + phy->media_type = ETH_PHY_XFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_SFI: + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + phy->media_type = ETH_PHY_SFP_FIBER; + break; + case PORT_HW_CFG_NET_SERDES_IF_KR: + phy->media_type = ETH_PHY_KR; + phy->supported &= (SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_DXGXS: + phy->media_type = ETH_PHY_KR; + phy->flags |= FLAGS_WC_DUAL_MODE; + phy->supported &= (SUPPORTED_20000baseMLD2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + case PORT_HW_CFG_NET_SERDES_IF_KR2: + phy->media_type = ETH_PHY_KR; + phy->flags |= FLAGS_WC_DUAL_MODE; + phy->supported &= (SUPPORTED_20000baseKR2_Full | + SUPPORTED_FIBRE | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause); + break; + default: + DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", + serdes_net_if); + break; + } + + /* + * Enable MDC/MDIO work-around for E3 A0 since free running MDC + * was not set as expected. For B0, ECO will be enabled so there + * won't be an issue there + */ + if (CHIP_REV(bp) == CHIP_REV_Ax) + phy->flags |= FLAGS_MDC_MDIO_WA; + } else { + switch (switch_cfg) { + case SWITCH_CFG_1G: + phy_addr = REG_RD(bp, + NIG_REG_SERDES0_CTRL_PHY_ADDR + + port * 0x10); + *phy = phy_serdes; + break; + case SWITCH_CFG_10G: + phy_addr = REG_RD(bp, + NIG_REG_XGXS0_CTRL_PHY_ADDR + + port * 0x18); + *phy = phy_xgxs; + break; + default: + DP(NETIF_MSG_LINK, "Invalid switch_cfg\n"); + return -EINVAL; + } } phy->addr = (u8)phy_addr; phy->mdio_ctrl = bnx2x_get_emac_base(bp, @@ -7302,12 +11147,12 @@ static u8 bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, return 0; } -static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, - u8 phy_index, - u32 shmem_base, - u32 shmem2_base, - u8 port, - struct bnx2x_phy *phy) +static int bnx2x_populate_ext_phy(struct bnx2x *bp, + u8 phy_index, + u32 shmem_base, + u32 shmem2_base, + u8 port, + struct bnx2x_phy *phy) { u32 ext_phy_config, phy_type, config2; u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH; @@ -7336,6 +11181,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, *phy = phy_8727; phy->flags |= FLAGS_NOC; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1; *phy = phy_8727; @@ -7349,6 +11195,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: *phy = phy_84833; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: + *phy = phy_54618se; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: *phy = phy_7101; break; @@ -7410,10 +11259,10 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, return 0; } -static u8 bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 
shmem_base, - u32 shmem2_base, u8 port, struct bnx2x_phy *phy) +static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base, + u32 shmem2_base, u8 port, struct bnx2x_phy *phy) { - u8 status = 0; + int status = 0; phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN; if (phy_index == INT_PHY) return bnx2x_populate_int_phy(bp, shmem_base, port, phy); @@ -7527,10 +11376,10 @@ u32 bnx2x_phy_selection(struct link_params *params) } -u8 bnx2x_phy_probe(struct link_params *params) +int bnx2x_phy_probe(struct link_params *params) { u8 phy_index, actual_phy_idx, link_cfg_idx; - u32 phy_config_swapped; + u32 phy_config_swapped, sync_offset, media_types; struct bnx2x *bp = params->bp; struct bnx2x_phy *phy; params->num_phys = 0; @@ -7567,6 +11416,26 @@ u8 bnx2x_phy_probe(struct link_params *params) if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) break; + sync_offset = params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].media_type); + media_types = REG_RD(bp, sync_offset); + + /* + * Update media type for non-PMF sync only for the first time + * In case the media type changes afterwards, it will be updated + * using the update_status function + */ + if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx))) == 0) { + media_types |= ((phy->media_type & + PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) << + (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * + actual_phy_idx)); + } + REG_WR(bp, sync_offset, media_types); + bnx2x_phy_def_cfg(params, phy, phy_index); params->num_phys++; } @@ -7575,77 +11444,10 @@ u8 bnx2x_phy_probe(struct link_params *params) return 0; } -static void set_phy_vars(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u8 actual_phy_idx, phy_index, link_cfg_idx; - u8 phy_config_swapped = params->multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED; - for (phy_index = INT_PHY; phy_index < params->num_phys; - phy_index++) { - link_cfg_idx = LINK_CONFIG_IDX(phy_index); - actual_phy_idx = phy_index; - if (phy_config_swapped) { - if (phy_index == EXT_PHY1) - actual_phy_idx = EXT_PHY2; - else if (phy_index == EXT_PHY2) - actual_phy_idx = EXT_PHY1; - } - params->phy[actual_phy_idx].req_flow_ctrl = - params->req_flow_ctrl[link_cfg_idx]; - - params->phy[actual_phy_idx].req_line_speed = - params->req_line_speed[link_cfg_idx]; - - params->phy[actual_phy_idx].speed_cap_mask = - params->speed_cap_mask[link_cfg_idx]; - - params->phy[actual_phy_idx].req_duplex = - params->req_duplex[link_cfg_idx]; - - DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x," - " speed_cap_mask %x\n", - params->phy[actual_phy_idx].req_flow_ctrl, - params->phy[actual_phy_idx].req_line_speed, - params->phy[actual_phy_idx].speed_cap_mask); - } -} - -u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) +void bnx2x_init_bmac_loopback(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; - DP(NETIF_MSG_LINK, "Phy Initialization started\n"); - DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", - params->req_line_speed[0], params->req_flow_ctrl[0]); - DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", - params->req_line_speed[1], params->req_flow_ctrl[1]); - vars->link_status = 0; - vars->phy_link_up = 0; - vars->link_up = 0; - vars->line_speed = 0; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_NONE; - vars->phy_flags = 0; - - /* disable attentions */ - bnx2x_bits_dis(bp, 
NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); - - bnx2x_emac_init(params, vars); - - if (params->num_phys == 0) { - DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); - return -EINVAL; - } - set_phy_vars(params); - - DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); - if (params->loopback_mode == LOOPBACK_BMAC) { - vars->link_up = 1; vars->line_speed = SPEED_10000; vars->duplex = DUPLEX_FULL; @@ -7660,9 +11462,12 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_bmac_enable(params, vars, 1); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} - } else if (params->loopback_mode == LOOPBACK_EMAC) { - +void bnx2x_init_emac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; vars->link_up = 1; vars->line_speed = SPEED_1000; vars->duplex = DUPLEX_FULL; @@ -7676,29 +11481,81 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_emac_enable(params, vars, 1); bnx2x_emac_program(params, vars); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} - } else if ((params->loopback_mode == LOOPBACK_XGXS) || - (params->loopback_mode == LOOPBACK_EXT_PHY)) { +void bnx2x_init_xmac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 1; + if (!params->req_line_speed[0]) + vars->line_speed = SPEED_10000; + else + vars->line_speed = params->req_line_speed[0]; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_XMAC; + vars->phy_flags = PHY_XGXS_FLAG; + /* + * Set WC to loopback mode since link is required to provide clock + * to the XMAC in 20G mode + */ + if (vars->line_speed == SPEED_20000) { + bnx2x_set_aer_mmd(params, ¶ms->phy[0]); + bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); + params->phy[INT_PHY].config_loopback( + ¶ms->phy[INT_PHY], + params); + } + bnx2x_xmac_enable(params, vars, 1); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} + +void bnx2x_init_umac_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 1; + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_UMAC; + vars->phy_flags = PHY_XGXS_FLAG; + bnx2x_umac_enable(params, vars, 1); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); +} + +void bnx2x_init_xgxs_loopback(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; vars->link_up = 1; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->duplex = DUPLEX_FULL; - if (params->req_line_speed[0] == SPEED_1000) { + if (params->req_line_speed[0] == SPEED_1000) vars->line_speed = SPEED_1000; - vars->mac_type = MAC_TYPE_EMAC; - } else { + else vars->line_speed = SPEED_10000; - vars->mac_type = MAC_TYPE_BMAC; - } + if (!USES_WARPCORE(bp)) bnx2x_xgxs_deassert(params); - bnx2x_link_initialize(params, vars); + bnx2x_link_initialize(params, vars); - if (params->req_line_speed[0] == SPEED_1000) { + if (params->req_line_speed[0] == SPEED_1000) { + if (USES_WARPCORE(bp)) + bnx2x_umac_enable(params, vars, 0); + else { bnx2x_emac_program(params, vars); bnx2x_emac_enable(params, vars, 0); - } else + } + } else { + if (USES_WARPCORE(bp)) + bnx2x_xmac_enable(params, vars, 0); + else bnx2x_bmac_enable(params, vars, 0); + } + if 
(params->loopback_mode == LOOPBACK_XGXS) { /* set 10G XGXS loopback */ params->phy[INT_PHY].config_loopback( @@ -7718,24 +11575,76 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) } REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - bnx2x_set_led(params, vars, - LED_MODE_OPER, vars->line_speed); - } else - /* No loopback */ - { - if (params->switch_cfg == SWITCH_CFG_10G) - bnx2x_xgxs_deassert(params); - else - bnx2x_serdes_deassert(bp, params->port); + bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); +} +int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + DP(NETIF_MSG_LINK, "Phy Initialization started\n"); + DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", + params->req_line_speed[0], params->req_flow_ctrl[0]); + DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n", + params->req_line_speed[1], params->req_flow_ctrl[1]); + vars->link_status = 0; + vars->phy_link_up = 0; + vars->link_up = 0; + vars->line_speed = 0; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_NONE; + vars->phy_flags = 0; + + /* disable attentions */ + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); + + bnx2x_emac_init(params, vars); + + if (params->num_phys == 0) { + DP(NETIF_MSG_LINK, "No phy found for initialization !!\n"); + return -EINVAL; + } + set_phy_vars(params, vars); + + DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); + switch (params->loopback_mode) { + case LOOPBACK_BMAC: + bnx2x_init_bmac_loopback(params, vars); + break; + case LOOPBACK_EMAC: + bnx2x_init_emac_loopback(params, vars); + break; + case LOOPBACK_XMAC: + bnx2x_init_xmac_loopback(params, vars); + break; + case LOOPBACK_UMAC: + bnx2x_init_umac_loopback(params, vars); + break; + case LOOPBACK_XGXS: + case LOOPBACK_EXT_PHY: + bnx2x_init_xgxs_loopback(params, vars); + break; + default: + if (!CHIP_IS_E3(bp)) { + if (params->switch_cfg == SWITCH_CFG_10G) + bnx2x_xgxs_deassert(params); + else + bnx2x_serdes_deassert(bp, params->port); + } bnx2x_link_initialize(params, vars); msleep(30); bnx2x_link_int_enable(params); + break; } return 0; } -u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, - u8 reset_ext_phy) + +int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, + u8 reset_ext_phy) { struct bnx2x *bp = params->bp; u8 phy_index, port = params->port, clear_latch_ind = 0; @@ -7753,14 +11662,19 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); /* disable nig egress interface */ - REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); - REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); + if (!CHIP_IS_E3(bp)) { + REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); + } /* Stop BigMac rx */ - bnx2x_bmac_rx_disable(bp, port); - + if (!CHIP_IS_E3(bp)) + bnx2x_bmac_rx_disable(bp, port); + else + bnx2x_xmac_disable(params); /* disable emac */ - REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); msleep(10); /* The PHY reset is controlled by GPIO 1 @@ -7796,21 +11710,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); /* disable nig ingress interface */ 
- REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); - REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); - REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); - REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); + if (!CHIP_IS_E3(bp)) { + REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); + REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0); + } vars->link_up = 0; + vars->phy_flags = 0; return 0; } /****************************************************************************/ /* Common function */ /****************************************************************************/ -static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 chip_id) +static int bnx2x_8073_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 chip_id) { struct bnx2x_phy phy[PORT_MAX]; struct bnx2x_phy *phy_blk[PORT_MAX]; @@ -7826,14 +11741,14 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, for (port = PORT_MAX - 1; port >= PORT_0; port--) { u32 shmem_base, shmem2_base; /* In E2, same phy is using for port0 of the two paths */ - if (CHIP_IS_E2(bp)) { - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - port_of_path = 0; - } else { + if (CHIP_IS_E1x(bp)) { shmem_base = shmem_base_path[0]; shmem2_base = shmem2_base_path[0]; port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; } /* Extract the ext phy address for the port */ @@ -7877,10 +11792,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* PART2 - Download firmware to both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { - if (CHIP_IS_E2(bp)) - port_of_path = 0; - else + if (CHIP_IS_E1x(bp)) port_of_path = port; + else + port_of_path = 0; DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", phy_blk[port]->addr); @@ -7933,10 +11848,10 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, } return 0; } -static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 chip_id) +static int bnx2x_8726_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 chip_id) { u32 val; s8 port; @@ -7954,12 +11869,12 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base, shmem2_base; /* In E2, same phy is using for port0 of the two paths */ - if (CHIP_IS_E2(bp)) { - shmem_base = shmem_base_path[port]; - shmem2_base = shmem2_base_path[port]; - } else { + if (CHIP_IS_E1x(bp)) { shmem_base = shmem_base_path[0]; shmem2_base = shmem2_base_path[0]; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; } /* Extract the ext phy address for the port */ if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base, @@ -8027,10 +11942,11 @@ static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base, break; } } -static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, - u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 chip_id) + +static int bnx2x_8727_common_init_phy(struct bnx2x *bp, + u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 chip_id) { s8 port, reset_gpio; u32 swap_val, swap_override; @@ -8067,14 +11983,14 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base, shmem2_base; /* In E2, same phy is using for port0 of the two paths */ - if (CHIP_IS_E2(bp)) { - shmem_base = shmem_base_path[port]; - shmem2_base = 
shmem2_base_path[port]; - port_of_path = 0; - } else { + if (CHIP_IS_E1x(bp)) { shmem_base = shmem_base_path[0]; shmem2_base = shmem2_base_path[0]; port_of_path = port; + } else { + shmem_base = shmem_base_path[port]; + shmem2_base = shmem2_base_path[port]; + port_of_path = 0; } /* Extract the ext phy address for the port */ @@ -8109,25 +12025,29 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, } /* PART2 - Download firmware to both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { - if (CHIP_IS_E2(bp)) - port_of_path = 0; - else + if (CHIP_IS_E1x(bp)) port_of_path = port; + else + port_of_path = 0; DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n", phy_blk[port]->addr); if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port], port_of_path)) return -EINVAL; + /* Disable PHY transmitter output */ + bnx2x_cl45_write(bp, phy_blk[port], + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_DISABLE, 1); } return 0; } -static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], - u32 shmem2_base_path[], u8 phy_index, - u32 ext_phy_type, u32 chip_id) +static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u8 phy_index, + u32 ext_phy_type, u32 chip_id) { - u8 rc = 0; + int rc = 0; switch (ext_phy_type) { case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: @@ -8135,7 +12055,7 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], shmem2_base_path, phy_index, chip_id); break; - + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC: rc = bnx2x_8727_common_init_phy(bp, shmem_base_path, @@ -8152,6 +12072,13 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], shmem2_base_path, phy_index, chip_id); break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: + /* + * GPIO3's are linked, and so both need to be toggled + * to obtain required 2us pulse. 
+ */ + rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id); + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: rc = -EINVAL; break; @@ -8169,15 +12096,21 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], return rc; } -u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], - u32 shmem2_base_path[], u32 chip_id) +int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u32 chip_id) { - u8 rc = 0; - u32 phy_ver; - u8 phy_index; + int rc = 0; + u32 phy_ver, val; + u8 phy_index = 0; u32 ext_phy_type, ext_phy_config; + bnx2x_set_mdio_clk(bp, chip_id, PORT_0); + bnx2x_set_mdio_clk(bp, chip_id, PORT_1); DP(NETIF_MSG_LINK, "Begin common phy init\n"); - + if (CHIP_IS_E3(bp)) { + /* Enable EPIO */ + val = REG_RD(bp, MISC_REG_GEN_PURP_HWG); + REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1); + } /* Check if common init was already done */ phy_ver = REG_RD(bp, shmem_base_path[0] + offsetof(struct shmem_region, @@ -8203,6 +12136,135 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], return rc; } +static void bnx2x_check_over_curr(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + u32 cfg_pin; + u8 port = params->port; + u32 pin_val; + + cfg_pin = (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) & + PORT_HW_CFG_E3_OVER_CURRENT_MASK) >> + PORT_HW_CFG_E3_OVER_CURRENT_SHIFT; + + /* Ignore check if no external input PIN available */ + if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0) + return; + + if (!pin_val) { + if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) { + netdev_err(bp->dev, "Error: Power fault on Port %d has" + " been detected and the power to " + "that SFP+ module has been removed" + " to prevent failure of the card." + " Please remove the SFP+ module and" + " restart the system to clear this" + " error.\n", + params->port); + vars->phy_flags |= PHY_OVER_CURRENT_FLAG; + } + } else + vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; +} + +static void bnx2x_analyze_link_error(struct link_params *params, + struct link_vars *vars, u32 lss_status) +{ + struct bnx2x *bp = params->bp; + /* Compare new value with previous value */ + u8 led_mode; + u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; + + /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n", + vars->link_up, + half_open_conn, lss_status);*/ + + if ((lss_status ^ half_open_conn) == 0) + return; + + /* If values differ */ + DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, + half_open_conn, lss_status); + + /* + * a. Update shmem->link_status accordingly + * b. Update link_vars->link_up + */ + if (lss_status) { + vars->link_status &= ~LINK_STATUS_LINK_UP; + vars->link_up = 0; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; + /* + * Set LED mode to off since the PHY doesn't know about these + * errors + */ + led_mode = LED_MODE_OFF; + } else { + vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_up = 1; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; + led_mode = LED_MODE_OPER; + } + /* Update the LED according to the link state */ + bnx2x_set_led(params, vars, led_mode, SPEED_10000); + + /* Update link status in the shared memory */ + bnx2x_update_mng(params, vars->link_status); + + /* C. 
Trigger General Attention */
+	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+	bnx2x_notify_link_changed(bp);
+}
+
+static void bnx2x_check_half_open_conn(struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 lss_status = 0;
+	u32 mac_base;
+	/* Do nothing unless the physical link is up at 10G */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return;
+
+	if (!CHIP_IS_E3(bp) &&
+	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) {
+		/* Check E1X / E2 BMAC */
+		u32 lss_status_reg;
+		u32 wb_data[2];
+		mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+		/* Read BIGMAC_REGISTER_RX_LSS_STATUS */
+		if (CHIP_IS_E2(bp))
+			lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+		else
+			lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+		lss_status = (wb_data[0] > 0);
+
+		bnx2x_analyze_link_error(params, vars, lss_status);
+	}
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp;
+	if (!params) /* cannot even log without params->bp */
+		return;
+	bp = params->bp;
+	/* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x
+	   RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed,
+	   REG_RD(bp, MISC_REG_RESET_REG_2)); */
+	bnx2x_check_half_open_conn(params, vars);
+	if (CHIP_IS_E3(bp))
+		bnx2x_check_over_curr(params, vars);
+}
+
 u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
 {
 	u8 phy_index;
@@ -8245,7 +12307,15 @@ u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
 void bnx2x_hw_reset_phy(struct link_params *params)
 {
 	u8 phy_index;
-	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	struct bnx2x *bp = params->bp;
+	bnx2x_update_mng(params, 0);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
 	     phy_index++) {
 		if (params->phy[phy_index].hw_reset) {
 			params->phy[phy_index].hw_reset(
@@ -8255,3 +12325,72 @@ void bnx2x_hw_reset_phy(struct link_params *params)
 		}
 	}
 }
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port)
+{
+	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+	u32 val;
+	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+	if (CHIP_IS_E3(bp)) {
+		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+					      shmem_base,
+					      port,
+					      &gpio_num,
+					      &gpio_port) != 0)
+			return;
+	} else {
+		struct bnx2x_phy phy;
+		for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+		     phy_index++) {
+			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+					       shmem2_base, port, &phy)
+			    != 0) {
+				DP(NETIF_MSG_LINK, "populate phy failed\n");
+				return;
+			}
+			if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+				gpio_num = MISC_REGISTERS_GPIO_3;
+				gpio_port = port;
+				break;
+			}
+		}
+	}
+
+	if (gpio_num == 0xff)
+		return;
+
+	/* Set GPIO3 to trigger SFP+ module insertion/removal */
+	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	gpio_port ^= (swap_val && swap_override);
+
+	vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+		(gpio_num + (gpio_port << 2));
+
+	sync_offset = shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].aeu_int_mask);
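bnx2x_check_half_open_conn() above samples the BMAC RX LSS status and hands it to bnx2x_analyze_link_error(), which acts only when the sample differs from the state latched in phy_flags. A compilable userspace model of that edge-detection step; the function name and demo values are mine, only the flag bit mirrors the new bnx2x_link.h definition:

#include <stdint.h>
#include <stdio.h>

#define PHY_HALF_OPEN_CONN_FLAG (1 << 3)	/* same bit as bnx2x_link.h */

static int lss_changed(uint8_t *phy_flags, uint32_t lss_status)
{
	uint32_t prev = (*phy_flags & PHY_HALF_OPEN_CONN_FLAG) ? 1 : 0;
	uint32_t curr = lss_status ? 1 : 0;

	if ((curr ^ prev) == 0)
		return 0;		/* no transition, nothing to do */

	/* latch the new state so the next periodic tick is a no-op */
	if (curr)
		*phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
	else
		*phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
	return 1;		/* caller refreshes link_status, LED and MCP */
}

int main(void)
{
	uint8_t flags = 0;
	uint32_t samples[] = { 0, 1, 1, 0 };

	for (unsigned int i = 0; i < 4; i++)
		printf("lss=%u -> %s\n", (unsigned)samples[i],
		       lss_changed(&flags, samples[i]) ? "event" : "idle");
	return 0;
}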
REG_WR(bp, sync_offset, vars->aeu_int_mask); + + DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n", + gpio_num, gpio_port, vars->aeu_int_mask); + + if (port == 0) + offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; + else + offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0; + + /* Open appropriate AEU for interrupts */ + aeu_mask = REG_RD(bp, offset); + aeu_mask |= vars->aeu_int_mask; + REG_WR(bp, offset, aeu_mask); + + /* Enable the GPIO to trigger interrupt */ + val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN); + val |= 1 << (gpio_num + (gpio_port << 2)); + REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); +} diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h index 92f36b6950d..6a7708d5da3 100644 --- a/drivers/net/bnx2x/bnx2x_link.h +++ b/drivers/net/bnx2x/bnx2x_link.h @@ -33,12 +33,13 @@ #define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH #define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE +#define NET_SERDES_IF_XFI 1 +#define NET_SERDES_IF_SFI 2 +#define NET_SERDES_IF_KR 3 +#define NET_SERDES_IF_DXGXS 4 + #define SPEED_AUTO_NEG 0 -#define SPEED_12000 12000 -#define SPEED_12500 12500 -#define SPEED_13000 13000 -#define SPEED_15000 15000 -#define SPEED_16000 16000 +#define SPEED_20000 20000 #define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 #define SFP_EEPROM_VENDOR_NAME_SIZE 16 @@ -46,6 +47,12 @@ #define SFP_EEPROM_VENDOR_OUI_SIZE 3 #define SFP_EEPROM_PART_NO_ADDR 0x28 #define SFP_EEPROM_PART_NO_SIZE 16 +#define SFP_EEPROM_REVISION_ADDR 0x38 +#define SFP_EEPROM_REVISION_SIZE 4 +#define SFP_EEPROM_SERIAL_ADDR 0x44 +#define SFP_EEPROM_SERIAL_SIZE 16 +#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ +#define SFP_EEPROM_DATE_SIZE 6 #define PWR_FLT_ERR_MSG_LEN 250 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ @@ -62,25 +69,26 @@ #define SINGLE_MEDIA(params) (params->num_phys == 2) /* Dual Media board contains two external phy with different media */ #define DUAL_MEDIA(params) (params->num_phys == 3) + +#define FW_PARAM_PHY_ADDR_MASK 0x000000FF +#define FW_PARAM_PHY_TYPE_MASK 0x0000FF00 +#define FW_PARAM_MDIO_CTRL_MASK 0xFFFF0000 #define FW_PARAM_MDIO_CTRL_OFFSET 16 +#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \ + FW_PARAM_PHY_ADDR_MASK) +#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \ + FW_PARAM_PHY_TYPE_MASK) +#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \ + FW_PARAM_MDIO_CTRL_MASK) >> \ + FW_PARAM_MDIO_CTRL_OFFSET) #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) -#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170 -#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0 - -#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250 -#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0 - -#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10 -#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90 - -#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50 -#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250 #define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170 #define PFC_BRB_FULL_LB_XON_THRESHOLD 250 +#define MAXVAL(a, b) (((a) > (b)) ? 
(a) : (b)) /***********************************************************/ /* Structs */ /***********************************************************/ @@ -121,8 +129,8 @@ struct bnx2x_phy { /* Loaded during init */ u8 addr; - - u8 flags; + u8 def_md_devad; + u16 flags; /* Require HW lock */ #define FLAGS_HW_LOCK_REQUIRED (1<<0) /* No Over-Current detection */ @@ -131,11 +139,13 @@ struct bnx2x_phy { #define FLAGS_FAN_FAILURE_DET_REQ (1<<2) /* Initialize first the XGXS and only then the phy itself */ #define FLAGS_INIT_XGXS_FIRST (1<<3) +#define FLAGS_WC_DUAL_MODE (1<<4) +#define FLAGS_4_PORT_MODE (1<<5) #define FLAGS_REARM_LATCH_SIGNAL (1<<6) #define FLAGS_SFP_NOT_APPROVED (1<<7) +#define FLAGS_MDC_MDIO_WA (1<<8) +#define FLAGS_DUMMY_READ (1<<9) - u8 def_md_devad; - u8 reserved; /* preemphasis values for the rx side */ u16 rx_preemphasis[4]; @@ -153,6 +163,8 @@ struct bnx2x_phy { #define ETH_PHY_XFP_FIBER 0x2 #define ETH_PHY_DA_TWINAX 0x3 #define ETH_PHY_BASE_T 0x4 +#define ETH_PHY_KR 0xf0 +#define ETH_PHY_CX4 0xf1 #define ETH_PHY_NOT_PRESENT 0xff /* The address in which version is located*/ @@ -238,6 +250,8 @@ struct link_params { #define FEATURE_CONFIG_PFC_ENABLED (1<<1) #define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) +#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) +#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) /* Will be populated during common init */ struct bnx2x_phy phy[MAX_PHYS]; @@ -257,11 +271,19 @@ struct link_params { /* Output parameters */ struct link_vars { u8 phy_flags; +#define PHY_XGXS_FLAG (1<<0) +#define PHY_SGMII_FLAG (1<<1) +#define PHY_PHYSICAL_LINK_FLAG (1<<2) +#define PHY_HALF_OPEN_CONN_FLAG (1<<3) +#define PHY_OVER_CURRENT_FLAG (1<<4) +#define PHY_TX_ERROR_CHECK_FLAG (1<<5) u8 mac_type; #define MAC_TYPE_NONE 0 #define MAC_TYPE_EMAC 1 #define MAC_TYPE_BMAC 2 +#define MAC_TYPE_UMAC 3 +#define MAC_TYPE_XMAC 4 u8 phy_link_up; /* internal phy link indication */ u8 link_up; @@ -274,45 +296,52 @@ struct link_vars { /* The same definitions as the shmem parameter */ u32 link_status; + u8 fault_detected; + u8 rsrv1; + u16 periodic_flags; +#define PERIODIC_FLAGS_LINK_EVENT 0x0001 + + u32 aeu_int_mask; }; /***********************************************************/ /* Functions */ /***********************************************************/ -u8 bnx2x_phy_init(struct link_params *input, struct link_vars *output); +int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); /* Reset the link. 
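The FW_PARAM_* accessors added a little above pack a PHY address, a PHY type and an MDIO control word into one u32. A quick self-checking round trip with arbitrary values; note that FW_PARAM_PHY_TYPE() returns the masked field still at bit position 8, so FW_PARAM_SET() expects the type pre-shifted:

#include <stdint.h>
#include <assert.h>

#define FW_PARAM_PHY_ADDR_MASK    0x000000FF
#define FW_PARAM_PHY_TYPE_MASK    0x0000FF00
#define FW_PARAM_MDIO_CTRL_MASK   0xFFFF0000
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_PHY_ADDR(p)  ((p) & FW_PARAM_PHY_ADDR_MASK)
#define FW_PARAM_PHY_TYPE(p)  ((p) & FW_PARAM_PHY_TYPE_MASK)
#define FW_PARAM_MDIO_CTRL(p) (((p) & FW_PARAM_MDIO_CTRL_MASK) >> \
			       FW_PARAM_MDIO_CTRL_OFFSET)
#define FW_PARAM_SET(addr, type, mdio) \
	((addr) | (type) | (mdio) << FW_PARAM_MDIO_CTRL_OFFSET)

int main(void)
{
	uint32_t p = FW_PARAM_SET(0x11, 0x0800, 0xabcd); /* = 0xabcd0811 */

	assert(FW_PARAM_PHY_ADDR(p) == 0x11);
	assert(FW_PARAM_PHY_TYPE(p) == 0x0800);	/* masked, not shifted down */
	assert(FW_PARAM_MDIO_CTRL(p) == 0xabcd);
	return 0;
}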
Should be called when driver or interface goes down Before calling phy firmware upgrade, the reset_ext_phy should be set to 0 */ -u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, - u8 reset_ext_phy); +int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, + u8 reset_ext_phy); /* bnx2x_link_update should be called upon link interrupt */ -u8 bnx2x_link_update(struct link_params *input, struct link_vars *output); +int bnx2x_link_update(struct link_params *params, struct link_vars *vars); /* use the following phy functions to read/write from external_phy In order to use it to read/write internal phy registers, use DEFAULT_PHY_DEV_ADDR as devad, and (_bank + (_addr & 0xf)) as the register */ -u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 *ret_val); +int bnx2x_phy_read(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 *ret_val); + +int bnx2x_phy_write(struct link_params *params, u8 phy_addr, + u8 devad, u16 reg, u16 val); -u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, - u8 devad, u16 reg, u16 val); /* Reads the link_status from the shmem, and update the link vars accordingly */ void bnx2x_link_status_update(struct link_params *input, struct link_vars *output); /* returns string representing the fw_version of the external phy */ -u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, - u8 *version, u16 len); +int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, + u8 *version, u16 len); /* Set/Unset the led Basically, the CLC takes care of the led for the link, but in case one needs to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to blink the led, and LED_MODE_OFF to set the led off.*/ -u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars, - u8 mode, u32 speed); +int bnx2x_set_led(struct link_params *params, + struct link_vars *vars, u8 mode, u32 speed); #define LED_MODE_OFF 0 #define LED_MODE_ON 1 #define LED_MODE_OPER 2 @@ -324,12 +353,12 @@ void bnx2x_handle_module_detect_int(struct link_params *params); /* Get the actual link status. 
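Many of the prototypes in this header change their return type from u8 to int. A plausible motivation, shown as a tiny standalone program: these functions hand back negative errnos such as -EINVAL, which an unsigned 8-bit return silently folds into a small positive number, breaking sign checks at the callers.

#include <stdio.h>
#include <errno.h>

int main(void)
{
	unsigned char rc_u8 = (unsigned char)-EINVAL;	/* -22 wraps to 234 */
	int rc_int = -EINVAL;

	printf("u8 return: %u (sign lost, rc < 0 can never match)\n",
	       (unsigned)rc_u8);
	printf("int return: %d\n", rc_int);
	return 0;
}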
In case it returns 0, link is up, otherwise link is down*/ -u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars, - u8 is_serdes); +int bnx2x_test_link(struct link_params *params, struct link_vars *vars, + u8 is_serdes); /* One-time initialization for external phy after power up */ -u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], - u32 shmem2_base_path[], u32 chip_id); +int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], + u32 shmem2_base_path[], u32 chip_id); /* Reset the external PHY using GPIO */ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); @@ -338,9 +367,9 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); /* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ -u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf); +int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf); void bnx2x_hw_reset_phy(struct link_params *params); @@ -352,11 +381,28 @@ u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 bnx2x_phy_selection(struct link_params *params); /* Probe the phys on board, and populate them in "params" */ -u8 bnx2x_phy_probe(struct link_params *params); +int bnx2x_phy_probe(struct link_params *params); + /* Checks if fan failure detection is required on one of the phys on board */ u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base, u8 port); + + +/* DCBX structs */ + +/* Number of maximum COS per chip */ +#define DCBX_E2E3_MAX_NUM_COS (2) +#define DCBX_E3B0_MAX_NUM_COS_PORT0 (6) +#define DCBX_E3B0_MAX_NUM_COS_PORT1 (3) +#define DCBX_E3B0_MAX_NUM_COS ( \ + MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \ + DCBX_E3B0_MAX_NUM_COS_PORT1)) + +#define DCBX_MAX_NUM_COS ( \ + MAXVAL(DCBX_E3B0_MAX_NUM_COS, \ + DCBX_E2E3_MAX_NUM_COS)) + /* PFC port configuration params */ struct bnx2x_nig_brb_pfc_port_params { /* NIG */ @@ -364,8 +410,8 @@ struct bnx2x_nig_brb_pfc_port_params { u32 llfc_out_en; u32 llfc_enable; u32 pkt_priority_to_cos; - u32 rx_cos0_priority_mask; - u32 rx_cos1_priority_mask; + u8 num_of_rx_cos_priority_mask; + u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; u32 llfc_high_priority_classes; u32 llfc_low_priority_classes; /* BRB */ @@ -373,27 +419,74 @@ struct bnx2x_nig_brb_pfc_port_params { u32 cos1_pauseable; }; + +/* ETS port configuration params */ +struct bnx2x_ets_bw_params { + u8 bw; +}; + +struct bnx2x_ets_sp_params { + /** + * valid values are 0 - 5. 0 is highest strict priority. + * There can't be two COS's with the same pri. 
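The DCBX_*_MAX_NUM_COS constants above chain two MAXVAL() comparisons. Spelled out as a self-checking snippet using the same values as the header, E3B0 port 0 dominates, so the per-COS arrays are sized for 6 entries:

#include <assert.h>

#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
#define DCBX_E2E3_MAX_NUM_COS		(2)
#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
#define DCBX_E3B0_MAX_NUM_COS \
	(MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, DCBX_E3B0_MAX_NUM_COS_PORT1))
#define DCBX_MAX_NUM_COS \
	(MAXVAL(DCBX_E3B0_MAX_NUM_COS, DCBX_E2E3_MAX_NUM_COS))

int main(void)
{
	assert(DCBX_E3B0_MAX_NUM_COS == 6);
	assert(DCBX_MAX_NUM_COS == 6);
	return 0;
}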
+ */ + u8 pri; +}; + +enum bnx2x_cos_state { + bnx2x_cos_state_strict = 0, + bnx2x_cos_state_bw = 1, +}; + +struct bnx2x_ets_cos_params { + enum bnx2x_cos_state state ; + union { + struct bnx2x_ets_bw_params bw_params; + struct bnx2x_ets_sp_params sp_params; + } params; +}; + +struct bnx2x_ets_params { + u8 num_of_cos; /* Number of valid COS entries*/ + struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; +}; + /** * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB * when link is already up */ -void bnx2x_update_pfc(struct link_params *params, +int bnx2x_update_pfc(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *pfc_params); /* Used to configure the ETS to disable */ -void bnx2x_ets_disabled(struct link_params *params); +int bnx2x_ets_disabled(struct link_params *params, + struct link_vars *vars); /* Used to configure the ETS to BW limited */ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, const u32 cos1_bw); /* Used to configure the ETS to strict */ -u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); +int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); + +/* Configure the COS to ETS according to BW and SP settings.*/ +int bnx2x_ets_e3b0_config(const struct link_params *params, + const struct link_vars *vars, + const struct bnx2x_ets_params *ets_params); /* Read pfc statistic*/ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, u32 pfc_frames_sent[2], u32 pfc_frames_received[2]); +void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars, + u32 chip_id, u32 shmem_base, u32 shmem2_base, + u8 port); + +int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, + struct link_params *params); + +void bnx2x_period_func(struct link_params *params, struct link_vars *vars); + #endif /* BNX2X_LINK_H */ diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 4b70311a11e..15070911154 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -39,6 +39,7 @@ #include <linux/mii.h> #include <linux/if_vlan.h> #include <net/ip.h> +#include <net/ipv6.h> #include <net/tcp.h> #include <net/checksum.h> #include <net/ip6_checksum.h> @@ -49,13 +50,14 @@ #include <linux/zlib.h> #include <linux/io.h> #include <linux/stringify.h> +#include <linux/vmalloc.h> -#define BNX2X_MAIN #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" #include "bnx2x_cmn.h" #include "bnx2x_dcb.h" +#include "bnx2x_sp.h" #include <linux/firmware.h> #include "bnx2x_fw_file_hdr.h" @@ -73,12 +75,14 @@ #define TX_TIMEOUT (5*HZ) static char version[] __devinitdata = - "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver " + "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("Eliezer Tamir"); MODULE_DESCRIPTION("Broadcom NetXtreme II " - "BCM57710/57711/57711E/57712/57712E Driver"); + "BCM57710/57711/57711E/" + "57712/57712_MF/57800/57800_MF/57810/57810_MF/" + "57840/57840_MF Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FW_FILE_NAME_E1); @@ -99,9 +103,11 @@ static int disable_tpa; module_param(disable_tpa, int, 0); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); +#define INT_MODE_INTx 1 +#define INT_MODE_MSI 2 static int int_mode; module_param(int_mode, int, 0); -MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X " +MODULE_PARM_DESC(int_mode, " 
Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); static int dropless_fc; @@ -120,37 +126,87 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Default debug msglevel"); -static struct workqueue_struct *bnx2x_wq; -#ifdef BCM_CNIC -static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01}; -#endif + +struct workqueue_struct *bnx2x_wq; enum bnx2x_board_type { BCM57710 = 0, - BCM57711 = 1, - BCM57711E = 2, - BCM57712 = 3, - BCM57712E = 4 + BCM57711, + BCM57711E, + BCM57712, + BCM57712_MF, + BCM57800, + BCM57800_MF, + BCM57810, + BCM57810_MF, + BCM57840, + BCM57840_MF }; /* indexed by board_type, above */ static struct { char *name; } board_info[] __devinitdata = { - { "Broadcom NetXtreme II BCM57710 XGb" }, - { "Broadcom NetXtreme II BCM57711 XGb" }, - { "Broadcom NetXtreme II BCM57711E XGb" }, - { "Broadcom NetXtreme II BCM57712 XGb" }, - { "Broadcom NetXtreme II BCM57712E XGb" } + { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, + { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, + { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, + { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, + { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " + "Ethernet Multi Function"} }; +#ifndef PCI_DEVICE_ID_NX2_57710 +#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711 +#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711 +#endif +#ifndef PCI_DEVICE_ID_NX2_57711E +#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E +#endif +#ifndef PCI_DEVICE_ID_NX2_57712 +#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712 +#endif +#ifndef PCI_DEVICE_ID_NX2_57712_MF +#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57800 +#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800 +#endif +#ifndef PCI_DEVICE_ID_NX2_57800_MF +#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57810 +#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810 +#endif +#ifndef PCI_DEVICE_ID_NX2_57810_MF +#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF +#endif +#ifndef PCI_DEVICE_ID_NX2_57840 +#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840 +#endif +#ifndef PCI_DEVICE_ID_NX2_57840_MF +#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF +#endif static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 }, - { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, + { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, { 0 } }; @@ -167,48 +223,6 @@ 
static inline void __storm_memset_dma_mapping(struct bnx2x *bp, REG_WR(bp, addr + 4, U64_HI(mapping)); } -static inline void __storm_memset_fill(struct bnx2x *bp, - u32 addr, size_t size, u32 val) -{ - int i; - for (i = 0; i < size/4; i++) - REG_WR(bp, addr + (i * 4), val); -} - -static inline void storm_memset_ustats_zero(struct bnx2x *bp, - u8 port, u16 stat_id) -{ - size_t size = sizeof(struct ustorm_per_client_stats); - - u32 addr = BAR_USTRORM_INTMEM + - USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); - - __storm_memset_fill(bp, addr, size, 0); -} - -static inline void storm_memset_tstats_zero(struct bnx2x *bp, - u8 port, u16 stat_id) -{ - size_t size = sizeof(struct tstorm_per_client_stats); - - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); - - __storm_memset_fill(bp, addr, size, 0); -} - -static inline void storm_memset_xstats_zero(struct bnx2x *bp, - u8 port, u16 stat_id) -{ - size_t size = sizeof(struct xstorm_per_client_stats); - - u32 addr = BAR_XSTRORM_INTMEM + - XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id); - - __storm_memset_fill(bp, addr, size, 0); -} - - static inline void storm_memset_spq_addr(struct bnx2x *bp, dma_addr_t mapping, u16 abs_fid) { @@ -218,103 +232,6 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp, __storm_memset_dma_mapping(bp, addr, mapping); } -static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid) -{ - REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov); -} - -static inline void storm_memset_func_cfg(struct bnx2x *bp, - struct tstorm_eth_function_common_config *tcfg, - u16 abs_fid) -{ - size_t size = sizeof(struct tstorm_eth_function_common_config); - - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)tcfg); -} - -static inline void storm_memset_xstats_flags(struct bnx2x *bp, - struct stats_indication_flags *flags, - u16 abs_fid) -{ - size_t size = sizeof(struct stats_indication_flags); - - u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)flags); -} - -static inline void storm_memset_tstats_flags(struct bnx2x *bp, - struct stats_indication_flags *flags, - u16 abs_fid) -{ - size_t size = sizeof(struct stats_indication_flags); - - u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)flags); -} - -static inline void storm_memset_ustats_flags(struct bnx2x *bp, - struct stats_indication_flags *flags, - u16 abs_fid) -{ - size_t size = sizeof(struct stats_indication_flags); - - u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)flags); -} - -static inline void storm_memset_cstats_flags(struct bnx2x *bp, - struct stats_indication_flags *flags, - u16 abs_fid) -{ - size_t size = sizeof(struct stats_indication_flags); - - u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid); - - __storm_memset_struct(bp, addr, size, (u32 *)flags); -} - -static inline void storm_memset_xstats_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) -{ - u32 addr = BAR_XSTRORM_INTMEM + - XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); - - __storm_memset_dma_mapping(bp, addr, mapping); -} - -static inline void storm_memset_tstats_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) -{ - u32 addr = BAR_TSTRORM_INTMEM + - TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); - - __storm_memset_dma_mapping(bp, 
addr, mapping); -} - -static inline void storm_memset_ustats_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) -{ - u32 addr = BAR_USTRORM_INTMEM + - USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); - - __storm_memset_dma_mapping(bp, addr, mapping); -} - -static inline void storm_memset_cstats_addr(struct bnx2x *bp, - dma_addr_t mapping, u16 abs_fid) -{ - u32 addr = BAR_CSTRORM_INTMEM + - CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid); - - __storm_memset_dma_mapping(bp, addr, mapping); -} - static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, u16 pf_id) { @@ -359,45 +276,6 @@ static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, REG_WR16(bp, addr, eq_prod); } -static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, - u16 fw_sb_id, u8 sb_index, - u8 ticks) -{ - - int index_offset = CHIP_IS_E2(bp) ? - offsetof(struct hc_status_block_data_e2, index_data) : - offsetof(struct hc_status_block_data_e1x, index_data); - u32 addr = BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + - index_offset + - sizeof(struct hc_index_data)*sb_index + - offsetof(struct hc_index_data, timeout); - REG_WR8(bp, addr, ticks); - DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n", - port, fw_sb_id, sb_index, ticks); -} -static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, - u16 fw_sb_id, u8 sb_index, - u8 disable) -{ - u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); - int index_offset = CHIP_IS_E2(bp) ? - offsetof(struct hc_status_block_data_e2, index_data) : - offsetof(struct hc_status_block_data_e1x, index_data); - u32 addr = BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + - index_offset + - sizeof(struct hc_index_data)*sb_index + - offsetof(struct hc_index_data, flags); - u16 flags = REG_RD16(bp, addr); - /* clear and set */ - flags &= ~HC_INDEX_DATA_HC_ENABLED; - flags |= enable_flag; - REG_WR16(bp, addr, flags); - DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n", - port, fw_sb_id, sb_index, disable); -} - /* used only at init * locking is done by mcp */ @@ -491,13 +369,6 @@ static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, } -const u32 dmae_reg_go_c[] = { - DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, - DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, - DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, - DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 -}; - /* copy command into DMAE command memory and set DMAE command go */ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) { @@ -578,7 +449,11 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); - /* lock the dmae channel */ + /* + * Lock the dmae channel. Disable BHs to prevent a dead-lock + * as long as this code is called both from syscall context and + * from ndo_set_rx_mode() flow that may be called from BH. 
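The new comment explains why the DMAE channel lock is taken with spin_lock_bh(): the same path runs from process context and from ndo_set_rx_mode() in BH, so plain spin_lock() could deadlock against itself. A loose userspace analogue of the surrounding post-and-poll flow, with a pthread mutex standing in for the spinlock and names mirroring the driver only informally:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dmae_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int wb_comp;

static void post_dmae(void)
{
	wb_comp = 1;	/* in the driver, hardware sets this on completion */
}

static int issue_dmae_with_comp(void)
{
	pthread_mutex_lock(&dmae_lock);	/* one channel, one user at a time */
	wb_comp = 0;			/* reset completion */
	post_dmae();			/* kick the command */
	while (!wb_comp)		/* the driver polls with a timeout */
		;
	pthread_mutex_unlock(&dmae_lock);
	return 0;
}

int main(void)
{
	printf("dmae rc %d\n", issue_dmae_with_comp());
	return 0;
}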
+ */ spin_lock_bh(&bp->dmae_lock); /* reset completion */ @@ -833,9 +708,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp) return rc; } -static void bnx2x_fw_dump(struct bnx2x *bp) +void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) { - u32 addr; + u32 addr, val; u32 mark, offset; __be32 data[9]; int word; @@ -844,6 +719,14 @@ static void bnx2x_fw_dump(struct bnx2x *bp) BNX2X_ERR("NO MCP - can not dump\n"); return; } + netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n", + (bp->common.bc_ver & 0xff0000) >> 16, + (bp->common.bc_ver & 0xff00) >> 8, + (bp->common.bc_ver & 0xff)); + + val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); + if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) + printk("%s" "MCP PC at 0x%x\n", lvl, val); if (BP_PATH(bp) == 0) trace_shmem_base = bp->common.shmem_base; @@ -853,9 +736,9 @@ static void bnx2x_fw_dump(struct bnx2x *bp) mark = REG_RD(bp, addr); mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH) + ((mark + 0x3) & ~0x3) - 0x08000000; - pr_err("begin fw dump (mark 0x%x)\n", mark); + printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark); - pr_err(""); + printk("%s", lvl); for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) { for (word = 0; word < 8; word++) data[word] = htonl(REG_RD(bp, offset + 4*word)); @@ -868,7 +751,12 @@ static void bnx2x_fw_dump(struct bnx2x *bp) data[8] = 0x0; pr_cont("%s", (char *)data); } - pr_err("end of fw dump\n"); + printk("%s" "end of fw dump\n", lvl); +} + +static inline void bnx2x_fw_dump(struct bnx2x *bp) +{ + bnx2x_fw_dump_lvl(bp, KERN_ERR); } void bnx2x_panic_dump(struct bnx2x *bp) @@ -879,6 +767,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) int func = BP_FUNC(bp); #ifdef BNX2X_STOP_ON_ERROR u16 start = 0, end = 0; + u8 cos; #endif bp->stats_state = STATS_STATE_DISABLED; @@ -889,9 +778,9 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* Indices */ /* Common */ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)" - " spq_prod_idx(0x%x)\n", - bp->def_idx, bp->def_att_idx, - bp->attn_state, bp->spq_prod_idx); + " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n", + bp->def_idx, bp->def_att_idx, bp->attn_state, + bp->spq_prod_idx, bp->stats_counter); BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n", bp->def_status_blk->atten_status_block.attn_bits, bp->def_status_blk->atten_status_block.attn_bits_ack, @@ -908,15 +797,17 @@ void bnx2x_panic_dump(struct bnx2x *bp) CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + i*sizeof(u32)); - pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) " + pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) " "pf_id(0x%x) vnic_id(0x%x) " - "vf_id(0x%x) vf_valid (0x%x)\n", + "vf_id(0x%x) vf_valid (0x%x) " + "state(0x%x)\n", sp_sb_data.igu_sb_id, sp_sb_data.igu_seg_id, sp_sb_data.p_func.pf_id, sp_sb_data.p_func.vnic_id, sp_sb_data.p_func.vf_id, - sp_sb_data.p_func.vf_valid); + sp_sb_data.p_func.vf_valid, + sp_sb_data.state); for_each_eth_queue(bp, i) { @@ -925,15 +816,16 @@ void bnx2x_panic_dump(struct bnx2x *bp) struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; struct hc_status_block_sm *hc_sm_p = - CHIP_IS_E2(bp) ? - sb_data_e2.common.state_machine : - sb_data_e1x.common.state_machine; + CHIP_IS_E1x(bp) ? + sb_data_e1x.common.state_machine : + sb_data_e2.common.state_machine; struct hc_index_data *hc_index_p = - CHIP_IS_E2(bp) ? - sb_data_e2.index_data : - sb_data_e1x.index_data; - int data_size; + CHIP_IS_E1x(bp) ? 
+ sb_data_e1x.index_data : + sb_data_e2.index_data; + u8 data_size, cos; u32 *sb_data_p; + struct bnx2x_fp_txdata txdata; /* Rx */ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)" @@ -948,14 +840,20 @@ void bnx2x_panic_dump(struct bnx2x *bp) le16_to_cpu(fp->fp_hc_idx)); /* Tx */ - BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" - " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" - " *tx_cons_sb(0x%x)\n", - i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, - fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); + for_each_cos_in_tx_queue(fp, cos) + { + txdata = fp->txdata[cos]; + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)" + " tx_bd_prod(0x%x) tx_bd_cons(0x%x)" + " *tx_cons_sb(0x%x)\n", + i, txdata.tx_pkt_prod, + txdata.tx_pkt_cons, txdata.tx_bd_prod, + txdata.tx_bd_cons, + le16_to_cpu(*txdata.tx_cons_sb)); + } - loop = CHIP_IS_E2(bp) ? - HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X; + loop = CHIP_IS_E1x(bp) ? + HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2; /* host sb data */ @@ -975,35 +873,39 @@ void bnx2x_panic_dump(struct bnx2x *bp) fp->sb_index_values[j], (j == loop - 1) ? ")" : " "); /* fw sb data */ - data_size = CHIP_IS_E2(bp) ? - sizeof(struct hc_status_block_data_e2) : - sizeof(struct hc_status_block_data_e1x); + data_size = CHIP_IS_E1x(bp) ? + sizeof(struct hc_status_block_data_e1x) : + sizeof(struct hc_status_block_data_e2); data_size /= sizeof(u32); - sb_data_p = CHIP_IS_E2(bp) ? - (u32 *)&sb_data_e2 : - (u32 *)&sb_data_e1x; + sb_data_p = CHIP_IS_E1x(bp) ? + (u32 *)&sb_data_e1x : + (u32 *)&sb_data_e2; /* copy sb data in here */ for (j = 0; j < data_size; j++) *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) + j * sizeof(u32)); - if (CHIP_IS_E2(bp)) { - pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " - "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", + if (!CHIP_IS_E1x(bp)) { + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", sb_data_e2.common.p_func.pf_id, sb_data_e2.common.p_func.vf_id, sb_data_e2.common.p_func.vf_valid, sb_data_e2.common.p_func.vnic_id, - sb_data_e2.common.same_igu_sb_1b); + sb_data_e2.common.same_igu_sb_1b, + sb_data_e2.common.state); } else { - pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) " - "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n", + pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) " + "vnic_id(0x%x) same_igu_sb_1b(0x%x) " + "state(0x%x)\n", sb_data_e1x.common.p_func.pf_id, sb_data_e1x.common.p_func.vf_id, sb_data_e1x.common.p_func.vf_valid, sb_data_e1x.common.p_func.vnic_id, - sb_data_e1x.common.same_igu_sb_1b); + sb_data_e1x.common.same_igu_sb_1b, + sb_data_e1x.common.state); } /* SB_SMs data */ @@ -1067,23 +969,31 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* Tx */ for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + for_each_cos_in_tx_queue(fp, cos) { + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; + + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); + end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); + for (j = start; j != end; j = TX_BD(j + 1)) { + struct sw_tx_bd *sw_bd = + &txdata->tx_buf_ring[j]; + + BNX2X_ERR("fp%d: txdata %d, " + "packet[%x]=[%p,%x]\n", + i, cos, j, sw_bd->skb, + sw_bd->first_bd); + } - start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); - end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); - for (j = start; j != end; j = TX_BD(j + 1)) { - struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j]; - - BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n", - i, j, sw_bd->skb, sw_bd->first_bd); - } - - start = TX_BD(fp->tx_bd_cons 
- 10); - end = TX_BD(fp->tx_bd_cons + 254); - for (j = start; j != end; j = TX_BD(j + 1)) { - u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j]; + start = TX_BD(txdata->tx_bd_cons - 10); + end = TX_BD(txdata->tx_bd_cons + 254); + for (j = start; j != end; j = TX_BD(j + 1)) { + u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j]; - BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n", - i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]); + BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=" + "[%x:%x:%x:%x]\n", + i, cos, j, tx_bd[0], tx_bd[1], + tx_bd[2], tx_bd[3]); + } } } #endif @@ -1092,6 +1002,373 @@ void bnx2x_panic_dump(struct bnx2x *bp) BNX2X_ERR("end crash dump -----------------\n"); } +/* + * FLR Support for E2 + * + * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW + * initialization. + */ +#define FLR_WAIT_USEC 10000 /* 10 miliseconds */ +#define FLR_WAIT_INTERAVAL 50 /* usec */ +#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */ + +struct pbf_pN_buf_regs { + int pN; + u32 init_crd; + u32 crd; + u32 crd_freed; +}; + +struct pbf_pN_cmd_regs { + int pN; + u32 lines_occup; + u32 lines_freed; +}; + +static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp, + struct pbf_pN_buf_regs *regs, + u32 poll_count) +{ + u32 init_crd, crd, crd_start, crd_freed, crd_freed_start; + u32 cur_cnt = poll_count; + + crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed); + crd = crd_start = REG_RD(bp, regs->crd); + init_crd = REG_RD(bp, regs->init_crd); + + DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); + DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); + + while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) < + (init_crd - crd_start))) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERAVAL); + crd = REG_RD(bp, regs->crd); + crd_freed = REG_RD(bp, regs->crd_freed); + } else { + DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n", + regs->pN, crd); + DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n", + regs->pN, crd_freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); +} + +static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp, + struct pbf_pN_cmd_regs *regs, + u32 poll_count) +{ + u32 occup, to_free, freed, freed_start; + u32 cur_cnt = poll_count; + + occup = to_free = REG_RD(bp, regs->lines_occup); + freed = freed_start = REG_RD(bp, regs->lines_freed); + + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); + + while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) { + if (cur_cnt--) { + udelay(FLR_WAIT_INTERAVAL); + occup = REG_RD(bp, regs->lines_occup); + freed = REG_RD(bp, regs->lines_freed); + } else { + DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n", + regs->pN); + DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", + regs->pN, occup); + DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", + regs->pN, freed); + break; + } + } + DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n", + poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN); +} + +static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, + u32 expected, u32 poll_count) +{ + u32 cur_cnt = poll_count; + u32 val; + + while ((val = REG_RD(bp, reg)) != expected && cur_cnt--) + udelay(FLR_WAIT_INTERAVAL); + + return val; +} + +static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, + char *msg, u32 poll_cnt) 
+{ + u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); + if (val != 0) { + BNX2X_ERR("%s usage count=%d\n", msg, val); + return 1; + } + return 0; +} + +static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp) +{ + /* adjust polling timeout */ + if (CHIP_REV_IS_EMUL(bp)) + return FLR_POLL_CNT * 2000; + + if (CHIP_REV_IS_FPGA(bp)) + return FLR_POLL_CNT * 120; + + return FLR_POLL_CNT; +} + +static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count) +{ + struct pbf_pN_cmd_regs cmd_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q0 : + PBF_REG_P0_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q0 : + PBF_REG_P0_TQ_LINES_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_Q1 : + PBF_REG_P1_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_Q1 : + PBF_REG_P1_TQ_LINES_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_OCCUPANCY_LB_Q : + PBF_REG_P4_TQ_OCCUPANCY, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_TQ_LINES_FREED_CNT_LB_Q : + PBF_REG_P4_TQ_LINES_FREED_CNT} + }; + + struct pbf_pN_buf_regs buf_regs[] = { + {0, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q0 : + PBF_REG_P0_INIT_CRD , + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q0 : + PBF_REG_P0_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : + PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, + {1, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_Q1 : + PBF_REG_P1_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_Q1 : + PBF_REG_P1_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : + PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, + {4, (CHIP_IS_E3B0(bp)) ? + PBF_REG_INIT_CRD_LB_Q : + PBF_REG_P4_INIT_CRD, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_CREDIT_LB_Q : + PBF_REG_P4_CREDIT, + (CHIP_IS_E3B0(bp)) ? + PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : + PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, + }; + + int i; + + /* Verify the command queues are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) + bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count); + + + /* Verify the transmission buffers are flushed P0, P1, P4 */ + for (i = 0; i < ARRAY_SIZE(buf_regs); i++) + bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count); +} + +#define OP_GEN_PARAM(param) \ + (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) + +#define OP_GEN_TYPE(type) \ + (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) + +#define OP_GEN_AGG_VECT(index) \ + (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) + + +static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, + u32 poll_cnt) +{ + struct sdm_op_gen op_gen = {0}; + + u32 comp_addr = BAR_CSTRORM_INTMEM + + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func); + int ret = 0; + + if (REG_RD(bp, comp_addr)) { + BNX2X_ERR("Cleanup complete is not 0\n"); + return 1; + } + + op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); + op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); + op_gen.command |= OP_GEN_AGG_VECT(clnup_func); + op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; + + DP(BNX2X_MSG_SP, "FW Final cleanup\n"); + REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command); + + if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) { + BNX2X_ERR("FW final cleanup did not succeed\n"); + ret = 1; + } + /* Zero completion for nxt FLR */ + REG_WR(bp, comp_addr, 0); + + return ret; +} + +static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) +{ + int pos; + u16 status; + + pos = pci_pcie_cap(dev); + if (!pos) + return false; + + pci_read_config_word(dev, pos + 
PCI_EXP_DEVSTA, &status);
+	return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines */
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			CFC_REG_NUM_LCIDS_INSIDE_PF,
+			"CFC PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			DORQ_REG_PF_USAGE_CNT,
+			"DQ PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+			"QM PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+			"Timers VNIC usage counter timed out",
+			poll_cnt))
+		return 1;
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+			"Timers NUM_SCANS usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for DMAE PF usage counter to zero */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			dmae_reg_go_c[INIT_DMAE_C(bp)],
+			"DMAE command register timed out",
+			poll_cnt))
+		return 1;
+
+	return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, PBF_REG_DISABLE_PF);
+	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+	   val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+	/* Re-enable PF target read access */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+	/* Poll HW usage counters */
+	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+		return -EBUSY;
+
+	/* Zero the igu 'trailing edge' and 'leading edge' */
+
+	/* Send the FW cleanup command */
+	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+		return -EBUSY;
+
+	/* ATC cleanup */
+
+	/* Verify TX hw is flushed */
+	bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+	/* Wait 100ms (not adjusted according to platform) */
+	msleep(100);
+
+	/* Verify no pending pci transactions */
+	if (bnx2x_is_pcie_pending(bp->pdev))
+		BNX2X_ERR("PCIE Transactions still pending\n");
+
+	/* Debug */
+	bnx2x_hw_enable_status(bp);
+
+	/*
+	 * Master enable - Due to WB DMAE writes performed before this
+	 * register is re-initialized as part of the regular function init
+	 */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+	return 0;
+}
+
 static void bnx2x_hc_int_enable(struct bnx2x *bp)
 {
	int port =
BP_PORT(bp);
@@ -1272,7 +1549,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
 		BNX2X_ERR("BUG! proper val not read from IGU!\n");
 }
 
-static void bnx2x_int_disable(struct bnx2x *bp)
+void bnx2x_int_disable(struct bnx2x *bp)
 {
 	if (bp->common.int_block == INT_BLOCK_HC)
 		bnx2x_hc_int_disable(bp);
@@ -1285,10 +1562,6 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
 	int i, offset;
 
-	/* disable interrupt handling */
-	atomic_inc(&bp->intr_sem);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
 	if (disable_hw)
 		/* prevent the HW from sending interrupts */
 		bnx2x_int_disable(bp);
@@ -1301,12 +1574,13 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
 			offset++;
 #endif
 		for_each_eth_queue(bp, i)
-			synchronize_irq(bp->msix_table[i + offset].vector);
+			synchronize_irq(bp->msix_table[offset++].vector);
 	} else
 		synchronize_irq(bp->pdev->irq);
 
 	/* make sure sp_task is not running */
 	cancel_delayed_work(&bp->sp_task);
+	cancel_delayed_work(&bp->period_task);
 	flush_workqueue(bnx2x_wq);
 }
 
@@ -1350,59 +1624,129 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
 	return false;
 }
 
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp:	driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this
+ * function belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+	if (BP_PATH(bp))
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+	else
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp:	driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
 #ifdef BCM_CNIC
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
 #endif
 
-void bnx2x_sp_event(struct bnx2x_fastpath *fp,
-			union eth_rx_cqe *rr_cqe)
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 {
 	struct bnx2x *bp = fp->bp;
 	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
 
 	DP(BNX2X_MSG_SP,
 	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
 	   fp->index, cid, command, bp->state,
 	   rr_cqe->ramrod_cqe.ramrod_type);
 
-	switch (command | fp->state) {
-	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
-		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
-		fp->state = BNX2X_FP_STATE_OPEN;
+	switch (command) {
+	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE;
 		break;
 
-	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
-		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
-		fp->state = BNX2X_FP_STATE_HALTED;
+	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP;
 		break;
 
-	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
-		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
-		fp->state = BNX2X_FP_STATE_TERMINATED;
+	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+		DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 		break;
 
-	default:
-		BNX2X_ERR("unexpected MC reply (%d) "
-			  "fp[%d] state is %x\n",
-			  command, fp->index, fp->state);
+	case (RAMROD_CMD_ID_ETH_HALT):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_HALT;
 		break;
+
+	case (RAMROD_CMD_ID_ETH_TERMINATE):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_TERMINATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_EMPTY):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_EMPTY;
+		break;
+
+	default:
+		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+			  command, fp->index);
+		return;
 	}
 
+	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
+		/* q_obj->complete_cmd() failure means that this was
+		 * an unexpected completion.
+		 *
+		 * In this case we don't want to increase the bp->cq_spq_left
+		 * because apparently we haven't sent this command in the
+		 * first place.
+		 */
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#else
+		return;
+#endif
+
 	smp_mb__before_atomic_inc();
 	atomic_inc(&bp->cq_spq_left);
-	/* push the change in fp->state and towards the memory */
-	smp_wmb();
+	/* push the change in bp->cq_spq_left towards the memory */
+	smp_mb__after_atomic_inc();
+
+	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
 
 	return;
 }
 
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+				 start);
+}
+
 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 {
 	struct bnx2x *bp = netdev_priv(dev_instance);
 	u16 status = bnx2x_ack_int(bp);
 	u16 mask;
 	int i;
+	u8 cos;
 
 	/* Return here if interrupt is shared and it's not for us */
 	if (unlikely(status == 0)) {
@@ -1411,12 +1755,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 	}
 	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
 
-	/* Return here if interrupt is disabled */
-	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
-		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
-		return IRQ_HANDLED;
-	}
-
#ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return IRQ_HANDLED;
@@ -1425,11 +1763,12 @@
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
+		mask = 0x2 << (fp->index + CNIC_PRESENT);
 		if (status & mask) {
-			/* Handle Rx and Tx according to SB id */
+			/* Handle Rx or Tx according to SB id */
 			prefetch(fp->rx_cons_sb);
-			prefetch(fp->tx_cons_sb);
+			for_each_cos_in_tx_queue(fp, cos)
+				prefetch(fp->txdata[cos].tx_cons_sb);
 			prefetch(&fp->sb_running_index[SM_RX_ID]);
 			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 			status &= ~mask;
 		}
 	}
@@ -1441,11 +1780,13 @@ irqreturn_t
bnx2x_interrupt(int irq, void *dev_instance) if (status & (mask | 0x1)) { struct cnic_ops *c_ops = NULL; - rcu_read_lock(); - c_ops = rcu_dereference(bp->cnic_ops); - if (c_ops) - c_ops->cnic_handler(bp->cnic_data, NULL); - rcu_read_unlock(); + if (likely(bp->state == BNX2X_STATE_OPEN)) { + rcu_read_lock(); + c_ops = rcu_dereference(bp->cnic_ops); + if (c_ops) + c_ops->cnic_handler(bp->cnic_data, NULL); + rcu_read_unlock(); + } status &= ~mask; } @@ -1466,9 +1807,6 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) return IRQ_HANDLED; } -/* end of fast path */ - - /* Link */ /* @@ -1520,6 +1858,11 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource) return -EAGAIN; } +int bnx2x_release_leader_lock(struct bnx2x *bp) +{ + return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); +} + int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource) { u32 lock_status; @@ -1640,6 +1983,53 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) return 0; } +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode) +{ + u32 gpio_reg = 0; + int rc = 0; + + /* Any port swapping should be handled by caller. */ + + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + /* read GPIO and mask except the float bits */ + gpio_reg = REG_RD(bp, MISC_REG_GPIO); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); + gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); + + switch (mode) { + case MISC_REGISTERS_GPIO_OUTPUT_LOW: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins); + /* set CLR */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); + break; + + case MISC_REGISTERS_GPIO_OUTPUT_HIGH: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins); + /* set SET */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); + break; + + case MISC_REGISTERS_GPIO_INPUT_HI_Z: + DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins); + /* set FLOAT */ + gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); + break; + + default: + BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode); + rc = -EINVAL; + break; + } + + if (rc == 0) + REG_WR(bp, MISC_REG_GPIO, gpio_reg); + + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); + + return rc; +} + int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port) { /* The GPIO should be swapped if swap register is set and active */ @@ -1732,45 +2122,6 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) return 0; } -int bnx2x_get_link_cfg_idx(struct bnx2x *bp) -{ - u32 sel_phy_idx = 0; - if (bp->link_vars.link_up) { - sel_phy_idx = EXT_PHY1; - /* In case link is SERDES, check if the EXT_PHY2 is the one */ - if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) && - (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE)) - sel_phy_idx = EXT_PHY2; - } else { - - switch (bnx2x_phy_selection(&bp->link_params)) { - case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: - case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - sel_phy_idx = EXT_PHY1; - break; - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: - case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - sel_phy_idx = EXT_PHY2; - break; - } - } - /* - * The selected actived PHY is always after swapping (in case PHY - * swapping is enabled). 
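bnx2x_set_mult_gpio() above manipulates three control fields (SET/CLR/FLOAT) that sit at fixed offsets inside one GPIO register, always clearing a pin mask out of every field before asserting one of them. A sketch of that masking order with made-up offsets; the real MISC_REGISTERS_GPIO_*_POS values differ:

#include <stdint.h>
#include <assert.h>

#define GPIO_FLOAT_POS 24	/* illustrative offsets only */
#define GPIO_CLR_POS   16
#define GPIO_SET_POS   8

static uint32_t gpio_output_high(uint32_t gpio_reg, uint32_t pins)
{
	/* mask the pins out of every control field... */
	gpio_reg &= ~(pins << GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << GPIO_CLR_POS);
	gpio_reg &= ~(pins << GPIO_SET_POS);
	/* ...then drive them high via the SET field alone */
	return gpio_reg | (pins << GPIO_SET_POS);
}

int main(void)
{
	uint32_t reg = 0x3 << GPIO_FLOAT_POS;	/* pins 0-1 floating */

	reg = gpio_output_high(reg, 0x3);
	assert(reg == (uint32_t)(0x3 << GPIO_SET_POS));
	return 0;
}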
So when swapping is enabled, we need to reverse - * the configuration - */ - - if (bp->link_params.multi_phy_config & - PORT_HW_CFG_PHY_SWAPPED_ENABLED) { - if (sel_phy_idx == EXT_PHY1) - sel_phy_idx = EXT_PHY2; - else if (sel_phy_idx == EXT_PHY2) - sel_phy_idx = EXT_PHY1; - } - return LINK_CONFIG_IDX(sel_phy_idx); -} - void bnx2x_calc_fc_adv(struct bnx2x *bp) { u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); @@ -1803,10 +2154,12 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) u8 rc; int cfx_idx = bnx2x_get_link_cfg_idx(bp); u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; - /* Initialize link parameters structure variables */ - /* It is recommended to turn off RX FC for jumbo frames - for better performance */ - if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000)) + /* + * Initialize link parameters structure variables + * It is recommended to turn off RX FC for jumbo frames + * for better performance + */ + if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000)) bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX; else bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; @@ -1814,8 +2167,18 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) bnx2x_acquire_phy_lock(bp); if (load_mode == LOAD_DIAG) { - bp->link_params.loopback_mode = LOOPBACK_XGXS; - bp->link_params.req_line_speed[cfx_idx] = SPEED_10000; + struct link_params *lp = &bp->link_params; + lp->loopback_mode = LOOPBACK_XGXS; + /* do PHY loopback at 10G speed, if possible */ + if (lp->req_line_speed[cfx_idx] < SPEED_10000) { + if (lp->speed_cap_mask[cfx_idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + lp->req_line_speed[cfx_idx] = + SPEED_10000; + else + lp->req_line_speed[cfx_idx] = + SPEED_1000; + } } rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); @@ -1827,7 +2190,8 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); bnx2x_link_report(bp); - } + } else + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); bp->link_params.req_line_speed[cfx_idx] = req_line_speed; return rc; } @@ -1941,8 +2305,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) bp->vn_weight_sum += vn_min_rate; } - /* ... only if all min rates are zeros - disable fairness */ - if (all_zero) { + /* if ETS or all min rates are zeros - disable fairness */ + if (BNX2X_IS_ETS_ENABLED(bp)) { + bp->cmng.flags.cmng_enables &= + ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; + DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); + } else if (all_zero) { bp->cmng.flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; DP(NETIF_MSG_IFUP, "All MIN values are zeroes" @@ -2143,11 +2511,11 @@ static void bnx2x_link_attn(struct bnx2x *bp) pause_enabled); } - if (bp->link_vars.mac_type == MAC_TYPE_BMAC) { + if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { struct host_port_stats *pstats; pstats = bnx2x_sp(bp, port_stats); - /* reset old bmac stats */ + /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } @@ -2197,12 +2565,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp) bp->port.pmf = 1; DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf); + /* + * We need the mb() to ensure the ordering between the writing to + * bp->port.pmf here and reading it from the bnx2x_periodic_task(). 
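A short aside on the barrier described above: the write to bp->port.pmf must be globally visible before the periodic work is queued, so the task cannot observe a stale value. Below is a minimal userspace model of that publish/observe pairing, using C11 fences in place of the kernel's smp_mb(); all names are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool pmf;					/* models bp->port.pmf */

static void become_pmf(void)				/* models bnx2x_pmf_update() */
{
	atomic_store_explicit(&pmf, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	/* ... queue the periodic work here ... */
}

static bool periodic_task_sees_pmf(void)		/* models bnx2x_periodic_task() */
{
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the writer's fence */
	return atomic_load_explicit(&pmf, memory_order_relaxed);
}

int main(void)
{
	become_pmf();
	return periodic_task_sees_pmf() ? 0 : 1;
}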
+ */ + smp_mb(); + + /* queue a periodic task */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 0); + + bnx2x_dcbx_pmf_update(bp); + /* enable nig attention */ val = (0xff0f | (1 << (BP_E1HVN(bp) + 4))); if (bp->common.int_block == INT_BLOCK_HC) { REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val); - } else if (CHIP_IS_E2(bp)) { + } else if (!CHIP_IS_E1x(bp)) { REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val); } @@ -2232,7 +2611,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); - DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); + DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n", + (command | seq), param); do { /* let the FW do it's magic ... */ @@ -2263,182 +2643,118 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp) { #ifdef BCM_CNIC - if (IS_FCOE_FP(fp) && IS_MF(bp)) + /* Statistics are not supported for CNIC Clients at the moment */ + if (IS_FCOE_FP(fp)) return false; #endif return true; } -/* must be called under rtnl_lock */ -static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) { - u32 mask = (1 << cl_id); - - /* initial seeting is BNX2X_ACCEPT_NONE */ - u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1; - u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; - u8 unmatched_unicast = 0; - - if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST) - unmatched_unicast = 1; + if (CHIP_IS_E1x(bp)) { + struct tstorm_eth_function_common_config tcfg = {0}; - if (filters & BNX2X_PROMISCUOUS_MODE) { - /* promiscious - accept all, drop none */ - drop_all_ucast = drop_all_bcast = drop_all_mcast = 0; - accp_all_ucast = accp_all_bcast = accp_all_mcast = 1; - if (IS_MF_SI(bp)) { - /* - * SI mode defines to accept in promiscuos mode - * only unmatched packets - */ - unmatched_unicast = 1; - accp_all_ucast = 0; - } - } - if (filters & BNX2X_ACCEPT_UNICAST) { - /* accept matched ucast */ - drop_all_ucast = 0; + storm_memset_func_cfg(bp, &tcfg, p->func_id); } - if (filters & BNX2X_ACCEPT_MULTICAST) - /* accept matched mcast */ - drop_all_mcast = 0; - if (filters & BNX2X_ACCEPT_ALL_UNICAST) { - /* accept all mcast */ - drop_all_ucast = 0; - accp_all_ucast = 1; - } - if (filters & BNX2X_ACCEPT_ALL_MULTICAST) { - /* accept all mcast */ - drop_all_mcast = 0; - accp_all_mcast = 1; - } - if (filters & BNX2X_ACCEPT_BROADCAST) { - /* accept (all) bcast */ - drop_all_bcast = 0; - accp_all_bcast = 1; - } - - bp->mac_filters.ucast_drop_all = drop_all_ucast ? - bp->mac_filters.ucast_drop_all | mask : - bp->mac_filters.ucast_drop_all & ~mask; - - bp->mac_filters.mcast_drop_all = drop_all_mcast ? - bp->mac_filters.mcast_drop_all | mask : - bp->mac_filters.mcast_drop_all & ~mask; - - bp->mac_filters.bcast_drop_all = drop_all_bcast ? - bp->mac_filters.bcast_drop_all | mask : - bp->mac_filters.bcast_drop_all & ~mask; - - bp->mac_filters.ucast_accept_all = accp_all_ucast ? - bp->mac_filters.ucast_accept_all | mask : - bp->mac_filters.ucast_accept_all & ~mask; - - bp->mac_filters.mcast_accept_all = accp_all_mcast ? - bp->mac_filters.mcast_accept_all | mask : - bp->mac_filters.mcast_accept_all & ~mask; - - bp->mac_filters.bcast_accept_all = accp_all_bcast ? 
- bp->mac_filters.bcast_accept_all | mask : - bp->mac_filters.bcast_accept_all & ~mask; + /* Enable the function in the FW */ + storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); + storm_memset_func_en(bp, p->func_id, 1); - bp->mac_filters.unmatched_unicast = unmatched_unicast ? - bp->mac_filters.unmatched_unicast | mask : - bp->mac_filters.unmatched_unicast & ~mask; + /* spq */ + if (p->func_flgs & FUNC_FLG_SPQ) { + storm_memset_spq_addr(bp, p->spq_map, p->func_id); + REG_WR(bp, XSEM_REG_FAST_MEMORY + + XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); + } } -static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) +/** + * bnx2x_get_tx_only_flags - Return common flags + * + * @bp device handle + * @fp queue handle + * @zero_stats TRUE if statistics zeroing is needed + * + * Return the flags that are common for the Tx-only and not normal connections. + */ +static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool zero_stats) { - struct tstorm_eth_function_common_config tcfg = {0}; - u16 rss_flgs; - - /* tpa */ - if (p->func_flgs & FUNC_FLG_TPA) - tcfg.config_flags |= - TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA; - - /* set rss flags */ - rss_flgs = (p->rss->mode << - TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT); - - if (p->rss->cap & RSS_IPV4_CAP) - rss_flgs |= RSS_IPV4_CAP_MASK; - if (p->rss->cap & RSS_IPV4_TCP_CAP) - rss_flgs |= RSS_IPV4_TCP_CAP_MASK; - if (p->rss->cap & RSS_IPV6_CAP) - rss_flgs |= RSS_IPV6_CAP_MASK; - if (p->rss->cap & RSS_IPV6_TCP_CAP) - rss_flgs |= RSS_IPV6_TCP_CAP_MASK; + unsigned long flags = 0; - tcfg.config_flags |= rss_flgs; - tcfg.rss_result_mask = p->rss->result_mask; + /* PF driver will always initialize the Queue to an ACTIVE state */ + __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); - storm_memset_func_cfg(bp, &tcfg, p->func_id); - - /* Enable the function in the FW */ - storm_memset_vf_to_pf(bp, p->func_id, p->pf_id); - storm_memset_func_en(bp, p->func_id, 1); + /* tx only connections collect statistics (on the same index as the + * parent connection). The statistics are zeroed when the parent + * connection is initialized. 
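The queue-flag helpers here compose per-queue properties as individual bits in an unsigned long, which is what lets the Tx-only path reuse the common flag set. A compact sketch of that pattern follows; the enum indices are stand-ins for the real BNX2X_Q_FLG_* values, not the driver's bit assignments.

#include <stdbool.h>
#include <stdio.h>

enum q_flag { Q_FLG_ACTIVE, Q_FLG_STATS, Q_FLG_ZERO_STATS };

static unsigned long get_common_flags(bool stats_valid, bool zero_stats)
{
	unsigned long flags = 0;

	flags |= 1UL << Q_FLG_ACTIVE;		/* queues always start ACTIVE */
	if (stats_valid) {
		flags |= 1UL << Q_FLG_STATS;
		if (zero_stats)			/* only the parent connection zeroes stats */
			flags |= 1UL << Q_FLG_ZERO_STATS;
	}
	return flags;
}

int main(void)
{
	printf("flags = 0x%lx\n", get_common_flags(true, true));	/* prints 0x7 */
	return 0;
}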
+ */ + if (stat_counter_valid(bp, fp)) { + __set_bit(BNX2X_Q_FLG_STATS, &flags); + if (zero_stats) + __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); + } - /* statistics */ - if (p->func_flgs & FUNC_FLG_STATS) { - struct stats_indication_flags stats_flags = {0}; - stats_flags.collect_eth = 1; + return flags; +} - storm_memset_xstats_flags(bp, &stats_flags, p->func_id); - storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id); +static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, + struct bnx2x_fastpath *fp, + bool leading) +{ + unsigned long flags = 0; - storm_memset_tstats_flags(bp, &stats_flags, p->func_id); - storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id); + /* calculate other queue flags */ + if (IS_MF_SD(bp)) + __set_bit(BNX2X_Q_FLG_OV, &flags); - storm_memset_ustats_flags(bp, &stats_flags, p->func_id); - storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id); + if (IS_FCOE_FP(fp)) + __set_bit(BNX2X_Q_FLG_FCOE, &flags); - storm_memset_cstats_flags(bp, &stats_flags, p->func_id); - storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id); + if (!fp->disable_tpa) { + __set_bit(BNX2X_Q_FLG_TPA, &flags); + __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); } - /* spq */ - if (p->func_flgs & FUNC_FLG_SPQ) { - storm_memset_spq_addr(bp, p->spq_map, p->func_id); - REG_WR(bp, XSEM_REG_FAST_MEMORY + - XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod); + if (leading) { + __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags); + __set_bit(BNX2X_Q_FLG_MCAST, &flags); } -} -static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp, - struct bnx2x_fastpath *fp) -{ - u16 flags = 0; + /* Always set HW VLAN stripping */ + __set_bit(BNX2X_Q_FLG_VLAN, &flags); - /* calculate queue flags */ - flags |= QUEUE_FLG_CACHE_ALIGN; - flags |= QUEUE_FLG_HC; - flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0; - flags |= QUEUE_FLG_VLAN; - DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); + return flags | bnx2x_get_common_flags(bp, fp, true); +} - if (!fp->disable_tpa) - flags |= QUEUE_FLG_TPA; +static void bnx2x_pf_q_prep_general(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init, + u8 cos) +{ + gen_init->stat_id = bnx2x_stats_id(fp); + gen_init->spcl_id = fp->cl_id; - flags = stat_counter_valid(bp, fp) ? 
- (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS); + /* Always use mini-jumbo MTU for FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; + else + gen_init->mtu = bp->dev->mtu; - return flags; + gen_init->cos = cos; } -static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, +static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct rxq_pause_params *pause, - struct bnx2x_rxq_init_params *rxq_init) + struct bnx2x_rxq_setup_params *rxq_init) { - u16 max_sge = 0; + u8 max_sge = 0; u16 sge_sz = 0; u16 tpa_agg_size = 0; - /* calculate queue flags */ - u16 flags = bnx2x_get_cl_flags(bp, fp); - if (!fp->disable_tpa) { pause->sge_th_hi = 250; pause->sge_th_lo = 150; @@ -2459,80 +2775,74 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, pause->bd_th_lo = 250; pause->rcq_th_hi = 350; pause->rcq_th_lo = 250; - pause->sge_th_hi = 0; - pause->sge_th_lo = 0; + pause->pri_map = 1; } /* rxq setup */ - rxq_init->flags = flags; - rxq_init->cxt = &bp->context.vcxt[fp->cid].eth; rxq_init->dscr_map = fp->rx_desc_mapping; rxq_init->sge_map = fp->rx_sge_mapping; rxq_init->rcq_map = fp->rx_comp_mapping; rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; - /* Always use mini-jumbo MTU for FCoE L2 ring */ - if (IS_FCOE_FP(fp)) - rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; - else - rxq_init->mtu = bp->dev->mtu; + /* This should be a maximum number of data bytes that may be + * placed on the BD (not including paddings). + */ + rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN - + IP_HEADER_ALIGNMENT_PADDING; - rxq_init->buf_sz = fp->rx_buf_size; rxq_init->cl_qzone_id = fp->cl_qzone_id; - rxq_init->cl_id = fp->cl_id; - rxq_init->spcl_id = fp->cl_id; - rxq_init->stat_id = fp->cl_id; rxq_init->tpa_agg_sz = tpa_agg_size; rxq_init->sge_buf_sz = sge_sz; rxq_init->max_sges_pkt = max_sge; + rxq_init->rss_engine_id = BP_FUNC(bp); + + /* Maximum number or simultaneous TPA aggregation for this Queue. + * + * For PF Clients it should be the maximum avaliable number. + * VF driver(s) may want to define it to a smaller value. + */ + rxq_init->max_tpa_queues = + (CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 : + ETH_MAX_AGGREGATION_QUEUES_E1H_E2); + rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; rxq_init->fw_sb_id = fp->fw_sb_id; if (IS_FCOE_FP(fp)) rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; else - rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; - - rxq_init->cid = HW_CID(bp, fp->cid); - - rxq_init->hc_rate = bp->rx_ticks ? 
(1000000 / bp->rx_ticks) : 0; + rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; } -static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp, - struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init) +static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, + u8 cos) { - u16 flags = bnx2x_get_cl_flags(bp, fp); - - txq_init->flags = flags; - txq_init->cxt = &bp->context.vcxt[fp->cid].eth; - txq_init->dscr_map = fp->tx_desc_mapping; - txq_init->stat_id = fp->cl_id; - txq_init->cid = HW_CID(bp, fp->cid); - txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; + txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; + txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; + /* + * set the tss leading client id for TX classfication == + * leading RSS client id + */ + txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id); + if (IS_FCOE_FP(fp)) { txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE; } - - txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; } static void bnx2x_pf_init(struct bnx2x *bp) { struct bnx2x_func_init_params func_init = {0}; - struct bnx2x_rss_params rss = {0}; struct event_ring_data eq_data = { {0} }; u16 flags; - /* pf specific setups */ - if (!CHIP_IS_E1(bp)) - storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp)); - - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { /* reset IGU PF statistics: MSIX + ATTN */ /* PF */ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + @@ -2550,27 +2860,14 @@ static void bnx2x_pf_init(struct bnx2x *bp) /* function setup flags */ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); - if (CHIP_IS_E1x(bp)) - flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; - else - flags |= FUNC_FLG_TPA; - - /* function setup */ - - /** - * Although RSS is meaningless when there is a single HW queue we - * still need it enabled in order to have HW Rx hash generated. + /* This flag is relevant for E1x only. + * E2 doesn't have a TPA configuration in a function level. */ - rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP | - RSS_IPV6_CAP | RSS_IPV6_TCP_CAP); - rss.mode = bp->multi_mode; - rss.result_mask = MULTI_MASK; - func_init.rss = &rss; + flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; func_init.func_flgs = flags; func_init.pf_id = BP_FUNC(bp); func_init.func_id = BP_FUNC(bp); - func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats); func_init.spq_map = bp->spq_mapping; func_init.spq_prod = bp->spq_prod_idx; @@ -2579,11 +2876,11 @@ static void bnx2x_pf_init(struct bnx2x *bp) memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port)); /* - Congestion management values depend on the link rate - There is no active link so initial link rate is set to 10 Gbps. - When the link comes up The congestion management values are - re-calculated according to the actual link rate. - */ + * Congestion management values depend on the link rate + * There is no active link so initial link rate is set to 10 Gbps. + * When the link comes up The congestion management values are + * re-calculated according to the actual link rate. 
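The hc_rate assignments removed above encode a simple conversion: a coalescing tick period in microseconds becomes a host-coalescing budget in events per second, with zero meaning coalescing is disabled. A restatement of that arithmetic for reference; the function name is an illustrative stand-in, not a driver symbol.

#include <stdio.h>

static unsigned int hc_rate_from_ticks(unsigned int ticks_usec)
{
	/* 1,000,000 usec per second divided by the tick period */
	return ticks_usec ? 1000000U / ticks_usec : 0;
}

int main(void)
{
	printf("%u\n", hc_rate_from_ticks(50));	/* 50 usec ticks -> 20000 events/sec */
	return 0;
}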
+ */ bp->link_vars.line_speed = SPEED_10000; bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp)); @@ -2591,10 +2888,6 @@ static void bnx2x_pf_init(struct bnx2x *bp) if (bp->port.pmf) storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); - /* no rx until link is up */ - bp->rx_mode = BNX2X_RX_MODE_NONE; - bnx2x_set_storm_rx_mode(bp); - /* init Event Queue */ eq_data.base_addr.hi = U64_HI(bp->eq_mapping); eq_data.base_addr.lo = U64_LO(bp->eq_mapping); @@ -2609,11 +2902,9 @@ static void bnx2x_e1h_disable(struct bnx2x *bp) { int port = BP_PORT(bp); - netif_tx_disable(bp->dev); + bnx2x_tx_disable(bp); REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); - - netif_carrier_off(bp->dev); } static void bnx2x_e1h_enable(struct bnx2x *bp) @@ -2708,20 +2999,60 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp) { int func = BP_FUNC(bp); - /* Make sure that BD data is updated before writing the producer */ - wmb(); + /* + * Make sure that BD data is updated before writing the producer: + * BD data is written to the memory, the producer is read from the + * memory, thus we need a full memory barrier to ensure the ordering. + */ + mb(); REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), bp->spq_prod_idx); mmiowb(); } -/* the slow path queue is odd since completions arrive on the fastpath ring */ +/** + * bnx2x_is_contextless_ramrod - check if the current command ends on EQ + * + * @cmd: command to check + * @cmd_type: command type + */ +static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) +{ + if ((cmd_type == NONE_CONNECTION_TYPE) || + (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || + (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || + (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || + (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || + (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || + (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) + return true; + else + return false; + +} + + +/** + * bnx2x_sp_post - place a single command on an SP ring + * + * @bp: driver handle + * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) + * @cid: SW CID the command is related to + * @data_hi: command private data address (high 32 bits) + * @data_lo: command private data address (low 32 bits) + * @cmd_type: command type (e.g. NONE, ETH) + * + * SP data is handled as if it's always an address pair, thus data fields are + * not swapped to little endian in upper functions. Instead this function swaps + * data as if it's two u32 fields. 
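As the kernel-doc above notes, bnx2x_sp_post() takes the ramrod payload address as two host-order u32 halves and byte-swaps each only when writing the SPE. A sketch of that hi/lo split, with plain shifts standing in for the driver's U64_HI()/U64_LO() macros:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t u64_hi(uint64_t a) { return (uint32_t)(a >> 32); }
static inline uint32_t u64_lo(uint64_t a) { return (uint32_t)a; }

int main(void)
{
	uint64_t mapping = 0x0000123456789abcULL;	/* e.g. a DMA mapping */

	/* these halves are what would be passed as data_hi / data_lo */
	printf("hi=0x%08x lo=0x%08x\n",
	       (unsigned)u64_hi(mapping), (unsigned)u64_lo(mapping));
	return 0;
}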
+ */ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, - u32 data_hi, u32 data_lo, int common) + u32 data_hi, u32 data_lo, int cmd_type) { struct eth_spe *spe; u16 type; + bool common = bnx2x_is_contextless_ramrod(command, cmd_type); #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -2751,17 +3082,7 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) | HW_CID(bp, cid)); - if (common) - /* Common ramrods: - * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC - * TRAFFIC_STOP, TRAFFIC_START - */ - type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) - & SPE_HDR_CONN_TYPE; - else - /* ETH ramrods: SETUP, HALT */ - type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT) - & SPE_HDR_CONN_TYPE; + type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE; type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) & SPE_HDR_FUNCTION_ID); @@ -2771,25 +3092,23 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, spe->data.update_data_addr.hi = cpu_to_le32(data_hi); spe->data.update_data_addr.lo = cpu_to_le32(data_lo); - /* stats ramrod has it's own slot on the spq */ - if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) { - /* It's ok if the actual decrement is issued towards the memory - * somewhere between the spin_lock and spin_unlock. Thus no - * more explict memory barrier is needed. - */ - if (common) - atomic_dec(&bp->eq_spq_left); - else - atomic_dec(&bp->cq_spq_left); - } + /* + * It's ok if the actual decrement is issued towards the memory + * somewhere between the spin_lock and spin_unlock. Thus no + * more explict memory barrier is needed. + */ + if (common) + atomic_dec(&bp->eq_spq_left); + else + atomic_dec(&bp->cq_spq_left); DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, - "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) " - "type(0x%x) left (ETH, COMMON) (%x,%x)\n", + "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) " + "type(0x%x) left (CQ, EQ) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + - (void *)bp->spq_prod_bd - (void *)bp->spq), command, + (void *)bp->spq_prod_bd - (void *)bp->spq), command, common, HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); @@ -2892,9 +3211,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) /* save nig interrupt mask */ nig_mask = REG_RD(bp, nig_int_mask_addr); - REG_WR(bp, nig_int_mask_addr, 0); - bnx2x_link_attn(bp); + /* If nig_mask is not set, no need to call the update + * function. + */ + if (nig_mask) { + REG_WR(bp, nig_int_mask_addr, 0); + + bnx2x_link_attn(bp); + } /* handle unicore attn? 
*/ } @@ -2999,8 +3324,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) bnx2x_fan_failure(bp); } - if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 | - AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) { + if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) { bnx2x_acquire_phy_lock(bp); bnx2x_handle_module_detect_int(&bp->link_params); bnx2x_release_phy_lock(bp); @@ -3063,13 +3387,13 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) } if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { - val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); - BNX2X_ERR("PXP hw attention 0x%x\n", val); + BNX2X_ERR("PXP hw attention-0 0x%x\n", val); /* RQ_USDMDP_FIFO_OVERFLOW */ if (val & 0x18000) BNX2X_ERR("FATAL error from PXP\n"); - if (CHIP_IS_E2(bp)) { + + if (!CHIP_IS_E1x(bp)) { val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1); BNX2X_ERR("PXP hw attention-1 0x%x\n", val); } @@ -3117,20 +3441,31 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) bnx2x_pmf_update(bp); - /* Always call it here: bnx2x_link_report() will - * prevent the link indication duplication. - */ - bnx2x__link_status_update(bp); - if (bp->port.pmf && (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && bp->dcbx_enabled > 0) /* start dcbx state machine */ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_NEG_RECEIVED); + if (bp->link_vars.periodic_flags & + PERIODIC_FLAGS_LINK_EVENT) { + /* sync with link */ + bnx2x_acquire_phy_lock(bp); + bp->link_vars.periodic_flags &= + ~PERIODIC_FLAGS_LINK_EVENT; + bnx2x_release_phy_lock(bp); + if (IS_MF(bp)) + bnx2x_link_sync_notify(bp); + bnx2x_link_report(bp); + } + /* Always call it here: bnx2x_link_report() will + * prevent the link indication duplication. + */ + bnx2x__link_status_update(bp); } else if (attn & BNX2X_MC_ASSERT_BITS) { BNX2X_ERR("MC assert!\n"); + bnx2x_mc_assert(bp); REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); @@ -3163,72 +3498,185 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) } } -#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1 -#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */ -#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) -#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) -#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS +/* + * Bits map: + * 0-7 - Engine0 load counter. + * 8-15 - Engine1 load counter. + * 16 - Engine0 RESET_IN_PROGRESS bit. + * 17 - Engine1 RESET_IN_PROGRESS bit. + * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function + * on the engine + * 19 - Engine1 ONE_IS_LOADED. + * 20 - Chip reset flow bit. When set none-leader must wait for both engines + * leader to complete (check for both RESET_IN_PROGRESS bits and not for + * just the one belonging to its engine). + * + */ +#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 + +#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff +#define BNX2X_PATH0_LOAD_CNT_SHIFT 0 +#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00 +#define BNX2X_PATH1_LOAD_CNT_SHIFT 8 +#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000 +#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000 +#define BNX2X_GLOBAL_RESET_BIT 0x00040000 /* + * Set the GLOBAL_RESET bit. 
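The bit map documented above packs two 8-bit per-engine load counters, the per-engine RESET_IN_PROGRESS bits and the global bit into one shared 32-bit scratch register, so every update is a masked read-modify-write. A userspace model of the increment that bnx2x_inc_load_cnt() performs, reusing the mask values from the defines above:

#include <stdint.h>
#include <stdio.h>

#define PATH0_LOAD_CNT_MASK	0x000000ffu
#define PATH0_LOAD_CNT_SHIFT	0
#define PATH1_LOAD_CNT_MASK	0x0000ff00u
#define PATH1_LOAD_CNT_SHIFT	8

static uint32_t inc_load_cnt(uint32_t reg, int path)
{
	uint32_t mask  = path ? PATH1_LOAD_CNT_MASK : PATH0_LOAD_CNT_MASK;
	uint32_t shift = path ? PATH1_LOAD_CNT_SHIFT : PATH0_LOAD_CNT_SHIFT;
	uint32_t cnt   = ((reg & mask) >> shift) + 1;	/* current counter + 1 */

	return (reg & ~mask) | ((cnt << shift) & mask);	/* other bits untouched */
}

int main(void)
{
	uint32_t reg = 0x00040100u;	/* global bit set, engine1 count == 1 */

	printf("0x%08x\n", (unsigned)inc_load_cnt(reg, 1));	/* -> 0x00040200 */
	return 0;
}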
+ * + * Should be run under rtnl lock + */ +void bnx2x_set_reset_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT); + barrier(); + mmiowb(); +} + +/* + * Clear the GLOBAL_RESET bit. + * + * Should be run under rtnl lock + */ +static inline void bnx2x_clear_reset_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT)); + barrier(); + mmiowb(); +} + +/* + * Checks the GLOBAL_RESET bit. + * * should be run under rtnl lock */ +static inline bool bnx2x_reset_is_global(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); + return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false; +} + +/* + * Clear RESET_IN_PROGRESS bit for the current engine. + * + * Should be run under rtnl lock + */ static inline void bnx2x_set_reset_done(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - val &= ~(1 << RESET_DONE_FLAG_SHIFT); - REG_WR(bp, BNX2X_MISC_GEN_REG, val); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* Clear the bit */ + val &= ~bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); barrier(); mmiowb(); } /* + * Set RESET_IN_PROGRESS for the current engine. + * * should be run under rtnl lock */ -static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp) +void bnx2x_set_reset_in_progress(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - val |= (1 << 16); - REG_WR(bp, BNX2X_MISC_GEN_REG, val); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = BP_PATH(bp) ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* Set the bit */ + val |= bit; + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); barrier(); mmiowb(); } /* + * Checks the RESET_IN_PROGRESS bit for the given engine. * should be run under rtnl lock */ -bool bnx2x_reset_is_done(struct bnx2x *bp) +bool bnx2x_reset_is_done(struct bnx2x *bp, int engine) { - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val); - return (val & RESET_DONE_FLAG_MASK) ? false : true; + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 bit = engine ? + BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT; + + /* return false if bit is set */ + return (val & bit) ? false : true; } /* + * Increment the load counter for the current engine. + * * should be run under rtnl lock */ -inline void bnx2x_inc_load_cnt(struct bnx2x *bp) +void bnx2x_inc_load_cnt(struct bnx2x *bp) { - u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); + u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); - val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK; - REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* increment... 
*/ + val1++; + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); barrier(); mmiowb(); } -/* - * should be run under rtnl lock +/** + * bnx2x_dec_load_cnt - decrement the load counter + * + * @bp: driver handle + * + * Should be run under rtnl lock. + * Decrements the load counter for the current engine. Returns + * the new counter value. */ u32 bnx2x_dec_load_cnt(struct bnx2x *bp) { - u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG); + u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK; + u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT; DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val); - val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK; - REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1); + /* get the current counter value */ + val1 = (val & mask) >> shift; + + /* decrement... */ + val1--; + + /* clear the old value */ + val &= ~mask; + + /* set the new one */ + val |= ((val1 << shift) & mask); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val); barrier(); mmiowb(); @@ -3236,17 +3684,39 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp) } /* + * Read the load counter for the current engine. + * * should be run under rtnl lock */ -static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp) +static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine) { - return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK; + u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT : + BNX2X_PATH0_LOAD_CNT_SHIFT); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + + DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val); + + val = (val & mask) >> shift; + + DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val); + + return val; } +/* + * Reset the load counter for the current engine. + * + * should be run under rtnl lock + */ static inline void bnx2x_clear_load_cnt(struct bnx2x *bp) { - u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG); - REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK)); + u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); + u32 mask = (BP_PATH(bp) ? 
BNX2X_PATH1_LOAD_CNT_MASK : + BNX2X_PATH0_LOAD_CNT_MASK); + + REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); } static inline void _print_next_block(int idx, const char *blk) @@ -3256,7 +3726,8 @@ static inline void _print_next_block(int idx, const char *blk) pr_cont("%s", blk); } -static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) +static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, + bool print) { int i = 0; u32 cur_bit = 0; @@ -3265,19 +3736,33 @@ static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: - _print_next_block(par_num++, "BRB"); + if (print) + _print_next_block(par_num++, "BRB"); break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: - _print_next_block(par_num++, "PARSER"); + if (print) + _print_next_block(par_num++, "PARSER"); break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: - _print_next_block(par_num++, "TSDM"); + if (print) + _print_next_block(par_num++, "TSDM"); break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: - _print_next_block(par_num++, "SEARCHER"); + if (print) + _print_next_block(par_num++, + "SEARCHER"); + break; + case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TCM"); break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: - _print_next_block(par_num++, "TSEMI"); + if (print) + _print_next_block(par_num++, "TSEMI"); + break; + case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XPB"); break; } @@ -3289,7 +3774,8 @@ static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num) return par_num; } -static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) +static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, + bool *global, bool print) { int i = 0; u32 cur_bit = 0; @@ -3297,38 +3783,72 @@ static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) cur_bit = ((u32)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { - case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: - _print_next_block(par_num++, "PBCLIENT"); + case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PBF"); break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: - _print_next_block(par_num++, "QM"); + if (print) + _print_next_block(par_num++, "QM"); + break; + case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "TM"); break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: - _print_next_block(par_num++, "XSDM"); + if (print) + _print_next_block(par_num++, "XSDM"); + break; + case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "XCM"); break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: - _print_next_block(par_num++, "XSEMI"); + if (print) + _print_next_block(par_num++, "XSEMI"); break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: - _print_next_block(par_num++, "DOORBELLQ"); + if (print) + _print_next_block(par_num++, + "DOORBELLQ"); + break; + case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "NIG"); break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: - _print_next_block(par_num++, "VAUX PCI CORE"); + if (print) + _print_next_block(par_num++, + "VAUX PCI CORE"); + *global = true; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: - _print_next_block(par_num++, "DEBUG"); + if (print) + _print_next_block(par_num++, "DEBUG"); break; case 
AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: - _print_next_block(par_num++, "USDM"); + if (print) + _print_next_block(par_num++, "USDM"); + break; + case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "UCM"); break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: - _print_next_block(par_num++, "USEMI"); + if (print) + _print_next_block(par_num++, "USEMI"); break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: - _print_next_block(par_num++, "UPB"); + if (print) + _print_next_block(par_num++, "UPB"); break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: - _print_next_block(par_num++, "CSDM"); + if (print) + _print_next_block(par_num++, "CSDM"); + break; + case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "CCM"); break; } @@ -3340,7 +3860,8 @@ static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num) return par_num; } -static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) +static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, + bool print) { int i = 0; u32 cur_bit = 0; @@ -3349,26 +3870,37 @@ static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: - _print_next_block(par_num++, "CSEMI"); + if (print) + _print_next_block(par_num++, "CSEMI"); break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: - _print_next_block(par_num++, "PXP"); + if (print) + _print_next_block(par_num++, "PXP"); break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: - _print_next_block(par_num++, + if (print) + _print_next_block(par_num++, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: - _print_next_block(par_num++, "CFC"); + if (print) + _print_next_block(par_num++, "CFC"); break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: - _print_next_block(par_num++, "CDU"); + if (print) + _print_next_block(par_num++, "CDU"); + break; + case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "DMAE"); break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: - _print_next_block(par_num++, "IGU"); + if (print) + _print_next_block(par_num++, "IGU"); break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: - _print_next_block(par_num++, "MISC"); + if (print) + _print_next_block(par_num++, "MISC"); break; } @@ -3380,7 +3912,8 @@ static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num) return par_num; } -static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) +static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, + bool *global, bool print) { int i = 0; u32 cur_bit = 0; @@ -3389,16 +3922,54 @@ static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: - _print_next_block(par_num++, "MCP ROM"); + if (print) + _print_next_block(par_num++, "MCP ROM"); + *global = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: - _print_next_block(par_num++, "MCP UMP RX"); + if (print) + _print_next_block(par_num++, + "MCP UMP RX"); + *global = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: - _print_next_block(par_num++, "MCP UMP TX"); + if (print) + _print_next_block(par_num++, + "MCP UMP TX"); + *global = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: - _print_next_block(par_num++, "MCP SCPAD"); + if (print) + _print_next_block(par_num++, + "MCP SCPAD"); + *global = true; + break; + } + + /* 
Clear the bit */ + sig &= ~cur_bit; + } + } + + return par_num; +} + +static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, + bool print) +{ + int i = 0; + u32 cur_bit = 0; + for (i = 0; sig; i++) { + cur_bit = ((u32)0x1 << i); + if (sig & cur_bit) { + switch (cur_bit) { + case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "PGLUE_B"); + break; + case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: + if (print) + _print_next_block(par_num++, "ATC"); break; } @@ -3410,38 +3981,55 @@ static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num) return par_num; } -static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1, - u32 sig2, u32 sig3) +static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, + u32 *sig) { - if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) || - (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) { + if ((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[3] & HW_PRTY_ASSERT_SET_3) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) { int par_num = 0; DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: " - "[0]:0x%08x [1]:0x%08x " - "[2]:0x%08x [3]:0x%08x\n", - sig0 & HW_PRTY_ASSERT_SET_0, - sig1 & HW_PRTY_ASSERT_SET_1, - sig2 & HW_PRTY_ASSERT_SET_2, - sig3 & HW_PRTY_ASSERT_SET_3); - printk(KERN_ERR"%s: Parity errors detected in blocks: ", - bp->dev->name); - par_num = bnx2x_print_blocks_with_parity0( - sig0 & HW_PRTY_ASSERT_SET_0, par_num); - par_num = bnx2x_print_blocks_with_parity1( - sig1 & HW_PRTY_ASSERT_SET_1, par_num); - par_num = bnx2x_print_blocks_with_parity2( - sig2 & HW_PRTY_ASSERT_SET_2, par_num); - par_num = bnx2x_print_blocks_with_parity3( - sig3 & HW_PRTY_ASSERT_SET_3, par_num); - printk("\n"); + "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x " + "[4]:0x%08x\n", + sig[0] & HW_PRTY_ASSERT_SET_0, + sig[1] & HW_PRTY_ASSERT_SET_1, + sig[2] & HW_PRTY_ASSERT_SET_2, + sig[3] & HW_PRTY_ASSERT_SET_3, + sig[4] & HW_PRTY_ASSERT_SET_4); + if (print) + netdev_err(bp->dev, + "Parity errors detected in blocks: "); + par_num = bnx2x_check_blocks_with_parity0( + sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); + par_num = bnx2x_check_blocks_with_parity1( + sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); + par_num = bnx2x_check_blocks_with_parity2( + sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); + par_num = bnx2x_check_blocks_with_parity3( + sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); + par_num = bnx2x_check_blocks_with_parity4( + sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); + + if (print) + pr_cont("\n"); + return true; } else return false; } -bool bnx2x_chk_parity_attn(struct bnx2x *bp) +/** + * bnx2x_chk_parity_attn - checks for parity attentions. 
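All of the parity checkers above share one loop shape: walk the signature word bit by bit, name the block only when printing was requested, clear each bit as it is handled, and thread the running block count into the next checker. A stripped-down model of that walk; the block-name table is illustrative, not the real AEU layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const char *block_name[32] = {
	[0] = "BRB", [1] = "PARSER", [2] = "TSDM",	/* example entries only */
};

static int check_parity_word(uint32_t sig, int par_num, bool print)
{
	int i;

	for (i = 0; sig; i++) {
		uint32_t cur_bit = 1u << i;

		if (sig & cur_bit) {
			if (print && block_name[i])
				printf("%s%s", par_num++ ? ", " : "",
				       block_name[i]);
			sig &= ~cur_bit;	/* clear the bit, as the driver does */
		}
	}
	return par_num;		/* threaded into the next checker */
}

int main(void)
{
	int n = check_parity_word(0x7, 0, true);

	printf("\n%d block(s)\n", n);
	return 0;
}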
+ * + * @bp: driver handle + * @global: true if there was a global attention + * @print: show parity attention in syslog + */ +bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print) { - struct attn_route attn; + struct attn_route attn = { {0} }; int port = BP_PORT(bp); attn.sig[0] = REG_RD(bp, @@ -3457,8 +4045,12 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp) MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); - return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2], - attn.sig[3]); + if (!CHIP_IS_E1x(bp)) + attn.sig[4] = REG_RD(bp, + MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + + port*4); + + return bnx2x_parity_attn(bp, global, print, attn.sig); } @@ -3537,21 +4129,25 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) u32 reg_addr; u32 val; u32 aeu_mask; + bool global = false; /* need to take HW lock because MCP or other port might also try to handle this event */ bnx2x_acquire_alr(bp); - if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) { + if (bnx2x_chk_parity_attn(bp, &global, true)) { +#ifndef BNX2X_STOP_ON_ERROR bp->recovery_state = BNX2X_RECOVERY_INIT; - bnx2x_set_reset_in_progress(bp); - schedule_delayed_work(&bp->reset_task, 0); + schedule_delayed_work(&bp->sp_rtnl_task, 0); /* Disable HW interrupts */ bnx2x_int_disable(bp); - bnx2x_release_alr(bp); /* In case of parity errors don't handle attentions so that * other function would "see" parity errors. */ +#else + bnx2x_panic(); +#endif + bnx2x_release_alr(bp); return; } @@ -3559,7 +4155,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) attn.sig[4] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); else @@ -3655,6 +4251,15 @@ static void bnx2x_attn_int(struct bnx2x *bp) bnx2x_attn_int_deasserted(bp, deasserted); } +void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment, + u16 index, u8 op, u8 update) +{ + u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; + + bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update, + igu_addr); +} + static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) { /* No memory barriers */ @@ -3666,6 +4271,8 @@ static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, union event_ring_elem *elem) { + u8 err = elem->message.error; + if (!bp->cnic_eth_dev.starting_cid || (cid < bp->cnic_eth_dev.starting_cid && cid != bp->cnic_eth_dev.iscsi_l2_cid)) @@ -3673,16 +4280,123 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid); - if (unlikely(elem->message.data.cfc_del_event.error)) { + if (unlikely(err)) { + BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n", cid); bnx2x_panic_dump(bp); } - bnx2x_cnic_cfc_comp(bp, cid); + bnx2x_cnic_cfc_comp(bp, cid, err); return 0; } #endif +static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) +{ + struct bnx2x_mcast_ramrod_params rparam; + int rc; + + memset(&rparam, 0, sizeof(rparam)); + + rparam.mcast_obj = &bp->mcast_obj; + + netif_addr_lock_bh(bp->dev); + + /* Clear pending state for the last command */ + bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw); + + /* If there are pending mcast commands - send them */ + if 
(bp->mcast_obj.check_pending(&bp->mcast_obj)) { + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); + if (rc < 0) + BNX2X_ERR("Failed to send pending mcast commands: %d\n", + rc); + } + + netif_addr_unlock_bh(bp->dev); +} + +static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, + union event_ring_elem *elem) +{ + unsigned long ramrod_flags = 0; + int rc = 0; + u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; + struct bnx2x_vlan_mac_obj *vlan_mac_obj; + + /* Always push next commands out, don't wait here */ + __set_bit(RAMROD_CONT, &ramrod_flags); + + switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { + case BNX2X_FILTER_MAC_PENDING: +#ifdef BCM_CNIC + if (cid == BNX2X_ISCSI_ETH_CID) + vlan_mac_obj = &bp->iscsi_l2_mac_obj; + else +#endif + vlan_mac_obj = &bp->fp[cid].mac_obj; + + break; + + case BNX2X_FILTER_MCAST_PENDING: + /* This is only relevant for 57710 where multicast MACs are + * configured as unicast MACs using the same ramrod. + */ + bnx2x_handle_mcast_eqe(bp); + return; + default: + BNX2X_ERR("Unsupported classification command: %d\n", + elem->message.data.eth_event.echo); + return; + } + + rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags); + + if (rc < 0) + BNX2X_ERR("Failed to schedule new commands: %d\n", rc); + else if (rc > 0) + DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n"); + +} + +#ifdef BCM_CNIC +static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); +#endif + +static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) +{ + netif_addr_lock_bh(bp->dev); + + clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + /* Send rx_mode command again if was requested */ + if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) + bnx2x_set_storm_rx_mode(bp); +#ifdef BCM_CNIC + else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, + &bp->sp_state)) + bnx2x_set_iscsi_eth_rx_mode(bp, true); + else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, + &bp->sp_state)) + bnx2x_set_iscsi_eth_rx_mode(bp, false); +#endif + + netif_addr_unlock_bh(bp->dev); +} + +static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( + struct bnx2x *bp, u32 cid) +{ + DP(BNX2X_MSG_SP, "retrieving fp from cid %d", cid); +#ifdef BCM_CNIC + if (cid == BNX2X_FCOE_ETH_CID) + return &bnx2x_fcoe(bp, q_obj); + else +#endif + return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); +} + static void bnx2x_eq_int(struct bnx2x *bp) { u16 hw_cons, sw_cons, sw_prod; @@ -3690,6 +4404,9 @@ static void bnx2x_eq_int(struct bnx2x *bp) u32 cid; u8 opcode; int spqe_cnt = 0; + struct bnx2x_queue_sp_obj *q_obj; + struct bnx2x_func_sp_obj *f_obj = &bp->func_obj; + struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw; hw_cons = le16_to_cpu(*bp->eq_cons_sb); @@ -3708,7 +4425,7 @@ static void bnx2x_eq_int(struct bnx2x *bp) sw_cons = bp->eq_cons; sw_prod = bp->eq_prod; - DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n", + DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n", hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); for (; sw_cons != hw_cons; @@ -3724,9 +4441,10 @@ static void bnx2x_eq_int(struct bnx2x *bp) /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: - DP(NETIF_MSG_TIMER, "got statistics comp event\n"); + DP(NETIF_MSG_TIMER, "got statistics comp event %d\n", + bp->stats_comp++); /* nothing to do with stats comp */ - continue; + goto next_spqe; case EVENT_RING_OPCODE_CFC_DEL: /* handle according to cid 
range */ @@ -3734,60 +4452,100 @@ static void bnx2x_eq_int(struct bnx2x *bp) * we may want to verify here that the bp state is * HALTING */ - DP(NETIF_MSG_IFDOWN, + DP(BNX2X_MSG_SP, "got delete ramrod for MULTI[%d]\n", cid); #ifdef BCM_CNIC if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) goto next_spqe; - if (cid == BNX2X_FCOE_ETH_CID) - bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED; - else #endif - bnx2x_fp(bp, cid, state) = - BNX2X_FP_STATE_CLOSED; + q_obj = bnx2x_cid_to_q_obj(bp, cid); + + if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) + break; + + goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: - DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n"); + DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_STOP)) + break; bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED); goto next_spqe; + case EVENT_RING_OPCODE_START_TRAFFIC: - DP(NETIF_MSG_IFUP, "got START TRAFFIC\n"); + DP(BNX2X_MSG_SP, "got START TRAFFIC\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_TX_START)) + break; bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); goto next_spqe; + case EVENT_RING_OPCODE_FUNCTION_START: + DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START)) + break; + + goto next_spqe; + + case EVENT_RING_OPCODE_FUNCTION_STOP: + DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n"); + if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP)) + break; + + goto next_spqe; } switch (opcode | bp->state) { - case (EVENT_RING_OPCODE_FUNCTION_START | + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAIT4_PORT): - DP(NETIF_MSG_IFUP, "got setup ramrod\n"); - bp->state = BNX2X_STATE_FUNC_STARTED; + cid = elem->message.data.eth_event.echo & + BNX2X_SWCID_MASK; + DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. 
CID %d\n", + cid); + rss_raw->clear_pending(rss_raw); break; - case (EVENT_RING_OPCODE_FUNCTION_STOP | + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_SET_MAC | + BNX2X_STATE_CLOSING_WAIT4_HALT): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(NETIF_MSG_IFDOWN, "got halt ramrod\n"); - bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; + DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n"); + bnx2x_handle_classification_eqe(bp, elem); break; - case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): - case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): - DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); - if (elem->message.data.set_mac_event.echo) - bp->set_mac_pending = 0; + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_MULTICAST_RULES | + BNX2X_STATE_CLOSING_WAIT4_HALT): + DP(BNX2X_MSG_SP, "got mcast ramrod\n"); + bnx2x_handle_mcast_eqe(bp); break; - case (EVENT_RING_OPCODE_SET_MAC | + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_OPEN): + case (EVENT_RING_OPCODE_FILTERS_RULES | + BNX2X_STATE_DIAG): + case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAIT4_HALT): - DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); - if (elem->message.data.set_mac_event.echo) - bp->set_mac_pending = 0; + DP(BNX2X_MSG_SP, "got rx_mode ramrod\n"); + bnx2x_handle_rx_mode_eqe(bp); break; default: /* unknown event log error and continue */ - BNX2X_ERR("Unknown EQ event %d\n", - elem->message.opcode); + BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n", + elem->message.opcode, bp->state); } next_spqe: spqe_cnt++; @@ -3810,12 +4568,6 @@ static void bnx2x_sp_task(struct work_struct *work) struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); u16 status; - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return; - } - status = bnx2x_update_dsb_idx(bp); /* if (status == 0) */ /* BNX2X_ERR("spurious slowpath interrupt!\n"); */ @@ -3834,8 +4586,15 @@ static void bnx2x_sp_task(struct work_struct *work) struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); if ((!NO_FCOE(bp)) && - (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) + (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { + /* + * Prevent local bottom-halves from running as + * we are going to change the local NAPI list. 
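bnx2x_eq_int() above folds the ramrod opcode and the driver state into a single switch key, so every opcode/state pair gets its own case label. A minimal model of that dispatch; the constants are illustrative placeholders occupying disjoint bit ranges, not the firmware's values.

#include <stdio.h>

#define OPC_SET_MAC	0x10	/* opcode bits in the high nibble */
#define OPC_MCAST_RULES	0x20
#define STATE_OPEN	0x01	/* state bits in the low nibble */
#define STATE_DIAG	0x02

static void eq_dispatch(int opcode, int state)
{
	switch (opcode | state) {
	case OPC_SET_MAC | STATE_OPEN:
	case OPC_SET_MAC | STATE_DIAG:
		puts("handle (un)set mac completion");
		break;
	case OPC_MCAST_RULES | STATE_OPEN:
		puts("handle mcast completion");
		break;
	default:
		printf("unknown event %#x in state %#x\n", opcode, state);
	}
}

int main(void)
{
	eq_dispatch(OPC_SET_MAC, STATE_DIAG);
	return 0;
}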
+ */ + local_bh_disable(); napi_schedule(&bnx2x_fcoe(bp, napi)); + local_bh_enable(); + } #endif /* Handle EQ completions */ bnx2x_eq_int(bp); @@ -3859,12 +4618,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) struct net_device *dev = dev_instance; struct bnx2x *bp = netdev_priv(dev); - /* Return here if interrupt is disabled */ - if (unlikely(atomic_read(&bp->intr_sem) != 0)) { - DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); - return IRQ_HANDLED; - } - bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); @@ -3891,20 +4644,27 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) /* end of slow path */ + +void bnx2x_drv_pulse(struct bnx2x *bp) +{ + SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb, + bp->fw_drv_pulse_wr_seq); +} + + static void bnx2x_timer(unsigned long data) { + u8 cos; struct bnx2x *bp = (struct bnx2x *) data; if (!netif_running(bp->dev)) return; - if (atomic_read(&bp->intr_sem) != 0) - goto timer_restart; - if (poll) { struct bnx2x_fastpath *fp = &bp->fp[0]; - bnx2x_tx_int(fp); + for_each_cos_in_tx_queue(fp, cos) + bnx2x_tx_int(bp, &fp->txdata[cos]); bnx2x_rx_int(fp, 1000); } @@ -3917,7 +4677,7 @@ static void bnx2x_timer(unsigned long data) bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; /* TBD - add SYSTEM_TIME */ drv_pulse = bp->fw_drv_pulse_wr_seq; - SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse); + bnx2x_drv_pulse(bp); mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); @@ -3935,7 +4695,6 @@ static void bnx2x_timer(unsigned long data) if (bp->state == BNX2X_STATE_OPEN) bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); -timer_restart: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -3981,18 +4740,16 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) struct hc_status_block_data_e1x sb_data_e1x; /* disable the function first */ - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); - sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED; - sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED; + sb_data_e2.common.state = SB_DISABLED; sb_data_e2.common.p_func.vf_valid = false; sb_data_p = (u32 *)&sb_data_e2; data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); - sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED; - sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED; + sb_data_e1x.common.state = SB_DISABLED; sb_data_e1x.common.p_func.vf_valid = false; sb_data_p = (u32 *)&sb_data_e1x; data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32); @@ -4026,8 +4783,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); - sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED; - sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED; + sp_sb_data.state = SB_DISABLED; sp_sb_data.p_func.vf_valid = false; bnx2x_wr_sp_sb_data(bp, &sp_sb_data); @@ -4070,8 +4826,9 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, bnx2x_zero_fp_sb(bp, fw_sb_id); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); + sb_data_e2.common.state = SB_ENABLED; sb_data_e2.common.p_func.pf_id = BP_FUNC(bp); sb_data_e2.common.p_func.vf_id = vfid; sb_data_e2.common.p_func.vf_valid = vf_valid; @@ -4085,6 +4842,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, 
dma_addr_t mapping, int vfid, } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); + sb_data_e1x.common.state = SB_ENABLED; sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp); sb_data_e1x.common.p_func.vf_id = 0xff; sb_data_e1x.common.p_func.vf_valid = false; @@ -4108,25 +4866,20 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size); } -static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id, - u8 sb_index, u8 disable, u16 usec) -{ - int port = BP_PORT(bp); - u8 ticks = usec / BNX2X_BTR; - - storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks); - - disable = disable ? 1 : (usec ? 0 : 1); - storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable); -} - -static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id, +static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id, u16 tx_usec, u16 rx_usec) { - bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX, + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS, false, rx_usec); - bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX, - false, tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS0, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS1, false, + tx_usec); + bnx2x_update_coalesce_sb_index(bp, fw_sb_id, + HC_INDEX_ETH_TX_CQ_CONS_COS2, false, + tx_usec); } static void bnx2x_init_def_sb(struct bnx2x *bp) @@ -4167,7 +4920,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) bp->attn_group[index].sig[sindex] = REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index); - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) /* * enable5 is separate from the rest of the registers, * and therefore the address skip is 4 @@ -4185,7 +4938,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) REG_WR(bp, reg_offset, U64_LO(section)); REG_WR(bp, reg_offset + 4, U64_HI(section)); - } else if (CHIP_IS_E2(bp)) { + } else if (!CHIP_IS_E1x(bp)) { REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); } @@ -4195,6 +4948,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) bnx2x_zero_sp_sb(bp); + sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); sp_sb_data.igu_sb_id = igu_sp_sb_index; @@ -4205,9 +4959,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) bnx2x_wr_sp_sb_data(bp, &sp_sb_data); - bp->stats_pending = 0; - bp->set_mac_pending = 0; - bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); } @@ -4253,146 +5004,129 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } -void bnx2x_push_indir_table(struct bnx2x *bp) + +/* called with netif_addr_lock_bh() */ +void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id, + unsigned long rx_mode_flags, + unsigned long rx_accept_flags, + unsigned long tx_accept_flags, + unsigned long ramrod_flags) { - int func = BP_FUNC(bp); - int i; + struct bnx2x_rx_mode_ramrod_params ramrod_param; + int rc; - if (bp->multi_mode == ETH_RSS_MODE_DISABLED) - return; + memset(&ramrod_param, 0, sizeof(ramrod_param)); - for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) - REG_WR8(bp, BAR_TSTRORM_INTMEM + - TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, - bp->fp->cl_id + bp->rx_indir_table[i]); -} + /* Prepare ramrod parameters */ + ramrod_param.cid = 0; + ramrod_param.cl_id = cl_id; + 
ramrod_param.rx_mode_obj = &bp->rx_mode_obj; + ramrod_param.func_id = BP_FUNC(bp); -static void bnx2x_init_ind_table(struct bnx2x *bp) -{ - int i; + ramrod_param.pstate = &bp->sp_state; + ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING; + + ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata); + ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata); + + set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state); + + ramrod_param.ramrod_flags = ramrod_flags; + ramrod_param.rx_mode_flags = rx_mode_flags; - for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) - bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp); + ramrod_param.rx_accept_flags = rx_accept_flags; + ramrod_param.tx_accept_flags = tx_accept_flags; - bnx2x_push_indir_table(bp); + rc = bnx2x_config_rx_mode(bp, &ramrod_param); + if (rc < 0) { + BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode); + return; + } } +/* called with netif_addr_lock_bh() */ void bnx2x_set_storm_rx_mode(struct bnx2x *bp) { - int mode = bp->rx_mode; - int port = BP_PORT(bp); - u16 cl_id; - u32 def_q_filters = 0; + unsigned long rx_mode_flags = 0, ramrod_flags = 0; + unsigned long rx_accept_flags = 0, tx_accept_flags = 0; - /* All but management unicast packets should pass to the host as well */ - u32 llh_mask = - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST | - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST | - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN | - NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN; - - switch (mode) { - case BNX2X_RX_MODE_NONE: /* no Rx */ - def_q_filters = BNX2X_ACCEPT_NONE; #ifdef BCM_CNIC - if (!NO_FCOE(bp)) { - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); - } -#endif - break; + if (!NO_FCOE(bp)) - case BNX2X_RX_MODE_NORMAL: - def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | - BNX2X_ACCEPT_MULTICAST; -#ifdef BCM_CNIC - if (!NO_FCOE(bp)) { - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, - BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_MULTICAST); - } + /* Configure rx_mode of FCoE Queue */ + __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); #endif - break; - case BNX2X_RX_MODE_ALLMULTI: - def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST | - BNX2X_ACCEPT_ALL_MULTICAST; -#ifdef BCM_CNIC + switch (bp->rx_mode) { + case BNX2X_RX_MODE_NONE: /* - * Prevent duplication of multicast packets by configuring FCoE - * L2 Client to receive only matched unicast frames. + * 'drop all' supersedes any accept flags that may have been + * passed to the function. 
 */ - if (!NO_FCOE(bp)) { - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, - BNX2X_ACCEPT_UNICAST); - } -#endif break; + case BNX2X_RX_MODE_NORMAL: + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + break; + case BNX2X_RX_MODE_ALLMULTI: + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + break; case BNX2X_RX_MODE_PROMISC: - def_q_filters |= BNX2X_PROMISCUOUS_MODE; -#ifdef BCM_CNIC - /* - * Prevent packets duplication by configuring DROP_ALL for FCoE - * L2 Client. + /* According to the definition of SI mode, an interface in promisc mode + * should receive matched and unmatched (in resolution of port) + * unicast packets. */ - if (!NO_FCOE(bp)) { - cl_id = bnx2x_fcoe(bp, cl_id); - bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); - } -#endif - /* pass management unicast packets as well */ - llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; - break; + __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags); + + /* internal switching mode */ + __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags); + __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags); + + if (IS_MF_SI(bp)) + __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags); + else + __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags); - default: - BNX2X_ERR("BAD rx mode (%d)\n", mode); break; + default: + BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode); + return; } - cl_id = BP_L_ID(bp); - bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters); - - REG_WR(bp, - (port ? NIG_REG_LLH1_BRB1_DRV_MASK : - NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask); + if (bp->rx_mode != BNX2X_RX_MODE_NONE) { + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags); + __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags); + } - DP(NETIF_MSG_IFUP, "rx mode %d\n" - "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" - "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n" - "unmatched_ucast 0x%x\n", mode, - bp->mac_filters.ucast_drop_all, - bp->mac_filters.mcast_drop_all, - bp->mac_filters.bcast_drop_all, - bp->mac_filters.ucast_accept_all, - bp->mac_filters.mcast_accept_all, - bp->mac_filters.bcast_accept_all, - bp->mac_filters.unmatched_unicast - ); + __set_bit(RAMROD_RX, &ramrod_flags); + __set_bit(RAMROD_TX, &ramrod_flags); - storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); + bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags, + tx_accept_flags, ramrod_flags); } static void bnx2x_init_internal_common(struct bnx2x *bp) { int i; - if (!CHIP_IS_E1(bp)) { - - /* xstorm needs to know whether to add ovlan to packets or not, - * in switch-independent we'll write 0 to here... 
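The rx-mode switch above is, in effect, a table from bp->rx_mode to two accept-flag sets, one for RX filtering and one for the internal Tx-switching direction. A condensed, compilable restatement of that mapping (plain bitmasks stand in for the driver's __set_bit() on unsigned long bitmaps; the flag values are illustrative, not the driver's enum):

#include <stdio.h>

/* Illustrative flag bits; the driver uses enum values with __set_bit(). */
enum {
	ACC_UNICAST       = 1 << 0,
	ACC_ALL_UNICAST   = 1 << 1,
	ACC_MULTICAST     = 1 << 2,
	ACC_ALL_MULTICAST = 1 << 3,
	ACC_BROADCAST     = 1 << 4,
	ACC_UNMATCHED     = 1 << 5,
	ACC_ANY_VLAN      = 1 << 6,
};

enum rx_mode { RX_NONE, RX_NORMAL, RX_ALLMULTI, RX_PROMISC };

static int rx_mode_to_flags(enum rx_mode mode, int mf_si,
			    unsigned int *rx, unsigned int *tx)
{
	*rx = *tx = 0;
	switch (mode) {
	case RX_NONE:		/* 'drop all' overrides any accept flags */
		break;
	case RX_NORMAL:
		*rx = ACC_UNICAST | ACC_MULTICAST | ACC_BROADCAST;
		*tx = *rx;	/* internal switching mirrors RX */
		break;
	case RX_ALLMULTI:
		*rx = ACC_UNICAST | ACC_ALL_MULTICAST | ACC_BROADCAST;
		*tx = *rx;
		break;
	case RX_PROMISC:
		*rx = ACC_UNMATCHED | ACC_UNICAST | ACC_ALL_MULTICAST |
		      ACC_BROADCAST;
		/* on TX, SI mode forwards all unicast, SD only matched */
		*tx = ACC_ALL_MULTICAST | ACC_BROADCAST |
		      (mf_si ? ACC_ALL_UNICAST : ACC_UNICAST);
		break;
	default:
		return -1;
	}
	if (mode != RX_NONE) {	/* any-VLAN applies to every live mode */
		*rx |= ACC_ANY_VLAN;
		*tx |= ACC_ANY_VLAN;
	}
	return 0;
}

int main(void)
{
	unsigned int rx, tx;

	rx_mode_to_flags(RX_PROMISC, 1 /* switch-independent MF */, &rx, &tx);
	printf("promisc/SI: rx=0x%x tx=0x%x\n", rx, tx);
	return 0;
}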
*/ - REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, - bp->mf_mode); - REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET, - bp->mf_mode); - REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET, - bp->mf_mode); - REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET, - bp->mf_mode); - } - if (IS_MF_SI(bp)) /* * In switch independent mode, the TSTORM needs to accept @@ -4401,25 +5135,22 @@ static void bnx2x_init_internal_common(struct bnx2x *bp) */ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2); + else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */ + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0); /* Zero this manually as its initialization is currently missing in the initTool */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + i * 4, 0); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET, CHIP_INT_MODE_IS_BC(bp) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE); } } -static void bnx2x_init_internal_port(struct bnx2x *bp) -{ - /* port */ - bnx2x_dcb_init_intmem_pfc(bp); -} - static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) { switch (load_code) { @@ -4429,7 +5160,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) /* no break */ case FW_MSG_CODE_DRV_LOAD_PORT: - bnx2x_init_internal_port(bp); + /* nothing to do */ /* no break */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: @@ -4443,31 +5174,70 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) } } -static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx) +static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) { - struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; +} - fp->state = BNX2X_FP_STATE_CLOSED; +static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) +{ + return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; +} + +static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) +{ + if (CHIP_IS_E1x(fp->bp)) + return BP_L_ID(fp->bp) + fp->index; + else /* We want Client ID to be the same as IGU SB ID for 57712 */ + return bnx2x_fp_igu_sb_id(fp); +} + +static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) +{ + struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 cos; + unsigned long q_type = 0; + u32 cids[BNX2X_MULTI_TX_COS] = { 0 }; fp->cid = fp_idx; - fp->cl_id = BP_L_ID(bp) + fp_idx; - fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE; - fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE; + fp->cl_id = bnx2x_fp_cl_id(fp); + fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp); + fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp); /* qZone id equals to FW (per path) client id */ - fp->cl_qzone_id = fp->cl_id + - BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 : - ETH_MAX_RX_CLIENTS_E1H); + fp->cl_qzone_id = bnx2x_fp_qzone_id(fp); + /* init shortcut */ - fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ? 
- USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) : - USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id); + fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp); /* Setup SB indicies */ fp->rx_cons_sb = BNX2X_RX_SB_INDEX; - fp->tx_cons_sb = BNX2X_TX_SB_INDEX; + + /* Configure Queue State object */ + __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); + __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); + + BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS); + + /* init tx data */ + for_each_cos_in_tx_queue(fp, cos) { + bnx2x_init_txdata(bp, &fp->txdata[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos), + FP_COS_TO_TXQ(fp, cos), + BNX2X_TX_SB_INDEX_BASE + cos); + cids[cos] = fp->txdata[cos].cid; + } + + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_sp_mapping(bp, q_rdata), q_type); + + /** + * Configure classification DBs: Always enable Tx switching + */ + bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX); DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) " "cl_id %d fw_sb %d igu_sb %d\n", - fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id, + fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false, fp->fw_sb_id, fp->igu_sb_id); @@ -4480,17 +5250,21 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) int i; for_each_eth_queue(bp, i) - bnx2x_init_fp_sb(bp, i); + bnx2x_init_eth_fp(bp, i); #ifdef BCM_CNIC if (!NO_FCOE(bp)) bnx2x_init_fcoe_fp(bp); bnx2x_init_sb(bp, bp->cnic_sb_mapping, BNX2X_VF_ID_INVALID, false, - CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp)); + bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); #endif + /* Initialize MOD_ABS interrupts */ + bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, + bp->common.shmem_base, bp->common.shmem2_base, + BP_PORT(bp)); /* ensure status block indices were read */ rmb(); @@ -4502,12 +5276,8 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) bnx2x_init_eq_ring(bp); bnx2x_init_internal(bp, load_code); bnx2x_pf_init(bp); - bnx2x_init_ind_table(bp); bnx2x_stats_init(bp); - /* At this point, we are ready for interrupts */ - atomic_set(&bp->intr_sem, 0); - /* flush all before enabling interrupts */ mb(); mmiowb(); @@ -4537,8 +5307,7 @@ static int bnx2x_gunzip_init(struct bnx2x *bp) if (bp->strm == NULL) goto gunzip_nomem2; - bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), - GFP_KERNEL); + bp->strm->workspace = vmalloc(zlib_inflate_workspacesize()); if (bp->strm->workspace == NULL) goto gunzip_nomem3; @@ -4562,7 +5331,7 @@ gunzip_nomem1: static void bnx2x_gunzip_end(struct bnx2x *bp) { if (bp->strm) { - kfree(bp->strm->workspace); + vfree(bp->strm->workspace); kfree(bp->strm); bp->strm = NULL; } @@ -4711,8 +5480,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) msleep(50); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); msleep(50); - bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); DP(NETIF_MSG_HW, "part2\n"); @@ -4776,8 +5545,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) msleep(50); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); msleep(50); - bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); #ifndef BCM_CNIC /* set NIC mode */ REG_WR(bp, PRS_REG_NIC_MODE, 
1); @@ -4797,7 +5566,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp) static void bnx2x_enable_blocks_attention(struct bnx2x *bp) { REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); else REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0); @@ -4831,7 +5600,7 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) if (CHIP_REV_IS_FPGA(bp)) REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); - else if (CHIP_IS_E2(bp)) + else if (!CHIP_IS_E1x(bp)) REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT @@ -4844,7 +5613,11 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */ -/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */ + + if (!CHIP_IS_E1x(bp)) + /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ + REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); + REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ @@ -4853,10 +5626,24 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp) static void bnx2x_reset_common(struct bnx2x *bp) { + u32 val = 0x1400; + /* reset_common */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0xd3ffff7f); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403); + + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val); +} + +static void bnx2x_setup_dmae(struct bnx2x *bp) +{ + bp->dmae_ready = 0; + spin_lock_init(&bp->dmae_lock); } static void bnx2x_init_pxp(struct bnx2x *bp) @@ -4865,7 +5652,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp) int r_order, w_order; pci_read_config_word(bp->pdev, - bp->pcie_cap + PCI_EXP_DEVCTL, &devctl); + pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl); DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl); w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); if (bp->mrrs == -1) @@ -4973,7 +5760,7 @@ static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num) DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num); } -static void bnx2x_pf_disable(struct bnx2x *bp) +void bnx2x_pf_disable(struct bnx2x *bp) { u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); val &= ~IGU_PF_CONF_FUNC_EN; @@ -4983,22 +5770,48 @@ static void bnx2x_pf_disable(struct bnx2x *bp) REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); } -static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) +static inline void bnx2x__common_init_phy(struct bnx2x *bp) +{ + u32 shmem_base[2], shmem2_base[2]; + shmem_base[0] = bp->common.shmem_base; + shmem2_base[0] = bp->common.shmem2_base; + if (!CHIP_IS_E1x(bp)) { + shmem_base[1] = + SHMEM2_RD(bp, other_shmem_base_addr); + shmem2_base[1] = + SHMEM2_RD(bp, other_shmem2_base_addr); + } + bnx2x_acquire_phy_lock(bp); + bnx2x_common_init_phy(bp, shmem_base, shmem2_base, + bp->common.chip_id); + bnx2x_release_phy_lock(bp); +} + +/** + * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 
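bnx2x_init_pxp() in the hunk above now finds the PCIe capability with pci_pcie_cap() before reading Device Control; the shifts that follow are standard PCIe field decoding, where both the max-payload and max-read-request fields encode a size of 128 << n bytes. A user-space sketch of that decoding (the example devctl value is made up):

#include <stdio.h>

#define PCI_EXP_DEVCTL_PAYLOAD 0x00e0	/* bits 7:5, max payload size */
#define PCI_EXP_DEVCTL_READRQ  0x7000	/* bits 14:12, max read request */

int main(void)
{
	unsigned short devctl = 0x2810;	/* example config-space value */
	unsigned int w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	unsigned int r_order = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;

	/* both fields encode 128 << n bytes */
	printf("max payload: %u bytes\n", 128u << w_order);      /* 128 */
	printf("max read request: %u bytes\n", 128u << r_order); /* 512 */
	return 0;
}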
+ * + * @bp: driver handle + */ +static int bnx2x_init_hw_common(struct bnx2x *bp) { - u32 val, i; + u32 val; DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); bnx2x_reset_common(bp); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); - bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE); - if (!CHIP_IS_E1(bp)) - REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp)); + val = 0xfffc; + if (CHIP_IS_E3(bp)) { + val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); + + bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); - if (CHIP_IS_E2(bp)) { - u8 fid; + if (!CHIP_IS_E1x(bp)) { + u8 abs_func_id; /** * 4-port mode or 2-port mode we need to turn off master-enable @@ -5007,29 +5820,30 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) * for all functions on the given path, this means 0,2,4,6 for * path 0 and 1,3,5,7 for path 1 */ - for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) { - if (fid == BP_ABS_FUNC(bp)) { + for (abs_func_id = BP_PATH(bp); + abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) { + if (abs_func_id == BP_ABS_FUNC(bp)) { REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); continue; } - bnx2x_pretend_func(bp, fid); + bnx2x_pretend_func(bp, abs_func_id); /* clear pf enable */ bnx2x_pf_disable(bp); bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); } } - bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON); if (CHIP_IS_E1(bp)) { /* enable HW interrupt from PXP on USDM overflow bit 16 on INT_MASK_0 */ REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); } - bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); bnx2x_init_pxp(bp); #ifdef __BIG_ENDIAN @@ -5072,7 +5886,69 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) * This needs to be done by the first PF that is loaded in a path * (i.e. common phase) */ - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { +/* In E2 there is a bug in the timers block that can cause function 6 / 7 + * (i.e. vnic3) to start even if it is marked as "scan-off". + * This occurs when a different function (func2,3) is being marked + * as "scan-off". Real-life scenario for example: if a driver is being + * load-unloaded while func6,7 are down. This will cause the timer to access + * the ilt, translate to a logical address and send a request to read/write. + * Since the ilt for the function that is down is not valid, this will cause + * a translation error which is unrecoverable. + * The Workaround is intended to make sure that when this happens nothing fatal + * will occur. The workaround: + * 1. First PF driver which loads on a path will: + * a. After taking the chip out of reset, by using pretend, + * it will write "0" to the following registers of + * the other vnics. + * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); + * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); + * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); + * And for itself it will write '1' to + * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable + * dmae-operations (writing to pram for example.) + * note: can be done for only function 6,7 but cleaner this + * way. + * b. Write zero+valid to the entire ILT. + * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of + * VNIC3 (of that port). The range allocated will be the + * entire ILT. This is needed to prevent ILT range error. + * 2. 
Any PF driver load flow: + * a. ILT update with the physical addresses of the allocated + * logical pages. + * b. Wait 20msec. - note that this timeout is needed to make + * sure there are no requests in one of the PXP internal + * queues with "old" ILT addresses. + * c. PF enable in the PGLC. + * d. Clear the was_error of the PF in the PGLC. (could have + * occurred while driver was down) + * e. PF enable in the CFC (WEAK + STRONG) + * f. Timers scan enable + * 3. PF driver unload flow: + * a. Clear the Timers scan_en. + * b. Polling for scan_on=0 for that PF. + * c. Clear the PF enable bit in the PXP. + * d. Clear the PF enable in the CFC (WEAK + STRONG) + * e. Write zero+valid to all ILT entries (The valid bit must + * stay set) + * f. If this is VNIC 3 of a port then also init + * first_timers_ilt_entry to zero and last_timers_ilt_entry + * to the last entry in the ILT. + * + * Notes: + * Currently the PF error in the PGLC is non-recoverable. + * In the future there will be a recovery routine for this error. + * Currently attention is masked. + * Having an MCP lock on the load/unload process does not guarantee that + * there is no Timer disable during Func6/7 enable. This is because the + * Timers scan is currently being cleared by the MCP on FLR. + * Step 2.d can be done only for PF6/7 and the driver can also check if + * there is an error before clearing it. But the flow above is simpler and + * more general. + * All ILT entries are written by zero+valid and not just PF6/7 + * ILT entries since in the future the ILT entries allocation for + * PF-s might be dynamic. + */ struct ilt_client_info ilt_cli; struct bnx2x_ilt ilt; memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); @@ -5086,7 +5962,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) /* Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th - * vnic (this code assumes existence of the vnic) + * vnic (this code assumes existence of the vnic) * * both steps performed by call to bnx2x_ilt_client_init_op() * with dummy TM client @@ -5107,12 +5983,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { int factor = CHIP_REV_IS_EMUL(bp) ? 1000 : (CHIP_REV_IS_FPGA(bp) ? 400 : 0); - bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON); - bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON); /* let the HW do it's magic ... 
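Step 1.a of the workaround above leans on the GRC 'pretend' mechanism already used by the loop earlier in this patch: point the chip's register window at another function, neuter it, then point it back. A condensed sketch of that sequence with the driver accessors stubbed out (reg_wr() and pretend() are stand-ins for REG_WR() and bnx2x_pretend_func(); E2_FUNC_MAX of 4 is assumed):

/* Stand-ins for the driver's accessors; illustration only. */
extern void reg_wr(unsigned long reg, unsigned int val);
extern void pretend(unsigned int abs_func_id);
extern const unsigned long PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER;
extern const unsigned long CFC_REG_WEAK_ENABLE_PF;
extern const unsigned long CFC_REG_STRONG_ENABLE_PF;

#define E2_FUNC_MAX 4	/* assumed functions per path */

/* Walk the other functions on this path (0,2,4,6 or 1,3,5,7),
 * disable each via pretend, then re-enable master for ourselves. */
static void quiesce_path_siblings(unsigned int path, unsigned int my_abs_func)
{
	unsigned int f;

	for (f = path; f < E2_FUNC_MAX * 2; f += 2) {
		if (f == my_abs_func) {
			/* allow our own DMAE operations */
			reg_wr(PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
			continue;
		}
		pretend(f);		/* GRC writes now land on function f */
		reg_wr(PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
		reg_wr(CFC_REG_WEAK_ENABLE_PF, 0);
		reg_wr(CFC_REG_STRONG_ENABLE_PF, 0);
		pretend(my_abs_func);	/* undo the pretend */
	}
}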
*/ do { @@ -5126,26 +6002,27 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) } } - bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON); /* clean the DMAE memory */ bp->dmae_ready = 1; - bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8); + bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1); + + bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON); - bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON); + + bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON); bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3); bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3); bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3); bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); - bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON); - if (CHIP_MODE_IS_4_PORT(bp)) - bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE); /* QM queues pointers table */ bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET); @@ -5155,57 +6032,51 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) REG_WR(bp, QM_REG_SOFT_RESET, 0); #ifdef BCM_CNIC - bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); #endif - bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); - - if (!CHIP_REV_IS_SLOW(bp)) { + if (!CHIP_REV_IS_SLOW(bp)) /* enable hw interrupt from doorbell Q */ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); - } - bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); - if (CHIP_MODE_IS_4_PORT(bp)) { - REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248); - REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328); - } + bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); - bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); -#ifndef BCM_CNIC - /* set NIC mode */ - REG_WR(bp, PRS_REG_NIC_MODE, 1); -#endif + if (!CHIP_IS_E1(bp)) - REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp)); + REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); - if (CHIP_IS_E2(bp)) { - /* Bit-map indicating which L2 hdrs may appear after the - basic Ethernet header */ - int has_ovlan = IS_MF_SD(bp); - REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); - REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); - } + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) + /* Bit-map indicating which L2 hdrs may appear + * after the basic Ethernet header + */ + REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 
7 : 6); - bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON); - bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); - bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp)); + if (!CHIP_IS_E1x(bp)) { + /* reset VFC memories */ + REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); + REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, + VFC_MEMORIES_RST_REG_CAM_RST | + VFC_MEMORIES_RST_REG_RAM_RST); - bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE); + msleep(20); + } - if (CHIP_MODE_IS_4_PORT(bp)) - bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON); /* sync semi rtc */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, @@ -5213,21 +6084,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); - bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); - if (CHIP_IS_E2(bp)) { - int has_ovlan = IS_MF_SD(bp); - REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); - REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); - } + if (!CHIP_IS_E1x(bp)) + REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, + bp->path_has_ovlan ? 
7 : 6); REG_WR(bp, SRC_REG_SOFT_RST, 1); - for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) - REG_WR(bp, i, random32()); - bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); + #ifdef BCM_CNIC REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); @@ -5248,11 +6116,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) "of cdu_context(%ld)\n", (long)sizeof(union cdu_context)); - bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val); - bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON); REG_WR(bp, CFC_REG_INIT_REG, 0x7FF); /* enable context validation interrupt from CFC */ REG_WR(bp, CFC_REG_CFC_INT_MASK, 0); @@ -5260,20 +6128,19 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) /* set the thresholds to prevent CFC/CDU race */ REG_WR(bp, CFC_REG_DEBUG0, 0x20020000); - bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON); - if (CHIP_IS_E2(bp) && BP_NOMCP(bp)) + if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp)) REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36); - bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON); + bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON); - bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE); /* Reset PCIE errors for debug */ REG_WR(bp, 0x2814, 0xffffffff); REG_WR(bp, 0x3820, 0xffffffff); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); @@ -5287,21 +6154,15 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); } - bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE); - bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE); - - bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); + bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON); if (!CHIP_IS_E1(bp)) { - REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); - REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); - } - if (CHIP_IS_E2(bp)) { - /* Bit-map indicating which L2 hdrs may appear after the - basic Ethernet header */ - REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6)); + /* in E3 this done in per-port section */ + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); } + if (CHIP_IS_E1H(bp)) + /* not applicable for E2 (and above ...) 
*/ + REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp)); if (CHIP_REV_IS_SLOW(bp)) msleep(200); @@ -5343,127 +6204,136 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code) REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); bnx2x_enable_blocks_attention(bp); - if (CHIP_PARITY_ENABLED(bp)) - bnx2x_enable_blocks_parity(bp); + bnx2x_enable_blocks_parity(bp); if (!BP_NOMCP(bp)) { - /* In E2 2-PORT mode, same ext phy is used for the two paths */ - if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || - CHIP_IS_E1x(bp)) { - u32 shmem_base[2], shmem2_base[2]; - shmem_base[0] = bp->common.shmem_base; - shmem2_base[0] = bp->common.shmem2_base; - if (CHIP_IS_E2(bp)) { - shmem_base[1] = - SHMEM2_RD(bp, other_shmem_base_addr); - shmem2_base[1] = - SHMEM2_RD(bp, other_shmem2_base_addr); - } - bnx2x_acquire_phy_lock(bp); - bnx2x_common_init_phy(bp, shmem_base, shmem2_base, - bp->common.chip_id); - bnx2x_release_phy_lock(bp); - } + if (CHIP_IS_E1x(bp)) + bnx2x__common_init_phy(bp); } else BNX2X_ERR("Bootcode is missing - can not initialize link\n"); return 0; } +/** + * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase. + * + * @bp: driver handle + */ +static int bnx2x_init_hw_common_chip(struct bnx2x *bp) +{ + int rc = bnx2x_init_hw_common(bp); + + if (rc) + return rc; + + /* In E2 2-PORT mode, same ext phy is used for the two paths */ + if (!BP_NOMCP(bp)) + bnx2x__common_init_phy(bp); + + return 0; +} + static int bnx2x_init_hw_port(struct bnx2x *bp) { int port = BP_PORT(bp); - int init_stage = port ? PORT1_STAGE : PORT0_STAGE; + int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; u32 low, high; u32 val; + bnx2x__link_reset(bp); + DP(BNX2X_MSG_MCP, "starting port init port %d\n", port); REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); - bnx2x_init_block(bp, PXP_BLOCK, init_stage); - bnx2x_init_block(bp, PXP2_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_MISC, init_phase); + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); /* Timers bug workaround: disables the pf_master bit in pglue at * common phase, we need to enable it here before any dmae access are * attempted. 
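In the port-phase hunk below, the E1/E1H BRB pause thresholds are derived from the MTU in 256-byte blocks: for a two-port, non-MF device with MTU above 4096, low = ceil((24*1024 + mtu*4)/256) = 96 + ceil(mtu/64), and high adds 14*1024/256 = 56 blocks. A worked sketch of that arithmetic:

#include <stdio.h>

/* BRB pause thresholds in 256-byte blocks, as computed in
 * bnx2x_init_hw_port() for E1/E1H, two-port non-MF, mtu > 4096. */
static unsigned int brb_low(unsigned int mtu)
{
	return 96 + mtu / 64 + ((mtu % 64) ? 1 : 0); /* ceil((24K + 4*mtu)/256) */
}

int main(void)
{
	unsigned int mtu = 9000;	/* jumbo-frame example */
	unsigned int low = brb_low(mtu);
	unsigned int high = low + 56;	/* + 14K/256 blocks */

	/* mtu 9000 -> low = 96 + 141 = 237, high = 293 */
	printf("mtu %u -> pause low %u, high %u blocks\n", mtu, low, high);
	return 0;
}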
Therefore we manually added the enable-master to the * port phase (it also happens in the function phase) */ - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); - bnx2x_init_block(bp, TCM_BLOCK, init_stage); - bnx2x_init_block(bp, UCM_BLOCK, init_stage); - bnx2x_init_block(bp, CCM_BLOCK, init_stage); - bnx2x_init_block(bp, XCM_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); /* QM cid (connection) count */ bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); #ifdef BCM_CNIC - bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_TM, init_phase); REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); #endif - bnx2x_init_block(bp, DQ_BLOCK, init_stage); - - if (CHIP_MODE_IS_4_PORT(bp)) - bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) { - bnx2x_init_block(bp, BRB1_BLOCK, init_stage); - if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) { - /* no pause for emulation and FPGA */ - low = 0; - high = 513; - } else { - if (IS_MF(bp)) - low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); - else if (bp->dev->mtu > 4096) { - if (bp->flags & ONE_PORT_FLAG) - low = 160; - else { - val = bp->dev->mtu; - /* (24*1024 + val*4)/256 */ - low = 96 + (val/64) + - ((val % 64) ? 1 : 0); - } - } else - low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); - high = low + 56; /* 14*1024/256 */ - } + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + + if (IS_MF(bp)) + low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246); + else if (bp->dev->mtu > 4096) { + if (bp->flags & ONE_PORT_FLAG) + low = 160; + else { + val = bp->dev->mtu; + /* (24*1024 + val*4)/256 */ + low = 96 + (val/64) + + ((val % 64) ? 1 : 0); + } + } else + low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160); + high = low + 56; /* 14*1024/256 */ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); } - if (CHIP_MODE_IS_4_PORT(bp)) { - REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248); - REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328); - REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 : - BRB1_REG_MAC_GUARANTIED_0), 40); - } + if (CHIP_MODE_IS_4_PORT(bp)) + REG_WR(bp, (BP_PORT(bp) ? + BRB1_REG_MAC_GUARANTIED_1 : + BRB1_REG_MAC_GUARANTIED_0), 40); - bnx2x_init_block(bp, PRS_BLOCK, init_stage); - bnx2x_init_block(bp, TSDM_BLOCK, init_stage); - bnx2x_init_block(bp, CSDM_BLOCK, init_stage); - bnx2x_init_block(bp, USDM_BLOCK, init_stage); - bnx2x_init_block(bp, XSDM_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + if (CHIP_IS_E3B0(bp)) + /* Ovlan exists only if we are in multi-function + + * switch-dependent mode, in switch-independent there + * is no ovlan headers + */ + REG_WR(bp, BP_PORT(bp) ? + PRS_REG_HDRS_AFTER_BASIC_PORT_1 : + PRS_REG_HDRS_AFTER_BASIC_PORT_0, + (bp->path_has_ovlan ? 
7 : 6)); - bnx2x_init_block(bp, TSEM_BLOCK, init_stage); - bnx2x_init_block(bp, USEM_BLOCK, init_stage); - bnx2x_init_block(bp, CSEM_BLOCK, init_stage); - bnx2x_init_block(bp, XSEM_BLOCK, init_stage); - if (CHIP_MODE_IS_4_PORT(bp)) - bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); - bnx2x_init_block(bp, UPB_BLOCK, init_stage); - bnx2x_init_block(bp, XPB_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); - bnx2x_init_block(bp, PBF_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); - if (!CHIP_IS_E2(bp)) { + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + + if (CHIP_IS_E1x(bp)) { /* configure PBF to work without PAUSE mtu 9000 */ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); @@ -5479,20 +6349,20 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) } #ifdef BCM_CNIC - bnx2x_init_block(bp, SRCH_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_SRC, init_phase); #endif - bnx2x_init_block(bp, CDU_BLOCK, init_stage); - bnx2x_init_block(bp, CFC_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_CDU, init_phase); + bnx2x_init_block(bp, BLOCK_CFC, init_phase); if (CHIP_IS_E1(bp)) { REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); } - bnx2x_init_block(bp, HC_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_HC, init_phase); - bnx2x_init_block(bp, IGU_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_IGU, init_phase); - bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: * - SF mode: bits 3-7 are masked. only bits 0-2 are in use * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF @@ -5502,22 +6372,31 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) val |= CHIP_IS_E1(bp) ? 0 : 0x10; REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); - bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); - bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); - bnx2x_init_block(bp, EMAC1_BLOCK, init_stage); - bnx2x_init_block(bp, DBU_BLOCK, init_stage); - bnx2x_init_block(bp, DBG_BLOCK, init_stage); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); - bnx2x_init_block(bp, NIG_BLOCK, init_stage); + if (!CHIP_IS_E1x(bp)) { + /* Bit-map indicating which L2 hdrs may appear after the + * basic Ethernet header + */ + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_P1_HDRS_AFTER_BASIC : + NIG_REG_P0_HDRS_AFTER_BASIC, + IS_MF_SD(bp) ? 7 : 6); - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); + if (CHIP_IS_E3(bp)) + REG_WR(bp, BP_PORT(bp) ? + NIG_REG_LLH1_MF_MODE : + NIG_REG_LLH_MF_MODE, IS_MF(bp)); + } + if (!CHIP_IS_E3(bp)) + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); if (!CHIP_IS_E1(bp)) { /* 0x2 disable mf_ov, 0x1 enable */ REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, (IS_MF_SD(bp) ? 
0x1 : 0x2)); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { val = 0; switch (bp->mf_mode) { case MULTI_FUNCTION_SD: @@ -5538,17 +6417,16 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) } } - bnx2x_init_block(bp, MCP_BLOCK, init_stage); - bnx2x_init_block(bp, DMAE_BLOCK, init_stage); - if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base, - bp->common.shmem2_base, port)) { + + /* If SPIO5 is set to generate interrupts, enable it for this port */ + val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); + if (val & (1 << MISC_REGISTERS_SPIO_5)) { u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(bp, reg_addr); val |= AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(bp, reg_addr, val); } - bnx2x__link_reset(bp); return 0; } @@ -5567,7 +6445,7 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) { - bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/); + bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); } static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) @@ -5581,6 +6459,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) { int port = BP_PORT(bp); int func = BP_FUNC(bp); + int init_phase = PHASE_PF0 + func; struct bnx2x_ilt *ilt = BP_ILT(bp); u16 cdu_ilt_start; u32 addr, val; @@ -5589,6 +6468,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) DP(BNX2X_MSG_MCP, "starting func init func %d\n", func); + /* FLR cleanup - hmmm */ + if (!CHIP_IS_E1x(bp)) + bnx2x_pf_flr_clnup(bp); + /* set MSI reconfigure capability */ if (bp->common.int_block == INT_BLOCK_HC) { addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); @@ -5597,6 +6480,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, addr, val); } + bnx2x_init_block(bp, BLOCK_PXP, init_phase); + bnx2x_init_block(bp, BLOCK_PXP2, init_phase); + ilt = BP_ILT(bp); cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; @@ -5622,7 +6508,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, PRS_REG_NIC_MODE, 1); #endif /* BCM_CNIC */ - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { u32 pf_conf = IGU_PF_CONF_FUNC_EN; /* Turn on a single ISR mode in IGU if driver is going to use @@ -5649,58 +6535,55 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) bp->dmae_ready = 1; - bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase); - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); - bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func); - - if (CHIP_IS_E2(bp)) { - REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET, - BP_PATH(bp)); - REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET, - BP_PATH(bp)); - } - - if (CHIP_MODE_IS_4_PORT(bp)) - bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func); - - if (CHIP_IS_E2(bp)) + bnx2x_init_block(bp, BLOCK_ATC, init_phase); + bnx2x_init_block(bp, BLOCK_DMAE, init_phase); + bnx2x_init_block(bp, BLOCK_NIG, init_phase); + bnx2x_init_block(bp, BLOCK_SRC, init_phase); + bnx2x_init_block(bp, BLOCK_MISC, 
init_phase); + bnx2x_init_block(bp, BLOCK_TCM, init_phase); + bnx2x_init_block(bp, BLOCK_UCM, init_phase); + bnx2x_init_block(bp, BLOCK_CCM, init_phase); + bnx2x_init_block(bp, BLOCK_XCM, init_phase); + bnx2x_init_block(bp, BLOCK_TSEM, init_phase); + bnx2x_init_block(bp, BLOCK_USEM, init_phase); + bnx2x_init_block(bp, BLOCK_CSEM, init_phase); + bnx2x_init_block(bp, BLOCK_XSEM, init_phase); + + if (!CHIP_IS_E1x(bp)) REG_WR(bp, QM_REG_PF_EN, 1); - bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func); - - if (CHIP_MODE_IS_4_PORT(bp)) - bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func); - - bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func); - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) { + REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func); + } + bnx2x_init_block(bp, BLOCK_QM, init_phase); + + bnx2x_init_block(bp, BLOCK_TM, init_phase); + bnx2x_init_block(bp, BLOCK_DORQ, init_phase); + bnx2x_init_block(bp, BLOCK_BRB1, init_phase); + bnx2x_init_block(bp, BLOCK_PRS, init_phase); + bnx2x_init_block(bp, BLOCK_TSDM, init_phase); + bnx2x_init_block(bp, BLOCK_CSDM, init_phase); + bnx2x_init_block(bp, BLOCK_USDM, init_phase); + bnx2x_init_block(bp, BLOCK_XSDM, init_phase); + bnx2x_init_block(bp, BLOCK_UPB, init_phase); + bnx2x_init_block(bp, BLOCK_XPB, init_phase); + bnx2x_init_block(bp, BLOCK_PBF, init_phase); + if (!CHIP_IS_E1x(bp)) REG_WR(bp, PBF_REG_DISABLE_PF, 0); - bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, BLOCK_CDU, init_phase); - bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, BLOCK_CFC, init_phase); - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(bp)) { @@ -5708,7 +6591,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); } - bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); /* HC init per function */ if (bp->common.int_block == INT_BLOCK_HC) { @@ -5718,21 +6601,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); } - bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, BLOCK_HC, init_phase); } else { int num_segs, sb_idx, prod_offset; REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); } - bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func); + bnx2x_init_block(bp, BLOCK_IGU, init_phase); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { int dsb_idx = 0; /** * Producer memory: @@ -5827,13 +6710,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, 
0x2114, 0xffffffff); REG_WR(bp, 0x2120, 0xffffffff); - bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func); - bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func); - if (CHIP_IS_E1x(bp)) { main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ main_mem_base = HC_REG_MAIN_MEMORY + @@ -5859,65 +6735,26 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_RD(bp, main_mem_prty_clr); } +#ifdef BNX2X_STOP_ON_ERROR + /* Enable STORMs SP logging */ + REG_WR8(bp, BAR_USTRORM_INTMEM + + USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_TSTRORM_INTMEM + + TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); + REG_WR8(bp, BAR_XSTRORM_INTMEM + + XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1); +#endif + bnx2x_phy_probe(&bp->link_params); return 0; } -int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) -{ - int rc = 0; - - DP(BNX2X_MSG_MCP, "function %d load_code %x\n", - BP_ABS_FUNC(bp), load_code); - - bp->dmae_ready = 0; - spin_lock_init(&bp->dmae_lock); - - switch (load_code) { - case FW_MSG_CODE_DRV_LOAD_COMMON: - case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: - rc = bnx2x_init_hw_common(bp, load_code); - if (rc) - goto init_hw_err; - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_PORT: - rc = bnx2x_init_hw_port(bp); - if (rc) - goto init_hw_err; - /* no break */ - - case FW_MSG_CODE_DRV_LOAD_FUNCTION: - rc = bnx2x_init_hw_func(bp); - if (rc) - goto init_hw_err; - break; - - default: - BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code); - break; - } - - if (!BP_NOMCP(bp)) { - int mb_idx = BP_FW_MB_IDX(bp); - - bp->fw_drv_pulse_wr_seq = - (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) & - DRV_PULSE_SEQ_MASK); - DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); - } - -init_hw_err: - bnx2x_gunzip_end(bp); - - return rc; -} void bnx2x_free_mem(struct bnx2x *bp) { - bnx2x_gunzip_end(bp); - /* fastpath */ bnx2x_free_fp_mem(bp); /* end of fastpath */ @@ -5925,6 +6762,9 @@ void bnx2x_free_mem(struct bnx2x *bp) BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, sizeof(struct host_sp_status_block)); + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); @@ -5936,7 +6776,7 @@ void bnx2x_free_mem(struct bnx2x *bp) BNX2X_FREE(bp->ilt->lines); #ifdef BCM_CNIC - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping, sizeof(struct host_hc_status_block_e2)); else @@ -5950,18 +6790,67 @@ void bnx2x_free_mem(struct bnx2x *bp) BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); +} + +static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) +{ + int num_groups; + + /* number of eth_queues */ + u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp); + + /* Total number of FW statistics requests = + * 1 for port stats + 1 for PF stats + num_eth_queues */ + bp->fw_stats_num = 2 + num_queue_stats; + - BNX2X_FREE(bp->rx_indir_table); + /* Request is built from stats_query_header and an array of + * stats_query_cmd_group each of which contains + * STATS_QUERY_CMD_COUNT rules. The real number of requests is + * configured in the stats_query_header. 
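The num_groups computation in the next hunk is a plain ceiling division: the request count (one for port stats, one for PF stats, one per ethernet queue) divided by the per-group command capacity, rounded up. A tiny worked sketch (STATS_QUERY_CMD_COUNT comes from the FW HSI; the value 16 below is only an assumption for illustration):

#include <stdio.h>

#define STATS_QUERY_CMD_COUNT 16	/* assumed HSI value, illustration only */

int main(void)
{
	unsigned int num_queue_stats = 8;	/* e.g. 8 ethernet queues */
	unsigned int num_requests = 2 + num_queue_stats; /* port + PF + queues */
	unsigned int num_groups = num_requests / STATS_QUERY_CMD_COUNT +
				  ((num_requests % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	printf("%u requests -> %u command group(s)\n", num_requests, num_groups);
	return 0;
}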
+ */ + num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT + + (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0); + + bp->fw_stats_req_sz = sizeof(struct stats_query_header) + + num_groups * sizeof(struct stats_query_cmd_group); + + /* Data for statistics requests + stats_counter + * + * stats_counter holds per-STORM counters that are incremented + * when STORM has finished with the current request. + */ + bp->fw_stats_data_sz = sizeof(struct per_port_stats) + + sizeof(struct per_pf_stats) + + sizeof(struct per_queue_stats) * num_queue_stats + + sizeof(struct stats_counter); + + BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + + /* Set shortcuts */ + bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats; + bp->fw_stats_req_mapping = bp->fw_stats_mapping; + + bp->fw_stats_data = (struct bnx2x_fw_stats_data *) + ((u8 *)bp->fw_stats + bp->fw_stats_req_sz); + + bp->fw_stats_data_mapping = bp->fw_stats_mapping + + bp->fw_stats_req_sz; + return 0; + +alloc_mem_err: + BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, + bp->fw_stats_data_sz + bp->fw_stats_req_sz); + return -ENOMEM; } int bnx2x_alloc_mem(struct bnx2x *bp) { - if (bnx2x_gunzip_init(bp)) - return -ENOMEM; - #ifdef BCM_CNIC - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) + /* size = the status block + ramrod buffers */ BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, sizeof(struct host_hc_status_block_e2)); else @@ -5979,7 +6868,11 @@ int bnx2x_alloc_mem(struct bnx2x *bp) BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); - bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count; + /* Allocate memory for FW statistics */ + if (bnx2x_alloc_fw_stats_mem(bp)) + goto alloc_mem_err; + + bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, bp->context.size); @@ -5996,8 +6889,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp) BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); - BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) * - TSTORM_INDIRECTION_TABLE_SIZE); /* fastpath */ /* need to be done at the end, since it's self adjusting to amount @@ -6015,629 +6906,75 @@ alloc_mem_err: /* * Init service functions */ -static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, - int *state_p, int flags); - -int bnx2x_func_start(struct bnx2x *bp) -{ - bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1); - - /* Wait for completion */ - return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state), - WAIT_RAMROD_COMMON); -} - -static int bnx2x_func_stop(struct bnx2x *bp) -{ - bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1); - - /* Wait for completion */ - return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD, - 0, &(bp->state), WAIT_RAMROD_COMMON); -} - -/** - * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips - * - * @bp: driver handle - * @set: set or clear an entry (1 or 0) - * @mac: pointer to a buffer containing a MAC - * @cl_bit_vec: bit vector of clients to register a MAC for - * @cam_offset: offset in a CAM to use - * @is_bcast: is the set MAC a broadcast address (for E1 only) - */ -static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, - u32 cl_bit_vec, u8 cam_offset, - u8 is_bcast) -{ - struct mac_configuration_cmd *config = - (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config); - int ramrod_flags = 
WAIT_RAMROD_COMMON; - - bp->set_mac_pending = 1; - - config->hdr.length = 1; - config->hdr.offset = cam_offset; - config->hdr.client_id = 0xff; - /* Mark the single MAC configuration ramrod as opposed to a - * UC/MC list configuration). - */ - config->hdr.echo = 1; - - /* primary MAC */ - config->config_table[0].msb_mac_addr = - swab16(*(u16 *)&mac[0]); - config->config_table[0].middle_mac_addr = - swab16(*(u16 *)&mac[2]); - config->config_table[0].lsb_mac_addr = - swab16(*(u16 *)&mac[4]); - config->config_table[0].clients_bit_vector = - cpu_to_le32(cl_bit_vec); - config->config_table[0].vlan_id = 0; - config->config_table[0].pf_id = BP_FUNC(bp); - if (set) - SET_FLAG(config->config_table[0].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_SET); - else - SET_FLAG(config->config_table[0].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_INVALIDATE); - - if (is_bcast) - SET_FLAG(config->config_table[0].flags, - MAC_CONFIGURATION_ENTRY_BROADCAST, 1); - - DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n", - (set ? "setting" : "clearing"), - config->config_table[0].msb_mac_addr, - config->config_table[0].middle_mac_addr, - config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); - - mb(); - - bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, - U64_HI(bnx2x_sp_mapping(bp, mac_config)), - U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); - - /* Wait for a completion */ - bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags); -} - -static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, - int *state_p, int flags) -{ - /* can take a while if any port is running */ - int cnt = 5000; - u8 poll = flags & WAIT_RAMROD_POLL; - u8 common = flags & WAIT_RAMROD_COMMON; - - DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", - poll ? "polling" : "waiting", state, idx); - - might_sleep(); - while (cnt--) { - if (poll) { - if (common) - bnx2x_eq_int(bp); - else { - bnx2x_rx_int(bp->fp, 10); - /* if index is different from 0 - * the reply for some commands will - * be on the non default queue - */ - if (idx) - bnx2x_rx_int(&bp->fp[idx], 10); - } - } - - mb(); /* state is changed by bnx2x_sp_event() */ - if (*state_p == state) { -#ifdef BNX2X_STOP_ON_ERROR - DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt); -#endif - return 0; - } - - msleep(1); - - if (bp->panic) - return -EIO; - } - - /* timeout! */ - BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", - poll ? "polling" : "waiting", state, idx); -#ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); -#endif - - return -EBUSY; -} - -static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) -{ - if (CHIP_IS_E1H(bp)) - return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); - else if (CHIP_MODE_IS_4_PORT(bp)) - return E2_FUNC_MAX * rel_offset + BP_FUNC(bp); - else - return E2_FUNC_MAX * rel_offset + BP_VN(bp); -} - -/** - * LLH CAM line allocations: currently only iSCSI and ETH macs are - * relevant. In addition, current implementation is tuned for a - * single ETH MAC. 
- */ -enum { - LLH_CAM_ISCSI_ETH_LINE = 0, - LLH_CAM_ETH_LINE, - LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE -}; - -static void bnx2x_set_mac_in_nig(struct bnx2x *bp, - int set, - unsigned char *dev_addr, - int index) -{ - u32 wb_data[2]; - u32 mem_offset, ena_offset, mem_index; - /** - * indexes mapping: - * 0..7 - goes to MEM - * 8..15 - goes to MEM2 - */ - - if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) - return; - - /* calculate memory start offset according to the mapping - * and index in the memory */ - if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) { - mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : - NIG_REG_LLH0_FUNC_MEM; - ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : - NIG_REG_LLH0_FUNC_MEM_ENABLE; - mem_index = index; - } else { - mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 : - NIG_REG_P0_LLH_FUNC_MEM2; - ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE : - NIG_REG_P0_LLH_FUNC_MEM2_ENABLE; - mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET; - } - - if (set) { - /* LLH_FUNC_MEM is a u64 WB register */ - mem_offset += 8*mem_index; - - wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | - (dev_addr[4] << 8) | dev_addr[5]); - wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); - REG_WR_DMAE(bp, mem_offset, wb_data, 2); - } - - /* enable/disable the entry */ - REG_WR(bp, ena_offset + 4*mem_index, set); - -} - -void bnx2x_set_eth_mac(struct bnx2x *bp, int set) +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, + struct bnx2x_vlan_mac_obj *obj, bool set, + int mac_type, unsigned long *ramrod_flags) { - u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : - bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE)); + int rc; + struct bnx2x_vlan_mac_ramrod_params ramrod_param; - /* networking MAC */ - bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, - (1 << bp->fp->cl_id), cam_offset , 0); + memset(&ramrod_param, 0, sizeof(ramrod_param)); - bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE); + /* Fill general parameters */ + ramrod_param.vlan_mac_obj = obj; + ramrod_param.ramrod_flags = *ramrod_flags; - if (CHIP_IS_E1(bp)) { - /* broadcast MAC */ - static const u8 bcast[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; - bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); - } -} + /* Fill a user request section if needed */ + if (!test_bit(RAMROD_CONT, ramrod_flags)) { + memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); -static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp) -{ - return CHIP_REV_IS_SLOW(bp) ? - (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) : - (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp))); -} + __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); -/* set mc list, do not wait as wait implies sleep and - * set_rx_mode can be invoked from non-sleepable context. - * - * Instead we use the same ramrod data buffer each time we need - * to configure a list of addresses, and use the fact that the - * list of MACs is changed in an incremental way and that the - * function is called under the netif_addr_lock. A temporary - * inconsistent CAM configuration (possible in case of a very fast - * sequence of add/del/add on the host side) will shortly be - * restored by the handler of the last ramrod. 
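The object-based bnx2x_set_mac_one()/bnx2x_del_all_macs() pair introduced here replaces the hand-rolled CAM programming being deleted. As a usage illustration, a hypothetical teardown sequence (a sketch assuming the driver's own headers, mirroring what bnx2x_set_eth_mac() below does for the add path; not a verbatim driver call chain):

/* Hypothetical caller, e.g. on interface teardown: remove the primary
 * MAC synchronously, then flush any leftover BNX2X_ETH_MAC entries. */
static int sketch_flush_eth_macs(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);	/* sleep until done */

	rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj,
			       false /* delete */, BNX2X_ETH_MAC, &ramrod_flags);
	if (rc < 0)
		return rc;

	/* belt and braces: drop whatever is still classified as an ETH MAC */
	return bnx2x_del_all_macs(bp, &bp->fp->mac_obj, BNX2X_ETH_MAC, true);
}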
- */ -static int bnx2x_set_e1_mc_list(struct bnx2x *bp) -{ - int i = 0, old; - struct net_device *dev = bp->dev; - u8 offset = bnx2x_e1_cam_mc_offset(bp); - struct netdev_hw_addr *ha; - struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); - dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); - - if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) - return -EINVAL; - - netdev_for_each_mc_addr(ha, dev) { - /* copy mac */ - config_cmd->config_table[i].msb_mac_addr = - swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]); - config_cmd->config_table[i].middle_mac_addr = - swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]); - config_cmd->config_table[i].lsb_mac_addr = - swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]); - - config_cmd->config_table[i].vlan_id = 0; - config_cmd->config_table[i].pf_id = BP_FUNC(bp); - config_cmd->config_table[i].clients_bit_vector = - cpu_to_le32(1 << BP_L_ID(bp)); - - SET_FLAG(config_cmd->config_table[i].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_SET); - - DP(NETIF_MSG_IFUP, - "setting MCAST[%d] (%04x:%04x:%04x)\n", i, - config_cmd->config_table[i].msb_mac_addr, - config_cmd->config_table[i].middle_mac_addr, - config_cmd->config_table[i].lsb_mac_addr); - i++; - } - old = config_cmd->hdr.length; - if (old > i) { - for (; i < old; i++) { - if (CAM_IS_INVALID(config_cmd-> - config_table[i])) { - /* already invalidated */ - break; - } - /* invalidate */ - SET_FLAG(config_cmd->config_table[i].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_INVALIDATE); - } + /* Set the command: ADD or DEL */ + if (set) + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; + else + ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL; } - wmb(); - - config_cmd->hdr.length = i; - config_cmd->hdr.offset = offset; - config_cmd->hdr.client_id = 0xff; - /* Mark that this ramrod doesn't use bp->set_mac_pending for - * synchronization. - */ - config_cmd->hdr.echo = 0; - - mb(); - - return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, - U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); -} - -void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp) -{ - int i; - struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); - dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); - int ramrod_flags = WAIT_RAMROD_COMMON; - u8 offset = bnx2x_e1_cam_mc_offset(bp); - - for (i = 0; i < BNX2X_MAX_MULTICAST; i++) - SET_FLAG(config_cmd->config_table[i].flags, - MAC_CONFIGURATION_ENTRY_ACTION_TYPE, - T_ETH_MAC_COMMAND_INVALIDATE); - - wmb(); - - config_cmd->hdr.length = BNX2X_MAX_MULTICAST; - config_cmd->hdr.offset = offset; - config_cmd->hdr.client_id = 0xff; - /* We'll wait for a completion this time... */ - config_cmd->hdr.echo = 1; - - bp->set_mac_pending = 1; - - mb(); - - bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, - U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); - - /* Wait for a completion */ - bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, - ramrod_flags); - + rc = bnx2x_config_vlan_mac(bp, &ramrod_param); + if (rc < 0) + BNX2X_ERR("%s MAC failed\n", (set ? 
"Set" : "Del")); + return rc; } -/* Accept one or more multicasts */ -static int bnx2x_set_e1h_mc_list(struct bnx2x *bp) +int bnx2x_del_all_macs(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + int mac_type, bool wait_for_comp) { - struct net_device *dev = bp->dev; - struct netdev_hw_addr *ha; - u32 mc_filter[MC_HASH_SIZE]; - u32 crc, bit, regidx; - int i; - - memset(mc_filter, 0, 4 * MC_HASH_SIZE); - - netdev_for_each_mc_addr(ha, dev) { - DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", - bnx2x_mc_addr(ha)); - - crc = crc32c_le(0, bnx2x_mc_addr(ha), - ETH_ALEN); - bit = (crc >> 24) & 0xff; - regidx = bit >> 5; - bit &= 0x1f; - mc_filter[regidx] |= (1 << bit); - } + int rc; + unsigned long ramrod_flags = 0, vlan_mac_flags = 0; - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), - mc_filter[i]); + /* Wait for completion of requested */ + if (wait_for_comp) + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); - return 0; -} + /* Set the mac type of addresses we want to clear */ + __set_bit(mac_type, &vlan_mac_flags); -void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp) -{ - int i; + rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags); + if (rc < 0) + BNX2X_ERR("Failed to delete MACs: %d\n", rc); - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); + return rc; } -#ifdef BCM_CNIC -/** - * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). - * - * @bp: driver handle - * @set: set or clear the CAM entry - * - * This function will wait until the ramdord completion returns. - * Return 0 if success, -ENODEV if ramrod doesn't return. - */ -static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) +int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { - u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : - bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); - u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + - BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; - u32 cl_bit_vec = (1 << iscsi_l2_cl_id); - u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; + unsigned long ramrod_flags = 0; - /* Send a SET_MAC ramrod */ - bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec, - cam_offset, 0); + DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); - bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); - - return 0; + __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); + /* Eth MAC is set on RSS leading client (fp[0]) */ + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); } -/** - * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s) - * - * @bp: driver handle - * @set: set or clear the CAM entry - * - * This function will wait until the ramrod completion returns. - * Returns 0 if success, -ENODEV if ramrod doesn't return. 
- */ -int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set) -{ - u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); - /** - * CAM allocation for E1H - * eth unicasts: by func number - * iscsi: by func number - * fip unicast: by func number - * fip multicast: by func number - */ - bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac, - cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0); - - return 0; -} - -int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set) +int bnx2x_setup_leading(struct bnx2x *bp) { - u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id)); - - /** - * CAM allocation for E1H - * eth unicasts: by func number - * iscsi: by func number - * fip unicast: by func number - * fip multicast: by func number - */ - bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec, - bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0); - - return 0; -} -#endif - -static void bnx2x_fill_cl_init_data(struct bnx2x *bp, - struct bnx2x_client_init_params *params, - u8 activate, - struct client_init_ramrod_data *data) -{ - /* Clear the buffer */ - memset(data, 0, sizeof(*data)); - - /* general */ - data->general.client_id = params->rxq_params.cl_id; - data->general.statistics_counter_id = params->rxq_params.stat_id; - data->general.statistics_en_flg = - (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; - data->general.is_fcoe_flg = - (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0; - data->general.activate_flg = activate; - data->general.sp_client_id = params->rxq_params.spcl_id; - - /* Rx data */ - data->rx.tpa_en_flg = - (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0; - data->rx.vmqueue_mode_en_flg = 0; - data->rx.cache_line_alignment_log_size = - params->rxq_params.cache_line_log; - data->rx.enable_dynamic_hc = - (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0; - data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt; - data->rx.client_qzone_id = params->rxq_params.cl_qzone_id; - data->rx.max_agg_size = params->rxq_params.tpa_agg_sz; - - /* We don't set drop flags */ - data->rx.drop_ip_cs_err_flg = 0; - data->rx.drop_tcp_cs_err_flg = 0; - data->rx.drop_ttl0_flg = 0; - data->rx.drop_udp_cs_err_flg = 0; - - data->rx.inner_vlan_removal_enable_flg = - (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0; - data->rx.outer_vlan_removal_enable_flg = - (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0; - data->rx.status_block_id = params->rxq_params.fw_sb_id; - data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index; - data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz); - data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz); - data->rx.mtu = cpu_to_le16(params->rxq_params.mtu); - data->rx.bd_page_base.lo = - cpu_to_le32(U64_LO(params->rxq_params.dscr_map)); - data->rx.bd_page_base.hi = - cpu_to_le32(U64_HI(params->rxq_params.dscr_map)); - data->rx.sge_page_base.lo = - cpu_to_le32(U64_LO(params->rxq_params.sge_map)); - data->rx.sge_page_base.hi = - cpu_to_le32(U64_HI(params->rxq_params.sge_map)); - data->rx.cqe_page_base.lo = - cpu_to_le32(U64_LO(params->rxq_params.rcq_map)); - data->rx.cqe_page_base.hi = - cpu_to_le32(U64_HI(params->rxq_params.rcq_map)); - data->rx.is_leading_rss = - (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 
1 : 0; - data->rx.is_approx_mcast = data->rx.is_leading_rss; - - /* Tx data */ - data->tx.enforce_security_flg = 0; /* VF specific */ - data->tx.tx_status_block_id = params->txq_params.fw_sb_id; - data->tx.tx_sb_index_number = params->txq_params.sb_cq_index; - data->tx.mtu = 0; /* VF specific */ - data->tx.tx_bd_page_base.lo = - cpu_to_le32(U64_LO(params->txq_params.dscr_map)); - data->tx.tx_bd_page_base.hi = - cpu_to_le32(U64_HI(params->txq_params.dscr_map)); - - /* flow control data */ - data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo); - data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi); - data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo); - data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi); - data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo); - data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi); - data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map); - - data->fc.safc_group_num = params->txq_params.cos; - data->fc.safc_group_en_flg = - (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; - data->fc.traffic_type = - (params->ramrod_params.flags & CLIENT_IS_FCOE) ? - LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; -} - -static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) -{ - /* ustorm cxt validation */ - cxt->ustorm_ag_context.cdu_usage = - CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG, - ETH_CONNECTION_TYPE); - /* xcontext validation */ - cxt->xstorm_ag_context.cdu_reserved = - CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG, - ETH_CONNECTION_TYPE); -} - -static int bnx2x_setup_fw_client(struct bnx2x *bp, - struct bnx2x_client_init_params *params, - u8 activate, - struct client_init_ramrod_data *data, - dma_addr_t data_mapping) -{ - u16 hc_usec; - int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP; - int ramrod_flags = 0, rc; - - /* HC and context validation values */ - hc_usec = params->txq_params.hc_rate ? - 1000000 / params->txq_params.hc_rate : 0; - bnx2x_update_coalesce_sb_index(bp, - params->txq_params.fw_sb_id, - params->txq_params.sb_cq_index, - !(params->txq_params.flags & QUEUE_FLG_HC), - hc_usec); - - *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING; - - hc_usec = params->rxq_params.hc_rate ? - 1000000 / params->rxq_params.hc_rate : 0; - bnx2x_update_coalesce_sb_index(bp, - params->rxq_params.fw_sb_id, - params->rxq_params.sb_cq_index, - !(params->rxq_params.flags & QUEUE_FLG_HC), - hc_usec); - - bnx2x_set_ctx_validation(params->rxq_params.cxt, - params->rxq_params.cid); - - /* zero stats */ - if (params->txq_params.flags & QUEUE_FLG_STATS) - storm_memset_xstats_zero(bp, BP_PORT(bp), - params->txq_params.stat_id); - - if (params->rxq_params.flags & QUEUE_FLG_STATS) { - storm_memset_ustats_zero(bp, BP_PORT(bp), - params->rxq_params.stat_id); - storm_memset_tstats_zero(bp, BP_PORT(bp), - params->rxq_params.stat_id); - } - - /* Fill the ramrod data */ - bnx2x_fill_cl_init_data(bp, params, activate, data); - - /* SETUP ramrod. - * - * bnx2x_sp_post() takes a spin_lock thus no other explict memory - * barrier except from mmiowb() is needed to impose a - * proper ordering of memory operations. 
- */ - mmiowb(); - - - bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid, - U64_HI(data_mapping), U64_LO(data_mapping), 0); - - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state, - params->ramrod_params.index, - params->ramrod_params.pstate, - ramrod_flags); - return rc; + return bnx2x_setup_queue(bp, &bp->fp[0], 1); } /** @@ -6647,16 +6984,14 @@ static int bnx2x_setup_fw_client(struct bnx2x *bp, * * In case of MSI-X it will also try to enable MSI-X. */ -static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) { - int rc = 0; - - switch (bp->int_mode) { + switch (int_mode) { case INT_MODE_MSI: bnx2x_enable_msi(bp); /* falling through... */ case INT_MODE_INTx: - bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; + bp->num_queues = 1 + NON_ETH_CONTEXT_USE; DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); break; default: @@ -6670,8 +7005,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) * so try to enable MSI-X with the requested number of fp's * and fallback to MSI or legacy INTx with one fp */ - rc = bnx2x_enable_msix(bp); - if (rc) { + if (bnx2x_enable_msix(bp)) { /* failed to enable MSI-X */ if (bp->multi_mode) DP(NETIF_MSG_IFUP, @@ -6679,17 +7013,15 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp) "enable MSI-X (%d), " "set number of queues to %d\n", bp->num_queues, - 1 + NONE_ETH_CONTEXT_USE); - bp->num_queues = 1 + NONE_ETH_CONTEXT_USE; + 1 + NON_ETH_CONTEXT_USE); + bp->num_queues = 1 + NON_ETH_CONTEXT_USE; + /* Try to enable MSI */ if (!(bp->flags & DISABLE_MSI_FLAG)) bnx2x_enable_msi(bp); } - break; } - - return rc; } /* must be called prior to any HW initializations */ @@ -6713,7 +7045,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; - line += L2_ILT_LINES(bp); + line += bnx2x_cid_ilt_lines(bp); #ifdef BCM_CNIC line += CNIC_ILT_LINES; #endif @@ -6793,92 +7125,258 @@ void bnx2x_ilt_set_info(struct bnx2x *bp) #else ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); #endif + BUG_ON(line > ILT_MAX_LINES); +} + +/** + * bnx2x_pf_q_prep_init - prepare INIT transition parameters + * + * @bp: driver handle + * @fp: pointer to fastpath + * @init_params: pointer to parameters structure + * + * parameters configured: + * - HC configuration + * - Queue's CDU context + */ +static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, + struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) +{ + + u8 cos; + /* FCoE Queue uses Default SB, thus has no HC capabilities */ + if (!IS_FCOE_FP(fp)) { + __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags); + + /* If HC is supported, enable host coalescing in the transition + * to INIT state. + */ + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags); + __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags); + + /* HC rate */ + init_params->rx.hc_rate = bp->rx_ticks ? + (1000000 / bp->rx_ticks) : 0; + init_params->tx.hc_rate = bp->tx_ticks ? + (1000000 / bp->tx_ticks) : 0; + + /* FW SB ID */ + init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = + fp->fw_sb_id; + + /* + * CQ index among the SB indices: the FCoE client uses the default + * SB, therefore it's different.
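The hc_rate values computed just above are interrupt rates derived from the coalescing intervals: bp->rx_ticks and bp->tx_ticks are in microseconds, so the rate is simply the reciprocal. A worked sketch of the conversion (illustrative, not driver code):

/* usec coalescing interval -> HC rate in interrupts/sec; a zero
 * interval yields rate 0, i.e. no rate limit is imposed. */
static inline u32 ticks_to_hc_rate(u16 ticks_usec)
{
	return ticks_usec ? 1000000 / ticks_usec : 0;
}
/* e.g. rx_ticks = 50 usec -> hc_rate = 20000 interrupts/sec */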
+ */ + init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; + init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; + } + + /* set maximum number of COSs supported by this queue */ + init_params->max_cos = fp->max_cos; + + DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d", + fp->index, init_params->max_cos); + + /* set the context pointers queue object */ + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) + init_params->cxts[cos] = + &bp->context.vcxt[fp->txdata[cos].cid].eth; } -int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp, - int is_leading) +int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, + struct bnx2x_queue_state_params *q_params, + struct bnx2x_queue_setup_tx_only_params *tx_only_params, + int tx_index, bool leading) { - struct bnx2x_client_init_params params = { {0} }; + memset(tx_only_params, 0, sizeof(*tx_only_params)); + + /* Set the command */ + q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY; + + /* Set tx-only QUEUE flags: don't zero statistics */ + tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false); + + /* choose the index of the cid to send the slow path on */ + tx_only_params->cid_index = tx_index; + + /* Set general TX_ONLY_SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index); + + /* Set Tx TX_ONLY_SETUP parameters */ + bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index); + + DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:" + "cos %d, primary cid %d, cid %d, " + "client id %d, sp-client id %d, flags %lx", + tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX], + q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id, + tx_only_params->gen_params.spcl_id, tx_only_params->flags); + + /* send the ramrod */ + return bnx2x_queue_state_change(bp, q_params); +} + + +/** + * bnx2x_setup_queue - setup queue + * + * @bp: driver handle + * @fp: pointer to fastpath + * @leading: is leading + * + * This function performs 2 steps in a Queue state machine + * actually: 1) RESET->INIT 2) INIT->SETUP + */ + +int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool leading) +{ + struct bnx2x_queue_state_params q_params = {0}; + struct bnx2x_queue_setup_params *setup_params = + &q_params.params.setup; + struct bnx2x_queue_setup_tx_only_params *tx_only_params = + &q_params.params.tx_only; int rc; + u8 tx_index; + + DP(BNX2X_MSG_SP, "setting up queue %d", fp->index); /* reset IGU state skip FCoE L2 queue */ if (!IS_FCOE_FP(fp)) bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); - params.ramrod_params.pstate = &fp->state; - params.ramrod_params.state = BNX2X_FP_STATE_OPEN; - params.ramrod_params.index = fp->index; - params.ramrod_params.cid = fp->cid; + q_params.q_obj = &fp->q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); -#ifdef BCM_CNIC - if (IS_FCOE_FP(fp)) - params.ramrod_params.flags |= CLIENT_IS_FCOE; + /* Prepare the INIT parameters */ + bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init); -#endif + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_INIT; - if (is_leading) - params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; + /* Change the state to INIT */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) INIT failed\n", fp->index); + return rc; + } - bnx2x_pf_rx_cl_prep(bp, fp, ¶ms.pause, ¶ms.rxq_params); + DP(BNX2X_MSG_SP, "init complete"); - bnx2x_pf_tx_cl_prep(bp, fp, ¶ms.txq_params); - rc = 
bnx2x_setup_fw_client(bp, ¶ms, 1, - bnx2x_sp(bp, client_init_data), - bnx2x_sp_mapping(bp, client_init_data)); - return rc; -} + /* Now move the Queue to the SETUP state... */ + memset(setup_params, 0, sizeof(*setup_params)); -static int bnx2x_stop_fw_client(struct bnx2x *bp, - struct bnx2x_client_ramrod_params *p) -{ - int rc; + /* Set QUEUE flags */ + setup_params->flags = bnx2x_get_q_flags(bp, fp, leading); - int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0; + /* Set general SETUP parameters */ + bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params, + FIRST_TX_COS_INDEX); - /* halt the connection */ - *p->pstate = BNX2X_FP_STATE_HALTING; - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0, - p->cl_id, 0); + bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params, + &setup_params->rxq_params); - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index, - p->pstate, poll_flag); - if (rc) /* timeout */ - return rc; + bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params, + FIRST_TX_COS_INDEX); + + /* Set the command */ + q_params.cmd = BNX2X_Q_CMD_SETUP; - *p->pstate = BNX2X_FP_STATE_TERMINATING; - bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0, - p->cl_id, 0); - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index, - p->pstate, poll_flag); - if (rc) /* timeout */ + /* Change the state to SETUP */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index); return rc; + } + /* loop through the relevant tx-only indices */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++) { - /* delete cfc entry */ - bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1); + /* prepare and send tx-only ramrod*/ + rc = bnx2x_setup_tx_only(bp, fp, &q_params, + tx_only_params, tx_index, leading); + if (rc) { + BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n", + fp->index, tx_index); + return rc; + } + } - /* Wait for completion */ - rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index, - p->pstate, WAIT_RAMROD_COMMON); return rc; } -static int bnx2x_stop_client(struct bnx2x *bp, int index) +static int bnx2x_stop_queue(struct bnx2x *bp, int index) { - struct bnx2x_client_ramrod_params client_stop = {0}; struct bnx2x_fastpath *fp = &bp->fp[index]; + struct bnx2x_fp_txdata *txdata; + struct bnx2x_queue_state_params q_params = {0}; + int rc, tx_index; + + DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid); + + q_params.q_obj = &fp->q_obj; + /* We want to wait for completion in this context */ + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + + + /* close tx-only connections */ + for (tx_index = FIRST_TX_ONLY_COS_INDEX; + tx_index < fp->max_cos; + tx_index++){ - client_stop.index = index; - client_stop.cid = fp->cid; - client_stop.cl_id = fp->cl_id; - client_stop.pstate = &(fp->state); - client_stop.poll = 0; + /* ascertain this is a normal queue*/ + txdata = &fp->txdata[tx_index]; - return bnx2x_stop_fw_client(bp, &client_stop); + DP(BNX2X_MSG_SP, "stopping tx-only queue %d", + txdata->txq_index); + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = tx_index; + + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* send halt terminate on tx-only connection */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + 
sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = tx_index; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + } + /* Stop the primary connection: */ + /* ...halt the connection */ + q_params.cmd = BNX2X_Q_CMD_HALT; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + + /* ...terminate the connection */ + q_params.cmd = BNX2X_Q_CMD_TERMINATE; + memset(&q_params.params.terminate, 0, + sizeof(q_params.params.terminate)); + q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) + return rc; + /* ...delete cfc entry */ + q_params.cmd = BNX2X_Q_CMD_CFC_DEL; + memset(&q_params.params.cfc_del, 0, + sizeof(q_params.params.cfc_del)); + q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; + return bnx2x_queue_state_change(bp, &q_params); } @@ -6887,12 +7385,6 @@ static void bnx2x_reset_func(struct bnx2x *bp) int port = BP_PORT(bp); int func = BP_FUNC(bp); int i; - int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) + - (CHIP_IS_E2(bp) ? - offsetof(struct hc_status_block_data_e2, common) : - offsetof(struct hc_status_block_data_e1x, common)); - int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func); - int pfid_offset = offsetof(struct pci_entity, pf_id); /* Disable the function in the FW */ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); @@ -6903,20 +7395,21 @@ static void bnx2x_reset_func(struct bnx2x *bp) /* FP SBs */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - REG_WR8(bp, - BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) - + pfunc_offset_fp + pfid_offset, - HC_FUNCTION_DISABLED); + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), + SB_DISABLED); } +#ifdef BCM_CNIC + /* CNIC SB */ + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), + SB_DISABLED); +#endif /* SP SB */ - REG_WR8(bp, - BAR_CSTRORM_INTMEM + - CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) + - pfunc_offset_sp + pfid_offset, - HC_FUNCTION_DISABLED); - + REG_WR8(bp, BAR_CSTRORM_INTMEM + + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), + SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), @@ -6950,7 +7443,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) /* Timers workaround bug for E2: if this is vnic-3, * we need to set the entire ilt range for this timers. */ - if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) { + if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) { struct ilt_client_info ilt_cli; /* use dummy TM client */ memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); @@ -6962,7 +7455,7 @@ static void bnx2x_reset_func(struct bnx2x *bp) } /* this assumes that reset_port() called before reset_func()*/ - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) bnx2x_pf_disable(bp); bp->dmae_ready = 0; @@ -6973,6 +7466,9 @@ static void bnx2x_reset_port(struct bnx2x *bp) int port = BP_PORT(bp); u32 val; + /* Reset physical Link */ + bnx2x__link_reset(bp); + REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); /* Do not rcv packets to BRB */ @@ -6994,92 +7490,66 @@ static void bnx2x_reset_port(struct bnx2x *bp) /* TODO: Close Doorbell port? 
*/ } -static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) +static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) { - DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", - BP_ABS_FUNC(bp), reset_code); + struct bnx2x_func_state_params func_params = {0}; - switch (reset_code) { - case FW_MSG_CODE_DRV_UNLOAD_COMMON: - bnx2x_reset_port(bp); - bnx2x_reset_func(bp); - bnx2x_reset_common(bp); - break; + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); - case FW_MSG_CODE_DRV_UNLOAD_PORT: - bnx2x_reset_port(bp); - bnx2x_reset_func(bp); - break; + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_HW_RESET; - case FW_MSG_CODE_DRV_UNLOAD_FUNCTION: - bnx2x_reset_func(bp); - break; + func_params.params.hw_init.load_phase = load_code; - default: - BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code); - break; - } + return bnx2x_func_state_change(bp, &func_params); } -#ifdef BCM_CNIC -static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp) +static inline int bnx2x_func_stop(struct bnx2x *bp) { - if (bp->flags & FCOE_MACS_SET) { - if (!IS_MF_SD(bp)) - bnx2x_set_fip_eth_mac_addr(bp, 0); - - bnx2x_set_all_enode_macs(bp, 0); - - bp->flags &= ~FCOE_MACS_SET; - } -} -#endif - -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) -{ - int port = BP_PORT(bp); - u32 reset_code = 0; - int i, cnt, rc; - - /* Wait until tx fastpath tasks complete */ - for_each_tx_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_func_state_params func_params = {0}; + int rc; - cnt = 1000; - while (bnx2x_has_tx_work_unload(fp)) { + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_STOP; - if (!cnt) { - BNX2X_ERR("timeout waiting for queue[%d]\n", - i); + /* + * Try to stop the function the 'good way'. If fails (in case + * of a parity error during bnx2x_chip_cleanup()) and we are + * not in a debug mode, perform a state transaction in order to + * enable further HW_RESET transaction. + */ + rc = bnx2x_func_state_change(bp, &func_params); + if (rc) { #ifdef BNX2X_STOP_ON_ERROR - bnx2x_panic(); - return -EBUSY; + return rc; #else - break; + BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry " + "transaction\n"); + __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); + return bnx2x_func_state_change(bp, &func_params); #endif - } - cnt--; - msleep(1); - } } - /* Give HW time to discard old tx messages */ - msleep(1); - - bnx2x_set_eth_mac(bp, 0); - bnx2x_invalidate_uc_list(bp); - - if (CHIP_IS_E1(bp)) - bnx2x_invalidate_e1_mc_list(bp); - else { - bnx2x_invalidate_e1h_mc_list(bp); - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); - } + return 0; +} -#ifdef BCM_CNIC - bnx2x_del_fcoe_eth_macs(bp); -#endif +/** + * bnx2x_send_unload_req - request unload mode from the MCP. + * + * @bp: driver handle + * @unload_mode: requested function's unload mode + * + * Return unload mode returned by the MCP: COMMON, PORT or FUNC. 
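bnx2x_reset_hw() and bnx2x_func_stop() above share one calling convention for the new function state machine; the common shape, sketched with names from this patch (the command is the only variable, error handling trimmed):

/* Sketch: fill a params block, pick a command, wait for completion. */
static int example_func_cmd(struct bnx2x *bp, int cmd)
{
	struct bnx2x_func_state_params func_params = {0};

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.f_obj = &bp->func_obj;
	func_params.cmd = cmd;		/* e.g. BNX2X_F_CMD_STOP */

	return bnx2x_func_state_change(bp, &func_params);
}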
+ */ +u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) +{ + u32 reset_code = 0; + int port = BP_PORT(bp); + + /* Select the UNLOAD request mode */ if (unload_mode == UNLOAD_NORMAL) reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; @@ -7106,54 +7576,215 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) } else reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; + /* Send the request to the MCP */ + if (!BP_NOMCP(bp)) + reset_code = bnx2x_fw_command(bp, reset_code, 0); + else { + int path = BP_PATH(bp); + + DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " + "%d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + load_count[path][0]--; + load_count[path][1 + port]--; + DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " + "%d, %d, %d\n", + path, load_count[path][0], load_count[path][1], + load_count[path][2]); + if (load_count[path][0] == 0) + reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; + else if (load_count[path][1 + port] == 0) + reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; + else + reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; + } + + return reset_code; +} + +/** + * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. + * + * @bp: driver handle + */ +void bnx2x_send_unload_done(struct bnx2x *bp) +{ + /* Report UNLOAD_DONE to MCP */ + if (!BP_NOMCP(bp)) + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); +} + +static inline int bnx2x_func_wait_started(struct bnx2x *bp) +{ + int tout = 50; + int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; + + if (!bp->port.pmf) + return 0; + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TXdisable/enable transaction + * 1. Sync ISR for default SB + * 2. Sync SP queue - this guarantees us that attention handling started + * 3. Wait until the TXdisable/enable transaction completes + * + * 1+2 guarantee that if DCBx attention was scheduled it already changed + * the pending bit of the transaction from STARTED-->TX_STOPPED; if we + * already received a completion for the transaction the state is + * TX_STOPPED. + * State will return to STARTED after completion of TX_STOPPED-->STARTED + * transaction. + */ + + /* make sure default SB ISR is done */ + if (msix) + synchronize_irq(bp->msix_table[0].vector); + else + synchronize_irq(bp->pdev->irq); + + flush_workqueue(bnx2x_wq); + + while (bnx2x_func_get_state(bp, &bp->func_obj) != + BNX2X_F_STATE_STARTED && tout--) + msleep(20); + + if (bnx2x_func_get_state(bp, &bp->func_obj) != + BNX2X_F_STATE_STARTED) { +#ifdef BNX2X_STOP_ON_ERROR + return -EBUSY; +#else + /* + * Failed to complete the transaction in a "good way" + * Force both transactions with CLR bit + */ + struct bnx2x_func_state_params func_params = {0}; + + DP(BNX2X_MSG_SP, "Hmmm... unexpected function state!
" + "Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + + func_params.f_obj = &bp->func_obj; + __set_bit(RAMROD_DRV_CLR_ONLY, + &func_params.ramrod_flags); + + /* STARTED-->TX_ST0PPED */ + func_params.cmd = BNX2X_F_CMD_TX_STOP; + bnx2x_func_state_change(bp, &func_params); + + /* TX_ST0PPED-->STARTED */ + func_params.cmd = BNX2X_F_CMD_TX_START; + return bnx2x_func_state_change(bp, &func_params); +#endif + } + + return 0; +} + +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) +{ + int port = BP_PORT(bp); + int i, rc = 0; + u8 cos; + struct bnx2x_mcast_ramrod_params rparam = {0}; + u32 reset_code; + + /* Wait until tx fastpath tasks complete */ + for_each_tx_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + for_each_cos_in_tx_queue(fp, cos) + rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); +#ifdef BNX2X_STOP_ON_ERROR + if (rc) + return; +#endif + } + + /* Give HW time to discard old tx messages */ + usleep_range(1000, 1000); + + /* Clean all ETH MACs */ + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); + if (rc < 0) + BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); + + /* Clean up UC list */ + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, + true); + if (rc < 0) + BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: " + "%d\n", rc); + + /* Disable LLH */ + if (!CHIP_IS_E1(bp)) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); + + /* Set "drop all" (stop Rx). + * We need to take a netif_addr_lock() here in order to prevent + * a race between the completion code and this code. + */ + netif_addr_lock_bh(bp->dev); + /* Schedule the rx_mode command */ + if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) + set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); + else + bnx2x_set_storm_rx_mode(bp); + + /* Cleanup multicast configuration */ + rparam.mcast_obj = &bp->mcast_obj; + rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); + if (rc < 0) + BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc); + + netif_addr_unlock_bh(bp->dev); + + + + /* + * Send the UNLOAD_REQUEST to the MCP. This will return if + * this function should perform FUNC, PORT or COMMON HW + * reset. + */ + reset_code = bnx2x_send_unload_req(bp, unload_mode); + + /* + * (assumption: No Attention from MCP at this stage) + * PMF probably in the middle of TXdisable/enable transaction + */ + rc = bnx2x_func_wait_started(bp); + if (rc) { + BNX2X_ERR("bnx2x_func_wait_started failed\n"); +#ifdef BNX2X_STOP_ON_ERROR + return; +#endif + } + /* Close multi and leading connections - Completions for ramrods are collected in a synchronous way */ + * Completions for ramrods are collected in a synchronous way + */ for_each_queue(bp, i) - - if (bnx2x_stop_client(bp, i)) + if (bnx2x_stop_queue(bp, i)) #ifdef BNX2X_STOP_ON_ERROR return; #else goto unload_error; #endif + /* If SP settings didn't get completed so far - something + * very wrong has happen. + */ + if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) + BNX2X_ERR("Hmmm... 
Common slow path ramrods got stuck!\n"); +#ifndef BNX2X_STOP_ON_ERROR +unload_error: +#endif rc = bnx2x_func_stop(bp); if (rc) { BNX2X_ERR("Function stop failed!\n"); #ifdef BNX2X_STOP_ON_ERROR return; -#else - goto unload_error; #endif } -#ifndef BNX2X_STOP_ON_ERROR -unload_error: -#endif - if (!BP_NOMCP(bp)) - reset_code = bnx2x_fw_command(bp, reset_code, 0); - else { - DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] " - "%d, %d, %d\n", BP_PATH(bp), - load_count[BP_PATH(bp)][0], - load_count[BP_PATH(bp)][1], - load_count[BP_PATH(bp)][2]); - load_count[BP_PATH(bp)][0]--; - load_count[BP_PATH(bp)][1 + port]--; - DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] " - "%d, %d, %d\n", BP_PATH(bp), - load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1], - load_count[BP_PATH(bp)][2]); - if (load_count[BP_PATH(bp)][0] == 0) - reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; - else if (load_count[BP_PATH(bp)][1 + port] == 0) - reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; - else - reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; - } - - if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) || - (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT)) - bnx2x__link_reset(bp); /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); @@ -7162,12 +7793,13 @@ unload_error: bnx2x_free_irq(bp); /* Reset the chip */ - bnx2x_reset_chip(bp, reset_code); + rc = bnx2x_reset_hw(bp, reset_code); + if (rc) + BNX2X_ERR("HW_RESET failed\n"); - /* Report UNLOAD_DONE to MCP */ - if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + /* Report UNLOAD_DONE to MCP */ + bnx2x_send_unload_done(bp); } void bnx2x_disable_close_the_gate(struct bnx2x *bp) @@ -7184,7 +7816,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp) val = REG_RD(bp, addr); val &= ~(0x300); REG_WR(bp, addr, val); - } else if (CHIP_IS_E1H(bp)) { + } else { val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK); val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); @@ -7195,24 +7827,37 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp) /* Close gates #2, #3 and #4: */ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close) { - u32 val, addr; + u32 val; /* Gates #2 and #4a are closed/opened for "not E1" only */ if (!CHIP_IS_E1(bp)) { /* #4 */ - val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS); - REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, - close ? (val | 0x1) : (val & (~(u32)1))); + REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close); /* #2 */ - val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES); - REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, - close ? (val | 0x1) : (val & (~(u32)1))); + REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); } /* #3 */ - addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; - val = REG_RD(bp, addr); - REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1))); + if (CHIP_IS_E1x(bp)) { + /* Prevent interrupts from HC on both ports */ + val = REG_RD(bp, HC_REG_CONFIG_1); + REG_WR(bp, HC_REG_CONFIG_1, + (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : + (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); + + val = REG_RD(bp, HC_REG_CONFIG_0); + REG_WR(bp, HC_REG_CONFIG_0, + (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : + (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); + } else { + /* Prevent incoming interrupts in IGU */ + val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, + (!close) ?
+ (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : + (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); + } DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); @@ -7330,7 +7975,6 @@ static void bnx2x_pxp_prep(struct bnx2x *bp) if (!CHIP_IS_E1(bp)) { REG_WR(bp, PXP2_REG_RD_START_INIT, 0); REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0); - REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0); mmiowb(); } } @@ -7345,46 +7989,133 @@ static void bnx2x_pxp_prep(struct bnx2x *bp) * - GRC * - RBCN, RBCP */ -static void bnx2x_process_kill_chip_reset(struct bnx2x *bp) +static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global) { u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; + u32 global_bits2, stay_reset2; + + /* + * Bits that have to be set in reset_mask2 if we want to reset 'global' + * (per chip) blocks. + */ + global_bits2 = + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; + /* Don't reset the following blocks */ not_reset_mask1 = MISC_REGISTERS_RESET_REG_1_RST_HC | MISC_REGISTERS_RESET_REG_1_RST_PXPV | MISC_REGISTERS_RESET_REG_1_RST_PXP; not_reset_mask2 = - MISC_REGISTERS_RESET_REG_2_RST_MDIO | + MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | MISC_REGISTERS_RESET_REG_2_RST_RBCN | MISC_REGISTERS_RESET_REG_2_RST_GRC | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | - MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B; + MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | + MISC_REGISTERS_RESET_REG_2_RST_ATC | + MISC_REGISTERS_RESET_REG_2_PGLC; + /* + * Keep the following blocks in reset: + * - all xxMACs are handled by the bnx2x_link code. + */ + stay_reset2 = + MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | + MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | + MISC_REGISTERS_RESET_REG_2_UMAC0 | + MISC_REGISTERS_RESET_REG_2_UMAC1 | + MISC_REGISTERS_RESET_REG_2_XMAC | + MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; + + /* Full reset masks according to the chip */ reset_mask1 = 0xffffffff; if (CHIP_IS_E1(bp)) reset_mask2 = 0xffff; - else + else if (CHIP_IS_E1H(bp)) reset_mask2 = 0x1ffff; + else if (CHIP_IS_E2(bp)) + reset_mask2 = 0xfffff; + else /* CHIP_IS_E3 */ + reset_mask2 = 0x3ffffff; - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, - reset_mask1 & (~not_reset_mask1)); + /* Don't reset global blocks unless we need to */ + if (!global) + reset_mask2 &= ~global_bits2; + + /* + * In case of attention in the QM, we need to reset PXP + * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM + * because otherwise QM reset would release 'close the gates' shortly + * before resetting the PXP, then the PSWRQ would send a write + * request to PGLUE. Then when PXP is reset, PGLUE would try to + * read the payload data from PSWWR, but PSWWR would not + * respond. The write queue in PGLUE would get stuck, dmae commands + * would not return. Therefore it's important to reset the second + * reset register (containing the + * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the + * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM + * bit). + */
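Condensing the per-chip mask construction above into one sketch (constants copied from this hunk; 'global' selects whether the MCP-shared blocks join the reset):

/* Sketch: full RESET_REG_2 mask per chip, narrowed when no global
 * reset is required - exactly what the branches above compute. */
static u32 example_reset_mask2(struct bnx2x *bp, bool global,
			       u32 global_bits2)
{
	u32 mask2;

	if (CHIP_IS_E1(bp))
		mask2 = 0xffff;
	else if (CHIP_IS_E1H(bp))
		mask2 = 0x1ffff;
	else if (CHIP_IS_E2(bp))
		mask2 = 0xfffff;
	else		/* E3 */
		mask2 = 0x3ffffff;

	/* keep MCP-common blocks out of reset unless 'global' */
	if (!global)
		mask2 &= ~global_bits2;

	return mask2;
}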
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, reset_mask2 & (~not_reset_mask2)); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, + reset_mask1 & (~not_reset_mask1)); + + barrier(); + mmiowb(); + + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, + reset_mask2 & (~stay_reset2)); + barrier(); mmiowb(); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2); mmiowb(); } -static int bnx2x_process_kill(struct bnx2x *bp) +/** + * bnx2x_er_poll_igu_vq - poll for pending writes bit + * + * @bp: driver handle + * + * It should get cleared in no more than 1s. Returns 0 if + * the pending writes bit gets cleared. + */ +static int bnx2x_er_poll_igu_vq(struct bnx2x *bp) +{ + u32 cnt = 1000; + u32 pend_bits = 0; + + do { + pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS); + + if (pend_bits == 0) + break; + + usleep_range(1000, 1000); + } while (cnt-- > 0); + + if (cnt <= 0) { + BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n", + pend_bits); + return -EBUSY; + } + + return 0; +} + +static int bnx2x_process_kill(struct bnx2x *bp, bool global) { int cnt = 1000; u32 val = 0; @@ -7403,7 +8134,7 @@ static int bnx2x_process_kill(struct bnx2x *bp) ((port_is_idle_1 & 0x1) == 0x1) && (pgl_exp_rom2 == 0xffffffff)) break; - msleep(1); + usleep_range(1000, 1000); } while (cnt-- > 0); if (cnt <= 0) { @@ -7423,6 +8154,11 @@ static int bnx2x_process_kill(struct bnx2x *bp) /* Close gates #2, #3 and #4 */ bnx2x_set_234_gates(bp, true); + /* Poll for IGU VQs for 57712 and newer chips */ + if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp)) + return -EAGAIN; + + /* TBD: Indicate that "process kill" is in progress to MCP */ /* Clear "unprepared" bit */ @@ -7435,25 +8171,28 @@ static int bnx2x_process_kill(struct bnx2x *bp) /* Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ - msleep(1); + usleep_range(1000, 1000); /* Prepare to chip reset: */ /* MCP */ - bnx2x_reset_mcp_prep(bp, &val); + if (global) + bnx2x_reset_mcp_prep(bp, &val); /* PXP */ bnx2x_pxp_prep(bp); barrier(); /* reset the chip */ - bnx2x_process_kill_chip_reset(bp); + bnx2x_process_kill_chip_reset(bp, global); barrier(); /* Recover after reset: */ /* MCP */ - if (bnx2x_reset_mcp_comp(bp, val)) + if (global && bnx2x_reset_mcp_comp(bp, val)) return -EAGAIN; + /* TBD: Add resetting the NO_MCP mode DB here */ + /* PXP */ bnx2x_pxp_prep(bp); @@ -7466,43 +8205,85 @@ static int bnx2x_process_kill(struct bnx2x *bp) return 0; } -static int bnx2x_leader_reset(struct bnx2x *bp) +int bnx2x_leader_reset(struct bnx2x *bp) { int rc = 0; + bool global = bnx2x_reset_is_global(bp); + /* Try to recover after the failure */ - if (bnx2x_process_kill(bp)) { - printk(KERN_ERR "%s: Something bad had happen! Aii!\n", - bp->dev->name); + if (bnx2x_process_kill(bp, global)) { + netdev_err(bp->dev, "Something bad had happened on engine %d! " + "Aii!\n", BP_PATH(bp)); rc = -EAGAIN; goto exit_leader_reset; } - /* Clear "reset is in progress" bit and update the driver state */ + /* + * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver + * state. + */
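bnx2x_er_poll_igu_vq() above follows the driver's usual bounded-poll idiom; the shape, as a self-contained sketch (hypothetical helper, not part of the patch):

/* Hypothetical: re-read a register roughly once per millisecond and
 * give up after 'tries' attempts, mirroring the loop above. */
static int poll_reg_until_zero(struct bnx2x *bp, u32 reg, int tries)
{
	while (tries-- > 0) {
		if (REG_RD(bp, reg) == 0)
			return 0;
		usleep_range(1000, 1000);
	}
	return -EBUSY;	/* caller escalates, e.g. returns -EAGAIN */
}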
bnx2x_set_reset_done(bp); - bp->recovery_state = BNX2X_RECOVERY_DONE; + if (global) + bnx2x_clear_reset_global(bp); exit_leader_reset: bp->is_leader = 0; - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08); - smp_wmb(); + bnx2x_release_leader_lock(bp); + smp_mb(); return rc; } -/* Assumption: runs under rtnl lock. This together with the fact - * that it's called only from bnx2x_reset_task() ensure that it +static inline void bnx2x_recovery_failed(struct bnx2x *bp) +{ + netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); + + /* Disconnect this device */ + netif_device_detach(bp->dev); + + /* + * Block ifup for all functions on this engine until "process kill" + * or power cycle. + */ + bnx2x_set_reset_in_progress(bp); + + /* Shut down the power */ + bnx2x_set_power_state(bp, PCI_D3hot); + + bp->recovery_state = BNX2X_RECOVERY_FAILED; + + smp_mb(); +} + +/* + * Assumption: runs under rtnl lock. This together with the fact + * that it's called only from bnx2x_sp_rtnl() ensures that it * will never be called when netif_running(bp->dev) is false. */ static void bnx2x_parity_recover(struct bnx2x *bp) { + bool global = false; + DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { case BNX2X_RECOVERY_INIT: DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n"); + bnx2x_chk_parity_attn(bp, &global, false); + /* Try to get a LEADER_LOCK HW lock */ - if (bnx2x_trylock_hw_lock(bp, - HW_LOCK_RESOURCE_RESERVED_08)) + if (bnx2x_trylock_leader_lock(bp)) { + bnx2x_set_reset_in_progress(bp); + /* + * Check if there is a global attention and if + * there was a global attention, set the global + * reset bit. + */ + + if (global) + bnx2x_set_reset_global(bp); + bp->is_leader = 1; + } /* Stop the driver */ /* If interface has been removed - break */ @@ -7510,21 +8291,47 @@ static void bnx2x_parity_recover(struct bnx2x *bp) return; bp->recovery_state = BNX2X_RECOVERY_WAIT; - /* Ensure "is_leader" and "recovery_state" - * update values are seen on other CPUs + + /* + * Reset MCP command sequence number and MCP mail box + * sequence as we are going to reset the MCP. + */ + if (global) { + bp->fw_seq = 0; + bp->fw_drv_pulse_wr_seq = 0; + } + + /* Ensure "is_leader", MCP command sequence and + * "recovery_state" update values are seen on other + * CPUs. */ - smp_wmb(); + smp_mb(); break; case BNX2X_RECOVERY_WAIT: DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n"); if (bp->is_leader) { - u32 load_counter = bnx2x_get_load_cnt(bp); - if (load_counter) { + int other_engine = BP_PATH(bp) ? 0 : 1; + u32 other_load_counter = + bnx2x_get_load_cnt(bp, other_engine); + u32 load_counter = + bnx2x_get_load_cnt(bp, BP_PATH(bp)); + global = bnx2x_reset_is_global(bp); + + /* + * In case of a parity in a global block, let + * the first leader that performs a + * leader_reset() reset the global blocks in + * order to clear global attentions. Otherwise + * the gates will remain closed for that + * engine. + */ + if (load_counter || + (global && other_load_counter)) { /* Wait until all other functions get * down. */ - schedule_delayed_work(&bp->reset_task, + schedule_delayed_work(&bp->sp_rtnl_task, HZ/10); return; } else { /* If all other functions got down - * try to bring the chip back to * normal. In any case it's an exit * point for a leader. */ - if (bnx2x_leader_reset(bp) || - bnx2x_nic_load(bp, LOAD_NORMAL)) { - printk(KERN_ERR"%s: Recovery " - "has failed.
Power cycle is " - "needed.\n", bp->dev->name); - /* Disconnect this device */ - netif_device_detach(bp->dev); - /* Block ifup for all function - * of this ASIC until - * "process kill" or power - * cycle. - */ - bnx2x_set_reset_in_progress(bp); - /* Shut down the power */ - bnx2x_set_power_state(bp, - PCI_D3hot); + if (bnx2x_leader_reset(bp)) { + bnx2x_recovery_failed(bp); return; } - return; + /* If we are here, it means that the + * leader has succeeded and doesn't + * want to be a leader any more. Try + * to continue as a non-leader. + */ + break; } } else { /* non-leader */ - if (!bnx2x_reset_is_done(bp)) { + if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) { /* Try to get a LEADER_LOCK HW lock as * long as a former leader may have * been unloaded by the user or * released a leadership by another * reason. */ - if (bnx2x_trylock_hw_lock(bp, - HW_LOCK_RESOURCE_RESERVED_08)) { + if (bnx2x_trylock_leader_lock(bp)) { /* I'm a leader now! Restart a * switch case. */ @@ -7571,18 +8368,30 @@ static void bnx2x_parity_recover(struct bnx2x *bp) break; } - schedule_delayed_work(&bp->reset_task, + schedule_delayed_work(&bp->sp_rtnl_task, HZ/10); return; - } else { /* A leader has completed - * the "process kill". It's an exit - * point for a non-leader. - */ - bnx2x_nic_load(bp, LOAD_NORMAL); - bp->recovery_state = - BNX2X_RECOVERY_DONE; - smp_wmb(); + } else { + /* + * If there was a global attention, wait + * for it to be cleared. + */ + if (bnx2x_reset_is_global(bp)) { + schedule_delayed_work( + &bp->sp_rtnl_task, + HZ/10); + return; + } + + if (bnx2x_nic_load(bp, LOAD_NORMAL)) + bnx2x_recovery_failed(bp); + else { + bp->recovery_state = + BNX2X_RECOVERY_DONE; + smp_mb(); + } + + return; } } @@ -7595,35 +8404,92 @@ static void bnx2x_parity_recover(struct bnx2x *bp) /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is * scheduled on a general queue in order to prevent a dead lock. */ -static void bnx2x_reset_task(struct work_struct *work) +static void bnx2x_sp_rtnl_task(struct work_struct *work) { - struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work); - -#ifdef BNX2X_STOP_ON_ERROR - BNX2X_ERR("reset task called but STOP_ON_ERROR defined" - " so reset not done to allow debug dump,\n" - KERN_ERR " you will need to reboot when done\n"); - return; -#endif + struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); rtnl_lock(); if (!netif_running(bp->dev)) - goto reset_task_exit; + goto sp_rtnl_exit; + + /* if stop on error is defined no recovery flows should be executed */ +#ifdef BNX2X_STOP_ON_ERROR + BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined " + "so reset not done to allow debug dump,\n" + "you will need to reboot when done\n"); + goto sp_rtnl_not_reset; +#endif + + if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) { + /* + * Clear all pending SP commands as we are going to reset the + * function anyway. + */ + bp->sp_rtnl_state = 0; + smp_mb(); - if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) bnx2x_parity_recover(bp); - else { + + goto sp_rtnl_exit; + } + + if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) { + /* + * Clear all pending SP commands as we are going to reset the + * function anyway.
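For orientation, the recovery flow spread across the hunks above reduces to the following state walk (informal summary of bnx2x_parity_recover() and bnx2x_sp_rtnl_task(), not code from the patch):

/*
 * INIT   -> try the leader lock; mark reset (and, on a global
 *           attention, the global bit) in progress; unload.
 * WAIT   -> leader: once the load counters drain, run
 *           bnx2x_leader_reset() ("process kill"); non-leader:
 *           retry the leader lock, or reload once the
 *           reset/global bits clear.
 * DONE   -> normal operation resumes.
 * FAILED -> bnx2x_recovery_failed(): detach the netdev, enter
 *           D3hot; a power cycle is required.
 */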
+ */ + bp->sp_rtnl_state = 0; + smp_mb(); + bnx2x_nic_unload(bp, UNLOAD_NORMAL); bnx2x_nic_load(bp, LOAD_NORMAL); + + goto sp_rtnl_exit; } +#ifdef BNX2X_STOP_ON_ERROR +sp_rtnl_not_reset: +#endif + if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) + bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); -reset_task_exit: +sp_rtnl_exit: rtnl_unlock(); } /* end of nic load/unload */ +static void bnx2x_period_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work); + + if (!netif_running(bp->dev)) + goto period_task_exit; + + if (CHIP_REV_IS_SLOW(bp)) { + BNX2X_ERR("period task called on emulation, ignoring\n"); + goto period_task_exit; + } + + bnx2x_acquire_phy_lock(bp); + /* + * The barrier is needed to ensure the ordering between the writing to + * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and + * the reading here. + */ + smp_mb(); + if (bp->port.pmf) { + bnx2x_period_func(&bp->link_params, &bp->link_vars); + + /* Re-queue task in 1 sec */ + queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ); + } + + bnx2x_release_phy_lock(bp); +period_task_exit: + return; +} + /* * Init service functions */ @@ -7681,8 +8547,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; /* save our pf_num */ int orig_pf_num = bp->pf_num; - u32 swap_en; - u32 swap_val; + int port; + u32 swap_en, swap_val, value; /* clear the UNDI indication */ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0); @@ -7717,21 +8583,19 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); bnx2x_undi_int_disable(bp); + port = BP_PORT(bp); /* close input traffic and wait for it */ /* Do not rcv packets to BRB */ - REG_WR(bp, - (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : - NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK : + NIG_REG_LLH0_BRB1_DRV_MASK), 0x0); /* Do not direct rcv packets that are not for MCP to * the BRB */ - REG_WR(bp, - (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP : - NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); + REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); /* clear AEU */ - REG_WR(bp, - (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : - MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); + REG_WR(bp, (port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : + MISC_REG_AEU_MASK_ATTN_FUNC_0), 0); msleep(10); /* save NIG port swap info */ @@ -7741,9 +8605,17 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp) REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0xd3ffffff); + + value = 0x1400; + if (CHIP_IS_E3(bp)) { + value |= MISC_REGISTERS_RESET_REG_2_MSTAT0; + value |= MISC_REGISTERS_RESET_REG_2_MSTAT1; + } + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - 0x1403); + value); + /* take the NIG out of reset and restore swap values */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, @@ -7784,7 +8656,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) /* Set doorbell size */ bp->db_size = (1 << BNX2X_DB_SHIFT); - if (CHIP_IS_E2(bp)) { + if (!CHIP_IS_E1x(bp)) { val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR); if ((val & 1) == 0) val = REG_RD(bp, MISC_REG_PORT4MODE_EN); @@ -7804,16 +8676,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->pfid = bp->pf_num; /* 0..7 */ } - /* - * set base FW non-default (fast path) status block id, this value is - * used to initialize the fw_sb_id saved on the fp/queue structure to - * determine the id used by the FW. - */ - if (CHIP_IS_E1x(bp)) - bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x; - else /* E2 */ - bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2; - bp->link_params.chip_id = bp->common.chip_id; BNX2X_DEV_INFO("chip ID is 0x%x\n", id); @@ -7825,13 +8687,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) } val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4); - bp->common.flash_size = (NVRAM_1MB_SIZE << + bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); BNX2X_DEV_INFO("flash_size 0x%x (%d)\n", bp->common.flash_size, bp->common.flash_size); bnx2x_init_shmem(bp); + + bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ? MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); @@ -7880,6 +8744,10 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; + bp->link_params.feature_config_flags |= + (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? + FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; + pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc); bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG; @@ -7904,14 +8772,11 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) int vn = BP_E1HVN(bp); int igu_sb_id; u32 val; - u8 fid; + u8 fid, igu_sb_cnt = 0; bp->igu_base_sb = 0xff; - bp->igu_sb_cnt = 0; if (CHIP_INT_MODE_IS_BC(bp)) { - bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, - NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); - + igu_sb_cnt = bp->igu_sb_cnt; bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * FP_SB_MAX_E1x; @@ -7937,13 +8802,21 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) else { if (bp->igu_base_sb == 0xff) bp->igu_base_sb = igu_sb_id; - bp->igu_sb_cnt++; + igu_sb_cnt++; } } } - bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, - NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); - if (bp->igu_sb_cnt == 0) + +#ifdef CONFIG_PCI_MSI + /* + * It's expected that number of CAM entries for this functions is equal + * to the number evaluated based on the MSI-X table size. We want a + * harsh warning if these values are different! 
@@ -7991,24 +8864,25 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
		return;
	}
-	switch (switch_cfg) {
-	case SWITCH_CFG_1G:
-		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
-					   port*0x10);
-		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-		break;
-
-	case SWITCH_CFG_10G:
-		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
-					   port*0x18);
-		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
-		break;
-
-	default:
-		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
-			  bp->port.link_config[0]);
-		return;
+	if (CHIP_IS_E3(bp))
+		bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+	else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+			break;
+		case SWITCH_CFG_10G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+			break;
+		default:
+			BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+				  bp->port.link_config[0]);
+			return;
+		}
	}
+	BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
@@ -8089,7 +8963,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
@@ -8108,7 +8982,7 @@
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
@@ -8126,7 +9000,7 @@
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
@@ -8146,7 +9020,7 @@
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
@@ -8164,7 +9038,7 @@
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
@@ -8182,7 +9056,7 @@
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
-				BNX2X_ERROR("NVRAM config error. "
+				BNX2X_ERR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
@@ -8192,8 +9066,6 @@
			break;
		case PORT_FEATURE_LINK_SPEED_10G_CX4:
-		case PORT_FEATURE_LINK_SPEED_10G_KX4:
-		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
@@ -8202,7 +9074,7 @@
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
-				BNX2X_ERROR("NVRAM config error. 
" + BNX2X_ERR("NVRAM config error. " "Invalid link_config 0x%x" " speed_cap_mask 0x%x\n", link_config, @@ -8210,11 +9082,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) return; } break; + case PORT_FEATURE_LINK_SPEED_20G: + bp->link_params.req_line_speed[idx] = SPEED_20000; + break; default: - BNX2X_ERROR("NVRAM config error. " - "BAD link speed link_config 0x%x\n", - link_config); + BNX2X_ERR("NVRAM config error. " + "BAD link speed link_config 0x%x\n", + link_config); bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG; bp->port.advertising[idx] = @@ -8325,10 +9200,13 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) #ifdef BCM_CNIC static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) { + int port = BP_PORT(bp); + int func = BP_ABS_FUNC(bp); + u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, - drv_lic_key[BP_PORT(bp)].max_iscsi_conn); + drv_lic_key[port].max_iscsi_conn); u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, - drv_lic_key[BP_PORT(bp)].max_fcoe_conn); + drv_lic_key[port].max_fcoe_conn); /* Get the number of maximum allowed iSCSI and FCoE connections */ bp->cnic_eth_dev.max_iscsi_conn = @@ -8339,11 +9217,59 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> BNX2X_MAX_FCOE_INIT_CONN_SHIFT; + /* Read the WWN: */ + if (!IS_MF(bp)) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + SHMEM_RD(bp, + dev_info.port_hw_config[port]. + fcoe_wwn_node_name_lower); + } else if (!IS_MF_SD(bp)) { + u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); + + /* + * Read the WWN info only if the FCoE feature is enabled for + * this function. + */ + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + /* Port info */ + bp->cnic_eth_dev.fcoe_wwn_port_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_upper); + bp->cnic_eth_dev.fcoe_wwn_port_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_port_name_lower); + + /* Node info */ + bp->cnic_eth_dev.fcoe_wwn_node_name_hi = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_upper); + bp->cnic_eth_dev.fcoe_wwn_node_name_lo = + MF_CFG_RD(bp, func_ext_config[func]. + fcoe_wwn_node_name_lower); + } + } + BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_iscsi_conn, bp->cnic_eth_dev.max_fcoe_conn); - /* If mamimum allowed number of connections is zero - + /* + * If maximum allowed number of connections is zero - * disable the feature. */ if (!bp->cnic_eth_dev.max_iscsi_conn) @@ -8364,6 +9290,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) u8 *fip_mac = bp->fip_mac; #endif + /* Zero primary MAC configuration */ + memset(bp->dev->dev_addr, 0, ETH_ALEN); + if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); random_ether_addr(bp->dev->dev_addr); @@ -8385,9 +9314,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) iscsi_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. 
iscsi_mac_addr_lower); - BNX2X_DEV_INFO("Read iSCSI MAC: " - "0x%x:0x%04x\n", val2, val); bnx2x_set_mac_buf(iscsi_mac, val, val2); + BNX2X_DEV_INFO("Read iSCSI MAC: " + BNX2X_MAC_FMT"\n", + BNX2X_MAC_PRN_LIST(iscsi_mac)); } else bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; @@ -8396,9 +9326,10 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) fcoe_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. fcoe_mac_addr_lower); - BNX2X_DEV_INFO("Read FCoE MAC to " - "0x%x:0x%04x\n", val2, val); bnx2x_set_mac_buf(fip_mac, val, val2); + BNX2X_DEV_INFO("Read FCoE L2 MAC to " + BNX2X_MAC_FMT"\n", + BNX2X_MAC_PRN_LIST(fip_mac)); } else bp->flags |= NO_FCOE_FLAG; @@ -8416,6 +9347,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) val = SHMEM_RD(bp, dev_info.port_hw_config[port]. iscsi_mac_lower); bnx2x_set_mac_buf(iscsi_mac, val, val2); + + val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_upper); + val = SHMEM_RD(bp, dev_info.port_hw_config[port]. + fcoe_fip_mac_lower); + bnx2x_set_mac_buf(fip_mac, val, val2); #endif } @@ -8423,13 +9360,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); #ifdef BCM_CNIC - /* Set the FCoE MAC in modes other then MF_SI */ - if (!CHIP_IS_E1x(bp)) { - if (IS_MF_SD(bp)) - memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); - else if (!IS_MF(bp)) - memcpy(fip_mac, iscsi_mac, ETH_ALEN); - } + /* Set the FCoE MAC in MF_SD mode */ + if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp)) + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); /* Disable iSCSI if MAC configuration is * invalid. @@ -8447,6 +9380,13 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) memset(bp->fip_mac, 0, ETH_ALEN); } #endif + + if (!is_valid_ether_addr(bp->dev->dev_addr)) + dev_err(&bp->pdev->dev, + "bad Ethernet MAC address configuration: " + BNX2X_MAC_FMT", change it manually before bringing up " + "the appropriate network interface\n", + BNX2X_MAC_PRN_LIST(bp->dev->dev_addr)); } static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) @@ -8458,27 +9398,66 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) bnx2x_get_common_hwinfo(bp); + /* + * initialize IGU parameters + */ if (CHIP_IS_E1x(bp)) { bp->common.int_block = INT_BLOCK_HC; bp->igu_dsb_id = DEF_SB_IGU_ID; bp->igu_base_sb = 0; - bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, - NUM_IGU_SB_REQUIRED(bp->l2_cid_count)); } else { bp->common.int_block = INT_BLOCK_IGU; val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { - DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n"); + int tout = 5000; + + BNX2X_DEV_INFO("FORCING Normal Mode\n"); + + val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); + REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val); + REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f); + + while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + tout--; + usleep_range(1000, 1000); + } + + if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { + dev_err(&bp->pdev->dev, + "FORCING Normal Mode failed!!!\n"); + return -EPERM; + } + } + + if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { + BNX2X_DEV_INFO("IGU Backward Compatible Mode\n"); bp->common.int_block |= INT_BLOCK_MODE_BW_COMP; } else - DP(NETIF_MSG_PROBE, "IGU Normal Mode\n"); + BNX2X_DEV_INFO("IGU Normal Mode\n"); bnx2x_get_igu_cam_info(bp); } - DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n", - bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt); + + /* + * set base FW non-default (fast path) status block id, this 
value is
+	 * used to initialize the fw_sb_id saved on the fp/queue structure to
+	 * determine the id used by the FW.
+	 */
+	if (CHIP_IS_E1x(bp))
+		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+	else /*
+	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+	      * the same queue are indicated on the same IGU SB). So we prefer
+	      * FW and IGU SBs to be the same value.
+	      */
+		bp->base_fw_ndsb = bp->igu_base_sb;
+
+	BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
+		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+		       bp->igu_sb_cnt, bp->base_fw_ndsb);
	/*
	 * Initialize MF configuration
@@ -8489,10 +9468,10 @@
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
-		DP(NETIF_MSG_PROBE,
-		   "shmem2base 0x%x, size %d, mfcfg offset %d\n",
-		   bp->common.shmem2_base, SHMEM2_RD(bp, size),
-		   (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
+			       (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
@@ -8523,8 +9502,8 @@
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
-					DP(NETIF_MSG_PROBE, "illegal MAC "
-							    "address for SI\n");
+					BNX2X_DEV_INFO("illegal MAC address "
+						       "for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
@@ -8537,14 +9516,12 @@
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
-					DP(NETIF_MSG_PROBE, "illegal OV for "
-							    "SD\n");
+					BNX2X_DEV_INFO("illegal OV for SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
-				DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
-				   val);
+				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
			}
		}
@@ -8557,13 +9534,16 @@
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
+				bp->path_has_ovlan = true;
+
+				BNX2X_DEV_INFO("MF OV for func %d is %d "
+					       "(0x%04x)\n", func, bp->mf_ov,
+					       bp->mf_ov);
			} else {
-				BNX2X_ERR("No valid MF OV for func %d,"
-					  " aborting\n", func);
-				rc = -EPERM;
+				dev_err(&bp->pdev->dev,
+					"No valid MF OV for func %d, "
+					"aborting\n", func);
+				return -EPERM;
			}
			break;
		case MULTI_FUNCTION_SI:
@@ -8572,31 +9552,40 @@
			break;
		default:
			if (vn) {
-				BNX2X_ERR("VN %d in single function mode,"
-					  " aborting\n", vn);
-				rc = -EPERM;
+				dev_err(&bp->pdev->dev,
+					"VN %d is in a single function mode, "
+					"aborting\n", vn);
+				return -EPERM;
			}
			break;
		}
+		/* check if the other port on the path needs ovlan:
+		 * since MF configuration is shared between ports, the
+		 * possible mixed modes are only
+		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
+		 */
+		if (CHIP_MODE_IS_4_PORT(bp) &&
+		    !bp->path_has_ovlan &&
+		    !IS_MF(bp) &&
+		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			u8 other_port = !BP_PORT(bp);
+			u8 other_func = BP_PATH(bp) + 2*other_port;
+			val = MF_CFG_RD(bp,
+					func_mf_config[other_func].e1hov_tag);
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+				bp->path_has_ovlan = true;
+		}
	}
	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;
-	/*
-	 * adjust E2 sb count: to be removed 
when FW will support
-	 * more then 16 L2 clients
-	 */
-#define MAX_L2_CLIENTS 16
-	if (CHIP_IS_E2(bp))
-		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
-				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
+	/* port info */
+	bnx2x_get_port_hwinfo(bp);
	if (!BP_NOMCP(bp)) {
-		bnx2x_get_port_hwinfo(bp);
-
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
@@ -8610,6 +9599,16 @@
	bnx2x_get_cnic_info(bp);
#endif
+	/* Get current FW pulse sequence */
+	if (!BP_NOMCP(bp)) {
+		int mb_idx = BP_FW_MB_IDX(bp);
+
+		bp->fw_drv_pulse_wr_seq =
+				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+				 DRV_PULSE_SEQ_MASK);
+		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+	}
+
	return rc;
}
@@ -8677,16 +9676,59 @@
out_not_found:
	return;
}
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+	u32 flags = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		SET_FLAGS(flags, MODE_FPGA);
+	else if (CHIP_REV_IS_EMUL(bp))
+		SET_FLAGS(flags, MODE_EMUL);
+	else
+		SET_FLAGS(flags, MODE_ASIC);
+
+	if (CHIP_MODE_IS_4_PORT(bp))
+		SET_FLAGS(flags, MODE_PORT4);
+	else
+		SET_FLAGS(flags, MODE_PORT2);
+
+	if (CHIP_IS_E2(bp))
+		SET_FLAGS(flags, MODE_E2);
+	else if (CHIP_IS_E3(bp)) {
+		SET_FLAGS(flags, MODE_E3);
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			SET_FLAGS(flags, MODE_E3_A0);
+		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+	}
+
+	if (IS_MF(bp)) {
+		SET_FLAGS(flags, MODE_MF);
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			SET_FLAGS(flags, MODE_MF_SD);
+			break;
+		case MULTI_FUNCTION_SI:
+			SET_FLAGS(flags, MODE_MF_SI);
+			break;
+		}
+	} else
+		SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+	SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+	INIT_MODE_FLAGS(bp) = flags;
+}
+
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;
-	/* Disable interrupt handling until HW is initialized */
-	atomic_set(&bp->intr_sem, 1);
-	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
-
	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
@@ -8695,12 +9737,17 @@
#endif
	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
-	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
-
+	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
	rc = bnx2x_get_hwinfo(bp);
+	if (rc)
+		return rc;
-	if (!rc)
-		rc = bnx2x_alloc_mem_bp(bp);
+	bnx2x_set_modes_bitmap(bp);
+
+	rc = bnx2x_alloc_mem_bp(bp);
+	if (rc)
+		return rc;
	bnx2x_read_fwinfo(bp);
@@ -8718,7 +9765,6 @@
			       "must load devices in order!\n");
	bp->multi_mode = multi_mode;
-	bp->int_mode = int_mode;
	/* Set TPA flags */
	if (disable_tpa) {
@@ -8754,6 +9800,21 @@
		bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
	bnx2x_dcbx_init_params(bp);
+#ifdef BCM_CNIC
+	if (CHIP_IS_E1x(bp))
+		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+	else
+		bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+	/* multiple tx priority */
+	if (CHIP_IS_E1x(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+	if (CHIP_IS_E3B0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
	return rc;
}
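bnx2x_set_modes_bitmap() above folds the chip family, revision, port count, multi-function mode and host endianness into a single bitmap consumed by the init code. As a worked example (assuming SET_FLAGS() is a plain bitwise OR of the given mask, as its use above suggests), an E3 Bx ASIC running in 2-port single-function mode on a little-endian host would accumulate:

	/* Illustration only; the flag names are the ones used in
	 * bnx2x_set_modes_bitmap() above.
	 */
	u32 flags = 0;

	SET_FLAGS(flags, MODE_ASIC);		/* neither FPGA nor emulation */
	SET_FLAGS(flags, MODE_PORT2);		/* not 4-port mode */
	SET_FLAGS(flags, MODE_E3);		/* E3 family... */
	SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); /* ...Bx revision */
	SET_FLAGS(flags, MODE_SF);		/* not multi-function */
	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
	INIT_MODE_FLAGS(bp) = flags;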
@@ -8762,49 +9823,70 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 * General service functions
****************************************************************************/
+/*
+ * net_device service functions
+ */
+
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
+	bool global = false;
+	int other_engine = BP_PATH(bp) ? 0 : 1;
+	u32 other_load_counter, load_counter;
	netif_carrier_off(dev);
	bnx2x_set_power_state(bp, PCI_D0);
-	if (!bnx2x_reset_is_done(bp)) {
+	other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+	load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+	/*
+	 * If parity had happened during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+	 * want the first function loaded on the current engine to
+	 * complete the recovery.
+	 */
+	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+	    bnx2x_chk_parity_attn(bp, &global, true))
		do {
-			/* Reset MCP mail box sequence if there is on going
-			 * recovery
+			/*
+			 * If there are attentions and they are in global
+			 * blocks, set the GLOBAL_RESET bit regardless of
+			 * whether it will be this function that will
+			 * complete the recovery or not.
			 */
-			bp->fw_seq = 0;
+			if (global)
+				bnx2x_set_reset_global(bp);
-			/* If it's the first function to load and reset done
-			 * is still not cleared it may mean that. We don't
-			 * check the attention state here because it may have
-			 * already been cleared by a "common" reset but we
-			 * shell proceed with "process kill" anyway.
+			/*
+			 * Only the first function on the current engine should
+			 * try to recover in open. In case of attentions in
+			 * global blocks only the first in the chip should try
+			 * to recover.
			 */
-			if ((bnx2x_get_load_cnt(bp) == 0) &&
-			    bnx2x_trylock_hw_lock(bp,
-				HW_LOCK_RESOURCE_RESERVED_08) &&
-			    (!bnx2x_leader_reset(bp))) {
-				DP(NETIF_MSG_HW, "Recovered in open\n");
+			if ((!load_counter &&
+			     (!global || !other_load_counter)) &&
+			    bnx2x_trylock_leader_lock(bp) &&
+			    !bnx2x_leader_reset(bp)) {
+				netdev_info(bp->dev, "Recovered in open\n");
				break;
			}
+			/* recovery has failed... */
			bnx2x_set_power_state(bp, PCI_D3hot);
+			bp->recovery_state = BNX2X_RECOVERY_FAILED;
-			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
+			netdev_err(bp->dev, "Recovery flow hasn't been properly"
			" completed yet. Try again later. If you still see this"
			" message after a few retries then power cycle is"
-			" required.\n", bp->dev->name);
+			" required.\n");
			return -EAGAIN;
		} while (0);
-	}
	bp->recovery_state = BNX2X_RECOVERY_DONE;
-
	return bnx2x_nic_load(bp, LOAD_OPEN);
}
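The recovery gate in bnx2x_open() above boils down to a single predicate: a function may try to become the recovery leader only if nothing else is loaded on its engine and, when the parity attention sits in a global block, nothing is loaded on the other engine either. Restated as a standalone helper (a sketch for illustration; the driver keeps this logic inline):

	/* Sketch: may this function attempt leader recovery in open? */
	static bool bnx2x_may_recover_in_open(u32 load_counter,
					      u32 other_load_counter,
					      bool global)
	{
		return !load_counter && (!global || !other_load_counter);
	}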
@@ -8815,198 +9897,126 @@ static int bnx2x_close(struct net_device *dev)
	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);
	return 0;
}
-#define E1_MAX_UC_LIST	29
-#define E1H_MAX_UC_LIST	30
-#define E2_MAX_UC_LIST	14
-static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p)
{
-	if (CHIP_IS_E1(bp))
-		return E1_MAX_UC_LIST;
-	else if (CHIP_IS_E1H(bp))
-		return E1H_MAX_UC_LIST;
-	else
-		return E2_MAX_UC_LIST;
-}
+	int mc_count = netdev_mc_count(bp->dev);
+	struct bnx2x_mcast_list_elem *mc_mac =
+		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+	struct netdev_hw_addr *ha;
+	if (!mc_mac)
+		return -ENOMEM;
-static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
-{
-	if (CHIP_IS_E1(bp))
-		/* CAM Entries for Port0:
-		 *      0 - prim ETH MAC
-		 *      1 - BCAST MAC
-		 *      2 - iSCSI L2 ring ETH MAC
-		 *      3-31 - UC MACs
-		 *
-		 * Port1 entries are allocated the same way starting from
-		 * entry 32.
-		 */
-		return 3 + 32 * BP_PORT(bp);
-	else if (CHIP_IS_E1H(bp)) {
-		/* CAM Entries:
-		 *      0-7  - prim ETH MAC for each function
-		 *      8-15 - iSCSI L2 ring ETH MAC for each function
-		 *      16 till 255 UC MAC lists for each function
-		 *
-		 * Remark: There is no FCoE support for E1H, thus FCoE related
-		 *         MACs are not considered.
-		 */
-		return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
-			bnx2x_max_uc_list(bp) * BP_FUNC(bp);
-	} else {
-		/* CAM Entries (there is a separate CAM per engine):
-		 *      0-4  - prim ETH MAC for each function
-		 *      4-7 - iSCSI L2 ring ETH MAC for each function
-		 *      8-11 - FIP ucast L2 MAC for each function
-		 *      12-15 - ALL_ENODE_MACS mcast MAC for each function
-		 *      16 till 71 UC MAC lists for each function
-		 */
-		u8 func_idx =
-			(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+	INIT_LIST_HEAD(&p->mcast_list);
-		return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
-			bnx2x_max_uc_list(bp) * func_idx;
+	netdev_for_each_mc_addr(ha, bp->dev) {
+		mc_mac->mac = bnx2x_mc_addr(ha);
+		list_add_tail(&mc_mac->link, &p->mcast_list);
+		mc_mac++;
	}
+
+	p->mcast_list_len = mc_count;
+
+	return 0;
+}
+
+static inline void bnx2x_free_mcast_macs_list(
+	struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_mcast_list_elem *mc_mac =
		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+				 link);
+
+	WARN_ON(!mc_mac);
+	kfree(mc_mac);
}
-/* set uc list, do not wait as wait implies sleep and
- * set_rx_mode can be invoked from non-sleepable context.
+/**
+ * bnx2x_set_uc_list - configure a new unicast MAC list.
+ *
+ * @bp: driver handle
 *
- * Instead we use the same ramrod data buffer each time we need
- * to configure a list of addresses, and use the fact that the
- * list of MACs is changed in an incremental way and that the
- * function is called under the netif_addr_lock. A temporary
- * inconsistent CAM configuration (possible in case of very fast
- * sequence of add/del/add on the host side) will shortly be
- * restored by the handler of the last ramrod.
+ * We will use zero (0) as a MAC type for these MACs. 
 */
-static int bnx2x_set_uc_list(struct bnx2x *bp)
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
{
-	int i = 0, old;
+	int rc;
	struct net_device *dev = bp->dev;
-	u8 offset = bnx2x_uc_list_cam_offset(bp);
	struct netdev_hw_addr *ha;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	unsigned long ramrod_flags = 0;
-	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
-		return -EINVAL;
+	/* First schedule a cleanup of the old configuration */
+	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+		return rc;
+	}
	netdev_for_each_uc_addr(ha, dev) {
-		/* copy mac */
-		config_cmd->config_table[i].msb_mac_addr =
-			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
-		config_cmd->config_table[i].middle_mac_addr =
-			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
-		config_cmd->config_table[i].lsb_mac_addr =
-			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
-
-		config_cmd->config_table[i].vlan_id = 0;
-		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
-		config_cmd->config_table[i].clients_bit_vector =
-			cpu_to_le32(1 << BP_L_ID(bp));
-
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_SET);
-
-		DP(NETIF_MSG_IFUP,
-		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
-		   config_cmd->config_table[i].msb_mac_addr,
-		   config_cmd->config_table[i].middle_mac_addr,
-		   config_cmd->config_table[i].lsb_mac_addr);
-
-		i++;
-
-		/* Set uc MAC in NIG */
-		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
-				     LLH_CAM_ETH_LINE + i);
-	}
-	old = config_cmd->hdr.length;
-	if (old > i) {
-		for (; i < old; i++) {
-			if (CAM_IS_INVALID(config_cmd->
-					   config_table[i])) {
-				/* already invalidated */
-				break;
-			}
-			/* invalidate */
-			SET_FLAG(config_cmd->config_table[i].flags,
-				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-				T_ETH_MAC_COMMAND_INVALIDATE);
+		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+				       BNX2X_UC_LIST_MAC, &ramrod_flags);
+		if (rc < 0) {
+			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+				  rc);
+			return rc;
		}
	}
-	wmb();
-
-	config_cmd->hdr.length = i;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* Mark that this ramrod doesn't use bp->set_mac_pending for
-	 * synchronization.
-	 */
-	config_cmd->hdr.echo = 0;
-
-	mb();
-
-	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
-
+	/* Execute the pending commands */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+				 BNX2X_UC_LIST_MAC, &ramrod_flags);
}
-void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
-	int i;
-	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
-	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
-	int ramrod_flags = WAIT_RAMROD_COMMON;
-	u8 offset = bnx2x_uc_list_cam_offset(bp);
-	u8 max_list_size = bnx2x_max_uc_list(bp);
-
-	for (i = 0; i < max_list_size; i++) {
-		SET_FLAG(config_cmd->config_table[i].flags,
-			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
-			T_ETH_MAC_COMMAND_INVALIDATE);
-		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
-	}
-
-	wmb();
+	struct net_device *dev = bp->dev;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	int rc = 0;
-	config_cmd->hdr.length = max_list_size;
-	config_cmd->hdr.offset = offset;
-	config_cmd->hdr.client_id = 0xff;
-	/* We'll wait for a completion this time... */
-	config_cmd->hdr.echo = 1;
+	rparam.mcast_obj = &bp->mcast_obj;
-	bp->set_mac_pending = 1;
+	/* first, clear all configured multicast MACs */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to clear multicast "
+			  "configuration: %d\n", rc);
+		return rc;
+	}
-	mb();
+	/* then, configure a new MACs list */
+	if (netdev_mc_count(dev)) {
+		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+		if (rc) {
+			BNX2X_ERR("Failed to create multicast MACs "
+				  "list: %d\n", rc);
+			return rc;
+		}
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
-		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+		/* Now add the new MACs */
+		rc = bnx2x_config_mcast(bp, &rparam,
+					BNX2X_MCAST_CMD_ADD);
+		if (rc < 0)
+			BNX2X_ERR("Failed to set a new multicast "
+				  "configuration: %d\n", rc);
-	/* Wait for a completion */
-	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
-				ramrod_flags);
+		bnx2x_free_mcast_macs_list(&rparam);
+	}
+	return rc;
}
-static inline int bnx2x_set_mc_list(struct bnx2x *bp)
-{
-	/* some multicasts */
-	if (CHIP_IS_E1(bp)) {
-		return bnx2x_set_e1_mc_list(bp);
-	} else { /* E1H and newer */
-		return bnx2x_set_e1h_mc_list(bp);
-	}
-}
-/* called with netif_tx_lock from dev_mcast.c */
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
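Note the memory scheme behind the multicast helpers introduced above: bnx2x_init_mcast_macs_list() makes a single kzalloc() for an array of bnx2x_mcast_list_elem and threads every element onto p->mcast_list, which is why bnx2x_free_mcast_macs_list() can release the whole list with one kfree() of the first entry. The same allocate-as-array, free-by-head pattern in a self-contained sketch (hypothetical element type, not the driver's; the list and allocation helpers come from linux/list.h and linux/slab.h):

	struct elem {
		struct list_head link;
		const u8 *mac;
	};

	/* One allocation backs every node on the list... */
	static struct elem *array_list_alloc(struct list_head *head, int count)
	{
		struct elem *arr = kzalloc(sizeof(*arr) * count, GFP_ATOMIC);
		int i;

		if (!arr)
			return NULL;

		INIT_LIST_HEAD(head);
		for (i = 0; i < count; i++)
			list_add_tail(&arr[i].link, head);
		return arr;
	}

	/* ...so freeing the first entry releases the whole array. */
	static void array_list_free(struct list_head *head)
	{
		kfree(list_first_entry(head, struct elem, link));
	}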
@@ -9017,23 +10027,31 @@ void bnx2x_set_rx_mode(struct net_device *dev)
		return;
	}
-	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
-	else if (dev->flags & IFF_ALLMULTI)
+	else if ((dev->flags & IFF_ALLMULTI) ||
+		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else {
		/* some multicasts */
-		if (bnx2x_set_mc_list(bp))
+		if (bnx2x_set_mc_list(bp) < 0)
			rx_mode = BNX2X_RX_MODE_ALLMULTI;
-		/* some unicasts */
-		if (bnx2x_set_uc_list(bp))
+		if (bnx2x_set_uc_list(bp) < 0)
			rx_mode = BNX2X_RX_MODE_PROMISC;
	}
	bp->rx_mode = rx_mode;
+
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+		return;
+	}
+
	bnx2x_set_storm_rx_mode(bp);
}
@@ 
-9122,10 +10140,35 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_bnx2x, #endif + .ndo_setup_tc = bnx2x_setup_tc, + +#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) + .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, +#endif }; +static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) +{ + struct device *dev = &bp->pdev->dev; + + if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { + bp->flags |= USING_DAC_FLAG; + if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { + dev_err(dev, "dma_set_coherent_mask failed, " + "aborting\n"); + return -EIO; + } + } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { + dev_err(dev, "System does not support DMA, aborting\n"); + return -EIO; + } + + return 0; +} + static int __devinit bnx2x_init_dev(struct pci_dev *pdev, - struct net_device *dev) + struct net_device *dev, + unsigned long board_type) { struct bnx2x *bp; int rc; @@ -9179,29 +10222,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, goto err_out_release; } - bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); - if (bp->pcie_cap == 0) { - dev_err(&bp->pdev->dev, - "Cannot find PCI Express capability, aborting\n"); + if (!pci_is_pcie(pdev)) { + dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n"); rc = -EIO; goto err_out_release; } - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) { - bp->flags |= USING_DAC_FLAG; - if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) { - dev_err(&bp->pdev->dev, "dma_set_coherent_mask" - " failed, aborting\n"); - rc = -EIO; - goto err_out_release; - } - - } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) { - dev_err(&bp->pdev->dev, - "System does not support DMA, aborting\n"); - rc = -EIO; + rc = bnx2x_set_coherency_mask(bp); + if (rc) goto err_out_release; - } dev->mem_start = pci_resource_start(pdev, 0); dev->base_addr = dev->mem_start; @@ -9217,16 +10246,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, goto err_out_release; } - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), - min_t(u64, BNX2X_DB_SIZE(bp), - pci_resource_len(pdev, 2))); - if (!bp->doorbells) { - dev_err(&bp->pdev->dev, - "Cannot map doorbell space, aborting\n"); - rc = -ENOMEM; - goto err_out_unmap; - } - bnx2x_set_power_state(bp, PCI_D0); /* clean indirect addresses */ @@ -9237,6 +10256,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); + /* + * Enable internal target-read (in case we are probed after PF FLR). + * Must be done prior to any BAR read access. 
Only for 57712 and up
+	 */
+	if (board_type != BCM57710 &&
+	    board_type != BCM57711 &&
+	    board_type != BCM57711E)
+		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);
@@ -9273,16 +10301,6 @@
	return 0;
-err_out_unmap:
-	if (bp->regview) {
-		iounmap(bp->regview);
-		bp->regview = NULL;
-	}
-	if (bp->doorbells) {
-		iounmap(bp->doorbells);
-		bp->doorbells = NULL;
-	}
-
err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);
@@ -9451,7 +10469,7 @@ int bnx2x_init_firmware(struct bnx2x *bp)
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
-	else if (CHIP_IS_E2(bp))
+	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
@@ -9519,9 +10537,47 @@ request_firmware_exit:
	return rc;
}
-static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
+static void bnx2x_release_firmware(struct bnx2x *bp)
{
-	int cid_count = L2_FP_COUNT(l2_cid_count);
+	kfree(bp->init_ops_offsets);
+	kfree(bp->init_ops);
+	kfree(bp->init_data);
+	release_firmware(bp->firmware);
+}
+
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+	.init_hw_cmn      = bnx2x_init_hw_common,
+	.init_hw_port     = bnx2x_init_hw_port,
+	.init_hw_func     = bnx2x_init_hw_func,
+
+	.reset_hw_cmn     = bnx2x_reset_common,
+	.reset_hw_port    = bnx2x_reset_port,
+	.reset_hw_func    = bnx2x_reset_func,
+
+	.gunzip_init      = bnx2x_gunzip_init,
+	.gunzip_end       = bnx2x_gunzip_end,
+
+	.init_fw          = bnx2x_init_firmware,
+	.release_fw       = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+	/* Prepare DMAE related driver resources */
+	bnx2x_setup_dmae(bp);
+
+	bnx2x_init_func_obj(bp, &bp->func_obj,
+			    bnx2x_sp(bp, func_rdata),
+			    bnx2x_sp_mapping(bp, func_rdata),
+			    &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+	int cid_count = BNX2X_L2_CID_COUNT(bp);
#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
@@ -9529,24 +10585,74 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
	return roundup(cid_count, QM_CID_ROUND);
}
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev: pci device
+ *
+ */
+static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+{
+	int pos;
+	u16 control;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+
+	/*
+	 * If MSI-X is not supported - return number of SBs needed to support
+	 * one fast path queue: one FP queue + SB for CNIC
+	 */
+	if (!pos)
+		return 1 + CNIC_PRESENT;
+
+	/*
+	 * The value in the PCI configuration space is the index of the last
+	 * entry, namely one less than the actual size of the table, which is
+	 * exactly what we want to return from this function: number of all SBs
+	 * without the default SB.
+	 */
+	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+	return control & PCI_MSIX_FLAGS_QSIZE;
+}
+
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
-	int rc, cid_count;
+	int rc, max_non_def_sbs;
+	int rx_count, tx_count, rss_count;
+	/*
+	 * An estimated maximum supported CoS number according to the chip
+	 * version.
+	 * We will try to roughly estimate the maximum number of CoSes this chip
+	 * may support in order to minimize the memory allocated for Tx
+	 * netdev_queue's. This number will be accurately calculated during the
+	 * initialization of bp->max_cos based on the chip versions AND chip
+	 * revision in the bnx2x_init_bp().
+	 */
+	u8 max_cos_est = 0;
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
-		cid_count = FP_SB_MAX_E1x;
+		max_cos_est = BNX2X_MULTI_TX_COS_E1X;
		break;
	case BCM57712:
-	case BCM57712E:
-		cid_count = FP_SB_MAX_E2;
+	case BCM57712_MF:
+		max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
+		break;
+
+	case BCM57800:
+	case BCM57800_MF:
+	case BCM57810:
+	case BCM57810_MF:
+	case BCM57840:
+	case BCM57840_MF:
+		max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
		break;
	default:
@@ -9555,38 +10661,77 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
		return -ENODEV;
	}
-	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
+	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+
+	/* !!! FIXME !!!
+	 * Do not allow the maximum SB count to grow above 16
+	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
+	 * We will use the FP_SB_MAX_E1x macro for this matter.
+	 */
+	max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
+
+	WARN_ON(!max_non_def_sbs);
+
+	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
+	rss_count = max_non_def_sbs - CNIC_PRESENT;
+
+	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+	rx_count = rss_count + FCOE_PRESENT;
+
+	/*
+	 * Maximum number of netdev Tx queues:
+	 *      Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+	 */
+	tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
	/* dev zeroed in init_etherdev */
-	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
+	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}
	bp = netdev_priv(dev);
-	bp->msg_enable = debug;
-	pci_set_drvdata(pdev, dev);
+	DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+			  tx_count, rx_count);
-	bp->l2_cid_count = cid_count;
+	bp->igu_sb_cnt = max_non_def_sbs;
+	bp->msg_enable = debug;
+	pci_set_drvdata(pdev, dev);
-	rc = bnx2x_init_dev(pdev, dev);
+	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
+	DP(NETIF_MSG_DRV, "max_non_def_sbs %d", max_non_def_sbs);
+
	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;
+	/*
+	 * Map doorbells here as we need the real value of bp->max_cos which
+	 * is initialized in bnx2x_init_bp().
+	 */
+	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+					min_t(u64, BNX2X_DB_SIZE(bp),
+					      pci_resource_len(pdev, 2)));
+	if (!bp->doorbells) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map doorbell space, aborting\n");
+		rc = -ENOMEM;
+		goto init_one_exit;
+	}
+
	/* calc qm_cid_count */
-	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
+	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
#ifdef BCM_CNIC
-	/* disable FCOE L2 queue for E1x*/
-	if (CHIP_IS_E1x(bp))
+	/* disable FCOE L2 queue for E1x and E3*/
+	if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
		bp->flags |= NO_FCOE_FLAG;
#endif
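To make the queue sizing above concrete: assuming a full 16-entry MSI-X table on an E3 Bx part, so max_cos_est = BNX2X_MULTI_TX_COS_E3B0 = 3 and FP_SB_MAX_E1x = 16 (both values implied by the FIXME comment's "16*BNX2X_MULTI_TX_COS=48"), and with BCM_CNIC defined (CNIC_PRESENT = FCOE_PRESENT = 1), the arithmetic works out as:

	/* Worked example, not driver code:
	 *
	 *   max_non_def_sbs = 16
	 *   rss_count = 16 - CNIC_PRESENT        = 15
	 *   rx_count  = 15 + FCOE_PRESENT        = 16
	 *   tx_count  = MAX_TXQS_PER_COS * 3 + 1 = 16 * 3 + 1 = 49
	 *
	 * so alloc_etherdev_mqs() reserves 49 Tx and 16 Rx netdev queues,
	 * even though bnx2x_init_bp() may later settle on a smaller
	 * bp->max_cos.
	 */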
@@ -9686,7 +10831,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
	bnx2x_set_power_state(bp, PCI_D3hot);
	/* Make sure RESET task is not scheduled before continuing */
-	cancel_delayed_work_sync(&bp->reset_task);
+	cancel_delayed_work_sync(&bp->sp_rtnl_task);
	if (bp->regview)
		iounmap(bp->regview);
@@ -9713,12 +10858,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
	bp->rx_mode = BNX2X_RX_MODE_NONE;
+#ifdef BCM_CNIC
+	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+	/* Stop Tx */
+	bnx2x_tx_disable(bp);
+
	bnx2x_netif_stop(bp, 0);
-	netif_carrier_off(bp->dev);
	del_timer_sync(&bp->timer);
-	bp->stats_state = STATS_STATE_DISABLED;
-	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
	/* Release IRQs */
	bnx2x_free_irq(bp);
@@ -9733,6 +10883,8 @@
	bp->state = BNX2X_STATE_CLOSED;
+	netif_carrier_off(bp->dev);
+
	return 0;
}
@@ -9845,8 +10997,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
	struct bnx2x *bp = netdev_priv(dev);
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
-		printk(KERN_ERR "Handling parity error recovery. "
-				"Try again later\n");
+		netdev_err(bp->dev, "Handling parity error recovery. "
+				    "Try again later\n");
		return;
	}
@@ -9905,10 +11057,33 @@ static void __exit bnx2x_cleanup(void)
	destroy_workqueue(bnx2x_wq);
}
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp:		driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Return 0 if success, -ENODEV if ramrod doesn't return.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+	unsigned long ramrod_flags = 0;
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+				 &bp->iscsi_l2_mac_obj, true,
+				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
@@ -9929,23 +11104,22 @@
		u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
+		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;
		/* Set validation for iSCSI L2 client before sending SETUP
		 *  ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
-			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
-					     hdr.conn_and_cmd_data) >>
-				SPE_HDR_CMD_ID_SHIFT) & 0xff;
-
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
-				bnx2x_set_ctx_validation(&bp->context.
-					vcxt[BNX2X_ISCSI_ETH_CID].eth,
-					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
+				bnx2x_set_ctx_validation(bp, &bp->context.
+					vcxt[BNX2X_ISCSI_ETH_CID].eth,
+					BNX2X_ISCSI_ETH_CID);
		}
-		/* There may be not more than 8 L2 and not more than 8 L5 SPEs
-		 * We also check that the number of outstanding
+		/*
+		 * There may be not more than 8 L2 and not more than 8 L5 SPEs
+		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
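The new bnx2x_set_iscsi_eth_mac_addr() above shows the ramrod_flags idiom that the reworked driver uses throughout: callers compose flag bits on the stack and pass them down, with RAMROD_COMP_WAIT turning an otherwise asynchronous classification ramrod into a blocking call. A hedged sketch of the same pattern for a unicast-list address; bp, mac and mac_obj stand in for whatever the caller holds, while the flag, type and function names are the ones visible in this patch:

	/* Sketch: add a MAC and block until the ramrod completes.
	 * RAMROD_COMP_WAIT implies a sleepable context.
	 */
	unsigned long ramrod_flags = 0;
	int rc;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	rc = bnx2x_set_mac_one(bp, mac, mac_obj, true /* add */,
			       BNX2X_UC_LIST_MAC, &ramrod_flags);
	if (rc < 0)
		BNX2X_ERR("Failed to add MAC: %d\n", rc);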
@@ -10071,18 +11245,61 @@
	return bnx2x_cnic_ctl_send(bp, &ctl);
}
-static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
-	struct cnic_ctl_info ctl;
+	struct cnic_ctl_info ctl = {0};
	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
+	ctl.data.comp.error = err;
	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+	unsigned long accept_flags = 0, ramrod_flags = 0;
+	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+	if (start) {
+		/* Start accepting on iSCSI L2 ring. Accept all multicasts
+		 * because it's the only way for UIO Queue to accept
+		 * multicasts (in non-promiscuous mode only one Queue per
+		 * function will receive multicast packets (leading in our
+		 * case)).
+		 */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+		/* Clear STOP_PENDING bit if START is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+	} else
+		/* Clear START_PENDING bit if STOP is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(sched_state, &bp->sp_state);
+	else {
+		__set_bit(RAMROD_RX, &ramrod_flags);
+		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+				    ramrod_flags);
+	}
+}
+
+
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
@@ -10106,45 +11323,65 @@
	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
-		u32 cli = ctl->data.ring.client_id;
-
-		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
-		bnx2x_del_fcoe_eth_macs(bp);
+		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+		unsigned long sp_bits = 0;
+
+		/* Configure the iSCSI classification object */
+		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+				   cp->iscsi_l2_client_id,
+				   cp->iscsi_l2_cid, BP_FUNC(bp),
+				   bnx2x_sp(bp, mac_rdata),
+				   bnx2x_sp_mapping(bp, mac_rdata),
+				   BNX2X_FILTER_MAC_PENDING,
+				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+				   &bp->macs_pool);
		/* Set iSCSI MAC address */
-		bnx2x_set_iscsi_eth_mac_addr(bp, 1);
+		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+		if (rc)
+			break;
		mmiowb();
		barrier();
-		/* Start accepting on iSCSI L2 ring. Accept all multicasts
-		 * because it's the only way for UIO Client to accept
-		 * multicasts (in non-promiscuous mode only one Client per
-		 * function will receive multicast packets (leading in our
-		 * case). 
- */ - bnx2x_rxq_set_mac_filters(bp, cli, - BNX2X_ACCEPT_UNICAST | - BNX2X_ACCEPT_BROADCAST | - BNX2X_ACCEPT_ALL_MULTICAST); - storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); + /* Start accepting on iSCSI L2 ring */ + + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, true); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); break; } /* rtnl_lock is held. */ case DRV_CTL_STOP_L2_CMD: { - u32 cli = ctl->data.ring.client_id; + unsigned long sp_bits = 0; /* Stop accepting on iSCSI L2 ring */ - bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE); - storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); + netif_addr_lock_bh(dev); + bnx2x_set_iscsi_eth_rx_mode(bp, false); + netif_addr_unlock_bh(dev); + + /* bits to wait on */ + __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits); + __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits); + + if (!bnx2x_wait_sp_comp(bp, sp_bits)) + BNX2X_ERR("rx_mode completion timed out!\n"); mmiowb(); barrier(); /* Unset iSCSI L2 MAC */ - bnx2x_set_iscsi_eth_mac_addr(bp, 0); + rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj, + BNX2X_ISCSI_ETH_MAC, true); break; } case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { @@ -10156,11 +11393,6 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) break; } - case DRV_CTL_ISCSI_STOPPED_CMD: { - bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED); - break; - } - default: BNX2X_ERR("unknown command %x\n", ctl->cmd); rc = -EINVAL; @@ -10181,13 +11413,13 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX; cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX; } - if (CHIP_IS_E2(bp)) + if (!CHIP_IS_E1x(bp)) cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb; else cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb; - cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp); - cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp); + cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp); + cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp); cp->irq_arr[1].status_blk = bp->def_status_blk; cp->irq_arr[1].status_blk_num = DEF_SB_ID; cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID; @@ -10204,9 +11436,6 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, if (ops == NULL) return -EINVAL; - if (atomic_read(&bp->intr_sem) != 0) - return -EBUSY; - bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!bp->cnic_kwq) return -ENOMEM; @@ -10221,7 +11450,7 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, bp->cnic_data = data; cp->num_irq = 0; - cp->drv_state = CNIC_DRV_STATE_REGD; + cp->drv_state |= CNIC_DRV_STATE_REGD; cp->iro_arr = bp->iro_arr; bnx2x_setup_cnic_irq_info(bp); @@ -10275,8 +11504,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) cp->drv_register_cnic = bnx2x_register_cnic; cp->drv_unregister_cnic = bnx2x_unregister_cnic; cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; - cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID + - BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; + cp->iscsi_l2_client_id = + bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; if (NO_ISCSI_OOO(bp)) diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 86bba25d2d3..02461fef875 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ 
b/drivers/net/bnx2x/bnx2x_reg.h @@ -32,7 +32,11 @@ /* [R 1] ATC initalization done */ #define ATC_REG_ATC_INIT_DONE 0x1100bc /* [RC 6] Interrupt register #0 read clear */ -#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 +#define ATC_REG_ATC_INT_STS_CLR 0x1101c0 +/* [RW 5] Parity mask register #0 read/write */ +#define ATC_REG_ATC_PRTY_MASK 0x1101d8 +/* [RC 5] Parity register #0 read clear */ +#define ATC_REG_ATC_PRTY_STS_CLR 0x1101d0 /* [RW 19] Interrupt mask register #0 read/write */ #define BRB1_REG_BRB1_INT_MASK 0x60128 /* [R 19] Interrupt register #0 read */ @@ -54,16 +58,20 @@ /* [RW 10] The number of free blocks below which the full signal to class 0 * is asserted */ #define BRB1_REG_FULL_0_XOFF_THRESHOLD_0 0x601d0 -/* [RW 10] The number of free blocks above which the full signal to class 0 +#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1 0x60230 +/* [RW 11] The number of free blocks above which the full signal to class 0 * is de-asserted */ #define BRB1_REG_FULL_0_XON_THRESHOLD_0 0x601d4 -/* [RW 10] The number of free blocks below which the full signal to class 1 +#define BRB1_REG_FULL_0_XON_THRESHOLD_1 0x60234 +/* [RW 11] The number of free blocks below which the full signal to class 1 * is asserted */ #define BRB1_REG_FULL_1_XOFF_THRESHOLD_0 0x601d8 -/* [RW 10] The number of free blocks above which the full signal to class 1 +#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1 0x60238 +/* [RW 11] The number of free blocks above which the full signal to class 1 * is de-asserted */ #define BRB1_REG_FULL_1_XON_THRESHOLD_0 0x601dc -/* [RW 10] The number of free blocks below which the full signal to the LB +#define BRB1_REG_FULL_1_XON_THRESHOLD_1 0x6023c +/* [RW 11] The number of free blocks below which the full signal to the LB * port is asserted */ #define BRB1_REG_FULL_LB_XOFF_THRESHOLD 0x601e0 /* [RW 10] The number of free blocks above which the full signal to the LB @@ -75,15 +83,49 @@ /* [RW 10] The number of free blocks below which the High_llfc signal to interface #n is asserted. */ #define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0 0x6013c -/* [RW 23] LL RAM data. */ -#define BRB1_REG_LL_RAM 0x61000 +/* [RW 11] The number of blocks guarantied for the LB port */ +#define BRB1_REG_LB_GUARANTIED 0x601ec +/* [RW 11] The hysteresis on the guarantied buffer space for the Lb port + * before signaling XON. */ +#define BRB1_REG_LB_GUARANTIED_HYST 0x60264 +/* [RW 24] LL RAM data. */ +#define BRB1_REG_LL_RAM 0x61000 /* [RW 10] The number of free blocks above which the Low_llfc signal to interface #n is de-asserted. */ #define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0 0x6016c /* [RW 10] The number of free blocks below which the Low_llfc signal to interface #n is asserted. */ #define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0 0x6015c -/* [RW 10] The number of blocks guarantied for the MAC port */ +/* [RW 11] The number of blocks guarantied for class 0 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED 0x60244 +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST 0x60254 +/* [RW 11] The number of blocks guarantied for class 1 in MAC 0. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED 0x60248 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1in MAC 0 + * before signaling XON. 
The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST 0x60258 +/* [RW 11] The number of blocks guarantied for class 0in MAC1.The register + * is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED 0x6024c +/* [RW 11] The hysteresis on the guarantied buffer space for class 0 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST 0x6025c +/* [RW 11] The number of blocks guarantied for class 1 in MAC 1. The + * register is applicable only when per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED 0x60250 +/* [RW 11] The hysteresis on the guarantied buffer space for class 1 in MAC + * 1 before signaling XON. The register is applicable only when + * per_class_guaranty_mode is set. */ +#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST 0x60260 +/* [RW 11] The number of blocks guarantied for the MAC port. The register is + * applicable only when per_class_guaranty_mode is reset. */ #define BRB1_REG_MAC_GUARANTIED_0 0x601e8 #define BRB1_REG_MAC_GUARANTIED_1 0x60240 /* [R 24] The number of full blocks. */ @@ -100,15 +142,19 @@ /* [RW 10] The number of free blocks below which the pause signal to class 0 * is asserted */ #define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 0x601c0 -/* [RW 10] The number of free blocks above which the pause signal to class 0 +#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 0x60220 +/* [RW 11] The number of free blocks above which the pause signal to class 0 * is de-asserted */ #define BRB1_REG_PAUSE_0_XON_THRESHOLD_0 0x601c4 -/* [RW 10] The number of free blocks below which the pause signal to class 1 +#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1 0x60224 +/* [RW 11] The number of free blocks below which the pause signal to class 1 * is asserted */ #define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0 0x601c8 -/* [RW 10] The number of free blocks above which the pause signal to class 1 +#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 0x60228 +/* [RW 11] The number of free blocks above which the pause signal to class 1 * is de-asserted */ #define BRB1_REG_PAUSE_1_XON_THRESHOLD_0 0x601cc +#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1 0x6022c /* [RW 10] Write client 0: De-assert pause threshold. Not Functional */ #define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078 #define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c @@ -325,7 +371,7 @@ mechanism. The fields are: [5:0] - message length; [12:6] - message pointer; 18:13] - next pointer. */ #define CCM_REG_XX_DESCR_TABLE 0xd0300 -#define CCM_REG_XX_DESCR_TABLE_SIZE 36 +#define CCM_REG_XX_DESCR_TABLE_SIZE 24 /* [R 7] Used to read the value of XX protection Free counter. */ #define CCM_REG_XX_FREE 0xd0184 /* [RW 6] Initial value for the credit counter; responsible for fulfilling @@ -422,6 +468,7 @@ #define CFC_REG_NUM_LCIDS_ALLOC 0x104020 /* [R 9] Number of Arriving LCIDs in Link List Block */ #define CFC_REG_NUM_LCIDS_ARRIVING 0x104004 +#define CFC_REG_NUM_LCIDS_INSIDE_PF 0x104120 /* [R 9] Number of Leaving LCIDs in Link List Block */ #define CFC_REG_NUM_LCIDS_LEAVING 0x104018 #define CFC_REG_WEAK_ENABLE_PF 0x104124 @@ -783,6 +830,7 @@ /* [RW 3] The number of simultaneous outstanding requests to Context Fetch Interface. */ #define DORQ_REG_OUTST_REQ 0x17003c +#define DORQ_REG_PF_USAGE_CNT 0x1701d0 #define DORQ_REG_REGN 0x170038 /* [R 4] Current value of response A counter credit. 
Initial credit is configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd @@ -802,10 +850,12 @@ /* [RW 28] TCM Header when both ULP and TCP context is loaded. */ #define DORQ_REG_SHRT_CMHEAD 0x170054 #define HC_CONFIG_0_REG_ATTN_BIT_EN_0 (0x1<<4) +#define HC_CONFIG_0_REG_BLOCK_DISABLE_0 (0x1<<0) #define HC_CONFIG_0_REG_INT_LINE_EN_0 (0x1<<3) #define HC_CONFIG_0_REG_MSI_ATTN_EN_0 (0x1<<7) #define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2) -#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) +#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1) +#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0) #define HC_REG_AGG_INT_0 0x108050 #define HC_REG_AGG_INT_1 0x108054 #define HC_REG_ATTN_BIT 0x108120 @@ -844,6 +894,7 @@ #define HC_REG_VQID_0 0x108008 #define HC_REG_VQID_1 0x10800c #define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN (0x1<<1) +#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE (0x1<<0) #define IGU_REG_ATTENTION_ACK_BITS 0x130108 /* [R 4] Debug: attn_fsm */ #define IGU_REG_ATTN_FSM 0x130054 @@ -933,6 +984,14 @@ * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */ #define IGU_REG_WRITE_DONE_PENDING 0x130480 #define MCP_A_REG_MCPR_SCRATCH 0x3a0000 +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c +#define MCP_REG_MCPR_GP_INPUTS 0x800c0 +#define MCP_REG_MCPR_GP_OENABLE 0x800c8 +#define MCP_REG_MCPR_GP_OUTPUTS 0x800c4 +#define MCP_REG_MCPR_IMC_COMMAND 0x85900 +#define MCP_REG_MCPR_IMC_DATAREG0 0x85920 +#define MCP_REG_MCPR_IMC_SLAVE_CONTROL 0x85904 +#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER 0x8501c #define MCP_REG_MCPR_NVM_ACCESS_ENABLE 0x86424 #define MCP_REG_MCPR_NVM_ADDR 0x8640c #define MCP_REG_MCPR_NVM_CFG4 0x8642c @@ -1429,11 +1488,37 @@ /* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 only. */ #define MISC_REG_E1HMF_MODE 0xa5f8 +/* [R 1] Status of four port mode path swap input pin. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP 0xa75c +/* [RW 2] 4 port path swap overwrite.[0] - Overwrite control; if it is 0 - + the path_swap output is equal to 4 port mode path swap input pin; if it + is 1 - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR 0xa738 +/* [R 1] Status of 4 port mode port swap input pin. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP 0xa754 +/* [RW 2] 4 port port swap overwrite.[0] - Overwrite control; if it is 0 - + the port_swap output is equal to 4 port mode port swap input pin; if it + is 1 - the port_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the port_swap output. Reset on Hard reset. */ +#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR 0xa734 /* [RW 32] Debug only: spare RW register reset by core reset */ #define MISC_REG_GENERIC_CR_0 0xa460 #define MISC_REG_GENERIC_CR_1 0xa464 /* [RW 32] Debug only: spare RW register reset by por reset */ #define MISC_REG_GENERIC_POR_1 0xa474 +/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to + use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO + can not be configured as an output. Each output has its output enable in + the MCP register space; but this bit needs to be set to make use of that. + Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When + set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON. 
+ When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change + the i/o to an output and will drive the TimeSync output. Bit[31:7]: + spare. Global register. Reset by hard reset. */ +#define MISC_REG_GEN_PURP_HWG 0xa9a0 /* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of these bits is written as a '1'; the corresponding SPIO bit will turn off it's drivers and become an input. This is the reset state of all GPIO @@ -1636,6 +1721,14 @@ in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 - timer 8 */ #define MISC_REG_SW_TIMER_VAL 0xa5c0 +/* [R 1] Status of two port mode path swap input pin. */ +#define MISC_REG_TWO_PORT_PATH_SWAP 0xa758 +/* [RW 2] 2 port swap overwrite.[0] - Overwrite control; if it is 0 - the + path_swap output is equal to 2 port mode path swap input pin; if it is 1 + - the path_swap output is equal to bit[1] of this register; [1] - + Overwrite value. If bit[0] of this register is 1 this is the value that + receives the path_swap output. Reset on Hard reset. */ +#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR 0xa72c /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are loaded; 0-prepare; -unprepare */ #define MISC_REG_UNPREPARED 0xa424 @@ -1644,6 +1737,36 @@ #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) #define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) +/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or + * not it is the recipient of the message on the MDIO interface. The value + * is compared to the value on ctrl_md_devad. Drives output + * misc_xgxs0_phy_addr. Global register. */ +#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc +/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system + side. This should be less than or equal to phy_port_mode; if some of the + ports are not used. This enables reduction of frequency on the core side. + This is a strap input for the XMAC_MP core. 00 - Single Port Mode; 01 - + Dual Port Mode; 10 - Tri Port Mode; 11 - Quad Port Mode. This is a strap + input for the XMAC_MP core; and should be changed only while reset is + held low. Reset on Hard reset. */ +#define MISC_REG_XMAC_CORE_PORT_MODE 0xa964 +/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp + Core. This is a strap input for the XMAC_MP core. 00 - Single Port Mode; + 01 - Dual Port Mode; 1x - Quad Port Mode; This is a strap input for the + XMAC_MP core; and should be changed only while reset is held low. Reset + on Hard reset. */ +#define MISC_REG_XMAC_PHY_PORT_MODE 0xa960 +/* [RW 32] 1 [47] Packet Size = 64 Write to this register write bits 31:0. + * Reads from this register will clear bits 31:0. */ +#define MSTAT_REG_RX_STAT_GR64_LO 0x200 +/* [RW 32] 1 [00] Tx Good Packet Count Write to this register write bits + * 31:0. Reads from this register will clear bits 31:0. 
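/*
 * A minimal sketch of sampling one of the clear-on-read MSTAT counters
 * defined here, assuming the driver's REG_RD() accessor and the
 * GRCBASE_MSTAT0 block base that appears further down in this file;
 * the helper name is illustrative. Since a read clears bits 31:0, each
 * call returns the packets counted since the previous sample, and the
 * caller must accumulate the deltas itself.
 */
static u32 bnx2x_mstat_rx_gr64_delta(struct bnx2x *bp)
{
	return REG_RD(bp, GRCBASE_MSTAT0 + MSTAT_REG_RX_STAT_GR64_LO);
}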
*/ +#define MSTAT_REG_TX_STAT_GTXPOK_LO 0 +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST (0x1<<0) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST (0x1<<1) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) +#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) #define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0) #define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0) #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) @@ -1837,6 +1960,10 @@ #define NIG_REG_LLH1_FUNC_MEM 0x161c0 #define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 #define NIG_REG_LLH1_FUNC_MEM_SIZE 16 +/* [RW 1] When this bit is set; the LLH will classify the packet before + * sending it to the BRB or calculating WoL on it. This bit controls port 1 + * only. The legacy llh_multi_function_mode bit controls port 0. */ +#define NIG_REG_LLH1_MF_MODE 0x18614 /* [RW 8] init credit counter for port1 in LLH */ #define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 #define NIG_REG_LLH1_XCM_MASK 0x10134 @@ -1858,11 +1985,25 @@ /* [R 32] Interrupt register #0 read */ #define NIG_REG_NIG_INT_STS_0 0x103b0 #define NIG_REG_NIG_INT_STS_1 0x103c0 +/* [R 32] Legacy E1 and E1H location for parity error mask register. */ +#define NIG_REG_NIG_PRTY_MASK 0x103dc +/* [RW 32] Parity mask register #0 read/write */ +#define NIG_REG_NIG_PRTY_MASK_0 0x183c8 +#define NIG_REG_NIG_PRTY_MASK_1 0x183d8 /* [R 32] Legacy E1 and E1H location for parity error status register. */ #define NIG_REG_NIG_PRTY_STS 0x103d0 /* [R 32] Parity register #0 read */ #define NIG_REG_NIG_PRTY_STS_0 0x183bc #define NIG_REG_NIG_PRTY_STS_1 0x183cc +/* [R 32] Legacy E1 and E1H location for parity error status clear register. */ +#define NIG_REG_NIG_PRTY_STS_CLR 0x103d4 +/* [RC 32] Parity register #0 read clear */ +#define NIG_REG_NIG_PRTY_STS_CLR_0 0x183c0 +#define NIG_REG_NIG_PRTY_STS_CLR_1 0x183d0 +#define MCPR_IMC_COMMAND_ENABLE (1L<<31) +#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT 16 +#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT 28 +#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT 8 /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic * Ethernet header. */ #define NIG_REG_P0_HDRS_AFTER_BASIC 0x18038 @@ -1872,6 +2013,12 @@ #define NIG_REG_P0_HWPFC_ENABLE 0x18078 #define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 +/* [RW 1] Input enable for RX MAC interface. */ +#define NIG_REG_P0_MAC_IN_EN 0x185ac +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P0_MAC_OUT_EN 0x185b0 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P0_MAC_PAUSE_OUT_EN 0x185b4 /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for * future expansion) each priorty is to be mapped to. Bits 3:0 specify the * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit @@ -1888,11 +2035,52 @@ * than one bit may be set; allowing multiple priorities to be mapped to one * COS. */ #define NIG_REG_P0_RX_COS1_PRIORITY_MASK 0x1805c +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS2_PRIORITY_MASK 0x186b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A + * priority is mapped to COS 3 when the corresponding mask bit is 1. 
More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS3_PRIORITY_MASK 0x186b4 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A + * priority is mapped to COS 4 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS4_PRIORITY_MASK 0x186b8 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A + * priority is mapped to COS 5 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P0_RX_COS5_PRIORITY_MASK 0x186bc +/* [R 1] RX FIFO for receiving data from MAC is empty. */ /* [RW 15] Specify which of the credit registers the client is to be mapped * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For * clients that are not subject to WFQ credit blocking - their * specifications here are not used. */ #define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP 0x180f0 +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x18688 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. */ +#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x1868c /* [RW 5] Specify whether the client competes directly in the strict * priority arbiter. The bits are mapped according to client ID (client IDs * are defined in tx_arb_priority_client). Default value is set to enable @@ -1907,10 +2095,24 @@ * reach. */ #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0 0x1810c #define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1 0x18110 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2 0x18114 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3 0x18118 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4 0x1811c +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5 0x186a0 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6 0x186a4 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7 0x186a8 +#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8 0x186ac /* [RW 32] Specify the weight (in bytes) to be added to credit register 0 * when it is time to increment. 
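/*
 * A minimal sketch of programming the 36-bit client credit map just
 * described: bits 31:0 land in the LSB register and bits 35:32 in the
 * MSB register. The helper name and the u64 staging value are
 * illustrative; passing 0x543210876ULL reproduces the documented reset
 * default (management and debug on credit registers 6-8, COS0-5 on
 * credit registers 0-5).
 */
static void bnx2x_set_p0_credit_map2(struct bnx2x *bp, u64 map)
{
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB, (u32)map);
	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
	       (u32)(map >> 32) & 0xf);
}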
*/ #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0 0x180f8 #define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1 0x180fc +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2 0x18100 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3 0x18104 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4 0x18108 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5 0x18690 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6 0x18694 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7 0x18698 +#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8 0x1869c /* [RW 12] Specify the number of strict priority arbitration slots between * two round-robin arbitration slots to avoid starvation. A value of 0 means * no strict priority cycles - the strict priority with anti-starvation @@ -1925,8 +2127,36 @@ * for management at priority 0; debug traffic at priorities 1 and 2; COS0 * traffic at priority 3; and COS1 traffic at priority 4. */ #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT 0x180e4 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header. */ +#define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 #define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 +/* [RW 32] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 31:0 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB 0x18680 +/* [RW 4] Specify the client number to be assigned to each priority of the + * strict priority arbiter. This register specifies bits 35:32 of the 36-bit + * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + * client; bits [35-32] are for priority 8 client. The clients are assigned + * the following IDs: 0-management; 1-debug traffic from this port; 2-debug + * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + * accommodate the 9 input clients to ETS arbiter. */ +#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 +#define NIG_REG_P1_MAC_IN_EN 0x185c0 +/* [RW 1] Output enable for TX MAC interface */ +#define NIG_REG_P1_MAC_OUT_EN 0x185c4 +/* [RW 1] Output enable for TX PAUSE signal to the MAC. */ +#define NIG_REG_P1_MAC_PAUSE_OUT_EN 0x185c8 /* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for * future expansion) each priorty is to be mapped to. Bits 3:0 specify the * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit @@ -1943,6 +2173,105 @@ * than one bit may be set; allowing multiple priorities to be mapped to one * COS. */ #define NIG_REG_P1_RX_COS1_PRIORITY_MASK 0x181b0 +/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A + * priority is mapped to COS 2 when the corresponding mask bit is 1. More + * than one bit may be set; allowing multiple priorities to be mapped to one + * COS. */ +#define NIG_REG_P1_RX_COS2_PRIORITY_MASK 0x186f8 +/* [R 1] RX FIFO for receiving data from MAC is empty. 
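/*
 * A minimal sketch of the priority-to-COS mapping described above,
 * assuming the driver's REG_WR() accessor: the mask has one bit per
 * SAFC/PFC priority, and several priorities may share a COS. The
 * choice of priorities 2 and 3 for COS 2 of port 1 is illustrative.
 */
static void bnx2x_map_prios_to_p1_cos2(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_P1_RX_COS2_PRIORITY_MASK, (1 << 2) | (1 << 3));
}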
*/ +#define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c +/* [R 1] TLLH FIFO is empty. */ +#define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 32] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB 0x186e8 +/* [RW 4] Specify which of the credit registers the client is to be mapped + * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are + * for client 0; bits [35:32] are for client 8. For clients that are not + * subject to WFQ credit blocking - their specifications here are not used. + * This is a new register (with 2_) added in E3 B0 to accommodate the 9 + * input clients to ETS arbiter. The reset default is set for management and + * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to + * use credit registers 0-5 respectively (0x543210876). Note that credit + * registers can not be shared between clients. Note also that there are + * only COS0-2 in port 1- there is a total of 6 clients in port 1. Only + * credit registers 0-5 are valid. This register should be configured + * appropriately before enabling WFQ. */ +#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB 0x186ec +/* [RW 9] Specify whether the client competes directly in the strict + * priority arbiter. The bits are mapped according to client ID (client IDs + * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic + * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 + * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. + * Default value is set to enable strict priorities for all clients. */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT 0x18234 +/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The + * bits are mapped according to client ID (client IDs are defined in + * tx_arb_priority_client2): 0-management; 1-debug traffic from this port; + * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 + * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is + * 0 for not using WFQ credit blocking. */ +#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x18238 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 0x18258 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 0x1825c +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 0x18260 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 0x18264 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 0x18268 +#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 0x186f4 +/* [RW 32] Specify the weight (in bytes) to be added to credit register 0 + * when it is time to increment. 
*/ +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 0x18244 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 0x18248 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 0x1824c +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 0x18250 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 0x18254 +#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 0x186f0 +/* [RW 12] Specify the number of strict priority arbitration slots between + two round-robin arbitration slots to avoid starvation. A value of 0 means + no strict priority cycles - the strict priority with anti-starvation + arbiter becomes a round-robin arbiter. */ +#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS 0x18240 +/* [RW 32] Specify the client number to be assigned to each priority of the + strict priority arbiter. This register specifies bits 31:0 of the 36-bit + value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + client; bits [35-32] are for priority 8 client. The clients are assigned + the following IDs: 0-management; 1-debug traffic from this port; 2-debug + traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + accommodate the 9 input clients to ETS arbiter. Note that this register + is the same as the one for port 0, except that port 1 only has COS 0-2 + traffic. There is no traffic for COS 3-5 of port 1. */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB 0x186e0 +/* [RW 4] Specify the client number to be assigned to each priority of the + strict priority arbiter. This register specifies bits 35:32 of the 36-bit + value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 + client; bits [35-32] are for priority 8 client. The clients are assigned + the following IDs: 0-management; 1-debug traffic from this port; 2-debug + traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic; + 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is + set to 0x345678021. This is a new register (with 2_) added in E3 B0 to + accommodate the 9 input clients to ETS arbiter. Note that this register + is the same as the one for port 0, except that port 1 only has COS 0-2 + traffic. There is no traffic for COS 3-5 of port 1. */ +#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 +/* [R 1] TX FIFO for transmitting data to MAC is empty. */ +#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 +/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets + forwarded to the host. */ +#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 +/* [RW 32] Specify the upper bound that credit register 0 is allowed to + * reach. */ /* [RW 1] Pause enable for port0. This register may get 1 only when ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same port */ @@ -2026,12 +2355,45 @@ #define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 /* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */ #define PBF_REG_COS0_UPPER_BOUND 0x15c05c +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 0. */ +#define PBF_REG_COS0_UPPER_BOUND_P0 0x15c2cc +/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter + * of port 1. */ +#define PBF_REG_COS0_UPPER_BOUND_P1 0x15c2e4 /* [RW 31] The weight of COS0 in the ETS command arbiter. */ #define PBF_REG_COS0_WEIGHT 0x15c054 +/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. 
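/*
 * A minimal sketch of an ETS bandwidth split using the per-port PBF
 * COS weight registers defined just below; the helper name and the
 * 30/70 split are illustrative. The weights are relative, so any pair
 * with the desired ratio works, subject to the COS upper bounds
 * configured above.
 */
static void bnx2x_ets_weights_30_70(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_COS0_WEIGHT_P0, 30);
	REG_WR(bp, PBF_REG_COS1_WEIGHT_P0, 70);
}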
*/ +#define PBF_REG_COS0_WEIGHT_P0 0x15c2a8 +/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */ +#define PBF_REG_COS0_WEIGHT_P1 0x15c2c0 /* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */ #define PBF_REG_COS1_UPPER_BOUND 0x15c060 /* [RW 31] The weight of COS1 in the ETS command arbiter. */ #define PBF_REG_COS1_WEIGHT 0x15c058 +/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P0 0x15c2ac +/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */ +#define PBF_REG_COS1_WEIGHT_P1 0x15c2c4 +/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P0 0x15c2b0 +/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */ +#define PBF_REG_COS2_WEIGHT_P1 0x15c2c8 +/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */ +#define PBF_REG_COS3_WEIGHT_P0 0x15c2b4 +/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */ +#define PBF_REG_COS4_WEIGHT_P0 0x15c2b8 +/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */ +#define PBF_REG_COS5_WEIGHT_P0 0x15c2bc +/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_LB_Q 0x140338 +/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_Q0 0x14033c +/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_CREDIT_Q1 0x140340 /* [RW 1] Disable processing further tasks from port 0 (after ending the current task in process). */ #define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c @@ -2042,6 +2404,52 @@ current task in process). */ #define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c #define PBF_REG_DISABLE_PF 0x1402e8 +/* [RW 18] For port 0: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0 0x15c288 +/* [RW 9] For port 1: For each client that is subject to WFQ (the + * corresponding bit is 1); indicates to which of the credit registers this + * client is mapped. For clients which are not credit blocked; their mapping + * is dont care. */ +#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1 0x15c28c +/* [RW 6] For port 0: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0 0x15c278 +/* [RW 3] For port 1: Bit per client to indicate if the client competes in + * the strict priority arbiter directly (corresponding bit = 1); or first + * goes to the RR arbiter (corresponding bit = 0); and then competes in the + * lowest priority in the strict-priority arbiter. */ +#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 0x15c27c +/* [RW 6] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 0x15c280 +/* [RW 3] For port 0: Bit per client to indicate if the client is subject to + * WFQ credit blocking (corresponding bit = 1). */ +#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 0x15c284 +/* [RW 16] For port 0: The number of strict priority arbitration slots + * between 2 RR arbitration slots. 
A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0 0x15c2a0 +/* [RW 16] For port 1: The number of strict priority arbitration slots + * between 2 RR arbitration slots. A value of 0 means no strict priority + * cycles; i.e. the strict-priority w/ anti-starvation arbiter is a RR + * arbiter. */ +#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 0x15c2a4 +/* [RW 18] For port 0: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0 0x15c270 +/* [RW 9] For port 1: Indicates which client is connected to each priority + * in the strict-priority arbiter. Priority 0 is the highest priority, and + * priority 5 is the lowest; to which the RR output is connected to (this is + * not configurable). */ +#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1 0x15c274 /* [RW 1] Indicates that ETS is performed between the COSes in the command * arbiter. If reset strict priority w/ anti-starvation will be performed * w/o WFQ. */ @@ -2049,14 +2457,25 @@ /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic * Ethernet header. */ #define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 -/* [RW 1] Indicates which COS is conncted to the highest priority in the - * command arbiter. */ +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8 +/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest + * priority in the command arbiter. */ #define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c #define PBF_REG_IF_ENABLE_REG 0x140044 /* [RW 1] Init bit. When set the initial credits are copied to the credit registers (except the port credits). Should be set and then reset after the configuration of the block has ended. */ #define PBF_REG_INIT 0x140000 +/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_LB_Q 0x15c248 +/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_Q0 0x15c230 +/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte + * lines. */ +#define PBF_REG_INIT_CRD_Q1 0x15c234 /* [RW 1] Init bit for port 0. When set the initial credit of port 0 is copied to the credit register. Should be set and then reset after the configuration of the port has ended. */ @@ -2069,6 +2488,15 @@ copied to the credit register. Should be set and then reset after the configuration of the port has ended. */ #define PBF_REG_INIT_P4 0x14000c +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * the LB queue. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q 0x140354 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 0. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 0x140358 +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * queue 1. Reset upon init. */ +#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 0x14035c /* [RW 1] Enable for mac interface 0. */ #define PBF_REG_MAC_IF0_ENABLE 0x140030 /* [RW 1] Enable for mac interface 1. */ @@ -2089,24 +2517,49 @@ /* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte lines. 
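/*
 * A minimal sketch of the init pulse the PBF_REG_INIT description
 * calls for: program the initial credits first, then set and clear the
 * init bit so they are copied into the working credit registers. The
 * helper name and the credit value (in 16-byte lines) are illustrative.
 */
static void bnx2x_pbf_load_init_credits(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_INIT_CRD_Q0, 0x240);
	REG_WR(bp, PBF_REG_INIT, 1);
	REG_WR(bp, PBF_REG_INIT, 0);
}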
*/ #define PBF_REG_P0_INIT_CRD 0x1400d0 -/* [RW 1] Indication that pause is enabled for port 0. */ -#define PBF_REG_P0_PAUSE_ENABLE 0x140014 -/* [R 8] Number of tasks in port 0 task queue. */ +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 0. Reset upon init. */ +#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT 0x140308 +/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */ +#define PBF_REG_P0_PAUSE_ENABLE 0x140014 +/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */ #define PBF_REG_P0_TASK_CNT 0x140204 -/* [R 11] Current credit for port 1 in the tx port buffers in 16 byte lines. */ +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 0. Reset upon init. */ +#define PBF_REG_P0_TQ_LINES_FREED_CNT 0x1402f0 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 0. */ +#define PBF_REG_P0_TQ_OCCUPANCY 0x1402fc +/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port + * buffers in 16 byte lines. */ #define PBF_REG_P1_CREDIT 0x140208 -/* [RW 11] Initial credit for port 1 in the tx port buffers in 16 byte - lines. */ +/* [R 11] Removed for E3 B0 - Initial credit for port 0 in the tx port + * buffers in 16 byte lines. */ #define PBF_REG_P1_INIT_CRD 0x1400d4 -/* [R 8] Number of tasks in port 1 task queue. */ +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 1. Reset upon init. */ +#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT 0x14030c +/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */ #define PBF_REG_P1_TASK_CNT 0x14020c +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 1. Reset upon init. */ +#define PBF_REG_P1_TQ_LINES_FREED_CNT 0x1402f4 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 1. */ +#define PBF_REG_P1_TQ_OCCUPANCY 0x140300 /* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */ #define PBF_REG_P4_CREDIT 0x140210 /* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte lines. */ #define PBF_REG_P4_INIT_CRD 0x1400e0 -/* [R 8] Number of tasks in port 4 task queue. */ +/* [R 32] Cyclic counter for the amount credits in 16 bytes lines added for + * port 4. Reset upon init. */ +#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT 0x140310 +/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */ #define PBF_REG_P4_TASK_CNT 0x140214 +/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines + * freed from the task queue of port 4. Reset upon init. */ +#define PBF_REG_P4_TQ_LINES_FREED_CNT 0x1402f8 +/* [R 12] Number of 8 bytes lines occupied in the task queue of port 4. */ +#define PBF_REG_P4_TQ_OCCUPANCY 0x140304 /* [RW 5] Interrupt mask register #0 read/write */ #define PBF_REG_PBF_INT_MASK 0x1401d4 /* [R 5] Interrupt register #0 read */ @@ -2115,6 +2568,27 @@ #define PBF_REG_PBF_PRTY_MASK 0x1401e4 /* [RC 20] Parity register #0 read clear */ #define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PBF_REG_TAG_ETHERTYPE_0 0x15c090 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity */ +#define PBF_REG_TAG_LEN_0 0x15c09c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task + * queue. Reset upon init. 
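/*
 * A minimal sketch of waiting for a PBF task queue to drain by polling
 * the occupancy counter above until it reads zero; the helper name,
 * queue choice, timeout and poll interval are illustrative.
 */
static int bnx2x_pbf_wait_p0_tq_empty(struct bnx2x *bp)
{
	int tries = 1000;

	while (REG_RD(bp, PBF_REG_P0_TQ_OCCUPANCY) && --tries)
		usleep_range(50, 100);

	return tries ? 0 : -ETIMEDOUT;
}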
*/ +#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q 0x14038c +/* [R 32] Cyclic counter for number of 8 byte lines freed from the task + * queue 0. Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q0 0x140390 +/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1. + * Reset upon init. */ +#define PBF_REG_TQ_LINES_FREED_CNT_Q1 0x140394 +/* [R 13] Number of 8 bytes lines occupied in the task queue of the LB + * queue. */ +#define PBF_REG_TQ_OCCUPANCY_LB_Q 0x1403a8 +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 0. */ +#define PBF_REG_TQ_OCCUPANCY_Q0 0x1403ac +/* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */ +#define PBF_REG_TQ_OCCUPANCY_Q1 0x1403b0 #define PB_REG_CONTROL 0 /* [RW 2] Interrupt mask register #0 read/write */ #define PB_REG_PB_INT_MASK 0x28 @@ -2206,8 +2680,12 @@ #define PGLUE_B_REG_PGLUE_B_INT_STS 0x9298 /* [RC 9] Interrupt register #0 read clear */ #define PGLUE_B_REG_PGLUE_B_INT_STS_CLR 0x929c +/* [RW 2] Parity mask register #0 read/write */ +#define PGLUE_B_REG_PGLUE_B_PRTY_MASK 0x92b4 /* [R 2] Parity register #0 read */ #define PGLUE_B_REG_PGLUE_B_PRTY_STS 0x92a8 +/* [RC 2] Parity register #0 read clear */ +#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR 0x92ac /* [R 13] Details of first request received with error. [2:0] - PFID. [3] - * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 - @@ -2444,10 +2922,24 @@ /* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic * Ethernet header. */ #define PRS_REG_HDRS_AFTER_BASIC 0x40238 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic + * Ethernet header for port 0 packets. */ +#define PRS_REG_HDRS_AFTER_BASIC_PORT_0 0x40270 +#define PRS_REG_HDRS_AFTER_BASIC_PORT_1 0x40290 +/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */ +#define PRS_REG_HDRS_AFTER_TAG_0 0x40248 +/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for + * port 0 packets */ +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0 0x40280 +#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1 0x402a0 /* [RW 4] The increment value to send in the CFC load request message */ #define PRS_REG_INC_VALUE 0x40048 /* [RW 6] Bit-map indicating which headers must appear in the packet */ #define PRS_REG_MUST_HAVE_HDRS 0x40254 +/* [RW 6] Bit-map indicating which headers must appear in the packet for + * port 0 packets */ +#define PRS_REG_MUST_HAVE_HDRS_PORT_0 0x4028c +#define PRS_REG_MUST_HAVE_HDRS_PORT_1 0x402ac #define PRS_REG_NIC_MODE 0x40138 /* [RW 8] The 8-bit event ID for cases where there is no match on the connection. Used in packet start message to TCM. */ @@ -2496,6 +2988,11 @@ #define PRS_REG_SERIAL_NUM_STATUS_MSB 0x40158 /* [R 4] debug only: SRC current credit. Transaction based. */ #define PRS_REG_SRC_CURRENT_CREDIT 0x4016c +/* [RW 16] The Ethernet type value for L2 tag 0 */ +#define PRS_REG_TAG_ETHERTYPE_0 0x401d4 +/* [RW 4] The length of the info field for L2 tag 0. The length is between + * 2B and 14B; in 2B granularity */ +#define PRS_REG_TAG_LEN_0 0x4022c /* [R 8] debug only: TCM current credit. Cycle based. */ #define PRS_REG_TCM_CURRENT_CREDIT 0x40160 /* [R 8] debug only: TSDM current credit. Transaction based. 
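/*
 * A minimal sketch of describing an outer VLAN to the parser with the
 * two tag registers above. The 802.1Q TPID is certain; the length
 * value is an assumption: if TAG_LEN_0 counts 2-byte units rather than
 * bytes, the 4-byte VLAN info field would be written as 2 instead.
 */
static void bnx2x_prs_tag0_vlan(struct bnx2x *bp)
{
	REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8100);
	REG_WR(bp, PRS_REG_TAG_LEN_0, 4);
}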
*/ @@ -3080,6 +3577,7 @@ #define QM_REG_BYTECREDITAFULLTHR 0x168094 /* [RW 4] The initial credit for interface */ #define QM_REG_CMINITCRD_0 0x1680cc +#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 #define QM_REG_CMINITCRD_1 0x1680d0 #define QM_REG_CMINITCRD_2 0x1680d4 #define QM_REG_CMINITCRD_3 0x1680d8 @@ -3170,7 +3668,10 @@ /* [RW 2] The PCI attributes field used in the PCI request. */ #define QM_REG_PCIREQAT 0x168054 #define QM_REG_PF_EN 0x16e70c -/* [R 16] The byte credit of port 0 */ +/* [R 24] The number of tasks stored in the QM for the PF. only even + * functions are valid in E2 (odd I registers will be hard wired to 0) */ +#define QM_REG_PF_USG_CNT_0 0x16e040 +/* [R 16] NOT USED */ #define QM_REG_PORT0BYTECRD 0x168300 /* [R 16] The byte credit of port 1 */ #define QM_REG_PORT1BYTECRD 0x168304 @@ -3725,7 +4226,7 @@ mechanism. The fields are: [5:0] - length of the message; 15:6] - message pointer; 20:16] - next pointer. */ #define TCM_REG_XX_DESCR_TABLE 0x50280 -#define TCM_REG_XX_DESCR_TABLE_SIZE 32 +#define TCM_REG_XX_DESCR_TABLE_SIZE 29 /* [R 6] Use to read the value of XX protection Free counter. */ #define TCM_REG_XX_FREE 0x50178 /* [RW 6] Initial value for the credit counter; responsible for fulfilling @@ -3782,6 +4283,8 @@ #define TM_REG_LIN0_LOGIC_ADDR 0x164240 /* [RW 18] Linear0 Max active cid (in banks of 32 entries). */ #define TM_REG_LIN0_MAX_ACTIVE_CID 0x164048 +/* [ST 16] Linear0 Number of scans counter. */ +#define TM_REG_LIN0_NUM_SCANS 0x1640a0 /* [WB 64] Linear0 phy address. */ #define TM_REG_LIN0_PHY_ADDR 0x164270 /* [RW 1] Linear0 physical address valid. */ @@ -3789,6 +4292,7 @@ #define TM_REG_LIN0_SCAN_ON 0x1640d0 /* [RW 24] Linear0 array scan timeout. */ #define TM_REG_LIN0_SCAN_TIME 0x16403c +#define TM_REG_LIN0_VNIC_UC 0x164128 /* [RW 32] Linear1 logic address. */ #define TM_REG_LIN1_LOGIC_ADDR 0x164250 /* [WB 64] Linear1 phy address. */ @@ -4175,6 +4679,8 @@ #define UCM_REG_UCM_INT_MASK 0xe01d4 /* [R 11] Interrupt register #0 read */ #define UCM_REG_UCM_INT_STS 0xe01c8 +/* [RW 27] Parity mask register #0 read/write */ +#define UCM_REG_UCM_PRTY_MASK 0xe01e4 /* [R 27] Parity register #0 read */ #define UCM_REG_UCM_PRTY_STS 0xe01d8 /* [RC 27] Parity register #0 read clear */ @@ -4248,7 +4754,7 @@ mechanism. The fields are:[5:0] - message length; 14:6] - message pointer; 19:15] - next pointer. */ #define UCM_REG_XX_DESCR_TABLE 0xe0280 -#define UCM_REG_XX_DESCR_TABLE_SIZE 32 +#define UCM_REG_XX_DESCR_TABLE_SIZE 27 /* [R 6] Use to read the XX protection Free counter. */ #define UCM_REG_XX_FREE 0xe016c /* [RW 6] Initial value for the credit counter; responsible for fulfilling @@ -4265,6 +4771,23 @@ The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - header pointer. */ #define UCM_REG_XX_TABLE 0xe0300 +#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) +#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) +#define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) +#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) +#define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) +#define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) +#define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) +#define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers + * to bit 17 of the MAC address etc. */ +#define UMAC_REG_MAC_ADDR0 0xc +/* [RW 16] Register Bit 0 refers to Bit 0 of the MAC address; Register Bit 1 + * refers to Bit 1 of the MAC address etc. Bits 16 to 31 are reserved. 
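/*
 * A minimal sketch of splitting a 6-byte MAC address across the two
 * UMAC address registers: per the bit numbering above, ADDR1 carries
 * the two most significant bytes and ADDR0 the remaining four. The
 * helper name is illustrative and the byte order is an assumption to
 * be checked against the MAC documentation; umac_base would typically
 * be GRCBASE_UMAC0 or GRCBASE_UMAC1 (defined further down).
 */
static void bnx2x_umac_set_mac(struct bnx2x *bp, u32 umac_base, const u8 *mac)
{
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
	       (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]);
	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1, (mac[0] << 8) | mac[1]);
}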
*/ +#define UMAC_REG_MAC_ADDR1 0x10 +/* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive + * logic to check frames. */ +#define UMAC_REG_MAXFR 0x14 /* [RW 8] The event id for aggregated interrupt 0 */ #define USDM_REG_AGG_INT_EVENT_0 0xc4038 #define USDM_REG_AGG_INT_EVENT_1 0xc403c @@ -4696,8 +5219,13 @@ #define XCM_REG_XCM_INT_MASK 0x202b4 /* [R 14] Interrupt register #0 read */ #define XCM_REG_XCM_INT_STS 0x202a8 +/* [RW 30] Parity mask register #0 read/write */ +#define XCM_REG_XCM_PRTY_MASK 0x202c4 /* [R 30] Parity register #0 read */ #define XCM_REG_XCM_PRTY_STS 0x202b8 +/* [RC 30] Parity register #0 read clear */ +#define XCM_REG_XCM_PRTY_STS_CLR 0x202bc + /* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). Is used to determine the number of the AG context REG-pairs written back; @@ -4772,6 +5300,34 @@ #define XCM_REG_XX_MSG_NUM 0x20428 /* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */ #define XCM_REG_XX_OVFL_EVNT_ID 0x20058 +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS (0x1<<0) +#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS (0x1<<1) +#define XMAC_CTRL_REG_CORE_LOCAL_LPBK (0x1<<3) +#define XMAC_CTRL_REG_RX_EN (0x1<<1) +#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) +#define XMAC_CTRL_REG_TX_EN (0x1<<0) +#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) +#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) +#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN (0x1<<0) +#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN (0x1<<3) +#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN (0x1<<4) +#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN (0x1<<5) +#define XMAC_REG_CLEAR_RX_LSS_STATUS 0x60 +#define XMAC_REG_CTRL 0 +/* [RW 16] Upper 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_HI 0x2c +/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC + * packets transmitted by the MAC */ +#define XMAC_REG_CTRL_SA_LO 0x28 +#define XMAC_REG_PAUSE_CTRL 0x68 +#define XMAC_REG_PFC_CTRL 0x70 +#define XMAC_REG_PFC_CTRL_HI 0x74 +#define XMAC_REG_RX_LSS_STATUS 0x58 +/* [RW 14] Maximum packet size in receive direction; exclusive of preamble & + * CRC in strip mode */ +#define XMAC_REG_RX_MAX_SIZE 0x40 +#define XMAC_REG_TX_CTRL 0x20 /* [RW 16] Indirect access to the XX table of the XX protection mechanism. The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - header pointer. 
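/*
 * A minimal sketch of enabling the XMAC with the control bits defined
 * above: release the soft reset and turn on both directions in one
 * read-modify-write. The helper name is illustrative; any settling
 * delay required after deasserting the reset is not shown and is left
 * to the MAC documentation.
 */
static void bnx2x_xmac_rx_tx_enable(struct bnx2x *bp, u32 xmac_base)
{
	u32 val = REG_RD(bp, xmac_base + XMAC_REG_CTRL);

	val &= ~XMAC_CTRL_REG_SOFT_RESET;
	val |= XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
}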
*/ @@ -4846,6 +5402,10 @@ #define XSDM_REG_NUM_OF_Q9_CMD 0x166268 /* [RW 13] The start address in the internal RAM for queue counters */ #define XSDM_REG_Q_COUNTER_START_ADDR 0x166010 +/* [W 17] Generate an operation after completion; bit-16 is + * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and + * bits 4:0 are the T124Param[4:0] */ +#define XSDM_REG_OPERATION_GEN 0x1664c4 /* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */ #define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY 0x166548 /* [R 1] parser fifo empty in sdm_sync block */ @@ -5019,6 +5579,7 @@ #define BIGMAC_REGISTER_CNT_MAX_SIZE (0x05<<3) #define BIGMAC_REGISTER_RX_CONTROL (0x21<<3) #define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS (0x46<<3) +#define BIGMAC_REGISTER_RX_LSS_STATUS (0x43<<3) #define BIGMAC_REGISTER_RX_MAX_SIZE (0x23<<3) #define BIGMAC_REGISTER_RX_STAT_GR64 (0x26<<3) #define BIGMAC_REGISTER_RX_STAT_GRIPJ (0x42<<3) @@ -5034,6 +5595,7 @@ #define BIGMAC2_REGISTER_PFC_CONTROL (0x06<<3) #define BIGMAC2_REGISTER_RX_CONTROL (0x3A<<3) #define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS (0x62<<3) +#define BIGMAC2_REGISTER_RX_LSS_STAT (0x3E<<3) #define BIGMAC2_REGISTER_RX_MAX_SIZE (0x3C<<3) #define BIGMAC2_REGISTER_RX_STAT_GR64 (0x40<<3) #define BIGMAC2_REGISTER_RX_STAT_GRIPJ (0x5f<<3) @@ -5052,7 +5614,9 @@ #define EMAC_LED_OVERRIDE (1L<<0) #define EMAC_LED_TRAFFIC (1L<<6) #define EMAC_MDIO_COMM_COMMAND_ADDRESS (0L<<26) +#define EMAC_MDIO_COMM_COMMAND_READ_22 (2L<<26) #define EMAC_MDIO_COMM_COMMAND_READ_45 (3L<<26) +#define EMAC_MDIO_COMM_COMMAND_WRITE_22 (1L<<26) #define EMAC_MDIO_COMM_COMMAND_WRITE_45 (1L<<26) #define EMAC_MDIO_COMM_DATA (0xffffL<<0) #define EMAC_MDIO_COMM_START_BUSY (1L<<29) @@ -5128,16 +5692,30 @@ #define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27) #define MISC_REGISTERS_RESET_REG_1_SET 0x584 #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 +#define MISC_REGISTERS_RESET_REG_2_MSTAT0 (0x1<<24) +#define MISC_REGISTERS_RESET_REG_2_MSTAT1 (0x1<<25) +#define MISC_REGISTERS_RESET_REG_2_PGLC (0x1<<19) +#define MISC_REGISTERS_RESET_REG_2_RST_ATC (0x1<<17) #define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) +#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1 (0x1<<1) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0 (0x1<<2) #define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) +#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1 (0x1<<3) #define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15) #define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4) #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE (0x1<<8) +#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU (0x1<<7) #define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5) #define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13) #define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11) +#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO (0x1<<13) #define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9) #define MISC_REGISTERS_RESET_REG_2_SET 0x594 +#define MISC_REGISTERS_RESET_REG_2_UMAC0 (0x1<<20) +#define MISC_REGISTERS_RESET_REG_2_UMAC1 (0x1<<21) +#define MISC_REGISTERS_RESET_REG_2_XMAC (0x1<<22) +#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT (0x1<<23) #define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN (0x1<<2) @@ -5160,74 +5738,86 @@ #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 #define 
MISC_REGISTERS_SPIO_SET_POS 8 +#define HW_LOCK_DRV_FLAGS 10 #define HW_LOCK_MAX_RESOURCE_VALUE 31 #define HW_LOCK_RESOURCE_GPIO 1 #define HW_LOCK_RESOURCE_MDIO 0 -#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 -#define HW_LOCK_RESOURCE_RESERVED_08 8 +#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8 +#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9 #define HW_LOCK_RESOURCE_SPIO 2 #define HW_LOCK_RESOURCE_UNDI 5 -#define PRS_FLAG_OVERETH_IPV4 1 -#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) -#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) -#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) -#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) -#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) -#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (1<<8) -#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (1<<7) -#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (1<<6) -#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (1<<29) -#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (1<<28) -#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (1<<1) -#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (1<<0) -#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (1<<18) -#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (1<<11) -#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (1<<13) -#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (1<<12) -#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5) -#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9) -#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29) -#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30) -#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15) -#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14) -#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) -#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (1<<0) -#define AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT (1<<31) -#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) -#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) -#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (1<<3) -#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (1<<2) -#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (1<<5) -#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (1<<4) -#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3) -#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2) -#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22) -#define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15) -#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27) -#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5) -#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25) -#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (1<<24) -#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (1<<29) -#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (1<<28) -#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (1<<23) -#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (1<<27) -#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (1<<26) -#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (1<<21) -#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (1<<20) -#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (1<<25) -#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (1<<24) -#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (1<<16) -#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (1<<9) -#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (1<<7) -#define 
AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (1<<6) -#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (1<<11) -#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (1<<10) +#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT (0x1<<1) +#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR (0x1<<18) +#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR (0x1<<10) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT (0x1<<13) +#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR (0x1<<14) +#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT (0x1<<31) +#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR (0x1<<30) +#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR (0x1<<0) +#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (0x1<<3) +#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (0x1<<2) +#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_SPIO5 (0x1<<15) +#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR (0x1<<4) +#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR (0x1<<24) +#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT (0x1<<29) +#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR (0x1<<28) +#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT (0x1<<23) +#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR (0x1<<22) +#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT (0x1<<27) +#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR (0x1<<26) +#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT (0x1<<21) +#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR (0x1<<20) +#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT (0x1<<25) +#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR (0x1<<24) +#define 
AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR (0x1<<16) +#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT (0x1<<9) +#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR (0x1<<8) +#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT (0x1<<7) +#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR (0x1<<6) +#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT (0x1<<11) +#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR (0x1<<10) + +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (0x1<<5) +#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (0x1<<9) + #define RESERVED_GENERAL_ATTENTION_BIT_0 0 -#define EVEREST_GEN_ATTN_IN_USE_MASK 0x3ffe0 +#define EVEREST_GEN_ATTN_IN_USE_MASK 0x7ffe0 #define EVEREST_LATCHED_ATTN_IN_USE_MASK 0xffe00000 #define RESERVED_GENERAL_ATTENTION_BIT_6 6 @@ -5317,7 +5907,13 @@ #define GRCBASE_HC 0x108000 #define GRCBASE_PXP2 0x120000 #define GRCBASE_PBF 0x140000 +#define GRCBASE_UMAC0 0x160000 +#define GRCBASE_UMAC1 0x160400 #define GRCBASE_XPB 0x161000 +#define GRCBASE_MSTAT0 0x162000 +#define GRCBASE_MSTAT1 0x162800 +#define GRCBASE_XMAC0 0x163000 +#define GRCBASE_XMAC1 0x163800 #define GRCBASE_TIMERS 0x164000 #define GRCBASE_XSDM 0x166000 #define GRCBASE_QM 0x168000 @@ -5883,6 +6479,10 @@ #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G 0x0C00 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX 0x0D00 #define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 0x0E00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR 0x0F00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 +#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 #define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 @@ -6032,15 +6632,11 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_CTRL 0x0 #define MDIO_PMA_REG_STATUS 0x1 #define MDIO_PMA_REG_10G_CTRL2 0x7 +#define MDIO_PMA_REG_TX_DISABLE 0x0009 #define MDIO_PMA_REG_RX_SD 0xa /*bcm*/ #define MDIO_PMA_REG_BCM_CTRL 0x0096 #define MDIO_PMA_REG_FEC_CTRL 0x00ab -#define MDIO_PMA_REG_RX_ALARM_CTRL 0x9000 -#define MDIO_PMA_REG_LASI_CTRL 0x9002 -#define MDIO_PMA_REG_RX_ALARM 0x9003 -#define MDIO_PMA_REG_TX_ALARM 0x9004 -#define MDIO_PMA_REG_LASI_STATUS 0x9005 #define MDIO_PMA_REG_PHY_IDENTIFIER 0xc800 #define MDIO_PMA_REG_DIGITAL_CTRL 0xc808 #define MDIO_PMA_REG_DIGITAL_STATUS 0xc809 @@ -6201,6 +6797,169 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 #define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 +/* BCM84833 only */ +#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a +#define MDIO_84833_SUPER_ISOLATE 0x8000 +/* These are mailbox register set used by 84833. */ +#define MDIO_84833_TOP_CFG_SCRATCH_REG0 0x4005 +#define MDIO_84833_TOP_CFG_SCRATCH_REG1 0x4006 +#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007 +#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008 +#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009 +#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011 +#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012 + +/* Mailbox command set used by 84833. */ +#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2 +/* Mailbox status set used by 84833. 
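/*
 * A minimal sketch of the mailbox handshake implied by the command and
 * status values defined around here: wait until the firmware reports
 * the mailbox open, write the command to scratch register 0, then poll
 * the same register for a completion status. The helper name, polling
 * budget and devad parameter are assumptions; bnx2x_cl45_read() and
 * bnx2x_cl45_write() are the driver's clause 45 MDIO accessors.
 */
static int bnx2x_84833_mbox_cmd(struct bnx2x *bp, struct bnx2x_phy *phy,
				u8 devad, u16 cmd)
{
	u16 status = 0;
	int tries;

	/* Wait for the firmware to open the mailbox for commands */
	for (tries = 0; tries < 100; tries++) {
		bnx2x_cl45_read(bp, phy, devad,
				MDIO_84833_TOP_CFG_SCRATCH_REG0, &status);
		if (status == PHY84833_CMD_OPEN_FOR_CMDS)
			break;
		usleep_range(1000, 2000);
	}
	if (status != PHY84833_CMD_OPEN_FOR_CMDS)
		return -EBUSY;

	/* Issue the command and poll for a completion status */
	bnx2x_cl45_write(bp, phy, devad,
			 MDIO_84833_TOP_CFG_SCRATCH_REG0, cmd);
	for (tries = 0; tries < 100; tries++) {
		bnx2x_cl45_read(bp, phy, devad,
				MDIO_84833_TOP_CFG_SCRATCH_REG0, &status);
		if (status == PHY84833_CMD_COMPLETE_PASS)
			return 0;
		if (status == PHY84833_CMD_COMPLETE_ERROR)
			return -EIO;
		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}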
*/ +#define PHY84833_CMD_RECEIVED 0x0001 +#define PHY84833_CMD_IN_PROGRESS 0x0002 +#define PHY84833_CMD_COMPLETE_PASS 0x0004 +#define PHY84833_CMD_COMPLETE_ERROR 0x0008 +#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010 +#define PHY84833_CMD_SYSTEM_BOOT 0x0020 +#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040 +#define PHY84833_CMD_CLEAR_COMPLETE 0x0080 +#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5 + + +/* 84833 F/W Feature Commands */ +#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27 +#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28 + +/* Warpcore clause 45 addressing */ +#define MDIO_WC_DEVAD 0x3 +#define MDIO_WC_REG_IEEE0BLK_MIICNTL 0x0 +#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP 0x7 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0 0x10 +#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1 0x11 +#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 +#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 +#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e +#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL0 0x8015 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL1 0x8016 +#define MDIO_WC_REG_XGXSBLK1_LANECTRL2 0x8017 +#define MDIO_WC_REG_TX0_ANA_CTRL0 0x8061 +#define MDIO_WC_REG_TX1_ANA_CTRL0 0x8071 +#define MDIO_WC_REG_TX2_ANA_CTRL0 0x8081 +#define MDIO_WC_REG_TX3_ANA_CTRL0 0x8091 +#define MDIO_WC_REG_TX0_TX_DRIVER 0x8067 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET 0x04 +#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK 0x00f0 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET 0x08 +#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK 0x0f00 +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET 0x0c +#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK 0x7000 +#define MDIO_WC_REG_TX1_TX_DRIVER 0x8077 +#define MDIO_WC_REG_TX2_TX_DRIVER 0x8087 +#define MDIO_WC_REG_TX3_TX_DRIVER 0x8097 +#define MDIO_WC_REG_RX0_ANARXCONTROL1G 0x80b9 +#define MDIO_WC_REG_RX2_ANARXCONTROL1G 0x80d9 +#define MDIO_WC_REG_RX0_PCI_CTRL 0x80ba +#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca +#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da +#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea +#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104 +#define MDIO_WC_REG_XGXS_STATUS3 0x8129 +#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 +#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 +#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 +#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B +#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 +#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 +#define MDIO_WC_REG_GP2_STATUS_GP_2_1 0x81d1 +#define MDIO_WC_REG_GP2_STATUS_GP_2_2 0x81d2 +#define MDIO_WC_REG_GP2_STATUS_GP_2_3 0x81d3 +#define MDIO_WC_REG_GP2_STATUS_GP_2_4 0x81d4 +#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP 0x81EE +#define MDIO_WC_REG_UC_INFO_B1_VERSION 0x81F0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE 0x81F2 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET 0x0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT 0x0 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR 0x1 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC 0x2 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI 0x3 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G 0x4 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET 0x4 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET 0x8 +#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET 0xc +#define MDIO_WC_REG_UC_INFO_B1_CRC 0x81FE +#define MDIO_WC_REG_DSC_SMC 0x8213 +#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0 0x821e +#define MDIO_WC_REG_TX_FIR_TAP 0x82e2 +#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET 0x00 +#define 
MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK 0x000f +#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET 0x04 +#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK 0x03f0 +#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a +#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 +#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 +#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 +#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 +#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL 0x82e8 +#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL 0x82ec +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1 0x8300 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2 0x8301 +#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3 0x8302 +#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1 0x8304 +#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308 +#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309 +#define MDIO_WC_REG_DIGITAL3_UP1 0x8329 +#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c +#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 +#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 +#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e +#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 +#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 +#define MDIO_WC_REG_TX66_CONTROL 0x83b0 +#define MDIO_WC_REG_RX66_CONTROL 0x83c0 +#define MDIO_WC_REG_RX66_SCW0 0x83c2 +#define MDIO_WC_REG_RX66_SCW1 0x83c3 +#define MDIO_WC_REG_RX66_SCW2 0x83c4 +#define MDIO_WC_REG_RX66_SCW3 0x83c5 +#define MDIO_WC_REG_RX66_SCW0_MASK 0x83c6 +#define MDIO_WC_REG_RX66_SCW1_MASK 0x83c7 +#define MDIO_WC_REG_RX66_SCW2_MASK 0x83c8 +#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 +#define MDIO_WC_REG_FX100_CTRL1 0x8400 +#define MDIO_WC_REG_FX100_CTRL3 0x8402 + +#define MDIO_WC_REG_MICROBLK_CMD 0xffc2 +#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 +#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc + +#define MDIO_WC_REG_AERBLK_AER 0xffde +#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL 0xffe0 +#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT 0xffe1 + +#define MDIO_WC0_XGXS_BLK2_LANE_RESET 0x810A +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT 0 +#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT 4 + +#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2 0x8141 + +#define DIGITAL5_ACTUAL_SPEED_TX_MASK 0x003f + +/* 54618se */ +#define MDIO_REG_GPHY_PHYID_LSB 0x3 +#define MDIO_REG_GPHY_ID_54618SE 0x5cd5 +#define MDIO_REG_GPHY_CL45_ADDR_REG 0xd +#define MDIO_REG_GPHY_CL45_DATA_REG 0xe +#define MDIO_REG_GPHY_EEE_ADV 0x3c +#define MDIO_REG_GPHY_EEE_1G (0x1 << 2) +#define MDIO_REG_GPHY_EEE_100 (0x1 << 1) +#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_INTR_STATUS 0x1a +#define MDIO_REG_INTR_MASK 0x1b +#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) +#define MDIO_REG_GPHY_SHADOW 0x1c +#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10) +#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15) +#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10) +#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD (0x1 << 8) + #define IGU_FUNC_BASE 0x0400 #define IGU_ADDR_MSIX 0x0000 @@ -6217,11 +6976,6 @@ Theotherbitsarereservedandshouldbezero*/ #define IGU_ADDR_MSI_ADDR_HI 0x0212 #define IGU_ADDR_MSI_DATA 0x0213 -#define IGU_INT_ENABLE 0 -#define IGU_INT_DISABLE 1 -#define IGU_INT_NOP 2 -#define IGU_INT_NOP2 3 - #define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup 0 #define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup 1 #define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup 2 @@ -6292,15 +7046,6 @@ Theotherbitsarereservedandshouldbezero*/ #define IGU_BC_BASE_DSB_PROD 128 #define IGU_NORM_BASE_DSB_PROD 136 -#define 
IGU_CTRL_CMD_TYPE_WR\ - 1 -#define IGU_CTRL_CMD_TYPE_RD\ - 0 - -#define IGU_SEG_ACCESS_NORM 0 -#define IGU_SEG_ACCESS_DEF 1 -#define IGU_SEG_ACCESS_ATTN 2 - /* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \ [5:2] = 0; [1:0] = PF number) */ #define IGU_FID_ENCODE_IS_PF (0x1<<6) diff --git a/drivers/net/bnx2x/bnx2x_sp.c b/drivers/net/bnx2x/bnx2x_sp.c new file mode 100644 index 00000000000..df52f110c6c --- /dev/null +++ b/drivers/net/bnx2x/bnx2x_sp.c @@ -0,0 +1,5692 @@ +/* bnx2x_sp.c: Broadcom Everest network driver. + * + * Copyright 2011 Broadcom Corporation + * + * Unless you and Broadcom execute a separate written software license + * agreement governing use of this software, this software is licensed to you + * under the terms of the GNU General Public License version 2, available + * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). + * + * Notwithstanding the above, under no circumstances may you combine this + * software in any way with any other Broadcom software provided under a + * license other than the GPL, without Broadcom's express prior written + * consent. + * + * Maintained by: Eilon Greenstein <eilong@broadcom.com> + * Written by: Vladislav Zolotarov + * + */ +#include <linux/module.h> +#include <linux/crc32.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/crc32c.h> +#include "bnx2x.h" +#include "bnx2x_cmn.h" +#include "bnx2x_sp.h" + +#define BNX2X_MAX_EMUL_MULTI 16 + +/**** Exe Queue interfaces ****/ + +/** + * bnx2x_exe_queue_init - init the Exe Queue object + * + * @o: pointer to the object + * @exe_len: length + * @owner: pointer to the owner + * @validate: validate function pointer + * @optimize: optimize function pointer + * @exec: execute function pointer + * @get: get function pointer + */ +static inline void bnx2x_exe_queue_init(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o, + int exe_len, + union bnx2x_qable_obj *owner, + exe_q_validate validate, + exe_q_optimize optimize, + exe_q_execute exec, + exe_q_get get) +{ + memset(o, 0, sizeof(*o)); + + INIT_LIST_HEAD(&o->exe_queue); + INIT_LIST_HEAD(&o->pending_comp); + + spin_lock_init(&o->lock); + + o->exe_chunk_len = exe_len; + o->owner = owner; + + /* Owner specific callbacks */ + o->validate = validate; + o->optimize = optimize; + o->execute = exec; + o->get = get; + + DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk " "length of %d\n", exe_len); +} + +static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp, + struct bnx2x_exeq_elem *elem) +{ + DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n"); + kfree(elem); +} + +static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o) +{ + struct bnx2x_exeq_elem *elem; + int cnt = 0; + + spin_lock_bh(&o->lock); + + list_for_each_entry(elem, &o->exe_queue, link) + cnt++; + + spin_unlock_bh(&o->lock); + + return cnt; +} + +/** + * bnx2x_exe_queue_add - add a new element to the execution queue + * + * @bp: driver handle + * @o: queue + * @cmd: new command to add + * @restore: true - do not optimize the command + * + * If the element is optimized or is illegal, frees it.
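+ * + * A minimal caller sketch (illustrative only; it mirrors what + * bnx2x_vlan_mac_push_new_cmd() further down in this file actually does): + * + *	elem = bnx2x_exe_queue_alloc_elem(bp); + *	if (!elem) + *		return -ENOMEM; + *	elem->cmd_len = 1; + *	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); + *	rc = bnx2x_exe_queue_add(bp, &o->exe_queue, elem, false);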
+ */ +static inline int bnx2x_exe_queue_add(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem, + bool restore) +{ + int rc; + + spin_lock_bh(&o->lock); + + if (!restore) { + /* Try to cancel this element queue */ + rc = o->optimize(bp, o->owner, elem); + if (rc) + goto free_and_exit; + + /* Check if this request is ok */ + rc = o->validate(bp, o->owner, elem); + if (rc) { + BNX2X_ERR("Preamble failed: %d\n", rc); + goto free_and_exit; + } + } + + /* If so, add it to the execution queue */ + list_add_tail(&elem->link, &o->exe_queue); + + spin_unlock_bh(&o->lock); + + return 0; + +free_and_exit: + bnx2x_exe_queue_free_elem(bp, elem); + + spin_unlock_bh(&o->lock); + + return rc; + +} + +static inline void __bnx2x_exe_queue_reset_pending( + struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o) +{ + struct bnx2x_exeq_elem *elem; + + while (!list_empty(&o->pending_comp)) { + elem = list_first_entry(&o->pending_comp, + struct bnx2x_exeq_elem, link); + + list_del(&elem->link); + bnx2x_exe_queue_free_elem(bp, elem); + } +} + +static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o) +{ + + spin_lock_bh(&o->lock); + + __bnx2x_exe_queue_reset_pending(bp, o); + + spin_unlock_bh(&o->lock); + +} + +/** + * bnx2x_exe_queue_step - execute one execution chunk atomically + * + * @bp: driver handle + * @o: queue + * @ramrod_flags: flags + * + * (Atomicity is ensured using the exe_queue->lock). + */ +static inline int bnx2x_exe_queue_step(struct bnx2x *bp, + struct bnx2x_exe_queue_obj *o, + unsigned long *ramrod_flags) +{ + struct bnx2x_exeq_elem *elem, spacer; + int cur_len = 0, rc; + + memset(&spacer, 0, sizeof(spacer)); + + spin_lock_bh(&o->lock); + + /* + * Next step should not be performed until the current is finished, + * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to + * properly clear object internals without sending any command to the FW + * which also implies there won't be any completion to clear the + * 'pending' list. + */ + if (!list_empty(&o->pending_comp)) { + if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " "resetting pending_comp\n"); + __bnx2x_exe_queue_reset_pending(bp, o); + } else { + spin_unlock_bh(&o->lock); + return 1; + } + } + + /* + * Run through the pending commands list and create the next + * execution chunk. + */ + while (!list_empty(&o->exe_queue)) { + elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, + link); + WARN_ON(!elem->cmd_len); + + if (cur_len + elem->cmd_len <= o->exe_chunk_len) { + cur_len += elem->cmd_len; + /* + * Prevent both lists from being empty when moving an + * element. This will allow the call of + * bnx2x_exe_queue_empty() without locking. + */ + list_add_tail(&spacer.link, &o->pending_comp); + mb(); + list_del(&elem->link); + list_add_tail(&elem->link, &o->pending_comp); + list_del(&spacer.link); + } else + break; + } + + /* Sanity check */ + if (!cur_len) { + spin_unlock_bh(&o->lock); + return 0; + } + + rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags); + if (rc < 0) + /* + * In case of an error return the commands back to the queue + * and reset the pending_comp. + */ + list_splice_init(&o->pending_comp, &o->exe_queue); + else if (!rc) + /* + * If zero is returned, it means there are no outstanding pending + * completions and we may dismiss the pending list.
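+ * (A positive return value from o->execute(), by contrast, means a + * ramrod was actually sent and its completion will clear the pending + * list later, so in that case the list is left untouched here.)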
+ */ + __bnx2x_exe_queue_reset_pending(bp, o); + + spin_unlock_bh(&o->lock); + return rc; +} + +static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o) +{ + bool empty = list_empty(&o->exe_queue); + + /* Don't reorder!!! */ + mb(); + + return empty && list_empty(&o->pending_comp); +} + +static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem( + struct bnx2x *bp) +{ + DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n"); + return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC); +} + +/************************ raw_obj functions ***********************************/ +static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o) +{ + return !!test_bit(o->state, o->pstate); +} + +static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) +{ + smp_mb__before_clear_bit(); + clear_bit(o->state, o->pstate); + smp_mb__after_clear_bit(); +} + +static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) +{ + smp_mb__before_clear_bit(); + set_bit(o->state, o->pstate); + smp_mb__after_clear_bit(); +} + +/** + * bnx2x_state_wait - wait until the given bit(state) is cleared + * + * @bp: device handle + * @state: state which is to be cleared + * @state_p: state buffer + * + */ +static inline int bnx2x_state_wait(struct bnx2x *bp, int state, + unsigned long *pstate) +{ + /* can take a while if any port is running */ + int cnt = 5000; + + + if (CHIP_REV_IS_EMUL(bp)) + cnt *= 20; + + DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state); + + might_sleep(); + while (cnt--) { + if (!test_bit(state, pstate)) { +#ifdef BNX2X_STOP_ON_ERROR + DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt); +#endif + return 0; + } + + usleep_range(1000, 1000); + + if (bp->panic) + return -EIO; + } + + /* timeout! */ + BNX2X_ERR("timeout waiting for state %d\n", state); +#ifdef BNX2X_STOP_ON_ERROR + bnx2x_panic(); +#endif + + return -EBUSY; +} + +static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw) +{ + return bnx2x_state_wait(bp, raw->state, raw->pstate); +} + +/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/ +/* credit handling callbacks */ +static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + WARN_ON(!mp); + + return mp->get_entry(mp, offset); +} + +static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + WARN_ON(!mp); + + return mp->get(mp, 1); +} + +static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + WARN_ON(!vp); + + return vp->get_entry(vp, offset); +} + +static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + WARN_ON(!vp); + + return vp->get(vp, 1); +} + +static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + if (!mp->get(mp, 1)) + return false; + + if (!vp->get(vp, 1)) { + mp->put(mp, 1); + return false; + } + + return true; +} + +static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + return mp->put_entry(mp, offset); +} + +static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + + return mp->put(mp, 1); +} + +static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, 
int offset) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + return vp->put_entry(vp, offset); +} + +static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + return vp->put(vp, 1); +} + +static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_credit_pool_obj *mp = o->macs_pool; + struct bnx2x_credit_pool_obj *vp = o->vlans_pool; + + if (!mp->put(mp, 1)) + return false; + + if (!vp->put(vp, 1)) { + mp->get(mp, 1); + return false; + } + + return true; +} + +/* check_add() callbacks */ +static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + if (!is_valid_ether_addr(data->mac.mac)) + return -EINVAL; + + /* Check if a requested MAC already exists */ + list_for_each_entry(pos, &o->head, link) + if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) + return -EEXIST; + + return 0; +} + +static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + list_for_each_entry(pos, &o->head, link) + if (data->vlan.vlan == pos->u.vlan.vlan) + return -EEXIST; + + return 0; +} + +static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + list_for_each_entry(pos, &o->head, link) + if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && + (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, + ETH_ALEN))) + return -EEXIST; + + return 0; +} + + +/* check_del() callbacks */ +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + list_for_each_entry(pos, &o->head, link) + if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) + return pos; + + return NULL; +} + +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + list_for_each_entry(pos, &o->head, link) + if (data->vlan.vlan == pos->u.vlan.vlan) + return pos; + + return NULL; +} + +static struct bnx2x_vlan_mac_registry_elem * + bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + + list_for_each_entry(pos, &o->head, link) + if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && + (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, + ETH_ALEN))) + return pos; + + return NULL; +} + +/* check_move() callback */ +static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + int rc; + + /* Check if we can delete the requested configuration from the first + * object. + */ + pos = src_o->check_del(src_o, data); + + /* check if configuration can be added */ + rc = dst_o->check_add(dst_o, data); + + /* If this classification can not be added (is already set) + * or can't be deleted - return an error. 
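+ * (Note that check_del() is used here purely as a registry lookup; + * nothing is actually removed from the source object at this stage.)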
+ */ + if (rc || !pos) + return false; + + return true; +} + +static bool bnx2x_check_move_always_err( + struct bnx2x_vlan_mac_obj *src_o, + struct bnx2x_vlan_mac_obj *dst_o, + union bnx2x_classification_ramrod_data *data) +{ + return false; +} + + +static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + u8 rx_tx_flag = 0; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD; + + return rx_tx_flag; +} + +/* LLH CAM line allocations */ +enum { + LLH_CAM_ISCSI_ETH_LINE = 0, + LLH_CAM_ETH_LINE, + LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 +}; + +static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, + bool add, unsigned char *dev_addr, int index) +{ + u32 wb_data[2]; + u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : + NIG_REG_LLH0_FUNC_MEM; + + if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE) + return; + + DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", + (add ? "ADD" : "DELETE"), index); + + if (add) { + /* LLH_FUNC_MEM is a u64 WB register */ + reg_offset += 8*index; + + wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) | + (dev_addr[4] << 8) | dev_addr[5]); + wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]); + + REG_WR_DMAE(bp, reg_offset, wb_data, 2); + } + + REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE : + NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add); +} + +/** + * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod + * + * @bp: device handle + * @o: queue for which we want to configure this rule + * @add: if true the command is an ADD command, DEL otherwise + * @opcode: CLASSIFY_RULE_OPCODE_XXX + * @hdr: pointer to a header to setup + * + */ +static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, bool add, int opcode, + struct eth_classify_cmd_header *hdr) +{ + struct bnx2x_raw_obj *raw = &o->raw; + + hdr->client_id = raw->cl_id; + hdr->func_id = raw->func_id; + + /* Rx and/or Tx (internal switching) configuration? */ + hdr->cmd_general_data |= + bnx2x_vlan_mac_get_rx_tx_flag(o); + + if (add) + hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD; + + hdr->cmd_general_data |= + (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT); +} + +/** + * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header + * + * @cid: connection id + * @type: BNX2X_FILTER_XXX_PENDING + * @hdr: pointer to header to setup + * @rule_cnt: + * + * Currently we always configure one rule; the echo field is set to contain a + * CID and an opcode type. + */ +static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type, + struct eth_classify_header *hdr, int rule_cnt) +{ + hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT); + hdr->rule_cnt = (u8)rule_cnt; +} + + +/* hw_config() callbacks */ +static void bnx2x_set_one_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ?
true : false; + unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; + u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; + + /* + * Set LLH CAM entry: currently only iSCSI and ETH macs are + * relevant. In addition, current implementation is tuned for a + * single ETH MAC. + * + * When PF configuration of multiple unicast ETH MACs in switch + * independent mode is required (NetQ, multiple netdev MACs, + * etc.), consider better utilisation of the 8 per-function MAC + * entries in the LLH register. There are also + * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the + * total number of CAM entries to 16. + * + * Currently we won't configure NIG for MACs other than a primary ETH + * MAC and iSCSI L2 MAC. + * + * If this MAC is moving from one Queue to another, no need to change + * NIG configuration. + */ + if (cmd != BNX2X_VLAN_MAC_MOVE) { + if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags)) + bnx2x_set_mac_in_nig(bp, add, mac, + LLH_CAM_ISCSI_ETH_LINE); + else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags)) + bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE); + } + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Setup a command header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for " "Queue %d\n", (add ? "add" : "delete"), + BNX2X_MAC_PRN_LIST(mac), raw->cl_id); + + /* Set a MAC itself */ + bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_MAC, + &rule_entry->mac.header); + + /* Set a MAC itself */ + bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, + &rule_entry->mac.mac_mid, + &rule_entry->mac.mac_lsb, mac); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod + * + * @bp: device handle + * @o: queue + * @type: + * @cam_offset: offset in cam memory + * @hdr: pointer to a header to setup + * + * E1/E1H + */ +static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, + struct mac_configuration_hdr *hdr) +{ + struct bnx2x_raw_obj *r = &o->raw; + + hdr->length = 1; + hdr->offset = (u8)cam_offset; + hdr->client_id = 0xff; + hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT)); +} + +static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac, + u16 vlan_id, struct mac_configuration_entry *cfg_entry) +{ + struct bnx2x_raw_obj *r = &o->raw; + u32 cl_bit_vec = (1 << r->cl_id); + + cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec); + cfg_entry->pf_id = r->func_id; + cfg_entry->vlan_id = cpu_to_le16(vlan_id); + + if (add) { + SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + SET_FLAG(cfg_entry->flags, + MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode); + + /* Set a MAC in a ramrod data */ + bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+ &cfg_entry->middle_mac_addr, + &cfg_entry->lsb_mac_addr, mac); + } else + SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); +} + +static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add, + u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config) +{ + struct mac_configuration_entry *cfg_entry = &config->config_table[0]; + struct bnx2x_raw_obj *raw = &o->raw; + + bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset, + &config->hdr); + bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id, + cfg_entry); + + DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n", + (add ? "setting" : "clearing"), + BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset); +} + +/** + * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_mac_e1x(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* + * 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? + true : false; + + /* Reset the ramrod data buffer */ + memset(config, 0, sizeof(*config)); + + bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING, + cam_offset, add, + elem->cmd_data.vlan_mac.u.mac.mac, 0, + ETH_VLAN_FILTER_ANY_VLAN, config); +} + +static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, int rule_idx, + int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + int cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN, + &rule_entry->vlan.header); + + DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? 
"add" : "delete"), + vlan); + + /* Set a VLAN itself */ + rule_entry->vlan.vlan = cpu_to_le16(vlan); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_VLAN, + &rule_entry->vlan.header); + + /* Set a VLAN itself */ + rule_entry->vlan.vlan = cpu_to_le16(vlan); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct eth_classify_rules_ramrod_data *data = + (struct eth_classify_rules_ramrod_data *)(raw->rdata); + int rule_cnt = rule_idx + 1; + union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; + int cmd = elem->cmd_data.vlan_mac.cmd; + bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; + u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; + + + /* Reset the ramrod data buffer for the first rule */ + if (rule_idx == 0) + memset(data, 0, sizeof(*data)); + + /* Set a rule header */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set VLAN and MAC themselvs */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + + /* MOVE: Add a rule that will add this MAC to the target Queue */ + if (cmd == BNX2X_VLAN_MAC_MOVE) { + rule_entry++; + rule_cnt++; + + /* Setup ramrod data */ + bnx2x_vlan_mac_set_cmd_hdr_e2(bp, + elem->cmd_data.vlan_mac.target_obj, + true, CLASSIFY_RULE_OPCODE_PAIR, + &rule_entry->pair.header); + + /* Set a VLAN itself */ + rule_entry->pair.vlan = cpu_to_le16(vlan); + bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, + &rule_entry->pair.mac_mid, + &rule_entry->pair.mac_lsb, mac); + } + + /* Set the ramrod data header */ + /* TODO: take this to the higher level in order to prevent multiple + writing */ + bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header, + rule_cnt); +} + +/** + * bnx2x_set_one_vlan_mac_e1h - + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @elem: bnx2x_exeq_elem + * @rule_idx: rule_idx + * @cam_offset: cam_offset + */ +static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + int rule_idx, int cam_offset) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *config = + (struct mac_configuration_cmd *)(raw->rdata); + /* + * 57710 and 57711 do not support MOVE command, + * so it's either ADD or DEL + */ + bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? 
+ true : false; + + /* Reset the ramrod data buffer */ + memset(config, 0, sizeof(*config)); + + bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING, + cam_offset, add, + elem->cmd_data.vlan_mac.u.vlan_mac.mac, + elem->cmd_data.vlan_mac.u.vlan_mac.vlan, + ETH_VLAN_FILTER_CLASSIFY, config); +} + +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) + +/** + * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element + * + * @bp: device handle + * @p: command parameters + * @ppos: pointer to the cookie + * + * reconfigure next MAC/VLAN/VLAN-MAC element from the + * previously configured elements list. + * + * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken + * into account + * + * pointer to the cookie that should be given back in the next call to make the + * function handle the next element. If *ppos is set to NULL it will restart the + * iterator. If returned *ppos == NULL this means that the last element has been + * handled. + * + */ +static int bnx2x_vlan_mac_restore(struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p, + struct bnx2x_vlan_mac_registry_elem **ppos) +{ + struct bnx2x_vlan_mac_registry_elem *pos; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + + /* If list is empty - there is nothing to do here */ + if (list_empty(&o->head)) { + *ppos = NULL; + return 0; + } + + /* make a step... */ + if (*ppos == NULL) + *ppos = list_first_entry(&o->head, + struct bnx2x_vlan_mac_registry_elem, + link); + else + *ppos = list_next_entry(*ppos, link); + + pos = *ppos; + + /* If it's the last step - return NULL */ + if (list_is_last(&pos->link, &o->head)) + *ppos = NULL; + + /* Prepare a 'user_req' */ + memcpy(&p->user_req.u, &pos->u, sizeof(pos->u)); + + /* Set the command */ + p->user_req.cmd = BNX2X_VLAN_MAC_ADD; + + /* Set vlan_mac_flags */ + p->user_req.vlan_mac_flags = pos->vlan_mac_flags; + + /* Set a restore bit */ + __set_bit(RAMROD_RESTORE, &p->ramrod_flags); + + return bnx2x_config_vlan_mac(bp, p); +} + +/* + * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a + * pointer to an element with a specific criteria and NULL if such an element + * hasn't been found.
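+ * (The 'specific criteria' is the raw MAC, VLAN or VLAN-MAC pair data + * together with the command type; each helper below compares both the + * union data and the cmd field.)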
+ */ +static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac( + struct bnx2x_exe_queue_obj *o, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem *pos; + struct bnx2x_vlan_mac_ramrod_data *data = + &elem->cmd_data.vlan_mac.u.vlan_mac; + + /* Check pending for execution commands */ + list_for_each_entry(pos, &o->exe_queue, link) + if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data, + sizeof(*data)) && + (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd)) + return pos; + + return NULL; +} + +/** + * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed + * + * @bp: device handle + * @qo: bnx2x_qable_obj + * @elem: bnx2x_exeq_elem + * + * Checks that the requested configuration can be added. If yes and if + * requested, consume CAM credit. + * + * The 'validate' is run after the 'optimize'. + * + */ +static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + int rc; + + /* Check the registry */ + rc = o->check_add(o, &elem->cmd_data.vlan_mac.u); + if (rc) { + DP(BNX2X_MSG_SP, "ADD command is not allowed considering " "current registry state\n"); + return rc; + } + + /* + * Check if there is a pending ADD command for this + * MAC/VLAN/VLAN-MAC. Return an error if there is. + */ + if (exeq->get(exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending ADD command already\n"); + return -EEXIST; + } + + /* + * TODO: Check the pending MOVE from other objects where this + * object is a destination object. + */ + + /* Consume the credit if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->get_credit(o))) + return -EINVAL; + + return 0; +} + +/** + * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed + * + * @bp: device handle + * @qo: qable object to check + * @elem: element that needs to be deleted + * + * Checks that the requested configuration can be deleted. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'.
+ */ +static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_vlan_mac_registry_elem *pos; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + struct bnx2x_exeq_elem query_elem; + + /* If this classification cannot be deleted (doesn't exist), + * return -EEXIST. + */ + pos = o->check_del(o, &elem->cmd_data.vlan_mac.u); + if (!pos) { + DP(BNX2X_MSG_SP, "DEL command is not allowed considering " "current registry state\n"); + return -EEXIST; + } + + /* + * Check if there are pending DEL or MOVE commands for this + * MAC/VLAN/VLAN-MAC. Return an error if so. + */ + memcpy(&query_elem, elem, sizeof(query_elem)); + + /* Check for MOVE commands */ + query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE; + if (exeq->get(exeq, &query_elem)) { + BNX2X_ERR("There is a pending MOVE command already\n"); + return -EINVAL; + } + + /* Check for DEL commands */ + if (exeq->get(exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending DEL command already\n"); + return -EEXIST; + } + + /* Return the credit to the credit pool if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + o->put_credit(o))) { + BNX2X_ERR("Failed to return a credit\n"); + return -EINVAL; + } + + return 0; +} + +/** + * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed + * + * @bp: device handle + * @qo: qable object to check (source) + * @elem: element that needs to be moved + * + * Checks that the requested configuration can be moved. If yes and if + * requested, returns a CAM credit. + * + * The 'validate' is run after the 'optimize'. + */ +static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac; + struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj; + struct bnx2x_exeq_elem query_elem; + struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue; + struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue; + + /* + * Check if we can perform this operation based on the current registry + * state. + */ + if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) { + DP(BNX2X_MSG_SP, "MOVE command is not allowed considering " "current registry state\n"); + return -EINVAL; + } + + /* + * Check if there is an already pending DEL or MOVE command for the + * source object or an ADD command for the destination object. Return an + * error if so.
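+ * (query_elem below is a scratch copy of the incoming element whose + * cmd field is rewritten for the DEL and ADD probes; the MOVE probe + * can use the element itself.)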
+ */ + memcpy(&query_elem, elem, sizeof(query_elem)); + + /* Check DEL on source */ + query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; + if (src_exeq->get(src_exeq, &query_elem)) { + BNX2X_ERR("There is a pending DEL command on the source " "queue already\n"); + return -EINVAL; + } + + /* Check MOVE on source */ + if (src_exeq->get(src_exeq, elem)) { + DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n"); + return -EEXIST; + } + + /* Check ADD on destination */ + query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; + if (dest_exeq->get(dest_exeq, &query_elem)) { + BNX2X_ERR("There is a pending ADD command on the " "destination queue already\n"); + return -EINVAL; + } + + /* Consume the credit if not requested not to */ + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + dest_o->get_credit(dest_o))) + return -EINVAL; + + if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &elem->cmd_data.vlan_mac.vlan_mac_flags) || + src_o->put_credit(src_o))) { + /* return the credit taken from dest... */ + dest_o->put_credit(dest_o); + return -EINVAL; + } + + return 0; +} + +static int bnx2x_validate_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + switch (elem->cmd_data.vlan_mac.cmd) { + case BNX2X_VLAN_MAC_ADD: + return bnx2x_validate_vlan_mac_add(bp, qo, elem); + case BNX2X_VLAN_MAC_DEL: + return bnx2x_validate_vlan_mac_del(bp, qo, elem); + case BNX2X_VLAN_MAC_MOVE: + return bnx2x_validate_vlan_mac_move(bp, qo, elem); + default: + return -EINVAL; + } +} + +/** + * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes. + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * + */ +static int bnx2x_wait_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o) +{ + int cnt = 5000, rc; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + struct bnx2x_raw_obj *raw = &o->raw; + + while (cnt--) { + /* Wait for the current command to complete */ + rc = raw->wait_comp(bp, raw); + if (rc) + return rc; + + /* Wait until there are no pending commands */ + if (!bnx2x_exe_queue_empty(exeq)) + usleep_range(1000, 1000); + else + return 0; + } + + return -EBUSY; +} + +/** + * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod + * + * @bp: device handle + * @o: bnx2x_vlan_mac_obj + * @cqe: + * @cont: if true schedule next execution chunk + * + */ +static int bnx2x_complete_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + union event_ring_elem *cqe, + unsigned long *ramrod_flags) +{ + struct bnx2x_raw_obj *r = &o->raw; + int rc; + + /* Reset pending list */ + bnx2x_exe_queue_reset_pending(bp, &o->exe_queue); + + /* Clear pending */ + r->clear_pending(r); + + /* If ramrod failed this is most likely a SW bug */ + if (cqe->message.error) + return -EINVAL; + + /* Run the next bulk of pending commands if requested */ + if (test_bit(RAMROD_CONT, ramrod_flags)) { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + if (rc < 0) + return rc; + } + + /* If there is more work to do return PENDING */ + if (!bnx2x_exe_queue_empty(&o->exe_queue)) + return 1; + + return 0; +} + +/** + * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ * + * @bp: device handle + * @o: bnx2x_qable_obj + * @elem: bnx2x_exeq_elem + */ +static int bnx2x_optimize_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct bnx2x_exeq_elem *elem) +{ + struct bnx2x_exeq_elem query, *pos; + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + + memcpy(&query, elem, sizeof(query)); + + switch (elem->cmd_data.vlan_mac.cmd) { + case BNX2X_VLAN_MAC_ADD: + query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL; + break; + case BNX2X_VLAN_MAC_DEL: + query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD; + break; + default: + /* Don't handle anything other than ADD or DEL */ + return 0; + } + + /* If we found the appropriate element - delete it */ + pos = exeq->get(exeq, &query); + if (pos) { + + /* Return the credit of the optimized command */ + if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, + &pos->cmd_data.vlan_mac.vlan_mac_flags)) { + if ((query.cmd_data.vlan_mac.cmd == + BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) { + BNX2X_ERR("Failed to return the credit for the " "optimized ADD command\n"); + return -EINVAL; + } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */ + BNX2X_ERR("Failed to recover the credit from " "the optimized DEL command\n"); + return -EINVAL; + } + } + + DP(BNX2X_MSG_SP, "Optimizing %s command\n", + (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ? + "ADD" : "DEL"); + + list_del(&pos->link); + bnx2x_exe_queue_free_elem(bp, pos); + return 1; + } + + return 0; +} + +/** + * bnx2x_vlan_mac_get_registry_elem - prepare a registry element + * + * @bp: device handle + * @o: + * @elem: + * @restore: + * @re: + * + * prepare a registry element according to the current command request. + */ +static inline int bnx2x_vlan_mac_get_registry_elem( + struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + struct bnx2x_exeq_elem *elem, + bool restore, + struct bnx2x_vlan_mac_registry_elem **re) +{ + int cmd = elem->cmd_data.vlan_mac.cmd; + struct bnx2x_vlan_mac_registry_elem *reg_elem; + + /* Allocate a new registry element if needed. */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) { + reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC); + if (!reg_elem) + return -ENOMEM; + + /* Get a new CAM offset */ + if (!o->get_cam_offset(o, &reg_elem->cam_offset)) { + /* + * This should never happen, because we have checked the + * CAM availability in the 'validate'. + */ + WARN_ON(1); + kfree(reg_elem); + return -EINVAL; + } + + DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset); + + /* Set a VLAN-MAC data */ + memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u, + sizeof(reg_elem->u)); + + /* Copy the flags (needed for DEL and RESTORE flows) */ + reg_elem->vlan_mac_flags = + elem->cmd_data.vlan_mac.vlan_mac_flags; + } else /* DEL, RESTORE */ + reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); + + *re = reg_elem; + return 0; +} + +/** + * bnx2x_execute_vlan_mac - execute vlan mac command + * + * @bp: device handle + * @qo: + * @exe_chunk: + * @ramrod_flags: + * + * go and send a ramrod!
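+ * + * Returns a negative value on failure, 1 after a ramrod has been sent + * (the completion is still outstanding) and 0 when a RAMROD_DRV_CLR_ONLY + * request was satisfied without talking to the FW, as the function body + * below shows.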
+ */ +static int bnx2x_execute_vlan_mac(struct bnx2x *bp, + union bnx2x_qable_obj *qo, + struct list_head *exe_chunk, + unsigned long *ramrod_flags) +{ + struct bnx2x_exeq_elem *elem; + struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj; + struct bnx2x_raw_obj *r = &o->raw; + int rc, idx = 0; + bool restore = test_bit(RAMROD_RESTORE, ramrod_flags); + bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags); + struct bnx2x_vlan_mac_registry_elem *reg_elem; + int cmd; + + /* + * If DRIVER_ONLY execution is requested, cleanup a registry + * and exit. Otherwise send a ramrod to FW. + */ + if (!drv_only) { + WARN_ON(r->check_pending(r)); + + /* Set pending */ + r->set_pending(r); + + /* Fill the ramrod data */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + /* + * We will add to the target object in a MOVE command, so + * change the object for a CAM search. + */ + if (cmd == BNX2X_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj, + elem, restore, + &reg_elem); + if (rc) + goto error_exit; + + WARN_ON(!reg_elem); + + /* Push a new entry into the registry */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || + (cmd == BNX2X_VLAN_MAC_MOVE))) + list_add(&reg_elem->link, &cam_obj->head); + + /* Configure a single command in a ramrod data buffer */ + o->set_one_rule(bp, o, elem, idx, + reg_elem->cam_offset); + + /* MOVE command consumes 2 entries in the ramrod data */ + if (cmd == BNX2X_VLAN_MAC_MOVE) + idx += 2; + else + idx++; + } + + /* + * No need for an explicit memory barrier here as long as we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()).
+ */ + + rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid, + U64_HI(r->rdata_mapping), + U64_LO(r->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + goto error_exit; + } + + /* Now, when we are done with the ramrod - clean up the registry */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + if ((cmd == BNX2X_VLAN_MAC_DEL) || + (cmd == BNX2X_VLAN_MAC_MOVE)) { + reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u); + + WARN_ON(!reg_elem); + + o->put_cam_offset(o, reg_elem->cam_offset); + list_del(&reg_elem->link); + kfree(reg_elem); + } + } + + if (!drv_only) + return 1; + else + return 0; + +error_exit: + r->clear_pending(r); + + /* Cleanup a registry in case of a failure */ + list_for_each_entry(elem, exe_chunk, link) { + cmd = elem->cmd_data.vlan_mac.cmd; + + if (cmd == BNX2X_VLAN_MAC_MOVE) + cam_obj = elem->cmd_data.vlan_mac.target_obj; + else + cam_obj = o; + + /* Delete all newly added above entries */ + if (!restore && + ((cmd == BNX2X_VLAN_MAC_ADD) || + (cmd == BNX2X_VLAN_MAC_MOVE))) { + reg_elem = o->check_del(cam_obj, + &elem->cmd_data.vlan_mac.u); + if (reg_elem) { + list_del(&reg_elem->link); + kfree(reg_elem); + } + } + } + + return rc; +} + +static inline int bnx2x_vlan_mac_push_new_cmd( + struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) +{ + struct bnx2x_exeq_elem *elem; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags); + + /* Allocate the execution queue element */ + elem = bnx2x_exe_queue_alloc_elem(bp); + if (!elem) + return -ENOMEM; + + /* Set the command 'length' */ + switch (p->user_req.cmd) { + case BNX2X_VLAN_MAC_MOVE: + elem->cmd_len = 2; + break; + default: + elem->cmd_len = 1; + } + + /* Fill the object specific info */ + memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req)); + + /* Try to add a new command to the pending list */ + return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore); +} + +/** + * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules. + * + * @bp: device handle + * @p: + * + */ +int bnx2x_config_vlan_mac( + struct bnx2x *bp, + struct bnx2x_vlan_mac_ramrod_params *p) +{ + int rc = 0; + struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj; + unsigned long *ramrod_flags = &p->ramrod_flags; + bool cont = test_bit(RAMROD_CONT, ramrod_flags); + struct bnx2x_raw_obj *raw = &o->raw; + + /* + * Add new elements to the execution list for commands that require it. + */ + if (!cont) { + rc = bnx2x_vlan_mac_push_new_cmd(bp, p); + if (rc) + return rc; + } + + /* + * If nothing will be executed further in this iteration, we want to + * return PENDING if there are pending commands + */ + if (!bnx2x_exe_queue_empty(&o->exe_queue)) + rc = 1; + + if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) { + DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: " "clearing a pending bit.\n"); + raw->clear_pending(raw); + } + + /* Execute commands if required */ + if (cont || test_bit(RAMROD_EXEC, ramrod_flags) || + test_bit(RAMROD_COMP_WAIT, ramrod_flags)) { + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags); + if (rc < 0) + return rc; + } + + /* + * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set + * then the user wants to wait until the last command is done. + */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) { + /* + * Wait at most for the current exe_queue length iterations plus + * one (for the current pending command).
+ */ + int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1; + + while (!bnx2x_exe_queue_empty(&o->exe_queue) && + max_iterations--) { + + /* Wait for the current command to complete */ + rc = raw->wait_comp(bp, raw); + if (rc) + return rc; + + /* Make a next step */ + rc = bnx2x_exe_queue_step(bp, &o->exe_queue, + ramrod_flags); + if (rc < 0) + return rc; + } + + return 0; + } + + return rc; +} + + + +/** + * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec + * + * @bp: device handle + * @o: + * @vlan_mac_flags: + * @ramrod_flags: execution flags to be used for this deletion + * + * Returns zero if the last operation has completed successfully and there are + * no more elements left, positive value if the last operation has completed + * successfully and there are more previously configured elements, negative + * value if the current operation has failed. + */ +static int bnx2x_vlan_mac_del_all(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *o, + unsigned long *vlan_mac_flags, + unsigned long *ramrod_flags) +{ + struct bnx2x_vlan_mac_registry_elem *pos = NULL; + int rc = 0; + struct bnx2x_vlan_mac_ramrod_params p; + struct bnx2x_exe_queue_obj *exeq = &o->exe_queue; + struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n; + + /* Clear pending commands first */ + + spin_lock_bh(&exeq->lock); + + list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) { + if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags == + *vlan_mac_flags) + list_del(&exeq_pos->link); + } + + spin_unlock_bh(&exeq->lock); + + /* Prepare a command request */ + memset(&p, 0, sizeof(p)); + p.vlan_mac_obj = o; + p.ramrod_flags = *ramrod_flags; + p.user_req.cmd = BNX2X_VLAN_MAC_DEL; + + /* + * Add all but the last VLAN-MAC to the execution queue without actually + * executing anything.
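+ * (Each bnx2x_config_vlan_mac() call in the loop below only queues a + * DEL command, since RAMROD_COMP_WAIT, RAMROD_EXEC and RAMROD_CONT are + * all cleared first; the final call with RAMROD_CONT set then fires the + * accumulated chunk.)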
+ */ + __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags); + __clear_bit(RAMROD_EXEC, &p.ramrod_flags); + __clear_bit(RAMROD_CONT, &p.ramrod_flags); + + list_for_each_entry(pos, &o->head, link) { + if (pos->vlan_mac_flags == *vlan_mac_flags) { + p.user_req.vlan_mac_flags = pos->vlan_mac_flags; + memcpy(&p.user_req.u, &pos->u, sizeof(pos->u)); + rc = bnx2x_config_vlan_mac(bp, &p); + if (rc < 0) { + BNX2X_ERR("Failed to add a new DEL command\n"); + return rc; + } + } + } + + p.ramrod_flags = *ramrod_flags; + __set_bit(RAMROD_CONT, &p.ramrod_flags); + + return bnx2x_config_vlan_mac(bp, &p); +} + +static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id, + u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type) +{ + raw->func_id = func_id; + raw->cid = cid; + raw->cl_id = cl_id; + raw->rdata = rdata; + raw->rdata_mapping = rdata_mapping; + raw->state = state; + raw->pstate = pstate; + raw->obj_type = type; + raw->check_pending = bnx2x_raw_check_pending; + raw->clear_pending = bnx2x_raw_clear_pending; + raw->set_pending = bnx2x_raw_set_pending; + raw->wait_comp = bnx2x_raw_wait; +} + +static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o, + u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + INIT_LIST_HEAD(&o->head); + + o->macs_pool = macs_pool; + o->vlans_pool = vlans_pool; + + o->delete_all = bnx2x_vlan_mac_del_all; + o->restore = bnx2x_vlan_mac_restore; + o->complete = bnx2x_complete_vlan_mac; + o->wait = bnx2x_wait_vlan_mac; + + bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping, + state, pstate, type); +} + + +void bnx2x_init_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool) +{ + union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj; + + bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, NULL); + + /* CAM credit pool handling */ + mac_obj->get_credit = bnx2x_get_credit_mac; + mac_obj->put_credit = bnx2x_put_credit_mac; + mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; + mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; + + if (CHIP_IS_E1x(bp)) { + mac_obj->set_one_rule = bnx2x_set_one_mac_e1x; + mac_obj->check_del = bnx2x_check_mac_del; + mac_obj->check_add = bnx2x_check_mac_add; + mac_obj->check_move = bnx2x_check_move_always_err; + mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &mac_obj->exe_queue, 1, qable_obj, + bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_mac); + } else { + mac_obj->set_one_rule = bnx2x_set_one_mac_e2; + mac_obj->check_del = bnx2x_check_mac_del; + mac_obj->check_add = bnx2x_check_mac_add; + mac_obj->check_move = bnx2x_check_move; + mac_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &mac_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_mac); + } +} + +void bnx2x_init_vlan_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_obj, + u8 cl_id, u32 cid, u8 func_id, void 
*rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj; + + bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, NULL, + vlans_pool); + + vlan_obj->get_credit = bnx2x_get_credit_vlan; + vlan_obj->put_credit = bnx2x_put_credit_vlan; + vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan; + vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan; + + if (CHIP_IS_E1x(bp)) { + BNX2X_ERR("Do not support chips other than E2 and newer\n"); + BUG(); + } else { + vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2; + vlan_obj->check_del = bnx2x_check_vlan_del; + vlan_obj->check_add = bnx2x_check_vlan_add; + vlan_obj->check_move = bnx2x_check_move; + vlan_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan); + } +} + +void bnx2x_init_vlan_mac_obj(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac_obj, + u8 cl_id, u32 cid, u8 func_id, void *rdata, + dma_addr_t rdata_mapping, int state, + unsigned long *pstate, bnx2x_obj_type type, + struct bnx2x_credit_pool_obj *macs_pool, + struct bnx2x_credit_pool_obj *vlans_pool) +{ + union bnx2x_qable_obj *qable_obj = + (union bnx2x_qable_obj *)vlan_mac_obj; + + bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata, + rdata_mapping, state, pstate, type, + macs_pool, vlans_pool); + + /* CAM pool handling */ + vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac; + vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac; + /* + * CAM offset is relevant for 57710 and 57711 chips only which have a + * single CAM for both MACs and VLAN-MAC pairs. So the offset + * will be taken from MACs' pool object only.
+ */ + vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac; + vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac; + + if (CHIP_IS_E1(bp)) { + BNX2X_ERR("Do not support chips other than E2\n"); + BUG(); + } else if (CHIP_IS_E1H(bp)) { + vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h; + vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; + vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; + vlan_mac_obj->check_move = bnx2x_check_move_always_err; + vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_mac_obj->exe_queue, 1, qable_obj, + bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan_mac); + } else { + vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2; + vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del; + vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add; + vlan_mac_obj->check_move = bnx2x_check_move; + vlan_mac_obj->ramrod_cmd = + RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; + + /* Exe Queue */ + bnx2x_exe_queue_init(bp, + &vlan_mac_obj->exe_queue, + CLASSIFY_RULES_COUNT, + qable_obj, bnx2x_validate_vlan_mac, + bnx2x_optimize_vlan_mac, + bnx2x_execute_vlan_mac, + bnx2x_exeq_get_vlan_mac); + } + +} + +/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ +static inline void __storm_memset_mac_filters(struct bnx2x *bp, + struct tstorm_eth_mac_filter_config *mac_filters, + u16 pf_id) +{ + size_t size = sizeof(struct tstorm_eth_mac_filter_config); + + u32 addr = BAR_TSTRORM_INTMEM + + TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id); + + __storm_memset_struct(bp, addr, size, (u32 *)mac_filters); +} + +static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + /* update the bp MAC filter structure */ + u32 mask = (1 << p->cl_id); + + struct tstorm_eth_mac_filter_config *mac_filters = + (struct tstorm_eth_mac_filter_config *)p->rdata; + + /* initial setting is drop-all */ + u8 drop_all_ucast = 1, drop_all_mcast = 1; + u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; + u8 unmatched_unicast = 0; + + /* In E1x we only take the Rx accept flag into account since Tx switching + * isn't enabled. */ + if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags)) + /* accept matched ucast */ + drop_all_ucast = 0; + + if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags)) + /* accept matched mcast */ + drop_all_mcast = 0; + + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) { + /* accept all ucast */ + drop_all_ucast = 0; + accp_all_ucast = 1; + } + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) { + /* accept all mcast */ + drop_all_mcast = 0; + accp_all_mcast = 1; + } + if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags)) + /* accept (all) bcast */ + accp_all_bcast = 1; + if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags)) + /* accept unmatched unicasts */ + unmatched_unicast = 1; + + mac_filters->ucast_drop_all = drop_all_ucast ? + mac_filters->ucast_drop_all | mask : + mac_filters->ucast_drop_all & ~mask; + + mac_filters->mcast_drop_all = drop_all_mcast ? + mac_filters->mcast_drop_all | mask : + mac_filters->mcast_drop_all & ~mask; + + mac_filters->ucast_accept_all = accp_all_ucast ? + mac_filters->ucast_accept_all | mask : + mac_filters->ucast_accept_all & ~mask; + + mac_filters->mcast_accept_all = accp_all_mcast ? + mac_filters->mcast_accept_all | mask : + mac_filters->mcast_accept_all & ~mask; + + mac_filters->bcast_accept_all = accp_all_bcast ?
+ mac_filters->bcast_accept_all | mask : + mac_filters->bcast_accept_all & ~mask; + + mac_filters->unmatched_unicast = unmatched_unicast ? + mac_filters->unmatched_unicast | mask : + mac_filters->unmatched_unicast & ~mask; + + DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n" + "accp_mcast 0x%x\naccp_bcast 0x%x\n", + mac_filters->ucast_drop_all, + mac_filters->mcast_drop_all, + mac_filters->ucast_accept_all, + mac_filters->mcast_accept_all, + mac_filters->bcast_accept_all); + + /* write the MAC filter structure*/ + __storm_memset_mac_filters(bp, mac_filters, p->func_id); + + /* The operation is completed */ + clear_bit(p->state, p->pstate); + smp_mb__after_clear_bit(); + + return 0; +} + +/* Setup ramrod data */ +static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid, + struct eth_classify_header *hdr, + u8 rule_cnt) +{ + hdr->echo = cid; + hdr->rule_cnt = rule_cnt; +} + +static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp, + unsigned long accept_flags, + struct eth_filter_rules_cmd *cmd, + bool clear_accept_all) +{ + u16 state; + + /* start with 'drop-all' */ + state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL | + ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (accept_flags) { + if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags)) + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + + if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + } + + if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) { + state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL; + } + if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags)) + state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + + if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) { + state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL; + state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags)) + state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN; + } + + /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */ + if (clear_accept_all) { + state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL; + state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED; + } + + cmd->state = cpu_to_le16(state); + +} + +static int bnx2x_set_rx_mode_e2(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p) +{ + struct eth_filter_rules_ramrod_data *data = p->rdata; + int rc; + u8 rule_idx = 0; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + /* Setup ramrod data */ + + /* Tx (internal switching) */ + if (test_bit(RAMROD_TX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_TX_CMD; + + bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags, + &(data->rules[rule_idx++]), false); + } + + /* Rx */ + if (test_bit(RAMROD_RX, &p->ramrod_flags)) { + data->rules[rule_idx].client_id = p->cl_id; + data->rules[rule_idx].func_id = p->func_id; + + data->rules[rule_idx].cmd_general_data = + ETH_FILTER_RULES_CMD_RX_CMD; + + bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags, + &(data->rules[rule_idx++]), false); + } + + + /* + * If FCoE Queue configuration has been requested configure the Rx and + * internal switching modes 
for this queue in separate rules.
+ *
+ * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
+ * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+ */
+ if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+ /* Tx (internal switching) */
+ if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_TX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ true);
+ }
+
+ /* Rx */
+ if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+ data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+ data->rules[rule_idx].func_id = p->func_id;
+
+ data->rules[rule_idx].cmd_general_data =
+ ETH_FILTER_RULES_CMD_RX_CMD;
+
+ bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ true);
+ }
+ }
+
+ /*
+ * Set the ramrod header (most importantly - number of rules to
+ * configure).
+ */
+ bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+ DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
+ "tx_accept_flags 0x%lx\n",
+ data->header.rule_cnt, p->rx_accept_flags,
+ p->tx_accept_flags);
+
+ /*
+ * No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+ U64_HI(p->rdata_mapping),
+ U64_LO(p->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ /* Do nothing */
+ return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+ struct bnx2x_rx_mode_ramrod_params *p)
+{
+ int rc;
+
+ /* Configure the new classification in the chip */
+ rc = p->rx_mode_obj->config_rx_mode(bp, p);
+ if (rc < 0)
+ return rc;
+
+ /* Wait for a ramrod completion if it was requested */
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+ rc = p->rx_mode_obj->wait_comp(bp, p);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+ struct bnx2x_rx_mode_obj *o)
+{
+ if (CHIP_IS_E1x(bp)) {
+ o->wait_comp = bnx2x_empty_rx_mode_wait;
+ o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+ } else {
+ o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
+ o->config_rx_mode = bnx2x_set_rx_mode_e2;
+ }
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+ return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
+
+struct bnx2x_mcast_mac_elem {
+ struct list_head link;
+ u8 mac[ETH_ALEN];
+ u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_pending_mcast_cmd {
+ struct list_head link;
+ int type; /* BNX2X_MCAST_CMD_X */
+ union {
+ struct list_head macs_head;
+ u32 macs_num; /* Needed for DEL command */
+ int next_bin; /* Needed for RESTORE flow with aprox match */
+ } data;
+
+ bool done; /* set to true when the command has been handled,
+ * practically used in 57712 handling only, where one pending
command may be handled in a few operations. Since for
+ * other chips every operation handling is completed in a
+ * single ramrod, there is no need to utilize this field.
+ */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+ o->raw.wait_comp(bp, &o->raw))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ int total_sz;
+ struct bnx2x_pending_mcast_cmd *new_cmd;
+ struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+ struct bnx2x_mcast_list_elem *pos;
+ int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+ p->mcast_list_len : 0);
+
+ /* If the command is empty ("handle pending commands only"), break */
+ if (!p->mcast_list_len)
+ return 0;
+
+ total_sz = sizeof(*new_cmd) +
+ macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
+
+ /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+ new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+ if (!new_cmd)
+ return -ENOMEM;
+
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
+ "macs_list_len=%d\n", cmd, macs_list_len);
+
+ INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+ new_cmd->type = cmd;
+ new_cmd->done = false;
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ cur_mac = (struct bnx2x_mcast_mac_elem *)
+ ((u8 *)new_cmd + sizeof(*new_cmd));
+
+ /* Push the MACs of the current command into the pending command
+ * MACs list: FIFO
+ */
+ list_for_each_entry(pos, &p->mcast_list, link) {
+ memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+ list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+ cur_mac++;
+ }
+
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ new_cmd->data.macs_num = p->mcast_list_len;
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ new_cmd->data.next_bin = 0;
+ break;
+
+ default:
+ /* Don't leak the command buffer on an unknown command */
+ kfree(new_cmd);
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Push the new pending command to the tail of the pending list: FIFO */
+ list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+ o->set_sched(o);
+
+ return 1;
+}
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o:
+ * @last: index to start looking from (inclusive)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+ int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+ for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+ if (o->registry.aprox_match.vec[i])
+ for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+ int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+ if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+ vec, cur_bit)) { + return cur_bit; + } + } + inner_start = 0; + } + + /* None found */ + return -1; +} + +/** + * bnx2x_mcast_clear_first_bin - find the first set bin and clear it + * + * @o: + * + * returns the index of the found bin or -1 if none is found + */ +static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o) +{ + int cur_bit = bnx2x_mcast_get_next_bin(o, 0); + + if (cur_bit >= 0) + BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit); + + return cur_bit; +} + +static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + u8 rx_tx_flag = 0; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD; + + if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) || + (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX)) + rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD; + + return rx_tx_flag; +} + +static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + int cmd) +{ + struct bnx2x_raw_obj *r = &o->raw; + struct eth_multicast_rules_ramrod_data *data = + (struct eth_multicast_rules_ramrod_data *)(r->rdata); + u8 func_id = r->func_id; + u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o); + int bin; + + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) + rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD; + + data->rules[idx].cmd_general_data |= rx_tx_add_flag; + + /* Get a bin and update a bins' vector */ + switch (cmd) { + case BNX2X_MCAST_CMD_ADD: + bin = bnx2x_mcast_bin_from_mac(cfg_data->mac); + BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin); + break; + + case BNX2X_MCAST_CMD_DEL: + /* If there were no more bins to clear + * (bnx2x_mcast_clear_first_bin() returns -1) then we would + * clear any (0xff) bin. + * See bnx2x_mcast_validate_e2() for explanation when it may + * happen. + */ + bin = bnx2x_mcast_clear_first_bin(o); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bin = cfg_data->bin; + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd); + return; + } + + DP(BNX2X_MSG_SP, "%s bin %d\n", + ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ? + "Setting" : "Clearing"), bin); + + data->rules[idx].bin_id = (u8)bin; + data->rules[idx].func_id = func_id; + data->rules[idx].engine_id = o->engine_id; +} + +/** + * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry + * + * @bp: device handle + * @o: + * @start_bin: index in the registry to start from (including) + * @rdata_idx: index in the ramrod data to start from + * + * returns last handled bin index or -1 if all bins have been handled + */ +static inline int bnx2x_mcast_handle_restore_cmd_e2( + struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin, + int *rdata_idx) +{ + int cur_bin, cnt = *rdata_idx; + union bnx2x_mcast_config_data cfg_data = {0}; + + /* go through the registry and configure the bins from it */ + for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0; + cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) { + + cfg_data.bin = (u8)cur_bin; + o->set_one_rule(bp, o, cnt, &cfg_data, + BNX2X_MCAST_CMD_RESTORE); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin); + + /* Break if we reached the maximum number + * of rules. 
+ */ + if (cnt >= o->max_cmd_len) + break; + } + + *rdata_idx = cnt; + + return cur_bin; +} + +static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n; + int cnt = *line_idx; + union bnx2x_mcast_config_data cfg_data = {0}; + + list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head, + link) { + + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + BNX2X_MAC_PRN_LIST(pmac_pos->mac)); + + list_del(&pmac_pos->link); + + /* Break if we reached the maximum number + * of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* if no more MACs to configure - we are done */ + if (list_empty(&cmd_pos->data.macs_head)) + cmd_pos->done = true; +} + +static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + int cnt = *line_idx; + + while (cmd_pos->data.macs_num) { + o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type); + + cnt++; + + cmd_pos->data.macs_num--; + + DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", + cmd_pos->data.macs_num, cnt); + + /* Break if we reached the maximum + * number of rules. + */ + if (cnt >= o->max_cmd_len) + break; + } + + *line_idx = cnt; + + /* If we cleared all bins - we are done */ + if (!cmd_pos->data.macs_num) + cmd_pos->done = true; +} + +static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos, + int *line_idx) +{ + cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin, + line_idx); + + if (cmd_pos->data.next_bin < 0) + /* If o->set_restore returned -1 we are done */ + cmd_pos->done = true; + else + /* Start from the next bin next time */ + cmd_pos->data.next_bin++; +} + +static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n; + int cnt = 0; + struct bnx2x_mcast_obj *o = p->mcast_obj; + + list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head, + link) { + switch (cmd_pos->type) { + case BNX2X_MCAST_CMD_ADD: + bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt); + break; + + case BNX2X_MCAST_CMD_DEL: + bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos, + &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); + return -EINVAL; + } + + /* If the command has been completed - remove it from the list + * and free the memory + */ + if (cmd_pos->done) { + list_del(&cmd_pos->link); + kfree(cmd_pos); + } + + /* Break if we reached the maximum number of rules */ + if (cnt >= o->max_cmd_len) + break; + } + + return cnt; +} + +static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p, + int *line_idx) +{ + struct bnx2x_mcast_list_elem *mlist_pos; + union bnx2x_mcast_config_data cfg_data = {0}; + int cnt = *line_idx; + + list_for_each_entry(mlist_pos, &p->mcast_list, link) { + cfg_data.mac = mlist_pos->mac; + o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + 
BNX2X_MAC_PRN_LIST(mlist_pos->mac));
+ }
+
+ *line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ int *line_idx)
+{
+ int cnt = *line_idx, i;
+
+ for (i = 0; i < p->mcast_list_len; i++) {
+ o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+ cnt++;
+
+ DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+ p->mcast_list_len - i - 1);
+ }
+
+ *line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
+ *
+ * @bp: device handle
+ * @p:
+ * @cmd:
+ * @start_cnt: first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns number of lines filled in the ramrod data in total.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd,
+ int start_cnt)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int cnt = start_cnt;
+
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ o->hdl_restore(bp, o, 0, &cnt);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* The current command has been handled */
+ p->mcast_list_len = 0;
+
+ return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* Don't break */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ /* Here we set the approximate amount of work to do, which in
+ * fact may be less, as some MACs in postponed ADD
+ * command(s) scheduled before this command may fall into
+ * the same bin and the actual number of bins set in the
+ * registry would be less than we estimated here. See
+ * bnx2x_mcast_set_one_rule_e2() for further details.
+ */
+ p->mcast_list_len = reg_sz;
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Here we assume that all new MACs will fall into new bins.
+ * However we will correct the real registry size after we
+ * handle all pending commands.
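+ * For example (illustrative values, not from the original
+ * comment): two MACs in a postponed ADD whose CRC32c hashes
+ * select the same bin set only one bin in the registry,
+ * while the estimate above counts two.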
+ */
+ o->set_registry_size(o, reg_sz + p->mcast_list_len);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* Increase the total number of MACs pending to be configured */
+ o->total_pending_num += p->mcast_list_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_bins);
+ o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
+ *
+ * @bp: device handle
+ * @p:
+ * @len: number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ u8 len)
+{
+ struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+ data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp: device handle
+ * @o:
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution complexity is linear in the number of
+ * set bins.
+ *
+ * returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o)
+{
+ int i, cnt = 0;
+ u64 elem;
+
+ for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+ elem = o->registry.aprox_match.vec[i];
+ for (; elem; cnt++)
+ elem &= elem - 1;
+ }
+
+ o->set_registry_size(o, cnt);
+
+ return 0;
+}
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct eth_multicast_rules_ramrod_data *data =
+ (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+ int cnt = 0, rc;
+
+ /* Reset the ramrod data buffer */
+ memset(data, 0, sizeof(*data));
+
+ cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+ /* If there are no more pending commands - clear SCHEDULED state */
+ if (list_empty(&o->pending_cmds_head))
+ o->clear_sched(o);
+
+ /* The below may be true iff there was enough room in the ramrod
+ * data for all pending commands and for the current
+ * command. Otherwise the current command would have been added
+ * to the pending commands and p->mcast_list_len would have been
+ * zeroed.
+ */
+ if (p->mcast_list_len > 0)
+ cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+ /* We've pulled out some MACs - update the total number of
+ * outstanding.
+ */
+ o->total_pending_num -= cnt;
+
+ /* send a ramrod */
+ WARN_ON(o->total_pending_num < 0);
+ WARN_ON(cnt > o->max_cmd_len);
+
+ bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+ /* Update a registry size if there are no more pending operations.
+ *
+ * We don't want to change the value of the registry size if there are
+ * pending operations because we want it to always be equal to the
+ * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+ * set bins after the last requested operation in order to properly
+ * evaluate the size of the next DEL/RESTORE operation.
+ *
+ * Note that we update the registry itself during command(s) handling
+ * - see bnx2x_mcast_set_one_rule_e2().
That's because for 57712 we
+ * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+ * with a limited number of update commands (per MAC/bin) and we don't
+ * know in this scope what the actual state of bins configuration is
+ * going to be after this ramrod.
+ */
+ if (!o->total_pending_num)
+ bnx2x_mcast_refresh_registry_e2(bp, o);
+
+ /*
+ * If CLEAR_ONLY was requested - don't send a ramrod and clear
+ * RAMROD_PENDING status immediately.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ raw->clear_pending(raw);
+ return 0;
+ } else {
+ /*
+ * No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+ raw->cid, U64_HI(raw->rdata_mapping),
+ U64_LO(raw->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+ if (rc)
+ return rc;
+
+ /* Ramrod completion is pending */
+ return 1;
+ }
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ /* Mark that there is work to do */
+ if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+ p->mcast_list_len = 1;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins)
+{
+ /* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+ (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o,
+ struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ struct bnx2x_mcast_list_elem *mlist_pos;
+ int bit;
+
+ list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+ bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+ DP(BNX2X_MSG_SP, "About to configure "
+ BNX2X_MAC_FMT" mcast MAC, bin %d\n",
+ BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
+
+ /* bookkeeping... */
+ BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+ bit);
+ }
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+ u32 *mc_filter)
+{
+ int bit;
+
+ for (bit = bnx2x_mcast_get_next_bin(o, 0);
+ bit >= 0;
+ bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+ BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+ DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+ }
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM. So we don't
+ * really need any tricks to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ int i;
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ /* If CLEAR_ONLY has been requested - clear the registry
+ * and clear a pending bit.
+ */
+ if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ u32 mc_filter[MC_HASH_SIZE] = {0};
+
+ /* Set the multicast filter bits before writing it into
+ * the internal memory.
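+ * For illustration (example values only):
+ * BNX2X_57711_SET_MC_FILTER() maps bin 77 to
+ * mc_filter[77 >> 5] == mc_filter[2],
+ * bit (77 & 0x1f) == 13.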
+ */
+ switch (cmd) {
+ case BNX2X_MCAST_CMD_ADD:
+ bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+ break;
+
+ case BNX2X_MCAST_CMD_DEL:
+ DP(BNX2X_MSG_SP, "Invalidating multicast "
+ "MACs configuration\n");
+
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+ break;
+
+ case BNX2X_MCAST_CMD_RESTORE:
+ bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+ }
+
+ /* Set the mcast filter in the internal memory */
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+ } else
+ /* clear the registry */
+ memset(o->registry.aprox_match.vec, 0,
+ sizeof(o->registry.aprox_match.vec));
+
+ /* We are done */
+ r->clear_pending(r);
+
+ return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int cmd)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+ int reg_sz = o->get_registry_size(o);
+
+ switch (cmd) {
+ /* DEL command deletes all currently configured MACs */
+ case BNX2X_MCAST_CMD_DEL:
+ o->set_registry_size(o, 0);
+ /* Don't break */
+
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ p->mcast_list_len = reg_sz;
+ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+ cmd, p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+ case BNX2X_MCAST_CMD_CONT:
+ /* Multicast MACs on 57710 are configured as unicast MACs and
+ * there is only a limited number of CAM entries for that
+ * matter.
+ */
+ if (p->mcast_list_len > o->max_cmd_len) {
+ BNX2X_ERR("Can't configure more than %d multicast MACs "
+ "on 57710\n", o->max_cmd_len);
+ return -EINVAL;
+ }
+ /* Every configured MAC should be cleared if DEL command is
+ * called. Only the last ADD command is relevant since
+ * every ADD command overrides the previous configuration.
+ */
+ DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+ if (p->mcast_list_len > 0)
+ o->set_registry_size(o, p->mcast_list_len);
+
+ break;
+
+ default:
+ BNX2X_ERR("Unknown command: %d\n", cmd);
+ return -EINVAL;
+
+ }
+
+ /* We want to ensure that commands are executed one by one for 57710.
+ * Therefore each non-empty command will consume o->max_cmd_len.
+ */
+ if (p->mcast_list_len)
+ o->total_pending_num += o->max_cmd_len;
+
+ return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_macs)
+{
+ struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+ o->set_registry_size(o, old_num_macs);
+
+ /* If the current command hasn't been handled yet and we got
+ * here, it means it was meant to be dropped and we have to
+ * update the number of outstanding MACs accordingly.
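+ * Recall that bnx2x_mcast_validate_e1() charged o->max_cmd_len
+ * (rather than the actual number of MACs) for every non-empty
+ * command so that 57710 executes commands one at a time, hence
+ * the same amount is subtracted back here.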
+ */ + if (p->mcast_list_len) + o->total_pending_num -= o->max_cmd_len; +} + +static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, + int cmd) +{ + struct bnx2x_raw_obj *r = &o->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(r->rdata); + + /* copy mac */ + if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) { + bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr, + &data->config_table[idx].middle_mac_addr, + &data->config_table[idx].lsb_mac_addr, + cfg_data->mac); + + data->config_table[idx].vlan_id = 0; + data->config_table[idx].pf_id = r->func_id; + data->config_table[idx].clients_bit_vector = + cpu_to_le32(1 << r->cl_id); + + SET_FLAG(data->config_table[idx].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + } +} + +/** + * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd + * + * @bp: device handle + * @p: + * @len: number of rules to handle + */ +static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + u8 len) +{ + struct bnx2x_raw_obj *r = &p->mcast_obj->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(r->rdata); + + u8 offset = (CHIP_REV_IS_SLOW(bp) ? + BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) : + BNX2X_MAX_MULTICAST*(1 + r->func_id)); + + data->hdr.offset = offset; + data->hdr.client_id = 0xff; + data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) | + (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT)); + data->hdr.length = len; +} + +/** + * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710 + * + * @bp: device handle + * @o: + * @start_idx: index in the registry to start from + * @rdata_idx: index in the ramrod data to start from + * + * restore command for 57710 is like all other commands - always a stand alone + * command - start_idx and rdata_idx will always be 0. This function will always + * succeed. + * returns -1 to comply with 57712 variant. + */ +static inline int bnx2x_mcast_handle_restore_cmd_e1( + struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx, + int *rdata_idx) +{ + struct bnx2x_mcast_mac_elem *elem; + int i = 0; + union bnx2x_mcast_config_data cfg_data = {0}; + + /* go through the registry and configure the MACs from it. 
*/ + list_for_each_entry(elem, &o->registry.exact_match.macs, link) { + cfg_data.mac = &elem->mac[0]; + o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE); + + i++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + BNX2X_MAC_PRN_LIST(cfg_data.mac)); + } + + *rdata_idx = i; + + return -1; +} + + +static inline int bnx2x_mcast_handle_pending_cmds_e1( + struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p) +{ + struct bnx2x_pending_mcast_cmd *cmd_pos; + struct bnx2x_mcast_mac_elem *pmac_pos; + struct bnx2x_mcast_obj *o = p->mcast_obj; + union bnx2x_mcast_config_data cfg_data = {0}; + int cnt = 0; + + + /* If nothing to be done - return */ + if (list_empty(&o->pending_cmds_head)) + return 0; + + /* Handle the first command */ + cmd_pos = list_first_entry(&o->pending_cmds_head, + struct bnx2x_pending_mcast_cmd, link); + + switch (cmd_pos->type) { + case BNX2X_MCAST_CMD_ADD: + list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) { + cfg_data.mac = &pmac_pos->mac[0]; + o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type); + + cnt++; + + DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT + " mcast MAC\n", + BNX2X_MAC_PRN_LIST(pmac_pos->mac)); + } + break; + + case BNX2X_MCAST_CMD_DEL: + cnt = cmd_pos->data.macs_num; + DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt); + break; + + case BNX2X_MCAST_CMD_RESTORE: + o->hdl_restore(bp, o, 0, &cnt); + break; + + default: + BNX2X_ERR("Unknown command: %d\n", cmd_pos->type); + return -EINVAL; + } + + list_del(&cmd_pos->link); + kfree(cmd_pos); + + return cnt; +} + +/** + * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr(). + * + * @fw_hi: + * @fw_mid: + * @fw_lo: + * @mac: + */ +static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid, + __le16 *fw_lo, u8 *mac) +{ + mac[1] = ((u8 *)fw_hi)[0]; + mac[0] = ((u8 *)fw_hi)[1]; + mac[3] = ((u8 *)fw_mid)[0]; + mac[2] = ((u8 *)fw_mid)[1]; + mac[5] = ((u8 *)fw_lo)[0]; + mac[4] = ((u8 *)fw_lo)[1]; +} + +/** + * bnx2x_mcast_refresh_registry_e1 - + * + * @bp: device handle + * @cnt: + * + * Check the ramrod data first entry flag to see if it's a DELETE or ADD command + * and update the registry correspondingly: if ADD - allocate a memory and add + * the entries to the registry (list), if DELETE - clear the registry and free + * the memory. 
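+ * Note that on the ADD path the whole registry is allocated as one
+ * kzalloc()'ed array whose first element is also the first entry in the
+ * exact_match list; that is why the DELETE path below can release the
+ * entire registry with a single kfree() of the first list entry.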
+ */ +static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp, + struct bnx2x_mcast_obj *o) +{ + struct bnx2x_raw_obj *raw = &o->raw; + struct bnx2x_mcast_mac_elem *elem; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(raw->rdata); + + /* If first entry contains a SET bit - the command was ADD, + * otherwise - DEL_ALL + */ + if (GET_FLAG(data->config_table[0].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) { + int i, len = data->hdr.length; + + /* Break if it was a RESTORE command */ + if (!list_empty(&o->registry.exact_match.macs)) + return 0; + + elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC); + if (!elem) { + BNX2X_ERR("Failed to allocate registry memory\n"); + return -ENOMEM; + } + + for (i = 0; i < len; i++, elem++) { + bnx2x_get_fw_mac_addr( + &data->config_table[i].msb_mac_addr, + &data->config_table[i].middle_mac_addr, + &data->config_table[i].lsb_mac_addr, + elem->mac); + DP(BNX2X_MSG_SP, "Adding registry entry for [" + BNX2X_MAC_FMT"]\n", + BNX2X_MAC_PRN_LIST(elem->mac)); + list_add_tail(&elem->link, + &o->registry.exact_match.macs); + } + } else { + elem = list_first_entry(&o->registry.exact_match.macs, + struct bnx2x_mcast_mac_elem, link); + DP(BNX2X_MSG_SP, "Deleting a registry\n"); + kfree(elem); + INIT_LIST_HEAD(&o->registry.exact_match.macs); + } + + return 0; +} + +static int bnx2x_mcast_setup_e1(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *raw = &o->raw; + struct mac_configuration_cmd *data = + (struct mac_configuration_cmd *)(raw->rdata); + int cnt = 0, i, rc; + + /* Reset the ramrod data buffer */ + memset(data, 0, sizeof(*data)); + + /* First set all entries as invalid */ + for (i = 0; i < o->max_cmd_len ; i++) + SET_FLAG(data->config_table[i].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); + + /* Handle pending commands first */ + cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p); + + /* If there are no more pending commands - clear SCHEDULED state */ + if (list_empty(&o->pending_cmds_head)) + o->clear_sched(o); + + /* The below may be true iff there were no pending commands */ + if (!cnt) + cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0); + + /* For 57710 every command has o->max_cmd_len length to ensure that + * commands are done one at a time. + */ + o->total_pending_num -= o->max_cmd_len; + + /* send a ramrod */ + + WARN_ON(cnt > o->max_cmd_len); + + /* Set ramrod header (in particular, a number of entries to update) */ + bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt); + + /* update a registry: we need the registry contents to be always up + * to date in order to be able to execute a RESTORE opcode. Here + * we use the fact that for 57710 we sent one command at a time + * hence we may take the registry update out of the command handling + * and do it in a simpler way here. + */ + rc = bnx2x_mcast_refresh_registry_e1(bp, o); + if (rc) + return rc; + + /* + * If CLEAR_ONLY was requested - don't send a ramrod and clear + * RAMROD_PENDING status immediately. + */ + if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) { + raw->clear_pending(raw); + return 0; + } else { + /* + * No need for an explicit memory barrier here as long we would + * need to ensure the ordering of writing to the SPQ element + * and updating of the SPQ producer which involves a memory + * read and we will have to put a full memory barrier there + * (inside bnx2x_sp_post()). 
+ */ + + /* Send a ramrod */ + rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid, + U64_HI(raw->rdata_mapping), + U64_LO(raw->rdata_mapping), + ETH_CONNECTION_TYPE); + if (rc) + return rc; + + /* Ramrod completion is pending */ + return 1; + } + +} + +static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o) +{ + return o->registry.exact_match.num_macs_set; +} + +static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o) +{ + return o->registry.aprox_match.num_bins_set; +} + +static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o, + int n) +{ + o->registry.exact_match.num_macs_set = n; +} + +static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o, + int n) +{ + o->registry.aprox_match.num_bins_set = n; +} + +int bnx2x_config_mcast(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, + int cmd) +{ + struct bnx2x_mcast_obj *o = p->mcast_obj; + struct bnx2x_raw_obj *r = &o->raw; + int rc = 0, old_reg_size; + + /* This is needed to recover number of currently configured mcast macs + * in case of failure. + */ + old_reg_size = o->get_registry_size(o); + + /* Do some calculations and checks */ + rc = o->validate(bp, p, cmd); + if (rc) + return rc; + + /* Return if there is no work to do */ + if ((!p->mcast_list_len) && (!o->check_sched(o))) + return 0; + + DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d " + "o->max_cmd_len=%d\n", o->total_pending_num, + p->mcast_list_len, o->max_cmd_len); + + /* Enqueue the current command to the pending list if we can't complete + * it in the current iteration + */ + if (r->check_pending(r) || + ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) { + rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd); + if (rc < 0) + goto error_exit1; + + /* As long as the current command is in a command list we + * don't need to handle it separately. 
+ */ + p->mcast_list_len = 0; + } + + if (!r->check_pending(r)) { + + /* Set 'pending' state */ + r->set_pending(r); + + /* Configure the new classification in the chip */ + rc = o->config_mcast(bp, p, cmd); + if (rc < 0) + goto error_exit2; + + /* Wait for a ramrod completion if was requested */ + if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) + rc = o->wait_comp(bp, o); + } + + return rc; + +error_exit2: + r->clear_pending(r); + +error_exit1: + o->revert(bp, p, old_reg_size); + + return rc; +} + +static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) +{ + smp_mb__before_clear_bit(); + clear_bit(o->sched_state, o->raw.pstate); + smp_mb__after_clear_bit(); +} + +static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) +{ + smp_mb__before_clear_bit(); + set_bit(o->sched_state, o->raw.pstate); + smp_mb__after_clear_bit(); +} + +static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) +{ + return !!test_bit(o->sched_state, o->raw.pstate); +} + +static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o) +{ + return o->raw.check_pending(&o->raw) || o->check_sched(o); +} + +void bnx2x_init_mcast_obj(struct bnx2x *bp, + struct bnx2x_mcast_obj *mcast_obj, + u8 mcast_cl_id, u32 mcast_cid, u8 func_id, + u8 engine_id, void *rdata, dma_addr_t rdata_mapping, + int state, unsigned long *pstate, bnx2x_obj_type type) +{ + memset(mcast_obj, 0, sizeof(*mcast_obj)); + + bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id, + rdata, rdata_mapping, state, pstate, type); + + mcast_obj->engine_id = engine_id; + + INIT_LIST_HEAD(&mcast_obj->pending_cmds_head); + + mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED; + mcast_obj->check_sched = bnx2x_mcast_check_sched; + mcast_obj->set_sched = bnx2x_mcast_set_sched; + mcast_obj->clear_sched = bnx2x_mcast_clear_sched; + + if (CHIP_IS_E1(bp)) { + mcast_obj->config_mcast = bnx2x_mcast_setup_e1; + mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd; + mcast_obj->hdl_restore = + bnx2x_mcast_handle_restore_cmd_e1; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + + if (CHIP_REV_IS_SLOW(bp)) + mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI; + else + mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST; + + mcast_obj->wait_comp = bnx2x_mcast_wait; + mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1; + mcast_obj->validate = bnx2x_mcast_validate_e1; + mcast_obj->revert = bnx2x_mcast_revert_e1; + mcast_obj->get_registry_size = + bnx2x_mcast_get_registry_size_exact; + mcast_obj->set_registry_size = + bnx2x_mcast_set_registry_size_exact; + + /* 57710 is the only chip that uses the exact match for mcast + * at the moment. + */ + INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs); + + } else if (CHIP_IS_E1H(bp)) { + mcast_obj->config_mcast = bnx2x_mcast_setup_e1h; + mcast_obj->enqueue_cmd = NULL; + mcast_obj->hdl_restore = NULL; + mcast_obj->check_pending = bnx2x_mcast_check_pending; + + /* 57711 doesn't send a ramrod, so it has unlimited credit + * for one command. 
+ */
+ mcast_obj->max_cmd_len = -1;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = NULL;
+ mcast_obj->validate = bnx2x_mcast_validate_e1h;
+ mcast_obj->revert = bnx2x_mcast_revert_e1h;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ } else {
+ mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
+ mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
+ mcast_obj->hdl_restore =
+ bnx2x_mcast_handle_restore_cmd_e2;
+ mcast_obj->check_pending = bnx2x_mcast_check_pending;
+ /* TODO: There should be a proper HSI define for this number!!!
+ */
+ mcast_obj->max_cmd_len = 16;
+ mcast_obj->wait_comp = bnx2x_mcast_wait;
+ mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
+ mcast_obj->validate = bnx2x_mcast_validate_e2;
+ mcast_obj->revert = bnx2x_mcast_revert_e2;
+ mcast_obj->get_registry_size =
+ bnx2x_mcast_get_registry_size_aprox;
+ mcast_obj->set_registry_size =
+ bnx2x_mcast_set_registry_size_aprox;
+ }
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...if (v + a) is less than u.
+ *
+ * returns true if (v + a) was less than u, and false otherwise.
+ *
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c + a >= u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c + a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
+
+/**
+ * atomic_dec_ifmoe - decrement if the result is greater or equal to a given value.
+ *
+ * @v: pointer of type atomic_t
+ * @a: the amount to subtract from v...
+ * @u: ...if (v - a) is greater than or equal to u.
+ *
+ * returns true if (v - a) was greater than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+ int c, old;
+
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c - a < u))
+ return false;
+
+ old = atomic_cmpxchg((v), c, c - a);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return true;
+}
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+ rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+ smp_mb();
+
+ return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+ bool rc;
+
+ smp_mb();
+
+ /* Don't allow a refill if credit + cnt > pool_sz */
+ rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+ smp_mb();
+
+ return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+ int cur_credit;
+
+ smp_mb();
+ cur_credit = atomic_read(&o->credit);
+
+ return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+ int cnt)
+{
+ return true;
+}
+
+
+static bool bnx2x_credit_pool_get_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ int idx, vec, i;
+
+ *offset = -1;
+
+ /* Find the "internal cam-offset" then add to base for this object... */
+ for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+ /* Skip the current vector if there are no free entries in it */
+ if (!o->pool_mirror[vec])
+ continue;
+
+ /* If we've got here we are going to find a free entry */
+ for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0;
+ i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+ if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+ /* Got one!!
*/
+ BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+ *offset = o->base_pool_offset + idx;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ if (offset < o->base_pool_offset)
+ return false;
+
+ offset -= o->base_pool_offset;
+
+ if (offset >= o->pool_sz)
+ return false;
+
+ /* Return the entry to the pool */
+ BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+ return true;
+}
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int offset)
+{
+ return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+ struct bnx2x_credit_pool_obj *o,
+ int *offset)
+{
+ *offset = -1;
+ return true;
+}
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p:
+ * @base: Base entry in the CAM to use.
+ * @credit: pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+ int base, int credit)
+{
+ /* Zero the object first */
+ memset(p, 0, sizeof(*p));
+
+ /* Set the table to all 1s */
+ memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+ /* Init the pool as full */
+ atomic_set(&p->credit, credit);
+
+ /* The total pool size */
+ p->pool_sz = credit;
+
+ p->base_pool_offset = base;
+
+ /* Commit the change */
+ smp_mb();
+
+ p->check = bnx2x_credit_pool_check;
+
+ /* if pool credit is negative - disable the checks */
+ if (credit >= 0) {
+ p->put = bnx2x_credit_pool_put;
+ p->get = bnx2x_credit_pool_get;
+ p->put_entry = bnx2x_credit_pool_put_entry;
+ p->get_entry = bnx2x_credit_pool_get_entry;
+ } else {
+ p->put = bnx2x_credit_pool_always_true;
+ p->get = bnx2x_credit_pool_always_true;
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+
+ /* If base is negative - disable entries handling */
+ if (base < 0) {
+ p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+ p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+ }
+}
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p, u8 func_id,
+ u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+ int cam_sz;
+
+ if (CHIP_IS_E1(bp)) {
+ /* In E1, Multicast is saved in the CAM... */
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+ } else if (CHIP_IS_E1H(bp)) {
+ /* CAM credit is equally divided between all active functions
+ * on the PORT.
+ */
+ if ((func_num > 0)) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+ bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ } else {
+
+ /*
+ * CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if ((func_num > 0)) {
+ if (!CHIP_REV_IS_SLOW(bp))
+ cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+ else
+ cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+ /*
+ * No need for CAM entries handling for 57712 and
+ * newer.
+ */
+ bnx2x_init_credit_pool(p, -1, cam_sz);
+ } else {
+ /* this should never happen! Block MAC operations.
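+ * A (base, credit) pair of (0, 0) produces a pool
+ * with zero credit, so every ->get() call fails and
+ * all MAC configuration requests are rejected.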
*/
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+
+ }
+}
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+ struct bnx2x_credit_pool_obj *p,
+ u8 func_id,
+ u8 func_num)
+{
+ if (CHIP_IS_E1x(bp)) {
+ /*
+ * There is no VLAN credit in HW on 57710 and 57711;
+ * only MAC / MAC-VLAN can be set
+ */
+ bnx2x_init_credit_pool(p, 0, -1);
+ } else {
+ /*
+ * CAM credit is equally divided between all active functions
+ * on the PATH.
+ */
+ if (func_num > 0) {
+ int credit = MAX_VLAN_CREDIT_E2 / func_num;
+ bnx2x_init_credit_pool(p, func_id * credit, credit);
+ } else
+ /* this should never happen! Block VLAN operations. */
+ bnx2x_init_credit_pool(p, 0, 0);
+ }
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp: driver handle
+ * @p: pointer to rss configuration
+ *
+ * Prints it when NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int i;
+
+ DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+ DP(BNX2X_MSG_SP, "0x0000: ");
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+ DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+ /* Print 4 bytes per line */
+ if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+ (((i + 1) & 0x3) == 0)) {
+ DP_CONT(BNX2X_MSG_SP, "\n");
+ DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+ }
+ }
+
+ DP_CONT(BNX2X_MSG_SP, "\n");
+}
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp: device handle
+ * @p: rss configuration
+ *
+ * sends an UPDATE ramrod for that matter.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+ struct eth_rss_update_ramrod_data *data =
+ (struct eth_rss_update_ramrod_data *)(r->rdata);
+ u8 rss_mode = 0;
+ int rc;
+
+ memset(data, 0, sizeof(*data));
+
+ DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+ /* Set an echo field */
+ data->echo = (r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT);
+
+ /* RSS mode */
+ if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_DISABLED;
+ else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_REGULAR;
+ else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_VLAN_PRI;
+ else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_E1HOV_PRI;
+ else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
+ rss_mode = ETH_RSS_MODE_IP_DSCP;
+
+ data->rss_mode = rss_mode;
+
+ DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+ /* RSS capabilities */
+ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+ if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+ /* Hashing mask */
+ data->rss_result_mask = p->rss_result_mask;
+
+ /* RSS engine ID */
+ data->rss_engine_id = o->engine_id;
+
+ DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+ /* Indirection table */
+ memcpy(data->indirection_table, p->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Remember the last configuration */
+ 
memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+ /* Print the indirection table */
+ if (netif_msg_ifup(bp))
+ bnx2x_debug_print_ind_table(bp, p);
+
+ /* RSS keys */
+ if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+ memcpy(&data->rss_key[0], &p->rss_key[0],
+ sizeof(data->rss_key));
+ data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+ }
+
+ /*
+ * No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
+ */
+
+ /* Send a ramrod */
+ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+ U64_HI(r->rdata_mapping),
+ U64_LO(r->rdata_mapping),
+ ETH_CONNECTION_TYPE);
+
+ if (rc < 0)
+ return rc;
+
+ return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+ u8 *ind_table)
+{
+ memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *p)
+{
+ int rc;
+ struct bnx2x_rss_config_obj *o = p->rss_obj;
+ struct bnx2x_raw_obj *r = &o->raw;
+
+ /* Do nothing if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ return 0;
+
+ r->set_pending(r);
+
+ rc = o->config_rss(bp, p);
+ if (rc < 0) {
+ r->clear_pending(r);
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+ rc = r->wait_comp(bp, r);
+
+ return rc;
+}
+
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+ struct bnx2x_rss_config_obj *rss_obj,
+ u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+ void *rdata, dma_addr_t rdata_mapping,
+ int state, unsigned long *pstate,
+ bnx2x_obj_type type)
+{
+ bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+ rdata_mapping, state, pstate, type);
+
+ rss_obj->engine_id = engine_id;
+ rss_obj->config_rss = bnx2x_setup_rss;
+}
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transition
+ *
+ * returns 0 in case of a successfully completed transition, a negative error
+ * code in case of failure, or a positive (EBUSY) value if there is a
+ * completion that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
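+ *
+ * A minimal usage sketch (hypothetical caller; queue object and command
+ * choice are for illustration only):
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = &bp->fp[0].q_obj;
+ *	q_params.cmd = BNX2X_Q_CMD_EMPTY;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	rc = bnx2x_queue_state_change(bp, &q_params);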
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ int rc, pending_bit;
+ unsigned long *pending = &o->pending;
+
+ /* Check that the requested transition is legal */
+ if (o->check_transition(bp, o, params))
+ return -EINVAL;
+
+ /* Set "pending" bit */
+ pending_bit = o->set_pending(o, params);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+ o->complete_cmd(bp, o, pending_bit);
+ else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+ if (rc) {
+ o->next_state = BNX2X_Q_STATE_MAX;
+ clear_bit(pending_bit, pending);
+ smp_mb__after_clear_bit();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, pending_bit);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(pending_bit, pending);
+}
+
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+ /* ACTIVATE and DEACTIVATE commands are implemented on top of
+ * the UPDATE command.
+ */
+ if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+ (cmd == BNX2X_Q_CMD_DEACTIVATE))
+ bit = BNX2X_Q_CMD_UPDATE;
+ else
+ bit = cmd;
+
+ set_bit(bit, &obj->pending);
+ return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp: device handle
+ * @o:
+ * @cmd:
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ if (o->next_tx_only >= o->max_cos)
+ /* >= because tx_only must always be smaller than cos since the
+ * primary connection supports COS 0
+ */
+ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
+ o->next_tx_only, o->max_cos);
+
+ DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+ "setting state to %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+ if (o->next_tx_only) /* print num tx-only if any exist */
+ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+ o->state = o->next_state;
+ o->num_tx_only = o->next_tx_only;
+ o->next_state = BNX2X_Q_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
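+ * As soon as the pending bit is cleared below, a waiter in
+ * bnx2x_queue_wait_comp() may proceed and read o->state,
+ * hence the write memory barrier that follows.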
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp: device handle
+ * @o: queue state object
+ * @cmd: command that completed
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ enum bnx2x_queue_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ if (o->next_tx_only >= o->max_cos)
+ /* >= because tx-only must always be smaller than max_cos,
+ * since the primary connection supports COS 0
+ */
+ BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
+ o->next_tx_only, o->max_cos);
+
+ DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+ "setting state to %d\n", cmd,
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+ if (o->next_tx_only) /* print num tx-only if any exist */
+ DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+ o->state = o->next_state;
+ o->num_tx_only = o->next_tx_only;
+ o->next_state = BNX2X_Q_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *cmd_params,
+ struct client_init_ramrod_data *data)
+{
+ struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+ /* Rx data */
+
+ /* IPv6 TPA supported for E2 and above only */
+ data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_general_setup_params *params,
+ struct client_init_general_data *gen_data,
+ unsigned long *flags)
+{
+ gen_data->client_id = o->cl_id;
+
+ if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
+ gen_data->statistics_counter_id =
+ params->stat_id;
+ gen_data->statistics_en_flg = 1;
+ gen_data->statistics_zero_flg =
+ test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
+ } else
+ gen_data->statistics_counter_id =
+ DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+ gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
+ gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
+ gen_data->sp_client_id = params->spcl_id;
+ gen_data->mtu = cpu_to_le16(params->mtu);
+ gen_data->func_id = o->func_id;
+
+ gen_data->cos = params->cos;
+
+ gen_data->traffic_type =
+ test_bit(BNX2X_Q_FLG_FCOE, flags) ?
+ LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+ DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
+ gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
+}
+
+static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_txq_setup_params *params,
+ struct client_init_tx_data *tx_data,
+ unsigned long *flags)
+{
+ tx_data->enforce_security_flg =
+ test_bit(BNX2X_Q_FLG_TX_SEC, flags);
+ tx_data->default_vlan =
+ cpu_to_le16(params->default_vlan);
+ tx_data->default_vlan_flg =
+ test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
+ tx_data->tx_switching_flg =
+ test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
+ tx_data->anti_spoofing_flg =
+ test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+ tx_data->tx_status_block_id = params->fw_sb_id;
+ tx_data->tx_sb_index_number = params->sb_cq_index;
+ tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+ tx_data->tx_bd_page_base.lo =
+ cpu_to_le32(U64_LO(params->dscr_map));
+ tx_data->tx_bd_page_base.hi =
+ cpu_to_le32(U64_HI(params->dscr_map));
+
+ /* Don't configure any Tx switching mode during queue SETUP */
+ tx_data->state = 0;
+}
+
+static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
+ struct rxq_pause_params *params,
+ struct client_init_rx_data *rx_data)
+{
+ /* flow control data */
+ rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
+ rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
+ rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
+ rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
+ rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
+ rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
+ rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
+}
+
+static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_rxq_setup_params *params,
+ struct client_init_rx_data *rx_data,
+ unsigned long *flags)
+{
+ /* Rx data */
+ rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
+ CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->vmqueue_mode_en_flg = 0;
+
+ rx_data->cache_line_alignment_log_size =
+ params->cache_line_log;
+ rx_data->enable_dynamic_hc =
test_bit(BNX2X_Q_FLG_DHC, flags); + rx_data->max_sges_for_packet = params->max_sges_pkt; + rx_data->client_qzone_id = params->cl_qzone_id; + rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz); + + /* Always start in DROP_ALL mode */ + rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL | + CLIENT_INIT_RX_DATA_MCAST_DROP_ALL); + + /* We don't set drop flags */ + rx_data->drop_ip_cs_err_flg = 0; + rx_data->drop_tcp_cs_err_flg = 0; + rx_data->drop_ttl0_flg = 0; + rx_data->drop_udp_cs_err_flg = 0; + rx_data->inner_vlan_removal_enable_flg = + test_bit(BNX2X_Q_FLG_VLAN, flags); + rx_data->outer_vlan_removal_enable_flg = + test_bit(BNX2X_Q_FLG_OV, flags); + rx_data->status_block_id = params->fw_sb_id; + rx_data->rx_sb_index_number = params->sb_cq_index; + rx_data->max_tpa_queues = params->max_tpa_queues; + rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz); + rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz); + rx_data->bd_page_base.lo = + cpu_to_le32(U64_LO(params->dscr_map)); + rx_data->bd_page_base.hi = + cpu_to_le32(U64_HI(params->dscr_map)); + rx_data->sge_page_base.lo = + cpu_to_le32(U64_LO(params->sge_map)); + rx_data->sge_page_base.hi = + cpu_to_le32(U64_HI(params->sge_map)); + rx_data->cqe_page_base.lo = + cpu_to_le32(U64_LO(params->rcq_map)); + rx_data->cqe_page_base.hi = + cpu_to_le32(U64_HI(params->rcq_map)); + rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags); + + if (test_bit(BNX2X_Q_FLG_MCAST, flags)) { + rx_data->approx_mcast_engine_id = o->func_id; + rx_data->is_approx_mcast = 1; + } + + rx_data->rss_engine_id = params->rss_engine_id; + + /* silent vlan removal */ + rx_data->silent_vlan_removal_flg = + test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags); + rx_data->silent_vlan_value = + cpu_to_le16(params->silent_removal_value); + rx_data->silent_vlan_mask = + cpu_to_le16(params->silent_removal_mask); + +} + +/* initialize the general, tx and rx parts of a queue object */ +static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct client_init_ramrod_data *data) +{ + bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, + &cmd_params->params.setup.gen_params, + &data->general, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_tx_data(cmd_params->q_obj, + &cmd_params->params.setup.txq_params, + &data->tx, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_rx_data(cmd_params->q_obj, + &cmd_params->params.setup.rxq_params, + &data->rx, + &cmd_params->params.setup.flags); + + bnx2x_q_fill_init_pause_data(cmd_params->q_obj, + &cmd_params->params.setup.pause_params, + &data->rx); +} + +/* initialize the general and tx parts of a tx-only queue object */ +static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp, + struct bnx2x_queue_state_params *cmd_params, + struct tx_queue_init_ramrod_data *data) +{ + bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj, + &cmd_params->params.tx_only.gen_params, + &data->general, + &cmd_params->params.tx_only.flags); + + bnx2x_q_fill_init_tx_data(cmd_params->q_obj, + &cmd_params->params.tx_only.txq_params, + &data->tx, + &cmd_params->params.tx_only.flags); + + DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",cmd_params->q_obj->cids[0], + data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi); +} + +/** + * bnx2x_q_init - init HW/FW queue + * + * @bp: device handle + * @params: + * + * HW/FW initial Queue configuration: + * - HC: Rx and Tx + * - CDU context validation + * + */ +static inline int bnx2x_q_init(struct bnx2x *bp, + struct 
bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct bnx2x_queue_init_params *init = &params->params.init;
+ u16 hc_usec;
+ u8 cos;
+
+ /* Tx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+ hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+ init->tx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+ hc_usec);
+ }
+
+ /* Rx HC configuration */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+ test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+ hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+ bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+ init->rx.sb_cq_index,
+ !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+ hc_usec);
+ }
+
+ /* Set CDU context validation values */
+ for (cos = 0; cos < o->max_cos; cos++) {
+ DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
+ o->cids[cos], cos);
+ DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
+ bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
+ }
+
+ /* As no ramrod is sent, complete the command immediately */
+ o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+ mmiowb();
+ smp_mb();
+
+ return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+ /*
+ * No need for an explicit memory barrier here: we would only
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read, and a full memory barrier is already placed there
+ * (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
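Note how bnx2x_q_init() converts the requested interrupt rate into the
microsecond timeout the status block expects: hc_usec = 1000000 / hc_rate.
A sketch of driving INIT with ~16 usec Rx coalescing (fp->fw_sb_id and the
status-block index name are assumed here, not defined by this hunk):

	q_params.cmd = BNX2X_Q_CMD_INIT;
	__set_bit(BNX2X_Q_FLG_HC, &q_params.params.init.rx.flags);
	__set_bit(BNX2X_Q_FLG_HC_EN, &q_params.params.init.rx.flags);
	q_params.params.init.rx.hc_rate = 1000000 / 16;	/* 62500 ints/s */
	q_params.params.init.rx.fw_sb_id = fp->fw_sb_id;
	q_params.params.init.rx.sb_cq_index = rx_cq_sb_index;	/* assumed */
	rc = bnx2x_queue_state_change(bp, &q_params);

Since no ramrod is posted for INIT, the command completes synchronously and
the queue object moves straight to BNX2X_Q_STATE_INITIALIZED.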
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_init_ramrod_data *rdata =
+ (struct client_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+ bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+ /*
+ * No need for an explicit memory barrier here: we would only
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read, and a full memory barrier is already placed there
+ * (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tx_queue_init_ramrod_data *rdata =
+ (struct tx_queue_init_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+ struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+ &params->params.tx_only;
+ u8 cid_index = tx_only_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
+ tx_only_params->gen_params.cos,
+ tx_only_params->gen_params.spcl_id);
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_setup_tx_only(bp, params, rdata);
+
+ DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
+ "sp-client id %d, cos %d\n",
+ o->cids[cid_index],
+ rdata->general.client_id,
+ rdata->general.sp_client_id, rdata->general.cos);
+
+ /*
+ * No need for an explicit memory barrier here: we would only
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read, and a full memory barrier is already placed there
+ * (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
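As an illustration of the tx-only flow: once a queue is ACTIVE, an extra CoS
is added with SETUP_TX_ONLY against the cid_index of the additional
connection. A sketch (fp fields and tx_ring_mapping are assumed names):

	q_params.q_obj = &fp->q_obj;
	q_params.cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	q_params.params.tx_only.cid_index = 1;	/* first tx-only connection */
	q_params.params.tx_only.gen_params.cos = 1;
	q_params.params.tx_only.gen_params.spcl_id = fp->cl_id;
	q_params.params.tx_only.txq_params.dscr_map = tx_ring_mapping;
	__set_bit(BNX2X_Q_FLG_COS, &q_params.params.tx_only.flags);

	rc = bnx2x_queue_state_change(bp, &q_params);	/* ACTIVE -> MULTI_COS */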
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_params *params,
+ struct client_update_ramrod_data *data)
+{
+ /* Client ID of the client to update */
+ data->client_id = obj->cl_id;
+
+ /* Function ID of the client to update */
+ data->func_id = obj->func_id;
+
+ /* Default VLAN value */
+ data->default_vlan = cpu_to_le16(params->def_vlan);
+
+ /* Inner VLAN stripping */
+ data->inner_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+ data->inner_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Outer VLAN stripping */
+ data->outer_vlan_removal_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+ data->outer_vlan_removal_change_flg =
+ test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+ &params->update_flags);
+
+ /* Drop packets that have source MAC that doesn't belong to this
+ * Queue.
+ */
+ data->anti_spoofing_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+ data->anti_spoofing_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+ /* Activate/Deactivate */
+ data->activate_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+ data->activate_change_flg =
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+ /* Enable default VLAN */
+ data->default_vlan_enable_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+ data->default_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &params->update_flags);
+
+ /* silent vlan removal */
+ data->silent_vlan_change_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &params->update_flags);
+ data->silent_vlan_removal_flg =
+ test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+ data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+ data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct client_update_ramrod_data *rdata =
+ (struct client_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 cid_index = update_params->cid_index;
+
+ if (cid_index >= o->max_cos) {
+ BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+ o->cl_id, cid_index);
+ return -EINVAL;
+ }
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_data(bp, o, update_params, rdata);
+
+ /*
+ * No need for an explicit memory barrier here: we would only
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read, and a full memory barrier is already placed there
+ * (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+ o->cids[cid_index], U64_HI(data_mapping),
+ U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp: device handle
+ * @params: state parameters
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp: device handle
+ * @params: state parameters
+ *
+ * implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ struct bnx2x_queue_update_params *update = &params->params.update;
+
+ memset(update, 0, sizeof(*update));
+
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+ return bnx2x_q_send_update(bp, params);
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+ struct bnx2x_queue_state_params *params)
+{
+ /* TODO: Not implemented yet.
*/ + return -1; +} + +static inline int bnx2x_q_send_halt(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, + o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id, + ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + u8 cid_idx = params->params.cfc_del.cid_index; + + if (cid_idx >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_idx); + return -EINVAL; + } + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, + o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_terminate(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + u8 cid_index = params->params.terminate.cid_index; + + if (cid_index >= o->max_cos) { + BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n", + o->cl_id, cid_index); + return -EINVAL; + } + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, + o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_q_send_empty(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + struct bnx2x_queue_sp_obj *o = params->q_obj; + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, + o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0, + ETH_CONNECTION_TYPE); +} + +static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_INIT: + return bnx2x_q_init(bp, params); + case BNX2X_Q_CMD_SETUP_TX_ONLY: + return bnx2x_q_send_setup_tx_only(bp, params); + case BNX2X_Q_CMD_DEACTIVATE: + return bnx2x_q_send_deactivate(bp, params); + case BNX2X_Q_CMD_ACTIVATE: + return bnx2x_q_send_activate(bp, params); + case BNX2X_Q_CMD_UPDATE: + return bnx2x_q_send_update(bp, params); + case BNX2X_Q_CMD_UPDATE_TPA: + return bnx2x_q_send_update_tpa(bp, params); + case BNX2X_Q_CMD_HALT: + return bnx2x_q_send_halt(bp, params); + case BNX2X_Q_CMD_CFC_DEL: + return bnx2x_q_send_cfc_del(bp, params); + case BNX2X_Q_CMD_TERMINATE: + return bnx2x_q_send_terminate(bp, params); + case BNX2X_Q_CMD_EMPTY: + return bnx2x_q_send_empty(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_SETUP: + return bnx2x_q_send_setup_e1x(bp, params); + case BNX2X_Q_CMD_INIT: + case BNX2X_Q_CMD_SETUP_TX_ONLY: + case BNX2X_Q_CMD_DEACTIVATE: + case BNX2X_Q_CMD_ACTIVATE: + case BNX2X_Q_CMD_UPDATE: + case BNX2X_Q_CMD_UPDATE_TPA: + case BNX2X_Q_CMD_HALT: + case BNX2X_Q_CMD_CFC_DEL: + case BNX2X_Q_CMD_TERMINATE: + case BNX2X_Q_CMD_EMPTY: + return bnx2x_queue_send_cmd_cmn(bp, params); + default: + BNX2X_ERR("Unknown command: %d\n", params->cmd); + return -EINVAL; + } +} + +static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp, + struct bnx2x_queue_state_params *params) +{ + switch (params->cmd) { + case BNX2X_Q_CMD_SETUP: + return bnx2x_q_send_setup_e2(bp, params); + case BNX2X_Q_CMD_INIT: + case BNX2X_Q_CMD_SETUP_TX_ONLY: + case BNX2X_Q_CMD_DEACTIVATE: + case BNX2X_Q_CMD_ACTIVATE: + case BNX2X_Q_CMD_UPDATE: + case BNX2X_Q_CMD_UPDATE_TPA: + case BNX2X_Q_CMD_HALT: + case BNX2X_Q_CMD_CFC_DEL: + case BNX2X_Q_CMD_TERMINATE: + case BNX2X_Q_CMD_EMPTY: + return 
bnx2x_queue_send_cmd_cmn(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
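The legal command sequences are encoded in bnx2x_queue_chk_transition()
below; for example, a regular queue is torn down with
HALT -> TERMINATE -> CFC_DEL, each step gated on the previous completion.
A sketch of such a caller:

	static const enum bnx2x_queue_cmd stop_seq[] = {
		BNX2X_Q_CMD_HALT, BNX2X_Q_CMD_TERMINATE, BNX2X_Q_CMD_CFC_DEL,
	};
	int i, rc;

	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	for (i = 0; i < ARRAY_SIZE(stop_seq); i++) {
		q_params.cmd = stop_seq[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc)
			return rc;	/* e.g. -EINVAL on an illegal transition */
	}
	/* the queue object is now back in BNX2X_Q_STATE_RESET */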
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular Queue
+ *
+ * @bp: device handle
+ * @o: queue state object
+ * @params: state parameters
+ *
+ * (not Forwarding)
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *o,
+ struct bnx2x_queue_state_params *params)
+{
+ enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+ enum bnx2x_queue_cmd cmd = params->cmd;
+ struct bnx2x_queue_update_params *update_params =
+ &params->params.update;
+ u8 next_tx_only = o->num_tx_only;
+
+ /*
+ * Forget all pending-for-completion commands if a driver-only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_Q_STATE_MAX;
+ }
+
+ /*
+ * Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_Q_STATE_RESET:
+ if (cmd == BNX2X_Q_CMD_INIT)
+ next_state = BNX2X_Q_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_Q_STATE_INITIALIZED:
+ if (cmd == BNX2X_Q_CMD_SETUP) {
+ if (test_bit(BNX2X_Q_FLG_ACTIVE,
+ &params->params.setup.flags))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_ACTIVE:
+ if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = 1;
+ }
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MULTI_COS:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
+
+ else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ next_tx_only = o->num_tx_only + 1;
+ }
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL) {
+ next_tx_only = o->num_tx_only - 1;
+ if (next_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ }
+
+ break;
+ case BNX2X_Q_STATE_INACTIVE:
+ if (cmd == BNX2X_Q_CMD_ACTIVATE)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+
+ else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+ (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+ next_state = BNX2X_Q_STATE_INACTIVE;
+
+ else if (cmd == BNX2X_Q_CMD_HALT)
+ next_state = BNX2X_Q_STATE_STOPPED;
+
+ else if (cmd == BNX2X_Q_CMD_UPDATE) {
+ /* If "active" state change is requested, update the
+ * state accordingly.
+ */
+ if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+ &update_params->update_flags) &&
+ test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+ &update_params->update_flags)) {
+ if (o->num_tx_only == 0)
+ next_state = BNX2X_Q_STATE_ACTIVE;
+ else /* tx only queues exist for this queue */
+ next_state = BNX2X_Q_STATE_MULTI_COS;
+ } else
+ next_state = BNX2X_Q_STATE_INACTIVE;
+ }
+
+ break;
+ case BNX2X_Q_STATE_STOPPED:
+ if (cmd == BNX2X_Q_CMD_TERMINATE)
+ next_state = BNX2X_Q_STATE_TERMINATED;
+
+ break;
+ case BNX2X_Q_STATE_TERMINATED:
+ if (cmd == BNX2X_Q_CMD_CFC_DEL)
+ next_state = BNX2X_Q_STATE_RESET;
+
+ break;
+ default:
+ BNX2X_ERR("Illegal state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_Q_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ o->next_tx_only = next_tx_only;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+ return -EINVAL;
+}
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
+ void *rdata,
+ dma_addr_t rdata_mapping, unsigned long type)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
+ BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
+
+ memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+ obj->max_cos = cid_cnt;
+ obj->cl_id = cl_id;
+ obj->func_id = func_id;
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+ obj->type = type;
+ obj->next_state = BNX2X_Q_STATE_MAX;
+
+ if (CHIP_IS_E1x(bp))
+ obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+ else
+ obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+ obj->check_transition = bnx2x_queue_chk_transition;
+
+ obj->complete_cmd = bnx2x_queue_comp_cmd;
+ obj->wait_comp = bnx2x_queue_wait_comp;
+ obj->set_pending = bnx2x_queue_set_pending;
+}
+
+void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ u32 cid, u8 index)
+{
+ obj->cids[index] = cid;
+}
+
+/********************** Function state object *********************************/
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o)
+{
+ /* in the middle of transaction - return INVALID state */
+ if (o->pending)
+ return BNX2X_F_STATE_MAX;
+
+ /*
+ * ensure the order of reading of o->pending and o->state:
+ * o->pending should be read first
+ */
+ rmb();
+
+ return o->state;
+}
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ return bnx2x_state_wait(bp, cmd, &o->pending);
+}
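A sketch of object initialization for a queue with one extra CoS; the cid
values and the rdata buffer are assumed to come from the caller, and cids[0]
is always the primary (Rx + Tx) connection:

	unsigned long q_type = 0;
	u32 cids[BNX2X_MULTI_TX_COS];

	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	cids[0] = leading_cid;	/* primary connection */
	cids[1] = tx_only_cid;	/* first tx-only connection */
	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, 2, BP_FUNC(bp),
			     rdata, rdata_mapping, q_type);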
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp: device handle
+ * @o: function state object
+ * @cmd: command that completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ unsigned long cur_pending = o->pending;
+
+ if (!test_and_clear_bit(cmd, &cur_pending)) {
+ BNX2X_ERR("Bad MC reply %d for func %d in state %d "
+ "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
+ o->state, cur_pending, o->next_state);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
+ "%d\n", cmd, BP_FUNC(bp), o->next_state);
+
+ o->state = o->next_state;
+ o->next_state = BNX2X_F_STATE_MAX;
+
+ /* It's important that o->state and o->next_state are
+ * updated before o->pending.
+ */
+ wmb();
+
+ clear_bit(cmd, &o->pending);
+ smp_mb__after_clear_bit();
+
+ return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp: device handle
+ * @o: function state object
+ * @cmd: command that completed
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ enum bnx2x_func_cmd cmd)
+{
+ /* Complete the state machine part first, check if it's a
+ * legal completion.
+ */
+ int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+ return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - perform function state machine transition
+ *
+ * @bp: device handle
+ * @o: function state object
+ * @params: state parameters
+ *
+ * It both checks if the requested command is legal in a current
+ * state and, if it's legal, sets a `next_state' in the object
+ * that will be used in the completion flow to set the `state'
+ * of the object.
+ *
+ * returns 0 if a requested command is a legal transition,
+ * -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *o,
+ struct bnx2x_func_state_params *params)
+{
+ enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+ enum bnx2x_func_cmd cmd = params->cmd;
+
+ /*
+ * Forget all pending-for-completion commands if a driver-only state
+ * transition has been requested.
+ */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ o->pending = 0;
+ o->next_state = BNX2X_F_STATE_MAX;
+ }
+
+ /*
+ * Don't allow a next state transition if we are in the middle of
+ * the previous one.
+ */
+ if (o->pending)
+ return -EBUSY;
+
+ switch (state) {
+ case BNX2X_F_STATE_RESET:
+ if (cmd == BNX2X_F_CMD_HW_INIT)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+
+ break;
+ case BNX2X_F_STATE_INITIALIZED:
+ if (cmd == BNX2X_F_CMD_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ else if (cmd == BNX2X_F_CMD_HW_RESET)
+ next_state = BNX2X_F_STATE_RESET;
+
+ break;
+ case BNX2X_F_STATE_STARTED:
+ if (cmd == BNX2X_F_CMD_STOP)
+ next_state = BNX2X_F_STATE_INITIALIZED;
+ else if (cmd == BNX2X_F_CMD_TX_STOP)
+ next_state = BNX2X_F_STATE_TX_STOPPED;
+
+ break;
+ case BNX2X_F_STATE_TX_STOPPED:
+ if (cmd == BNX2X_F_CMD_TX_START)
+ next_state = BNX2X_F_STATE_STARTED;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown state: %d\n", state);
+ }
+
+ /* Transition is assured */
+ if (next_state != BNX2X_F_STATE_MAX) {
+ DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+ state, cmd, next_state);
+ o->next_state = next_state;
+ return 0;
+ }
+
+ DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+ state, cmd);
+
+ return -EINVAL;
+}
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ *
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_port(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn_chip(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ int rc = drv->init_hw_cmn(bp);
+ if (rc)
+ return rc;
+
+ return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 load_code = params->params.hw_init.load_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+ int rc = 0;
+
+ DP(BNX2X_MSG_SP, "function %d load_code %x\n",
+ BP_ABS_FUNC(bp), load_code);
+
+ /* Prepare buffers for unzipping the FW */
+ rc = drv->gunzip_init(bp);
+ if (rc)
+ return rc;
+
+ /* Prepare FW */
+ rc = drv->init_fw(bp);
+ if (rc) {
+ BNX2X_ERR("Error loading firmware\n");
+ goto fw_init_err;
+ }
+
+ /* Handle the beginning of the COMMON_XXX phases separately...
*/
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+ rc = bnx2x_func_init_cmn_chip(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_COMMON:
+ rc = bnx2x_func_init_cmn(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = bnx2x_func_init_port(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = bnx2x_func_init_func(bp, drv);
+ if (rc)
+ goto init_hw_err;
+
+ break;
+ default:
+ BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+ rc = -EINVAL;
+ }
+
+init_hw_err:
+ drv->release_fw(bp);
+
+fw_init_err:
+ drv->gunzip_end(bp);
+
+ /* In case of success, complete the command immediately: no ramrods
+ * have been sent.
+ */
+ if (!rc)
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+ return rc;
+}
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ * !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() as the last thing
+ * reset_func does is pf_disable() thus disabling PGLUE_B, which
+ * makes any DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ drv->reset_hw_port(bp);
+ bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp: device handle
+ * @drv: driver specific operations
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+ const struct bnx2x_func_sp_drv_ops *drv)
+{
+ bnx2x_func_reset_port(bp, drv);
+ drv->reset_hw_cmn(bp);
+}
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ u32 reset_phase = params->params.hw_reset.reset_phase;
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+ DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
+ reset_phase);
+
+ switch (reset_phase) {
+ case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+ bnx2x_func_reset_cmn(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_PORT:
+ bnx2x_func_reset_port(bp, drv);
+ break;
+ case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+ bnx2x_func_reset_func(bp, drv);
+ break;
+ default:
+ BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+ reset_phase);
+ break;
+ }
+
+ /* Complete the command immediately: no ramrods have been sent.
*/
+ o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+ return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct function_start_data *rdata =
+ (struct function_start_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_start_params *start_params = &params->params.start;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data with provided parameters */
+ rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+ rdata->sd_vlan_tag = start_params->sd_vlan_tag;
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+
+ /*
+ * No need for an explicit memory barrier here: we would only
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer, which involves a memory
+ * read, and a full memory barrier is already placed there
+ * (inside bnx2x_sp_post()).
+ */
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
+ NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ struct flow_control_configuration *rdata =
+ (struct flow_control_configuration *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_func_tx_start_params *tx_start_params =
+ &params->params.tx_start;
+ int i;
+
+ memset(rdata, 0, sizeof(*rdata));
+
+ rdata->dcb_enabled = tx_start_params->dcb_enabled;
+ rdata->dcb_version = tx_start_params->dcb_version;
+ rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
+
+ for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+ rdata->traffic_type_to_priority_cos[i] =
+ tx_start_params->traffic_type_to_priority_cos[i];
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ switch (params->cmd) {
+ case BNX2X_F_CMD_HW_INIT:
+ return bnx2x_func_hw_init(bp, params);
+ case BNX2X_F_CMD_START:
+ return bnx2x_func_send_start(bp, params);
+ case BNX2X_F_CMD_STOP:
+ return bnx2x_func_send_stop(bp, params);
+ case BNX2X_F_CMD_HW_RESET:
+ return bnx2x_func_hw_reset(bp, params);
+ case BNX2X_F_CMD_TX_STOP:
+ return bnx2x_func_send_tx_stop(bp, params);
+ case BNX2X_F_CMD_TX_START:
+ return bnx2x_func_send_tx_start(bp, params);
+ default:
+ BNX2X_ERR("Unknown command: %d\n", params->cmd);
+ return -EINVAL;
+ }
+}
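A sketch of a START request as a consumer might issue it (bp->func_obj and
the mf_mode encoding are assumptions, not defined in this hunk):

	struct bnx2x_func_state_params func_params = {0};
	int rc;

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
	func_params.params.start.mf_mode = 0;	/* single-function, assumed */
	func_params.params.start.sd_vlan_tag = 0;
	func_params.params.start.network_cos_mode = 0;

	rc = bnx2x_func_state_change(bp, &func_params);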
+void bnx2x_init_func_obj(struct bnx2x *bp,
+ struct bnx2x_func_sp_obj *obj,
+ void *rdata, dma_addr_t rdata_mapping,
+ struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ mutex_init(&obj->one_pending_mutex);
+
+ obj->rdata = rdata;
+ obj->rdata_mapping = rdata_mapping;
+
+ obj->send_cmd = bnx2x_func_send_cmd;
+ obj->check_transition = bnx2x_func_chk_transition;
+ obj->complete_cmd = bnx2x_func_comp_cmd;
+ obj->wait_comp = bnx2x_func_wait_comp;
+
+ obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp: device handle
+ * @params: parameters to perform the transaction
+ *
+ * returns 0 in case of a successfully completed transition,
+ * negative error code in case of failure, positive
+ * (EBUSY) value if there is a completion that is
+ * still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous
+ * commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+ struct bnx2x_func_state_params *params)
+{
+ struct bnx2x_func_sp_obj *o = params->f_obj;
+ int rc;
+ enum bnx2x_func_cmd cmd = params->cmd;
+ unsigned long *pending = &o->pending;
+
+ mutex_lock(&o->one_pending_mutex);
+
+ /* Check that the requested transition is legal */
+ if (o->check_transition(bp, o, params)) {
+ mutex_unlock(&o->one_pending_mutex);
+ return -EINVAL;
+ }
+
+ /* Set "pending" bit */
+ set_bit(cmd, pending);
+
+ /* Don't send a command if only driver cleanup was requested */
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+ bnx2x_func_state_change_comp(bp, o, cmd);
+ mutex_unlock(&o->one_pending_mutex);
+ } else {
+ /* Send a ramrod */
+ rc = o->send_cmd(bp, params);
+
+ mutex_unlock(&o->one_pending_mutex);
+
+ if (rc) {
+ o->next_state = BNX2X_F_STATE_MAX;
+ clear_bit(cmd, pending);
+ smp_mb__after_clear_bit();
+ return rc;
+ }
+
+ if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+ rc = o->wait_comp(bp, o, cmd);
+ if (rc)
+ return rc;
+
+ return 0;
+ }
+ }
+
+ return !!test_bit(cmd, pending);
+}
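A sketch of the load path driving HW_INIT through the same entry point (the
load phase value comes from the MCP handshake):

	func_params.cmd = BNX2X_F_CMD_HW_INIT;
	func_params.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	rc = bnx2x_func_state_change(bp, &func_params);

Because bnx2x_func_hw_init() completes the command internally (no ramrod is
posted), the call returns 0 immediately on success even without
RAMROD_COMP_WAIT.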
diff --git a/drivers/net/bnx2x/bnx2x_sp.h b/drivers/net/bnx2x/bnx2x_sp.h
new file mode 100644
index 00000000000..9a517c2e9f1
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1297 @@
+/* bnx2x_sp.h: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+ RAMROD_TX,
+ RAMROD_RX,
+ /* Wait until all pending commands complete */
+ RAMROD_COMP_WAIT,
+ /* Don't send a ramrod, only update a registry */
+ RAMROD_DRV_CLR_ONLY,
+ /* Configure HW according to the current object state */
+ RAMROD_RESTORE,
+ /* Execute the next command now */
+ RAMROD_EXEC,
+ /*
+ * Don't add a new command and continue execution of postponed
+ * commands. If not set, a new command will be added to the
+ * pending commands list.
+ */
+ RAMROD_CONT,
+};
+
+typedef enum {
+ BNX2X_OBJ_TYPE_RX,
+ BNX2X_OBJ_TYPE_TX,
+ BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Filtering states */
+enum {
+ BNX2X_FILTER_MAC_PENDING,
+ BNX2X_FILTER_VLAN_PENDING,
+ BNX2X_FILTER_VLAN_MAC_PENDING,
+ BNX2X_FILTER_RX_MODE_PENDING,
+ BNX2X_FILTER_RX_MODE_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+ BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+ BNX2X_FILTER_FCOE_ETH_START_SCHED,
+ BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+ BNX2X_FILTER_MCAST_PENDING,
+ BNX2X_FILTER_MCAST_SCHED,
+ BNX2X_FILTER_RSS_CONF_PENDING,
+};
+
+struct bnx2x_raw_obj {
+ u8 func_id;
+
+ /* Queue params */
+ u8 cl_id;
+ u32 cid;
+
+ /* Ramrod data buffer params */
+ void *rdata;
+ dma_addr_t rdata_mapping;
+
+ /* Ramrod state params */
+ int state; /* "ramrod is pending" state bit */
+ unsigned long *pstate; /* pointer to state buffer */
+
+ bnx2x_obj_type obj_type;
+
+ int (*wait_comp)(struct bnx2x *bp,
+ struct bnx2x_raw_obj *o);
+
+ bool (*check_pending)(struct bnx2x_raw_obj *o);
+ void (*clear_pending)(struct bnx2x_raw_obj *o);
+ void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+};
+
+struct bnx2x_vlan_ramrod_data {
+ u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+ struct bnx2x_mac_ramrod_data mac;
+ struct bnx2x_vlan_ramrod_data vlan;
+ struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+ BNX2X_VLAN_MAC_ADD,
+ BNX2X_VLAN_MAC_DEL,
+ BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+ /* Requested command: BNX2X_VLAN_MAC_XX */
+ enum bnx2x_vlan_mac_cmd cmd;
+ /*
+ * used to contain the data-related vlan_mac_flags bits from
+ * the ramrod parameters.
+ */
+ unsigned long vlan_mac_flags;
+
+ /* Needed for MOVE command */
+ struct bnx2x_vlan_mac_obj *target_obj;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+ struct bnx2x_vlan_mac_data vlan_mac;
+
+ struct {
+ /* TODO */
+ } mcast;
+};
+
+struct bnx2x_exeq_elem {
+ struct list_head link;
+
+ /* Length of this element in the exe_chunk. */
+ int cmd_len;
+
+ union bnx2x_exe_queue_cmd_data cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+ union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+/**
+ * @return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct list_head *exe_chunk,
+ unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+ (*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+ /*
+ * Commands pending execution.
+ */
+ struct list_head exe_queue;
+
+ /*
+ * Commands pending completion.
+ */
+ struct list_head pending_comp;
+
+ spinlock_t lock;
+
+ /* Maximum length of commands' list for one execution */
+ int exe_chunk_len;
+
+ union bnx2x_qable_obj *owner;
+
+ /****** Virtual functions ******/
+ /**
+ * Called before commands execution for commands that are really
+ * going to be executed (after 'optimize').
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_validate validate;
+
+ /**
+ * This will try to cancel the current pending commands list
+ * considering the new command.
+ *
+ * Must run under exe_queue->lock
+ */
+ exe_q_optimize optimize;
+
+ /**
+ * Run the next commands chunk (owner specific).
+ */
+ exe_q_execute execute;
+
+ /**
+ * Return the exe_queue element containing the specific command
+ * if any. Otherwise return NULL.
+ */
+ exe_q_get get;
+};
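The four hooks above form the pipeline each queued command passes through.
In rough pseudo-C (a sketch of the intended flow only; the real
implementation lives in bnx2x_sp.c):

	/* under o->lock, on an execution kick: */
	elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem, link);

	rc = o->optimize(bp, o->owner, elem);	/* may cancel elem + its inverse */
	if (!rc) {
		rc = o->validate(bp, o->owner, elem);
		if (!rc) {
			list_move_tail(&elem->link, &o->pending_comp);
			rc = o->execute(bp, o->owner, &o->pending_comp,
					&ramrod_flags);
		}
	}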
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+ struct list_head link;
+
+ /*
+ * Used to store the cam offset used for the mac/vlan/vlan-mac.
+ * Relevant for 57710 and 57711 only. VLANs and MACs share the
+ * same CAM for these chips.
+ */
+ int cam_offset;
+
+ /* Needed for DEL and RESTORE flows */
+ unsigned long vlan_mac_flags;
+
+ union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+ BNX2X_UC_LIST_MAC,
+ BNX2X_ETH_MAC,
+ BNX2X_ISCSI_ETH_MAC,
+ BNX2X_NETQ_ETH_MAC,
+ BNX2X_DONT_CONSUME_CAM_CREDIT,
+ BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct bnx2x_vlan_mac_ramrod_params {
+ /* Object to run the command from */
+ struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+ /* General command flags: COMP_WAIT, etc. */
+ unsigned long ramrod_flags;
+
+ /* Command specific configuration request */
+ struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+ struct bnx2x_raw_obj raw;
+
+ /* Bookkeeping list: will prevent the addition of already existing
+ * entries.
+ */
+ struct list_head head;
+
+ /* TODO: Add its initialization in the init functions */
+ struct bnx2x_exe_queue_obj exe_queue;
+
+ /* MACs credit pool */
+ struct bnx2x_credit_pool_obj *macs_pool;
+
+ /* VLANs credit pool */
+ struct bnx2x_credit_pool_obj *vlans_pool;
+
+ /* RAMROD command to be used */
+ int ramrod_cmd;
+
+ /**
+ * Checks if ADD-ramrod with the given params may be performed.
+ *
+ * @return zero if the element may be added
+ */
+ int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if DEL-ramrod with the given params may be performed.
+ *
+ * @return the matching registry element if it may be deleted
+ */
+ struct bnx2x_vlan_mac_registry_elem *
+ (*check_del)(struct bnx2x_vlan_mac_obj *o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Checks if MOVE-ramrod with the given params may be performed.
+ *
+ * @return true if the element may be moved
+ */
+ bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+ struct bnx2x_vlan_mac_obj *dst_o,
+ union bnx2x_classification_ramrod_data *data);
+
+ /**
+ * Update the relevant credit object(s) (consume/return
+ * correspondingly).
+ */
+ bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+ bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+ bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
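+ /*
+ * Illustration (editorial sketch, not part of the object
+ * definition): an ADD command typically flows through the
+ * callbacks above as
+ * check_add() -> get_credit()/get_cam_offset() -> set_one_rule(),
+ * while DEL mirrors it with check_del() and then
+ * put_credit()/put_cam_offset() once the ramrod completes.
+ */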
+ /**
+ * Configures one rule in the ramrod data buffer.
+ */
+ void (*set_one_rule)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ struct bnx2x_exeq_elem *elem, int rule_idx,
+ int cam_offset);
+
+ /**
+ * Delete all configured elements having the given
+ * vlan_mac_flags specification. Assumes there are no
+ * pending-for-execution commands. Will schedule all currently
+ * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+ * specification for deletion and will use the given
+ * ramrod_flags for the last DEL operation.
+ *
+ * @param bp
+ * @param o
+ * @param ramrod_flags RAMROD_XX flags
+ *
+ * @return 0 if the last operation has completed successfully
+ * and there are no more elements left, positive value
+ * if there are pending for completion commands,
+ * negative value in case of failure.
+ */
+ int (*delete_all)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *vlan_mac_flags,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+ * configured elements list.
+ *
+ * @param bp
+ * @param p Command parameters (RAMROD_COMP_WAIT bit in
+ * ramrod_flags is only taken into an account)
+ * @param ppos a pointer to the cookie that should be given back in the
+ * next call to make function handle the next element. If
+ * *ppos is set to NULL it will restart the iterator.
+ * If returned *ppos == NULL this means that the last
+ * element has been handled.
+ *
+ * @return int
+ */
+ int (*restore)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p,
+ struct bnx2x_vlan_mac_registry_elem **ppos);
+
+ /**
+ * Should be called on a completion arrival.
+ *
+ * @param bp
+ * @param o
+ * @param cqe Completion element we are handling
+ * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+ * pending commands will be executed.
+ * RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+ * may also be set if needed.
+ *
+ * @return 0 if there are neither pending nor waiting for
+ * completion commands. Positive value if there are
+ * pending for execution or for completion commands.
+ * Negative value in case of an error (including an
+ * error in the cqe).
+ */
+ int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ union event_ring_elem *cqe,
+ unsigned long *ramrod_flags);
+
+ /**
+ * Wait for completion of all commands. Don't schedule new ones,
+ * just wait. It assumes that the completion code will schedule
+ * for new commands.
+ */
+ int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
+
+/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+ BNX2X_RX_MODE_FCOE_ETH,
+ BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+ BNX2X_ACCEPT_UNICAST,
+ BNX2X_ACCEPT_MULTICAST,
+ BNX2X_ACCEPT_ALL_UNICAST,
+ BNX2X_ACCEPT_ALL_MULTICAST,
+ BNX2X_ACCEPT_BROADCAST,
+ BNX2X_ACCEPT_UNMATCHED,
+ BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+ struct bnx2x_rx_mode_obj *rx_mode_obj;
+ unsigned long *pstate;
+ int state;
+ u8 cl_id;
+ u32 cid;
+ u8 func_id;
+ unsigned long ramrod_flags;
+ unsigned long rx_mode_flags;
+
+ /*
+ * rdata is either a pointer to eth_filter_rules_ramrod_data (e2) or to
+ * a tstorm_eth_mac_filter_config (e1x).
+ */ + void *rdata; + dma_addr_t rdata_mapping; + + /* Rx mode settings */ + unsigned long rx_accept_flags; + + /* internal switching settings */ + unsigned long tx_accept_flags; +}; + +struct bnx2x_rx_mode_obj { + int (*config_rx_mode)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); + + int (*wait_comp)(struct bnx2x *bp, + struct bnx2x_rx_mode_ramrod_params *p); +}; + +/********************** Set multicast group ***********************************/ + +struct bnx2x_mcast_list_elem { + struct list_head link; + u8 *mac; +}; + +union bnx2x_mcast_config_data { + u8 *mac; + u8 bin; /* used in a RESTORE flow */ +}; + +struct bnx2x_mcast_ramrod_params { + struct bnx2x_mcast_obj *mcast_obj; + + /* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */ + unsigned long ramrod_flags; + + struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */ + /** TODO: + * - rename it to macs_num. + * - Add a new command type for handling pending commands + * (remove "zero semantics"). + * + * Length of mcast_list. If zero and ADD_CONT command - post + * pending commands. + */ + int mcast_list_len; +}; + +enum { + BNX2X_MCAST_CMD_ADD, + BNX2X_MCAST_CMD_CONT, + BNX2X_MCAST_CMD_DEL, + BNX2X_MCAST_CMD_RESTORE, +}; + +struct bnx2x_mcast_obj { + struct bnx2x_raw_obj raw; + + union { + struct { + #define BNX2X_MCAST_BINS_NUM 256 + #define BNX2X_MCAST_VEC_SZ (BNX2X_MCAST_BINS_NUM / 64) + u64 vec[BNX2X_MCAST_VEC_SZ]; + + /** Number of BINs to clear. Should be updated + * immediately when a command arrives in order to + * properly create DEL commands. + */ + int num_bins_set; + } aprox_match; + + struct { + struct list_head macs; + int num_macs_set; + } exact_match; + } registry; + + /* Pending commands */ + struct list_head pending_cmds_head; + + /* A state that is set in raw.pstate, when there are pending commands */ + int sched_state; + + /* Maximal number of mcast MACs configured in one command */ + int max_cmd_len; + + /* Total number of currently pending MACs to configure: both + * in the pending commands list and in the current command. + */ + int total_pending_num; + + u8 engine_id; + + /** + * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above) + */ + int (*config_mcast)(struct bnx2x *bp, + struct bnx2x_mcast_ramrod_params *p, int cmd); + + /** + * Fills the ramrod data during the RESTORE flow. + * + * @param bp + * @param o + * @param start_idx Registry index to start from + * @param rdata_idx Index in the ramrod data to start from + * + * @return -1 if we handled the whole registry or index of the last + * handled registry element. + */ + int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, + int start_bin, int *rdata_idx); + + int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o, + struct bnx2x_mcast_ramrod_params *p, int cmd); + + void (*set_one_rule)(struct bnx2x *bp, + struct bnx2x_mcast_obj *o, int idx, + union bnx2x_mcast_config_data *cfg_data, int cmd); + + /** Checks if there are more mcast MACs to be set or a previous + * command is still pending. + */ + bool (*check_pending)(struct bnx2x_mcast_obj *o); + + /** + * Set/Clear/Check SCHEDULED state of the object + */ + void (*set_sched)(struct bnx2x_mcast_obj *o); + void (*clear_sched)(struct bnx2x_mcast_obj *o); + bool (*check_sched)(struct bnx2x_mcast_obj *o); + + /* Wait until all pending commands complete */ + int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o); + + /** + * Handle the internal object counters needed for proper + * commands handling. 
Checks that the provided parameters are
+ * feasible.
+ */
+ int (*validate)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+ /**
+ * Restore the values of internal counters in case of a failure.
+ */
+ void (*revert)(struct bnx2x *bp,
+ struct bnx2x_mcast_ramrod_params *p,
+ int old_num_bins);
+
+ int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+ void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+ /* Current amount of credit in the pool */
+ atomic_t credit;
+
+ /* Maximum allowed credit. put() will check against it. */
+ int pool_sz;
+
+ /*
+ * Allocate a pool table statically.
+ *
+ * Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
+ *
+ * The set bit in the table will mean that the entry is available.
+ */
+#define BNX2X_POOL_VEC_SIZE (MAX_MAC_CREDIT_E2 / 64)
+ u64 pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+ /* Base pool offset (initialized differently) */
+ int base_pool_offset;
+
+ /**
+ * Get the next free pool entry.
+ *
+ * @return true if there was a free entry in the pool
+ */
+ bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+ /**
+ * Return the entry back to the pool.
+ *
+ * @return true if entry is legal and has been successfully
+ * returned to the pool.
+ */
+ bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+ /**
+ * Get the requested amount of credit from the pool.
+ *
+ * @param cnt Amount of requested credit
+ * @return true if the operation is successful
+ */
+ bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+ /**
+ * Returns the credit to the pool.
+ *
+ * @param cnt Amount of credit to return
+ * @return true if the operation is successful
+ */
+ bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
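+ /*
+ * Illustration (editorial sketch, not part of the object
+ * definition): a classification rule that consumes one CAM
+ * credit would, in essence, do
+ *
+ * if (!pool->get(pool, 1))
+ * return -ENOSPC; /* out of CAM credit */
+ * if (!pool->get_entry(pool, &cam_offset))
+ * ... /* E1x only: also reserve a concrete CAM slot */
+ *
+ * and mirror it on deletion with put_entry()/put(pool, 1).
+ */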
+
+/*************************** RSS configuration ********************************/
+enum {
+	/* RSS_MODE bits are mutually exclusive */
+	BNX2X_RSS_MODE_DISABLED,
+	BNX2X_RSS_MODE_REGULAR,
+	BNX2X_RSS_MODE_VLAN_PRI,
+	BNX2X_RSS_MODE_E1HOV_PRI,
+	BNX2X_RSS_MODE_IP_DSCP,
+
+	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+	BNX2X_RSS_IPV4,
+	BNX2X_RSS_IPV4_TCP,
+	BNX2X_RSS_IPV6,
+	BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+	struct bnx2x_rss_config_obj *rss_obj;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* BNX2X_RSS_X bits */
+	unsigned long	rss_flags;
+
+	/* Number of hash bits to take into account */
+	u8		rss_result_mask;
+
+	/* Indirection table */
+	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* RSS hash values */
+	u32		rss_key[10];
+
+	/* valid only iff BNX2X_RSS_UPDATE_TOE is set */
+	u16		toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+	struct bnx2x_raw_obj	raw;
+
+	/* RSS engine to use */
+	u8			engine_id;
+
+	/* Last configured indirection table */
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	int (*config_rss)(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *p);
+};
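For intuition about how the fields above cooperate: rss_result_mask selects how many hash bits are used, and the masked hash indexes ind_table to pick an RX queue. A standalone sketch follows; the concrete table size and the round-robin fill are assumptions for illustration, not taken from this patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE	128	/* stands in for T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
	uint8_t ind_table[IND_TABLE_SIZE];
	uint8_t rss_result_mask = 0x7f;	/* take the low 7 hash bits */
	int num_rx_queues = 4;
	int i;

	/* spread the table entries across the RX queues round-robin */
	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = (uint8_t)(i % num_rx_queues);

	/* a frame whose RSS hash is rx_hash would land on this queue */
	uint32_t rx_hash = 0xdeadbeef;
	printf("hash 0x%" PRIx32 " -> queue %u\n", rx_hash,
	       ind_table[rx_hash & rss_result_mask]);	/* queue 3 */
	return 0;
}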
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_Q_UPDATE_IN_VLAN_REM,
+	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_ANTI_SPOOF,
+	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+	BNX2X_Q_UPDATE_ACTIVATE,
+	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+	BNX2X_Q_STATE_RESET,
+	BNX2X_Q_STATE_INITIALIZED,
+	BNX2X_Q_STATE_ACTIVE,
+	BNX2X_Q_STATE_MULTI_COS,
+	BNX2X_Q_STATE_MCOS_TERMINATED,
+	BNX2X_Q_STATE_INACTIVE,
+	BNX2X_Q_STATE_STOPPED,
+	BNX2X_Q_STATE_TERMINATED,
+	BNX2X_Q_STATE_FLRED,
+	BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+	BNX2X_Q_CMD_INIT,
+	BNX2X_Q_CMD_SETUP,
+	BNX2X_Q_CMD_SETUP_TX_ONLY,
+	BNX2X_Q_CMD_DEACTIVATE,
+	BNX2X_Q_CMD_ACTIVATE,
+	BNX2X_Q_CMD_UPDATE,
+	BNX2X_Q_CMD_UPDATE_TPA,
+	BNX2X_Q_CMD_HALT,
+	BNX2X_Q_CMD_CFC_DEL,
+	BNX2X_Q_CMD_TERMINATE,
+	BNX2X_Q_CMD_EMPTY,
+	BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+	BNX2X_Q_FLG_TPA,
+	BNX2X_Q_FLG_TPA_IPV6,
+	BNX2X_Q_FLG_STATS,
+	BNX2X_Q_FLG_ZERO_STATS,
+	BNX2X_Q_FLG_ACTIVE,
+	BNX2X_Q_FLG_OV,
+	BNX2X_Q_FLG_VLAN,
+	BNX2X_Q_FLG_COS,
+	BNX2X_Q_FLG_HC,
+	BNX2X_Q_FLG_HC_EN,
+	BNX2X_Q_FLG_DHC,
+	BNX2X_Q_FLG_FCOE,
+	BNX2X_Q_FLG_LEADING_RSS,
+	BNX2X_Q_FLG_MCAST,
+	BNX2X_Q_FLG_DEF_VLAN,
+	BNX2X_Q_FLG_TX_SWITCH,
+	BNX2X_Q_FLG_TX_SEC,
+	BNX2X_Q_FLG_ANTI_SPOOF,
+	BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: queue type may be a combination of the below. */
+enum bnx2x_q_type {
+	/** TODO: Consider moving both these flags into the init()
+	 *        ramrod params.
+	 */
+	BNX2X_Q_TYPE_HAS_RX,
+	BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX		0
+#define BNX2X_MULTI_TX_COS_E1X		1
+#define BNX2X_MULTI_TX_COS_E2_E3A0	2
+#define BNX2X_MULTI_TX_COS_E3B0		3
+#define BNX2X_MULTI_TX_COS		BNX2X_MULTI_TX_COS_E3B0
+
+
+struct bnx2x_queue_init_params {
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} tx;
+
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} rx;
+
+	/* CID context in the host memory */
+	struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+	/* maximum number of cos supported by hardware */
+	u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
+	u16		def_vlan;
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+	/* index within the tx_only cids of this queue object */
+	u8		cid_index;
+};
+
+struct rxq_pause_params {
+	u16		bd_th_lo;
+	u16		bd_th_hi;
+	u16		rcq_th_lo;
+	u16		rcq_th_hi;
+	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+	/* valid iff BNX2X_Q_FLG_STATS */
+	u8		stat_id;
+
+	u8		spcl_id;
+	u16		mtu;
+	u8		cos;
+};
+
+struct bnx2x_rxq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+	dma_addr_t	sge_map;
+	dma_addr_t	rcq_map;
+	dma_addr_t	rcq_np_map;
+
+	u16		drop_flags;
+	u16		buf_sz;
+	u8		fw_sb_id;
+	u8		cl_qzone_id;
+
+	/* valid iff BNX2X_Q_FLG_TPA */
+	u16		tpa_agg_sz;
+	u16		sge_buf_sz;
+	u8		max_sges_pkt;
+	u8		max_tpa_queues;
+	u8		rss_engine_id;
+
+	u8		cache_line_log;
+
+	u8		sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+
+	u8		fw_sb_id;
+	u8		sb_cq_index;
+	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
+	u16		traffic_type;
+	/* equal to the leading RSS client id, used for TX classification */
+	u8		tss_leading_cl_id;
+
+	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
+	u16		default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_txq_setup_params txq_params;
+	struct bnx2x_rxq_setup_params rxq_params;
+	struct rxq_pause_params pause_params;
+	unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+	struct bnx2x_general_setup_params	gen_params;
+	struct bnx2x_txq_setup_params		txq_params;
+	unsigned long				flags;
+	/* index within the tx_only cids of this queue object */
+	u8					cid_index;
+};
+
+struct bnx2x_queue_state_params {
+	struct bnx2x_queue_sp_obj *q_obj;
+
+	/* Current command */
+	enum bnx2x_queue_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_queue_update_params	update;
+		struct bnx2x_queue_setup_params		setup;
+		struct bnx2x_queue_init_params		init;
+		struct bnx2x_queue_setup_tx_only_params	tx_only;
+		struct bnx2x_queue_terminate_params	terminate;
+		struct bnx2x_queue_cfc_del_params	cfc_del;
+	} params;
+};
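All of the per-command parameter structures above are carried to a single entry point through the cmd field plus the params union of struct bnx2x_queue_state_params. Here is a toy standalone model of that tagged-union request pattern; every name in it is invented, and it is only meant to make the dispatch shape explicit.

#include <stdio.h>

enum q_cmd { Q_CMD_INIT, Q_CMD_SETUP, Q_CMD_UPDATE };

struct q_state_request {
	enum q_cmd cmd;		/* selects the live union member */
	union {
		struct { int hc_rate; }  init;
		struct { int mtu; }      setup;
		struct { int def_vlan; } update;
	} params;
};

static int q_state_change(const struct q_state_request *req)
{
	switch (req->cmd) {
	case Q_CMD_INIT:
		printf("init: hc_rate=%d\n", req->params.init.hc_rate);
		return 0;
	case Q_CMD_SETUP:
		printf("setup: mtu=%d\n", req->params.setup.mtu);
		return 0;
	case Q_CMD_UPDATE:
		printf("update: def_vlan=%d\n", req->params.update.def_vlan);
		return 0;
	}
	return -1;
}

int main(void)
{
	struct q_state_request req = { .cmd = Q_CMD_SETUP };

	req.params.setup.mtu = 1500;	/* only the setup member is valid */
	return q_state_change(&req);
}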
+
+struct bnx2x_queue_sp_obj {
+	u32		cids[BNX2X_MULTI_TX_COS];
+	u8		cl_id;
+	u8		func_id;
+
+	/*
+	 * number of traffic classes supported by queue.
+	 * The primary connection of the queue supports the first traffic
+	 * class. Any further traffic class is supported by a tx-only
+	 * connection.
+	 *
+	 * Therefore max_cos is also the number of valid entries in the
+	 * cids array.
+	 */
+	u8 max_cos;
+	u8 num_tx_only, next_tx_only;
+
+	enum bnx2x_q_state state, next_state;
+
+	/* bits from enum bnx2x_q_type */
+	unsigned long	type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long	pending;
+
+	/* Buffer to use as ramrod data and its mapping */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Sets the pending bit according to the requested transition.
+	 */
+	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+			   struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_queue_sp_obj *o,
+			 enum bnx2x_queue_cmd cmd);
+};
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+	BNX2X_F_STATE_RESET,
+	BNX2X_F_STATE_INITIALIZED,
+	BNX2X_F_STATE_STARTED,
+	BNX2X_F_STATE_TX_STOPPED,
+	BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+	BNX2X_F_CMD_HW_INIT,
+	BNX2X_F_CMD_START,
+	BNX2X_F_CMD_STOP,
+	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_TX_STOP,
+	BNX2X_F_CMD_TX_START,
+	BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+	/* Multi Function mode:
+	 *	- Single Function
+	 *	- Switch Dependent
+	 *	- Switch Independent
+	 */
+	u16 mf_mode;
+
+	/* Switch Dependent mode outer VLAN tag */
+	u16 sd_vlan_tag;
+
+	/* Function cos mode */
+	u8 network_cos_mode;
+};
+
+struct bnx2x_func_tx_start_params {
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+	u8 dcb_enabled;
+	u8 dcb_version;
+	u8 dont_add_pri_0_en;
+};
+
+struct bnx2x_func_state_params {
+	struct bnx2x_func_sp_obj *f_obj;
+
+	/* Current command */
+	enum bnx2x_func_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_func_hw_init_params hw_init;
+		struct bnx2x_func_hw_reset_params hw_reset;
+		struct bnx2x_func_start_params start;
+		struct bnx2x_func_tx_start_params tx_start;
+	} params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+	/* Init tool + runtime initialization:
+	 *	- Common Chip
+	 *	- Common (per Path)
+	 *	- Port
+	 *	- Function phases
+	 */
+	int (*init_hw_cmn_chip)(struct bnx2x *bp);
+	int (*init_hw_cmn)(struct bnx2x *bp);
+	int (*init_hw_port)(struct bnx2x *bp);
+	int (*init_hw_func)(struct bnx2x *bp);
+
+	/* Reset Function HW: Common, Port, Function phases. */
+	void (*reset_hw_cmn)(struct bnx2x *bp);
+	void (*reset_hw_port)(struct bnx2x *bp);
+	void (*reset_hw_func)(struct bnx2x *bp);
+
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x *bp);
+	void (*gunzip_end)(struct bnx2x *bp);
+
+	/* Prepare/Release FW resources */
+	int (*init_fw)(struct bnx2x *bp);
+	void (*release_fw)(struct bnx2x *bp);
+};
+
+struct bnx2x_func_sp_obj {
+	enum bnx2x_func_state	state, next_state;
+
+	/* BNX2X_FUNC_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long		pending;
+
+	/* Buffer to use as ramrod data and its mapping */
+	void			*rdata;
+	dma_addr_t		rdata_mapping;
+
+	/* this mutex validates that when the pending flag is taken,
+	 * the next ramrod to be sent will be the one that set the
+	 * pending bit
+	 */
+	struct mutex		one_pending_mutex;
+
+	/* Driver interface */
+	struct bnx2x_func_sp_drv_ops	*drv;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_func_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				struct bnx2x_func_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_func_sp_obj *o,
+			    enum bnx2x_func_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+			 enum bnx2x_func_cmd cmd);
+};
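Both bnx2x_queue_sp_obj and bnx2x_func_sp_obj above describe a "one pending" command model: a new command may be posted only when no other command is in flight, and the completion handler clears the pending bit again. A minimal single-threaded sketch of that life cycle follows. All names are invented, and plain C stands in for the kernel's atomic bitops, so this is not safe under real concurrency.

#include <stdbool.h>
#include <stdio.h>

static unsigned long pending;	/* one bit per command kind */

static bool try_send_cmd(int cmd_bit)
{
	unsigned long mask = 1UL << cmd_bit;

	if (pending & mask)	/* previous command still in flight */
		return false;
	pending |= mask;	/* mark the command as pending */
	/* ... post the ramrod to the chip here ... */
	return true;
}

static void complete_cmd(int cmd_bit)
{
	pending &= ~(1UL << cmd_bit);	/* ramrod completion arrived */
}

int main(void)
{
	printf("first send:  %s\n", try_send_cmd(3) ? "ok" : "busy");
	printf("second send: %s\n", try_send_cmd(3) ? "ok" : "busy");
	complete_cmd(3);
	printf("third send:  %s\n", try_send_cmd(3) ? "ok" : "busy");
	return 0;
}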
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+	struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params);
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+					   struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+			  u8 cid_cnt, u8 func_id, void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *p,
+			struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 - if the operation was successful and there are no pending
+ *         completions,
+ *         positive number - if there are pending completions,
+ *         negative - if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
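The tri-state return convention documented above (0 done, positive pending, negative error) is easy to get wrong in callers, so a minimal standalone illustration of the intended dispatch may help. The stub merely stands in for bnx2x_config_rx_mode(); none of this is driver code.

#include <stdio.h>

/* Toy stand-in returning -1, 0 or 1 to mimic the three documented
 * outcomes of bnx2x_config_rx_mode().
 */
static int config_rx_mode_stub(int simulate)
{
	return simulate;
}

int main(void)
{
	int rc = config_rx_mode_stub(1);

	if (rc < 0)
		printf("error %d\n", rc);	/* hard failure */
	else if (rc > 0)
		printf("pending: caller must wait for the completion\n");
	else
		printf("done\n");		/* completed synchronously */
	return 0;
}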
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate,
+			  bnx2x_obj_type type);
+
+/**
+ * Configure multicast MACs list. May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
+ * configuration, continue to execute the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending or if the number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * @param bp
+ * @param p
+ * @param command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending
+ *         completions, negative if there were errors, positive if there
+ *         are pending completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p, u8 func_id,
+				 u8 func_num);
+
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type);
+
+/**
+ * Updates RSS configuration according to provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param bp
+ * @param ind_table buffer to fill with the current indirection
+ *                  table content. Should be at least
+ *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table);
+
+#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index e535bfa0894..771f6803b23 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -14,120 +14,11 @@
  * Statistics and Link management by Yitchak Gertner
  *
  */
-#include "bnx2x_cmn.h"
 #include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
 
-/* Statistics */
-/****************************************************************************
-* Macros
-****************************************************************************/
-
-/* sum[hi:lo] += add[hi:lo] */
-#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
-	do { \
-		s_lo += a_lo; \
-		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
-	} while (0)
-
-/* difference = minuend - subtrahend */
-#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
-	do { \
-		if (m_lo < s_lo) { \
-			/* underflow */ \
-			d_hi = m_hi - s_hi; \
-			if (d_hi > 0) { \
-				/* we can 'loan' 1 */ \
-				d_hi--; \
-				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
-			} else { \
-				/* m_hi <= s_hi */ \
-				d_hi = 0; \
-				d_lo = 0; \
-			} \
-		} else { \
-			/* m_lo >= s_lo */ \
-			if (m_hi < s_hi) { \
-				d_hi = 0; \
-				d_lo = 0; \
-			} else { \
-				/* m_hi >= s_hi */ \
-				d_hi = m_hi - s_hi; \
-				d_lo = m_lo - s_lo; \
-			} \
-		} \
-	} while (0)
-
-#define UPDATE_STAT64(s, t) \
-	do { \
-		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
-			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
-		pstats->mac_stx[0].t##_hi = new->s##_hi; \
-		pstats->mac_stx[0].t##_lo = new->s##_lo; \
-		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
-		       pstats->mac_stx[1].t##_lo, diff.lo); \
-	} while (0)
-
-#define UPDATE_STAT64_NIG(s, t) \
-	do { \
-		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
-			diff.lo, new->s##_lo, old->s##_lo); \
-		ADD_64(estats->t##_hi, diff.hi, \
-		       estats->t##_lo, diff.lo); \
-	} while (0)
-
-/* sum[hi:lo] += add */
-#define ADD_EXTEND_64(s_hi, s_lo, a) \
-	do { \
-		s_lo += a; \
-		s_hi += (s_lo < a) ? 1 : 0; \
-	} while (0)
-
-#define UPDATE_EXTEND_STAT(s) \
-	do { \
-		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
-			      pstats->mac_stx[1].s##_lo, \
-			      new->s); \
-	} while (0)
-
-#define UPDATE_EXTEND_TSTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
-		old_tclient->s = tclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-#define UPDATE_EXTEND_USTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-		old_uclient->s = uclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-#define UPDATE_EXTEND_XSTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
-		old_xclient->s = xclient->s; \
-		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
-
-/* minuend -= subtrahend */
-#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
-	do { \
-		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
-	} while (0)
-
-/* minuend[hi:lo] -= subtrahend */
-#define SUB_EXTEND_64(m_hi, m_lo, s) \
-	do { \
-		SUB_64(m_hi, 0, m_lo, s); \
-	} while (0)
-
-#define SUB_EXTEND_USTAT(s, t) \
-	do { \
-		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
-		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
-	} while (0)
+
+/* Statistics */
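These macros are only being moved: they reappear in bnx2x_stats.h at the end of this diff. For readers unfamiliar with the hi/lo split, the standalone program below inlines the same expressions ADD_64 and DIFF_64 use, showing the carry test (the low sum wrapped below the addend) and the borrow ("loan") path. It is illustrative only and prints 0x0000000100000010 and 0x00000000fffffff0.

#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ADD_64: sum the low halves first; carry into the high half
	 * happened iff the low sum wrapped below the addend.
	 */
	uint32_t s_hi = 0x0, s_lo = 0xfffffff0u;
	uint32_t a_hi = 0x0, a_lo = 0x20;

	s_lo += a_lo;
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
	printf("sum  = 0x%08" PRIx32 "%08" PRIx32 "\n", s_hi, s_lo);

	/* DIFF_64: when the low minuend is smaller, one unit is loaned
	 * from the high half; an overall underflow clamps to zero.
	 */
	uint32_t m_hi = 0x1, m_lo = 0x10;	/* minuend */
	uint32_t x_hi = 0x0, x_lo = 0x20;	/* subtrahend */
	uint32_t d_hi, d_lo;

	if (m_lo < x_lo) {
		d_hi = m_hi - x_hi;
		if (d_hi > 0) {
			d_hi--;			/* we can 'loan' 1 */
			d_lo = m_lo + (UINT_MAX - x_lo) + 1;
		} else {
			d_hi = 0;		/* would underflow */
			d_lo = 0;
		}
	} else if (m_hi < x_hi) {
		d_hi = 0;
		d_lo = 0;
	} else {
		d_hi = m_hi - x_hi;
		d_lo = m_lo - x_lo;
	}
	printf("diff = 0x%08" PRIx32 "%08" PRIx32 "\n", d_hi, d_lo);
	return 0;
}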
 
 /*
  * General service functions
@@ -149,12 +40,16 @@ static inline long bnx2x_hilo(u32 *hiref)
  * Init service functions
  */
 
-
+/* Post the next statistics ramrod. Protect it with the spin lock in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
 static void bnx2x_storm_stats_post(struct bnx2x *bp)
 {
 	if (!bp->stats_pending) {
-		struct common_query_ramrod_data ramrod_data = {0};
-		int i, rc;
+		int rc;
 
 		spin_lock_bh(&bp->stats_lock);
 
@@ -163,14 +58,19 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
 			return;
 		}
 
-		ramrod_data.drv_counter = bp->stats_counter++;
-		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
-		for_each_eth_queue(bp, i)
-			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
+		bp->fw_stats_req->hdr.drv_stats_counter =
+			cpu_to_le16(bp->stats_counter++);
+		DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+			bp->fw_stats_req->hdr.drv_stats_counter);
+
+
+
+		/* send FW stats ramrod */
 		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
-				   ((u32 *)&ramrod_data)[1],
-				   ((u32 *)&ramrod_data)[0], 1);
+				   U64_HI(bp->fw_stats_req_mapping),
+				   U64_LO(bp->fw_stats_req_mapping),
+				   NONE_CONNECTION_TYPE);
 		if (rc == 0)
 			bp->stats_pending = 1;
@@ -230,7 +130,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
 			break;
 		}
 		cnt--;
-		msleep(1);
+		usleep_range(1000, 1000);
 	}
 	return 1;
 }
@@ -338,69 +238,8 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
 	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
 				   true, DMAE_COMP_GRC);
 
-	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
-
-		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
-				   NIG_REG_INGRESS_BMAC0_MEM);
-
-		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
- BIGMAC_REGISTER_TX_STAT_GTBYT */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - if (CHIP_IS_E1x(bp)) { - dmae->src_addr_lo = (mac_addr + - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; - dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; - } else { - dmae->src_addr_lo = (mac_addr + - BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; - dmae->len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - - BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; - } - - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - /* BIGMAC_REGISTER_RX_STAT_GR64 .. - BIGMAC_REGISTER_RX_STAT_GRIPJ */ - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_hi = 0; - if (CHIP_IS_E1x(bp)) { - dmae->src_addr_lo = (mac_addr + - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; - dmae->dst_addr_lo = - U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct bmac1_stats, rx_stat_gr64_lo)); - dmae->dst_addr_hi = - U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct bmac1_stats, rx_stat_gr64_lo)); - dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; - } else { - dmae->src_addr_lo = - (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; - dmae->dst_addr_lo = - U64_LO(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct bmac2_stats, rx_stat_gr64_lo)); - dmae->dst_addr_hi = - U64_HI(bnx2x_sp_mapping(bp, mac_stats) + - offsetof(struct bmac2_stats, rx_stat_gr64_lo)); - dmae->len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - - BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; - } - - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { - + /* EMAC is special */ + if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ @@ -445,46 +284,122 @@ static void bnx2x_port_stats_init(struct bnx2x *bp) dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; + } else { + u32 tx_src_addr_lo, rx_src_addr_lo; + u16 rx_len, tx_len; + + /* configure the params according to MAC type */ + switch (bp->link_vars.mac_type) { + case MAC_TYPE_BMAC: + mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : + NIG_REG_INGRESS_BMAC0_MEM); + + /* BIGMAC_REGISTER_TX_STAT_GTPKT .. + BIGMAC_REGISTER_TX_STAT_GTBYT */ + if (CHIP_IS_E1x(bp)) { + tx_src_addr_lo = (mac_addr + + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; + rx_src_addr_lo = (mac_addr + + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; + } else { + tx_src_addr_lo = (mac_addr + + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; + tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; + rx_src_addr_lo = (mac_addr + + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; + rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; + } + break; + + case MAC_TYPE_UMAC: /* handled by MSTAT */ + case MAC_TYPE_XMAC: /* handled by MSTAT */ + default: + mac_addr = port ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; + tx_src_addr_lo = (mac_addr + + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; + rx_src_addr_lo = (mac_addr + + MSTAT_REG_RX_STAT_GR64_LO) >> 2; + tx_len = sizeof(bp->slowpath-> + mac_stats.mstat_stats.stats_tx) >> 2; + rx_len = sizeof(bp->slowpath-> + mac_stats.mstat_stats.stats_rx) >> 2; + break; + } + + /* TX stats */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = tx_src_addr_lo; + dmae->src_addr_hi = 0; + dmae->len = tx_len; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + /* RX stats */ + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_hi = 0; + dmae->src_addr_lo = rx_src_addr_lo; + dmae->dst_addr_lo = + U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); + dmae->dst_addr_hi = + U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); + dmae->len = rx_len; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; } /* NIG */ + if (!CHIP_IS_E3(bp)) { + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : + NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt0_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); + dmae->opcode = opcode; + dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : + NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; + dmae->src_addr_hi = 0; + dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + + offsetof(struct nig_stats, egress_mac_pkt1_lo)); + dmae->len = (2*sizeof(u32)) >> 2; + dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; + dmae->comp_addr_hi = 0; + dmae->comp_val = 1; + } + dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; + dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, + true, DMAE_COMP_PCI); dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : NIG_REG_STAT0_BRB_DISCARD) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = opcode; - dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT0 : - NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt0_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt0_lo)); - dmae->len = (2*sizeof(u32)) >> 2; - dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; - dmae->comp_addr_hi = 0; - dmae->comp_val = 1; - - dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); - dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, - true, DMAE_COMP_PCI); - dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : - NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; - dmae->src_addr_hi = 0; - dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt1_lo)); - dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + - offsetof(struct nig_stats, egress_mac_pkt1_lo)); - dmae->len = (2*sizeof(u32)) >> 2; dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; @@ -566,7 +481,8 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); + UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); @@ -580,13 +496,13 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) tx_stat_etherstatspkts512octetsto1023octets); UPDATE_STAT64(tx_stat_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); - UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); - UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); - UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); - UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); } else { struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); @@ -600,7 +516,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); - UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); + UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); @@ -614,19 +530,96 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp) tx_stat_etherstatspkts512octetsto1023octets); UPDATE_STAT64(tx_stat_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); - UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047); - UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095); - UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216); - UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383); + UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); + UPDATE_STAT64(tx_stat_gt4095, 
tx_stat_mac_4095); + UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); + UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); - UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl); + UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); } estats->pause_frames_received_hi = - pstats->mac_stx[1].rx_stat_bmac_xpf_hi; + pstats->mac_stx[1].rx_stat_mac_xpf_hi; estats->pause_frames_received_lo = - pstats->mac_stx[1].rx_stat_bmac_xpf_lo; + pstats->mac_stx[1].rx_stat_mac_xpf_lo; + + estats->pause_frames_sent_hi = + pstats->mac_stx[1].tx_stat_outxoffsent_hi; + estats->pause_frames_sent_lo = + pstats->mac_stx[1].tx_stat_outxoffsent_lo; +} + +static void bnx2x_mstat_stats_update(struct bnx2x *bp) +{ + struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); + struct bnx2x_eth_stats *estats = &bp->eth_stats; + + struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); + + ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); + ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); + ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); + ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); + ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); + ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); + ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); + ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); + + + ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); + ADD_STAT64(stats_tx.tx_gt127, + tx_stat_etherstatspkts65octetsto127octets); + ADD_STAT64(stats_tx.tx_gt255, + tx_stat_etherstatspkts128octetsto255octets); + ADD_STAT64(stats_tx.tx_gt511, + tx_stat_etherstatspkts256octetsto511octets); + ADD_STAT64(stats_tx.tx_gt1023, + tx_stat_etherstatspkts512octetsto1023octets); + ADD_STAT64(stats_tx.tx_gt1518, + tx_stat_etherstatspkts1024octetsto1522octets); + ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); + + ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); + ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); + ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); + + ADD_STAT64(stats_tx.tx_gterr, + tx_stat_dot3statsinternalmactransmiterrors); + ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); + + ADD_64(estats->etherstatspkts1024octetsto1522octets_hi, + new->stats_tx.tx_gt1518_hi, + estats->etherstatspkts1024octetsto1522octets_lo, + new->stats_tx.tx_gt1518_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt2047_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt2047_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt4095_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt4095_lo); + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt9216_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt9216_lo); + + + ADD_64(estats->etherstatspktsover1522octets_hi, + new->stats_tx.tx_gt16383_hi, + estats->etherstatspktsover1522octets_lo, + new->stats_tx.tx_gt16383_lo); + + estats->pause_frames_received_hi = + pstats->mac_stx[1].rx_stat_mac_xpf_hi; + estats->pause_frames_received_lo = + pstats->mac_stx[1].rx_stat_mac_xpf_lo; estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; @@ -702,15 +695,26 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) u32 hi; } diff; - if (bp->link_vars.mac_type == MAC_TYPE_BMAC) + switch (bp->link_vars.mac_type) { + 
case MAC_TYPE_BMAC: bnx2x_bmac_stats_update(bp); + break; - else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) + case MAC_TYPE_EMAC: bnx2x_emac_stats_update(bp); + break; + + case MAC_TYPE_UMAC: + case MAC_TYPE_XMAC: + bnx2x_mstat_stats_update(bp); + break; - else { /* unreached */ + case MAC_TYPE_NONE: /* unreached */ BNX2X_ERR("stats updated by DMAE but no MAC active\n"); return -1; + + default: /* unreached */ + BNX2X_ERR("Unknown MAC type\n"); } ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, @@ -718,9 +722,12 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, new->brb_truncate - old->brb_truncate); - UPDATE_STAT64_NIG(egress_mac_pkt0, + if (!CHIP_IS_E3(bp)) { + UPDATE_STAT64_NIG(egress_mac_pkt0, etherstatspkts1024octetsto1522octets); - UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); + UPDATE_STAT64_NIG(egress_mac_pkt1, + etherstatspktsover1522octets); + } memcpy(old, new, sizeof(struct nig_stats)); @@ -746,11 +753,13 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp) static int bnx2x_storm_stats_update(struct bnx2x *bp) { - struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats); struct tstorm_per_port_stats *tport = - &stats->tstorm_common.port_statistics; + &bp->fw_stats_data->port.tstorm_port_statistics; + struct tstorm_per_pf_stats *tfunc = + &bp->fw_stats_data->pf.tstorm_pf_statistics; struct host_func_stats *fstats = bnx2x_sp(bp, func_stats); struct bnx2x_eth_stats *estats = &bp->eth_stats; + struct stats_counter *counters = &bp->fw_stats_data->storm_counters; int i; u16 cur_stats_counter; @@ -761,6 +770,35 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) cur_stats_counter = bp->stats_counter - 1; spin_unlock_bh(&bp->stats_lock); + /* are storm stats valid? 
*/ + if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by xstorm" + " xstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->xstats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by ustorm" + " ustorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->ustats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by cstorm" + " cstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->cstats_counter), bp->stats_counter); + return -EAGAIN; + } + + if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { + DP(BNX2X_MSG_STATS, "stats not updated by tstorm" + " tstorm counter (0x%x) != stats_counter (0x%x)\n", + le16_to_cpu(counters->tstats_counter), bp->stats_counter); + return -EAGAIN; + } + memcpy(&(fstats->total_bytes_received_hi), &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi), sizeof(struct host_func_stats) - 2*sizeof(u32)); @@ -770,94 +808,84 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) estats->etherstatsoverrsizepkts_lo = 0; estats->no_buff_discard_hi = 0; estats->no_buff_discard_lo = 0; + estats->total_tpa_aggregations_hi = 0; + estats->total_tpa_aggregations_lo = 0; + estats->total_tpa_aggregated_frames_hi = 0; + estats->total_tpa_aggregated_frames_lo = 0; + estats->total_tpa_bytes_hi = 0; + estats->total_tpa_bytes_lo = 0; for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - int cl_id = fp->cl_id; - struct tstorm_per_client_stats *tclient = - &stats->tstorm_common.client_statistics[cl_id]; - struct tstorm_per_client_stats *old_tclient = &fp->old_tclient; - struct ustorm_per_client_stats *uclient = - &stats->ustorm_common.client_statistics[cl_id]; - struct ustorm_per_client_stats *old_uclient = &fp->old_uclient; - struct xstorm_per_client_stats *xclient = - &stats->xstorm_common.client_statistics[cl_id]; - struct xstorm_per_client_stats *old_xclient = &fp->old_xclient; + struct tstorm_per_queue_stats *tclient = + &bp->fw_stats_data->queue_stats[i]. + tstorm_queue_statistics; + struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; + struct ustorm_per_queue_stats *uclient = + &bp->fw_stats_data->queue_stats[i]. + ustorm_queue_statistics; + struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; + struct xstorm_per_queue_stats *xclient = + &bp->fw_stats_data->queue_stats[i]. + xstorm_queue_statistics; + struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; u32 diff; - /* are storm stats valid? 
*/ - if (le16_to_cpu(xclient->stats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" - " xstorm counter (0x%x) != stats_counter (0x%x)\n", - i, xclient->stats_counter, cur_stats_counter + 1); - return -1; - } - if (le16_to_cpu(tclient->stats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" - " tstorm counter (0x%x) != stats_counter (0x%x)\n", - i, tclient->stats_counter, cur_stats_counter + 1); - return -2; - } - if (le16_to_cpu(uclient->stats_counter) != cur_stats_counter) { - DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" - " ustorm counter (0x%x) != stats_counter (0x%x)\n", - i, uclient->stats_counter, cur_stats_counter + 1); - return -4; - } + DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, " + "bcast_sent 0x%x mcast_sent 0x%x\n", + i, xclient->ucast_pkts_sent, + xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); + + DP(BNX2X_MSG_STATS, "---------------\n"); + + qstats->total_broadcast_bytes_received_hi = + le32_to_cpu(tclient->rcv_bcast_bytes.hi); + qstats->total_broadcast_bytes_received_lo = + le32_to_cpu(tclient->rcv_bcast_bytes.lo); + qstats->total_multicast_bytes_received_hi = + le32_to_cpu(tclient->rcv_mcast_bytes.hi); + qstats->total_multicast_bytes_received_lo = + le32_to_cpu(tclient->rcv_mcast_bytes.lo); + + qstats->total_unicast_bytes_received_hi = + le32_to_cpu(tclient->rcv_ucast_bytes.hi); + qstats->total_unicast_bytes_received_lo = + le32_to_cpu(tclient->rcv_ucast_bytes.lo); + + /* + * sum to total_bytes_received all + * unicast/multicast/broadcast + */ qstats->total_bytes_received_hi = - le32_to_cpu(tclient->rcv_broadcast_bytes.hi); + qstats->total_broadcast_bytes_received_hi; qstats->total_bytes_received_lo = - le32_to_cpu(tclient->rcv_broadcast_bytes.lo); + qstats->total_broadcast_bytes_received_lo; ADD_64(qstats->total_bytes_received_hi, - le32_to_cpu(tclient->rcv_multicast_bytes.hi), + qstats->total_multicast_bytes_received_hi, qstats->total_bytes_received_lo, - le32_to_cpu(tclient->rcv_multicast_bytes.lo)); + qstats->total_multicast_bytes_received_lo); ADD_64(qstats->total_bytes_received_hi, - le32_to_cpu(tclient->rcv_unicast_bytes.hi), + qstats->total_unicast_bytes_received_hi, qstats->total_bytes_received_lo, - le32_to_cpu(tclient->rcv_unicast_bytes.lo)); - - SUB_64(qstats->total_bytes_received_hi, - le32_to_cpu(uclient->bcast_no_buff_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(uclient->bcast_no_buff_bytes.lo)); - - SUB_64(qstats->total_bytes_received_hi, - le32_to_cpu(uclient->mcast_no_buff_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(uclient->mcast_no_buff_bytes.lo)); - - SUB_64(qstats->total_bytes_received_hi, - le32_to_cpu(uclient->ucast_no_buff_bytes.hi), - qstats->total_bytes_received_lo, - le32_to_cpu(uclient->ucast_no_buff_bytes.lo)); + qstats->total_unicast_bytes_received_lo); qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi; qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; - qstats->error_bytes_received_hi = - le32_to_cpu(tclient->rcv_error_bytes.hi); - qstats->error_bytes_received_lo = - le32_to_cpu(tclient->rcv_error_bytes.lo); - - ADD_64(qstats->total_bytes_received_hi, - qstats->error_bytes_received_hi, - qstats->total_bytes_received_lo, - qstats->error_bytes_received_lo); - UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, + UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); - UPDATE_EXTEND_TSTAT(rcv_multicast_pkts, + UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); 
- UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts, + UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); - UPDATE_EXTEND_TSTAT(packets_too_big_discard, + UPDATE_EXTEND_TSTAT(pkts_too_big_discard, etherstatsoverrsizepkts); UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard); @@ -871,30 +899,78 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard); UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard); + qstats->total_broadcast_bytes_transmitted_hi = + le32_to_cpu(xclient->bcast_bytes_sent.hi); + qstats->total_broadcast_bytes_transmitted_lo = + le32_to_cpu(xclient->bcast_bytes_sent.lo); + + qstats->total_multicast_bytes_transmitted_hi = + le32_to_cpu(xclient->mcast_bytes_sent.hi); + qstats->total_multicast_bytes_transmitted_lo = + le32_to_cpu(xclient->mcast_bytes_sent.lo); + + qstats->total_unicast_bytes_transmitted_hi = + le32_to_cpu(xclient->ucast_bytes_sent.hi); + qstats->total_unicast_bytes_transmitted_lo = + le32_to_cpu(xclient->ucast_bytes_sent.lo); + /* + * sum to total_bytes_transmitted all + * unicast/multicast/broadcast + */ qstats->total_bytes_transmitted_hi = - le32_to_cpu(xclient->unicast_bytes_sent.hi); + qstats->total_unicast_bytes_transmitted_hi; qstats->total_bytes_transmitted_lo = - le32_to_cpu(xclient->unicast_bytes_sent.lo); + qstats->total_unicast_bytes_transmitted_lo; ADD_64(qstats->total_bytes_transmitted_hi, - le32_to_cpu(xclient->multicast_bytes_sent.hi), + qstats->total_broadcast_bytes_transmitted_hi, qstats->total_bytes_transmitted_lo, - le32_to_cpu(xclient->multicast_bytes_sent.lo)); + qstats->total_broadcast_bytes_transmitted_lo); ADD_64(qstats->total_bytes_transmitted_hi, - le32_to_cpu(xclient->broadcast_bytes_sent.hi), + qstats->total_multicast_bytes_transmitted_hi, qstats->total_bytes_transmitted_lo, - le32_to_cpu(xclient->broadcast_bytes_sent.lo)); + qstats->total_multicast_bytes_transmitted_lo); - UPDATE_EXTEND_XSTAT(unicast_pkts_sent, + UPDATE_EXTEND_XSTAT(ucast_pkts_sent, total_unicast_packets_transmitted); - UPDATE_EXTEND_XSTAT(multicast_pkts_sent, + UPDATE_EXTEND_XSTAT(mcast_pkts_sent, total_multicast_packets_transmitted); - UPDATE_EXTEND_XSTAT(broadcast_pkts_sent, + UPDATE_EXTEND_XSTAT(bcast_pkts_sent, total_broadcast_packets_transmitted); - old_tclient->checksum_discard = tclient->checksum_discard; - old_tclient->ttl0_discard = tclient->ttl0_discard; + UPDATE_EXTEND_TSTAT(checksum_discard, + total_packets_received_checksum_discarded); + UPDATE_EXTEND_TSTAT(ttl0_discard, + total_packets_received_ttl0_discarded); + + UPDATE_EXTEND_XSTAT(error_drop_pkts, + total_transmitted_dropped_packets_error); + + /* TPA aggregations completed */ + UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations); + /* Number of network frames aggregated by TPA */ + UPDATE_EXTEND_USTAT(coalesced_pkts, + total_tpa_aggregated_frames); + /* Total number of bytes in completed TPA aggregations */ + qstats->total_tpa_bytes_lo = + le32_to_cpu(uclient->coalesced_bytes.lo); + qstats->total_tpa_bytes_hi = + le32_to_cpu(uclient->coalesced_bytes.hi); + + /* TPA stats per-function */ + ADD_64(estats->total_tpa_aggregations_hi, + qstats->total_tpa_aggregations_hi, + estats->total_tpa_aggregations_lo, + qstats->total_tpa_aggregations_lo); + ADD_64(estats->total_tpa_aggregated_frames_hi, + qstats->total_tpa_aggregated_frames_hi, + estats->total_tpa_aggregated_frames_lo, + qstats->total_tpa_aggregated_frames_lo); + ADD_64(estats->total_tpa_bytes_hi, + qstats->total_tpa_bytes_hi, + estats->total_tpa_bytes_lo, + 
qstats->total_tpa_bytes_lo); ADD_64(fstats->total_bytes_received_hi, qstats->total_bytes_received_hi, @@ -933,10 +1009,6 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) fstats->valid_bytes_received_lo, qstats->valid_bytes_received_lo); - ADD_64(estats->error_bytes_received_hi, - qstats->error_bytes_received_hi, - estats->error_bytes_received_lo, - qstats->error_bytes_received_lo); ADD_64(estats->etherstatsoverrsizepkts_hi, qstats->etherstatsoverrsizepkts_hi, estats->etherstatsoverrsizepkts_lo, @@ -950,9 +1022,19 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) fstats->total_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); + ADD_64(fstats->total_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + fstats->total_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + memcpy(estats, &(fstats->total_bytes_received_hi), sizeof(struct host_func_stats) - 2*sizeof(u32)); + ADD_64(estats->error_bytes_received_hi, + tfunc->rcv_error_bytes.hi, + estats->error_bytes_received_lo, + tfunc->rcv_error_bytes.lo); + ADD_64(estats->etherstatsoverrsizepkts_hi, estats->rx_stat_dot3statsframestoolong_hi, estats->etherstatsoverrsizepkts_lo, @@ -965,8 +1047,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) if (bp->port.pmf) { estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard); - estats->xxoverflow_discard = - le32_to_cpu(tport->xxoverflow_discard); + estats->mf_tag_discard = + le32_to_cpu(tport->mf_tag_discard); estats->brb_truncate_discard = le32_to_cpu(tport->brb_truncate_discard); estats->mac_discard = le32_to_cpu(tport->mac_discard); @@ -1023,7 +1105,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->rx_frame_errors = bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi); nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi); - nstats->rx_missed_errors = estats->xxoverflow_discard; + nstats->rx_missed_errors = 0; nstats->rx_errors = nstats->rx_length_errors + nstats->rx_over_errors + @@ -1065,10 +1147,27 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) } } +static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp) +{ + u32 val; + + if (SHMEM2_HAS(bp, edebug_driver_if[1])) { + val = SHMEM2_RD(bp, edebug_driver_if[1]); + + if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT) + return true; + } + + return false; +} + static void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); + if (bnx2x_edebug_stats_stopped(bp)) + return; + if (*stats_comp != DMAE_COMP_VAL) return; @@ -1086,10 +1185,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; - int i; + int i, cos; - printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n", - bp->dev->name, + netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", estats->brb_drop_lo, estats->brb_truncate_lo); for_each_eth_queue(bp, i) { @@ -1108,20 +1206,32 @@ static void bnx2x_stats_update(struct bnx2x *bp) for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_fp_txdata *txdata; struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; - struct netdev_queue *txq = - netdev_get_tx_queue(bp->dev, i); - - printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)" - " tx pkt(%lu) tx calls (%lu)" - " %s (Xoff events %u)\n", - fp->name, bnx2x_tx_avail(fp), - le16_to_cpu(*fp->tx_cons_sb), - bnx2x_hilo(&qstats-> - total_unicast_packets_transmitted_hi), - fp->tx_pkt, - (netif_tx_queue_stopped(txq) ? 
"Xoff" : "Xon"), - qstats->driver_xoff); + struct netdev_queue *txq; + + printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)", + fp->name, bnx2x_hilo( + &qstats->total_unicast_packets_transmitted_hi), + qstats->driver_xoff); + + for_each_cos_in_tx_queue(fp, cos) { + txdata = &fp->txdata[cos]; + txq = netdev_get_tx_queue(bp->dev, + FP_COS_TO_TXQ(fp, cos)); + + printk(KERN_DEBUG "%d: tx avail(%4u)" + " *tx_cons_sb(%u)" + " tx calls (%lu)" + " %s\n", + cos, + bnx2x_tx_avail(bp, txdata), + le16_to_cpu(*txdata->tx_cons_sb), + txdata->tx_pkt, + (netif_tx_queue_stopped(txq) ? + "Xoff" : "Xon") + ); + } } } @@ -1149,6 +1259,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp) else dmae->opcode = bnx2x_dmae_opcode_add_comp( opcode, DMAE_COMP_PCI); + dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; @@ -1235,13 +1346,9 @@ static const struct { void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; - if (unlikely(bp->panic)) return; - bnx2x_stats_stm[bp->stats_state][event].action(bp); - - /* Protect a state change flow */ spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; @@ -1297,7 +1404,7 @@ static void bnx2x_func_stats_base_init(struct bnx2x *bp) func_stx = bp->func_stx; for (vn = VN_0; vn < vn_max; vn++) { - int mb_idx = !CHIP_IS_E2(bp) ? 2*vn + BP_PORT(bp) : vn; + int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn; bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); bnx2x_func_stats_init(bp); @@ -1339,12 +1446,97 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp) bnx2x_stats_comp(bp); } +/** + * This function will prepare the statistics ramrod data the way + * we will only have to increment the statistics counter and + * send the ramrod each time we have to. + * + * @param bp + */ +static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) +{ + int i; + struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; + + dma_addr_t cur_data_offset; + struct stats_query_entry *cur_query_entry; + + stats_hdr->cmd_num = bp->fw_stats_num; + stats_hdr->drv_stats_counter = 0; + + /* storm_counters struct contains the counters of completed + * statistics requests per storm which are incremented by FW + * each time it completes hadning a statistics ramrod. We will + * check these counters in the timer handler and discard a + * (statistics) ramrod completion. + */ + cur_data_offset = bp->fw_stats_data_mapping + + offsetof(struct bnx2x_fw_stats_data, storm_counters); + + stats_hdr->stats_counters_addrs.hi = + cpu_to_le32(U64_HI(cur_data_offset)); + stats_hdr->stats_counters_addrs.lo = + cpu_to_le32(U64_LO(cur_data_offset)); + + /* prepare to the first stats ramrod (will be completed with + * the counters equal to zero) - init counters to somethig different. 
+
+	/**** Port FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, port);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PORT;
+	/* For port query index is a DONT CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	/* For port query funcID is a DONT CARE */
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** PF FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, pf);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PF;
+	/* For PF query index is a DONT CARE */
+	cur_query_entry->index = BP_PORT(bp);
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** Clients' queries ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+	for_each_eth_queue(bp, i) {
+		cur_query_entry =
+			&bp->fw_stats_req->
+					query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+
+		cur_query_entry->kind = STATS_TYPE_QUEUE;
+		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+
+		cur_data_offset += sizeof(struct per_queue_stats);
+	}
+}
+
 void bnx2x_stats_init(struct bnx2x *bp)
 {
-	int port = BP_PORT(bp);
+	int /*abs*/port = BP_PORT(bp);
 	int mb_idx = BP_FW_MB_IDX(bp);
 	int i;
-	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
 
 	bp->stats_pending = 0;
 	bp->executer_idx = 0;
@@ -1362,45 +1554,35 @@ void bnx2x_stats_init(struct bnx2x *bp)
 	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
 	   bp->port.port_stx, bp->func_stx);
 
+	port = BP_PORT(bp);
 	/* port stats */
 	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
 	bp->port.old_nig_stats.brb_discard =
 			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
 	bp->port.old_nig_stats.brb_truncate =
 			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
-	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
-		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+	if (!CHIP_IS_E3(bp)) {
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+	}
 
 	/* function stats */
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		memset(&fp->old_tclient, 0,
-		       sizeof(struct tstorm_per_client_stats));
-		memset(&fp->old_uclient, 0,
-		       sizeof(struct ustorm_per_client_stats));
-		memset(&fp->old_xclient, 0,
-		       sizeof(struct xstorm_per_client_stats));
-		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
+		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
+		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
+		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
 	}
 
-	/* FW stats
-	/* FW stats are currently collected for ETH clients only */
-	for_each_eth_queue(bp, i) {
-		/* Set initial stats counter in the stats ramrod data to -1 */
-		int cl_id = bp->fp[i].cl_id;
-
-		stats->xstorm_common.client_statistics[cl_id].
-			stats_counter = 0xffff;
-		stats->ustorm_common.client_statistics[cl_id].
-			stats_counter = 0xffff;
-		stats->tstorm_common.client_statistics[cl_id].
-			stats_counter = 0xffff;
-	}
+	/* Prepare statistics ramrod data */
+	bnx2x_prep_fw_stats_req(bp);
 
-	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
-	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
+	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
 
 	bp->stats_state = STATS_STATE_DISABLED;
 
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 45d14d8bc1a..5d8ce2f6afe 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -14,48 +14,11 @@
  * Statistics and Link management by Yitchak Gertner
  *
  */
-
 #ifndef BNX2X_STATS_H
 #define BNX2X_STATS_H
 
 #include <linux/types.h>
 
-struct bnx2x_eth_q_stats {
-	u32 total_bytes_received_hi;
-	u32 total_bytes_received_lo;
-	u32 total_bytes_transmitted_hi;
-	u32 total_bytes_transmitted_lo;
-	u32 total_unicast_packets_received_hi;
-	u32 total_unicast_packets_received_lo;
-	u32 total_multicast_packets_received_hi;
-	u32 total_multicast_packets_received_lo;
-	u32 total_broadcast_packets_received_hi;
-	u32 total_broadcast_packets_received_lo;
-	u32 total_unicast_packets_transmitted_hi;
-	u32 total_unicast_packets_transmitted_lo;
-	u32 total_multicast_packets_transmitted_hi;
-	u32 total_multicast_packets_transmitted_lo;
-	u32 total_broadcast_packets_transmitted_hi;
-	u32 total_broadcast_packets_transmitted_lo;
-	u32 valid_bytes_received_hi;
-	u32 valid_bytes_received_lo;
-
-	u32 error_bytes_received_hi;
-	u32 error_bytes_received_lo;
-	u32 etherstatsoverrsizepkts_hi;
-	u32 etherstatsoverrsizepkts_lo;
-	u32 no_buff_discard_hi;
-	u32 no_buff_discard_lo;
-
-	u32 driver_xoff;
-	u32 rx_err_discard_pkt;
-	u32 rx_skb_alloc_failed;
-	u32 hw_csum_err;
-};
-
-#define Q_STATS_OFFSET32(stat_name) \
-	(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
-
 struct nig_stats {
 	u32 brb_discard;
 	u32 brb_packet;
@@ -212,7 +175,7 @@ struct bnx2x_eth_stats {
 	u32 brb_truncate_lo;
 
 	u32 mac_filter_discard;
-	u32 xxoverflow_discard;
+	u32 mf_tag_discard;
 	u32 brb_truncate_discard;
 	u32 mac_discard;
 
@@ -222,16 +185,197 @@ struct bnx2x_eth_stats {
 	u32 hw_csum_err;
 
 	u32 nig_timer_max;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+};
+
+
+struct bnx2x_eth_q_stats {
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 total_packets_received_checksum_discarded_hi;
+	u32 total_packets_received_checksum_discarded_lo;
+	u32 total_packets_received_ttl0_discarded_hi;
+	u32 total_packets_received_ttl0_discarded_lo;
+	u32 total_transmitted_dropped_packets_error_hi;
+	u32 total_transmitted_dropped_packets_error_lo;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
 };
 
-#define STATS_OFFSET32(stat_name) \
-	(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
+
+/* difference = minuend - subtrahend */
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) { \
+			/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { \
+				/* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else { \
+				/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else { \
+			/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+				d_hi = 0; \
+				d_lo = 0; \
+			} else { \
+				/* m_hi >= s_hi */ \
+				d_hi = m_hi - s_hi; \
+				d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
+
+#define UPDATE_STAT64(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+		pstats->mac_stx[0].t##_hi = new->s##_hi; \
+		pstats->mac_stx[0].t##_lo = new->s##_lo; \
+		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+		       pstats->mac_stx[1].t##_lo, diff.lo); \
+	} while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+			diff.lo, new->s##_lo, old->s##_lo); \
+		ADD_64(estats->t##_hi, diff.hi, \
+		       estats->t##_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
+
+#define ADD_STAT64(diff, t) \
+	do { \
+		ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+		       pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+	} while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+	do { \
+		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+			      pstats->mac_stx[1].s##_lo, \
+			      new->s); \
+	} while (0)
+
+#define UPDATE_EXTEND_TSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+		old_tclient->s = tclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+		old_xclient->s = xclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
-/* Forward declaration */
+/* forward */
 struct bnx2x;
 
 void bnx2x_stats_init(struct bnx2x *bp);
 
-extern const u32 dmae_reg_go_c[];
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
 
 #endif /* BNX2X_STATS_H */
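bnx2x_prep_fw_stats_req(), in the first hunk above, builds one query entry per statistics client (port, PF, then one entry per ethernet queue), each carrying the DMA address of its result buffer as little-endian hi/lo 32-bit words, with the per-queue entries stepping through the buffer by sizeof(struct per_queue_stats). A minimal standalone sketch of that address split and stepping; U64_HI/U64_LO are assumed to behave like the driver's helpers in bnx2x.h, and the buffer address and entry size here are made up for illustration:

/* Standalone sketch, not driver code: each queue's query points
 * sizeof(struct per_queue_stats) further into the DMA buffer, with
 * the 64-bit address split into hi/lo words (the driver additionally
 * wraps these in cpu_to_le32() before writing the query entry). */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define U64_LO(x)	((u32)((u64)(x) & 0xffffffff))
#define U64_HI(x)	((u32)((u64)(x) >> 32))

int main(void)
{
	u64 cur_data_offset = 0x0000000123456000ULL; /* made-up mapping */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		printf("queue %u: address.hi=0x%08x address.lo=0x%08x\n",
		       i, U64_HI(cur_data_offset), U64_LO(cur_data_offset));
		cur_data_offset += 0x150; /* stand-in for the struct size */
	}
	return 0;
}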
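Both bnx2x_eth_stats and the reworked bnx2x_eth_q_stats keep every 64-bit counter as a _hi/_lo pair of u32s, matching how the firmware reports them. A consumer (an ethtool dump, for instance) recombines a pair by shifting; a sketch, assuming a HILO_U64-style helper like the one in bnx2x.h and a hypothetical two-field stand-in for the real struct:

/* Standalone sketch: recombining a split _hi/_lo counter pair. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

struct q_stats_fragment {	/* stand-in for two fields of
				 * struct bnx2x_eth_q_stats */
	u32 total_bytes_received_hi;
	u32 total_bytes_received_lo;
};

int main(void)
{
	struct q_stats_fragment qs = {
		.total_bytes_received_hi = 0x2,
		.total_bytes_received_lo = 0x80000000,
	};

	printf("total_bytes_received = %" PRIu64 "\n",
	       HILO_U64(qs.total_bytes_received_hi,
			qs.total_bytes_received_lo));
	return 0;
}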
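The ADD_64 and DIFF_64 macros above implement 64-bit arithmetic on those split pairs: ADD_64 propagates a carry by testing for u32 wraparound, while DIFF_64 borrows from the high word and clamps the result to zero when the minuend is smaller than the subtrahend (as can happen after a counter reset). A standalone exerciser, with the two macros copied verbatim from the header and a hypothetical userspace harness around them:

/* Standalone sketch, not driver code: exercises the hi/lo macros. */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

typedef uint32_t u32;

#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

int main(void)
{
	u32 s_hi = 0, s_lo = UINT_MAX;
	u32 d_hi, d_lo;

	/* 0x0_ffffffff + 1: low word wraps, carry into the high word */
	ADD_64(s_hi, 0, s_lo, 1);
	printf("sum   hi=0x%x lo=0x%x\n", s_hi, s_lo);	/* 0x1_00000000 */

	/* 0x1_00000000 - 1: borrow ('loan') from the high word */
	DIFF_64(d_hi, s_hi, 0, d_lo, s_lo, 1);
	printf("diff  hi=0x%x lo=0x%x\n", d_hi, d_lo);	/* 0x0_ffffffff */

	/* 5 - 10: would underflow, so the result clamps to zero */
	DIFF_64(d_hi, 0, 0, d_lo, 5, 10);
	printf("clamp hi=0x%x lo=0x%x\n", d_hi, d_lo);	/* 0x0_00000000 */
	return 0;
}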
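The UPDATE_EXTEND_TSTAT/USTAT/XSTAT macros take the delta between two snapshots of a firmware u32 counter with plain unsigned subtraction before folding it into the _hi/_lo accumulator via ADD_EXTEND_64; because unsigned arithmetic is modulo 2^32, the delta stays correct across a single wrap of the firmware counter. A tiny demonstration of that property (standalone, not driver code):

/* Sketch: modulo-2^32 subtraction survives one counter wrap. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

int main(void)
{
	u32 old_snap = 0xfffffff0u;	/* snapshot from the previous pass */
	u32 new_snap = 0x00000010u;	/* counter wrapped since then */
	u32 diff = new_snap - old_snap;	/* 0x20: 32 events despite the wrap */

	printf("diff = 0x%x\n", diff);
	return 0;
}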