Diffstat (limited to 'drivers/net/ethernet')
53 files changed, 1404 insertions, 490 deletions
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index a5f91e1e8fe..becef25fa19 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -148,7 +148,7 @@ config PCMCIA_PCNET config NE_H8300 tristate "NE2000 compatible support for H8/300" - depends on H8300 + depends on H8300H_AKI3068NET || H8300H_H8MAX ---help--- Say Y here if you want to use the NE2000 compatible controller on the Renesas H8/300 processor. diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index e8d0ef508f4..10ceca523fc 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -1147,7 +1147,7 @@ static struct net_device *atarilance_dev; static int __init atarilance_module_init(void) { atarilance_dev = atarilance_probe(-1); - return PTR_RET(atarilance_dev); + return PTR_ERR_OR_ZERO(atarilance_dev); } static void __exit atarilance_module_exit(void) diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index a51497c9d2a..e108e911da0 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -188,7 +188,7 @@ static struct net_device *dev_mvme147_lance; int __init init_module(void) { dev_mvme147_lance = mvme147lance_probe(-1); - return PTR_RET(dev_mvme147_lance); + return PTR_ERR_OR_ZERO(dev_mvme147_lance); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 26fc0ce0faa..1cf33addd15 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); int __init init_module(void) { dev_ni65 = ni65_probe(-1); - return PTR_RET(dev_ni65); + return PTR_ERR_OR_ZERO(dev_ni65); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 4375abe61da..d6b20296b8e 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -940,7 +940,7 @@ static struct net_device *sun3lance_dev; int __init init_module(void) { sun3lance_dev = sun3lance_probe(-1); - return PTR_RET(sun3lance_dev); + return PTR_ERR_OR_ZERO(sun3lance_dev); } void __exit cleanup_module(void) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 55d79cb53a7..9e160148726 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev) struct sk_buff *skb = tx_buff->skb; unsigned int info = le32_to_cpu(txbd->info); - *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; - if ((info & FOR_EMAC) || !txbd->data) break; @@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev) txbd->data = 0; txbd->info = 0; + *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; + if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 3e77a1b1a44..0c338026ce0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) #define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */ -#define BNX2X_DB_SHIFT 7 /* 128 bytes*/ +#define BNX2X_DB_SHIFT 3 /* 8 bytes*/ #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) 
#error "Min DB doorbell stride is 8" #endif -#define DPM_TRIGER_TYPE 0x40 #define DOORBELL(bp, cid, val) \ do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \ - DPM_TRIGER_TYPE); \ + writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ } while (0) /* TX CSUM helpers */ @@ -1100,13 +1098,27 @@ struct bnx2x_port { extern struct workqueue_struct *bnx2x_wq; #define BNX2X_MAX_NUM_OF_VFS 64 -#define BNX2X_VF_CID_WND 0 +#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */ #define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND) -#define BNX2X_CLIENTS_PER_VF 1 -#define BNX2X_FIRST_VF_CID 256 + +/* We need to reserve doorbell addresses for all VF and queue combinations */ #define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF) + +/* The doorbell is configured to have the same number of CIDs for PFs and for + * VFs. For this reason the PF CID zone is as large as the VF zone. + */ +#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS +#define BNX2X_MAX_NUM_VF_QUEUES 64 #define BNX2X_VF_ID_INVALID 0xFF +/* the number of VF CIDS multiplied by the amount of bytes reserved for each + * cid must not exceed the size of the VF doorbell + */ +#define BNX2X_VF_BAR_SIZE 512 +#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT)) +#error "VF doorbell bar size is 512" +#endif + /* * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is * control by the number of fast-path status blocks supported by the @@ -1650,10 +1662,10 @@ struct bnx2x { dma_addr_t fw_stats_data_mapping; int fw_stats_data_sz; - /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB + /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB * context size we need 8 ILT entries. */ -#define ILT_MAX_L2_LINES 8 +#define ILT_MAX_L2_LINES 32 struct hw_context context[ILT_MAX_L2_LINES]; struct bnx2x_ilt *ilt; @@ -1869,7 +1881,7 @@ extern int num_queues; #define FUNC_FLG_TPA 0x0008 #define FUNC_FLG_SPQ 0x0010 #define FUNC_FLG_LEADING 0x0020 /* PF only */ - +#define FUNC_FLG_LEADING_STATS 0x0040 struct bnx2x_func_init_params { /* dma */ dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2e90868a927..2361bf236ce 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; int old_max_eth_txqs, new_max_eth_txqs; int old_txdata_index = 0, new_txdata_index = 0; + struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info; /* Copy the NAPI object as it has been already initialized */ from_fp->napi = to_fp->napi; @@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; + /* Retain the tpa_info of the original `to' version as we don't want + * 2 FPs to contain the same tpa_info pointer. 
+ */ + to_fp->tpa_info = old_tpa_info; + /* move sp_objs contents as well, as their indices match fp ones */ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); @@ -1942,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) } } -static int bnx2x_init_rss_pf(struct bnx2x *bp) +static int bnx2x_init_rss(struct bnx2x *bp) { int i; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); @@ -1966,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); } -int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash) +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable) { struct bnx2x_config_rss_params params = {NULL}; @@ -1982,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); - __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); - - /* RSS configuration */ - __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); - __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v4) - __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v6) - __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + if (enable) { + __set_bit(BNX2X_RSS_MODE_REGULAR, ¶ms.rss_flags); + + /* RSS configuration */ + __set_bit(BNX2X_RSS_IPV4, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); + __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v4) + __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); + if (rss_obj->udp_rss_v6) + __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + } else { + __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); + } /* Hash bits */ params.rss_result_mask = MULTI_MASK; @@ -2001,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, if (config_hash) { /* RSS keys */ - prandom_bytes(params.rss_key, sizeof(params.rss_key)); + prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4); __set_bit(BNX2X_RSS_SET_SRCH, ¶ms.rss_flags); } - return bnx2x_config_rss(bp, ¶ms); + if (IS_PF(bp)) + return bnx2x_config_rss(bp, ¶ms); + else + return bnx2x_vfpf_config_rss(bp, ¶ms); } static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) @@ -2645,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* initialize FW coalescing state machines in RAM */ bnx2x_update_coalesce(bp); + } - /* setup the leading queue */ - rc = bnx2x_setup_leading(bp); - if (rc) { - BNX2X_ERR("Setup leading failed!\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - - /* set up the rest of the queues */ - for_each_nondefault_eth_queue(bp, i) { - rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); - if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - } + /* setup the leading queue */ + rc = bnx2x_setup_leading(bp); + if (rc) { + BNX2X_ERR("Setup leading failed!\n"); + LOAD_ERROR_EXIT(bp, load_error3); + } - /* setup rss */ - rc = bnx2x_init_rss_pf(bp); + /* set up the rest of the queues */ + for_each_nondefault_eth_queue(bp, i) { + if (IS_PF(bp)) + rc = bnx2x_setup_queue(bp, &bp->fp[i], false); + else /* VF */ + rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false); if (rc) { - BNX2X_ERR("PF RSS init failed\n"); + BNX2X_ERR("Queue %d setup failed\n", i); LOAD_ERROR_EXIT(bp, load_error3); } + } - } else { /* vf */ - for_each_eth_queue(bp, i) { - rc = bnx2x_vfpf_setup_q(bp, i); 
- if (rc) { - BNX2X_ERR("Queue setup failed\n"); - LOAD_ERROR_EXIT(bp, load_error3); - } - } + /* setup rss */ + rc = bnx2x_init_rss(bp); + if (rc) { + BNX2X_ERR("PF RSS init failed\n"); + LOAD_ERROR_EXIT(bp, load_error3); } /* Now when Clients are configured we are ready to work */ @@ -2958,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) if (IS_PF(bp)) { if (CNIC_LOADED(bp)) bnx2x_free_mem_cnic(bp); - bnx2x_free_mem(bp); } + bnx2x_free_mem(bp); + bp->state = BNX2X_STATE_CLOSED; bp->cnic_loaded = false; @@ -4784,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev) void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { + if (!cxt) { + BNX2X_ERR("bad context pointer %p\n", cxt); + return; + } + /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid), diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index affb7646241..da8fcaa7449 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -105,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); * @rss_obj: RSS object to use * @ind_table: indirection table to configure * @config_hash: re-configure RSS hash keys configuration + * @enable: enabled or disabled configuration */ -int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash); +int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, + bool config_hash, bool enable); /** * bnx2x__init_func_obj - init function object @@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn) static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) { - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); + return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true); } /** diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index c5f22510168..2612e3c715d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); } else if ((info->flow_type == UDP_V6_FLOW) && (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; DP(BNX2X_MSG_ETHTOOL, "rss re-configured, UDP 4-tupple %s\n", udp_rss_requested ? 
"enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); + return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); } return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 9d64b988ab3..d60a2ea3da1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6503,10 +6503,8 @@ static int bnx2x_link_initialize(struct link_params *params, (CHIP_IS_E1x(bp) || CHIP_IS_E2(bp))) bnx2x_set_parallel_detection(phy, params); - if (params->phy[INT_PHY].config_init) - params->phy[INT_PHY].config_init(phy, - params, - vars); + if (params->phy[INT_PHY].config_init) + params->phy[INT_PHY].config_init(phy, params, vars); } /* Init external phy*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 17f117c1d8d..634a793c1c4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -6893,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); - REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); + if (!CHIP_REV_IS_SLOW(bp)) /* enable hw interrupt from doorbell Q */ REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0); @@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp) { int i; - BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, - sizeof(struct host_sp_status_block)); - BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, bp->fw_stats_data_sz + bp->fw_stats_req_sz); + if (IS_VF(bp)) + return; + + BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping, + sizeof(struct host_sp_status_block)); + BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); @@ -8060,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) int bnx2x_setup_leading(struct bnx2x *bp) { - return bnx2x_setup_queue(bp, &bp->fp[0], 1); + if (IS_PF(bp)) + return bnx2x_setup_queue(bp, &bp->fp[0], true); + else /* VF */ + return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true); } /** @@ -8074,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp) { int rc = 0; - if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) + if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) { + BNX2X_ERR("VF not loaded since interrupt mode not msix\n"); return -EINVAL; + } switch (int_mode) { case BNX2X_INT_MODE_MSIX: @@ -9955,8 +9963,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi) static int bnx2x_do_flr(struct bnx2x *bp) { - int i; - u16 status; struct pci_dev *dev = bp->pdev; if (CHIP_IS_E1x(bp)) { @@ -9971,20 +9977,8 @@ static int bnx2x_do_flr(struct bnx2x *bp) return -EINVAL; } - /* Wait for Transaction Pending bit clean */ - for (i = 0; i < 4; i++) { - if (i) - msleep((1 << (i - 1)) * 100); - - pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status); - if (!(status & PCI_EXP_DEVSTA_TRPND)) - goto clear; - } - - dev_err(&dev->dev, - "transaction is not cleared; proceeding with reset anyway\n"); - -clear: + if (!pci_wait_for_pending_transaction(dev)) + dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); BNX2X_DEV_INFO("Initiating FLR\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); @@ -11658,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp) * second status block for the L2 queue, and a third status block for * CNIC if supported. 
*/ - if (CNIC_SUPPORT(bp)) + if (IS_VF(bp)) + bp->min_msix_vec_cnt = 1; + else if (CNIC_SUPPORT(bp)) bp->min_msix_vec_cnt = 3; - else + else /* PF w/o cnic */ bp->min_msix_vec_cnt = 2; BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt); @@ -12571,8 +12567,7 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) * @dev: pci device * */ -static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, - int cnic_cnt, bool is_vf) +static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt) { int index; u16 control = 0; @@ -12598,7 +12593,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, index = control & PCI_MSIX_FLAGS_QSIZE; - return is_vf ? index + 1 : index; + return index; } static int set_max_cos_est(int chip_id) @@ -12678,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev, is_vf = set_is_vf(ent->driver_data); cnic_cnt = is_vf ? 0 : 1; - max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf); + max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt); + + /* add another SB for VF as it has no default SB */ + max_non_def_sbs += is_vf ? 1 : 0; /* Maximum number of RSS queues: one IGU SB goes to CNIC */ - rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt; + rss_count = max_non_def_sbs - cnic_cnt; if (rss_count < 1) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 8e627b886d7..5ecf267dc4c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -6335,6 +6335,7 @@ #define PCI_ID_VAL2 0x438 #define PCI_ID_VAL3 0x43c +#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C #define GRC_CONFIG_REG_PF_INIT_VF 0x624 #define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf /* First VF_NUM for PF is encoded in this register. 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 1d46b68fb76..9fbeee522d2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4416,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, rss_obj->config_rss = bnx2x_setup_rss; } +int validate_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac) +{ + if (!vlan_mac->get_n_elements) { + BNX2X_ERR("vlan mac object was not initialized\n"); + return -EINVAL; + } + return 0; +} + /********************** Queue state object ***********************************/ /**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 533a3abd8c8..658f4e33abf 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1407,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp, void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, u8 *ind_table); +int validate_vlan_mac(struct bnx2x *bp, + struct bnx2x_vlan_mac_obj *vlan_mac); #endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index fbc026c4cab..b26eb83069b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state { BNX2X_VFOP_QTEARDOWN_DONE }; +enum bnx2x_vfop_rss_state { + BNX2X_VFOP_RSS_CONFIG, + BNX2X_VFOP_RSS_DONE +}; + #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, @@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp, __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); - if (vfq_is_leading(q)) { - __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags); - __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); - } - /* Setup-op rx parameters */ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; @@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) BNX2X_Q_LOGICAL_STATE_STOPPED) { DP(BNX2X_MSG_IOV, "Entered qdtor but queue was already stopped. Aborting gracefully\n"); - goto op_done; + + /* next state */ + vfop->state = BNX2X_VFOP_QDTOR_DONE; + + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); } /* next state */ @@ -432,8 +436,10 @@ op_err: op_done: case BNX2X_VFOP_QDTOR_DONE: /* invalidate the context */ - qdtor->cxt->ustorm_ag_context.cdu_usage = 0; - qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + if (qdtor->cxt) { + qdtor->cxt->ustorm_ag_context.cdu_usage = 0; + qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; + } bnx2x_vfop_end(bp, vf, vfop); return; default: @@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, cmd->block); } - DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid); + DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n", + vf->abs_vfid, vfop->rc); return -ENOMEM; } @@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) { struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); if (vf) { + /* the first igu entry belonging to VFs of this PF */ + if (!BP_VFDB(bp)->first_vf_igu_entry) + BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; + + /* the first igu entry belonging to this VF */ if (!vf_sb_count(vf)) vf->igu_base_id = igu_sb_id; + ++vf_sb_count(vf); + ++vf->sb_count; } + BP_VFDB(bp)->vf_sbs_pool++; } /* VFOP MAC/VLAN helpers */ @@ -530,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp, return 0; } -static int -bnx2x_vfop_config_vlan0(struct bnx2x *bp, - struct bnx2x_vlan_mac_ramrod_params *vlan_mac, - bool add) -{ - int rc; - - vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : - BNX2X_VLAN_MAC_DEL; - vlan_mac->user_req.u.vlan.vlan = 0; - - rc = bnx2x_config_vlan_mac(bp, vlan_mac); - if (rc == -EEXIST) - rc = 0; - return rc; -} - static int bnx2x_vfop_config_list(struct bnx2x *bp, struct bnx2x_vfop_filters *filters, struct bnx2x_vlan_mac_ramrod_params *vlan_mac) @@ -651,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) case BNX2X_VFOP_VLAN_CONFIG_LIST: /* next state */ - vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; - - /* remove vlan0 - could be no-op */ - vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); - if (vfop->rc) - goto op_err; + vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - /* Do vlan list config. if this operation fails we try to - * restore vlan0 to keep the queue is working order - */ + /* do list config */ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); if (!vfop->rc) { set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); } - bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ - - case BNX2X_VFOP_VLAN_CONFIG_LIST_0: - /* next state */ - vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; - - if (list_empty(&obj->head)) - /* add vlan0 */ - vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); default: @@ -733,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -752,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); /* set extra args */ @@ -772,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -794,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); /* set extra args */ @@ -814,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, int qid, u16 vid, bool add) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -834,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, ramrod->user_req.u.vlan.vlan = vid; /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -853,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -872,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -892,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, int qid, bool drv_only) { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + int rc; if (vfop) { struct bnx2x_vfop_args_filters filters = { @@ -911,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); /* set object */ + rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)); + if (rc) + return rc; ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); /* set extra args */ @@ -1021,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) case BNX2X_VFOP_QFLR_CLR_VLAN: /* vlan-clear-all: driver-only, don't consume credit */ vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; - vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); + if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) + vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, + true); if (vfop->rc) goto op_err; - return; + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); case BNX2X_VFOP_QFLR_CLR_MAC: /* mac-clear-all: driver only consume credit */ vfop->state = BNX2X_VFOP_QFLR_TERMINATE; - vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); + if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) + vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, + true); DP(BNX2X_MSG_IOV, "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", vf->abs_vfid, vfop->rc); if (vfop->rc) goto op_err; - return; + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); case BNX2X_VFOP_QFLR_TERMINATE: qstate = &vfop->op_p->qctor.qstate; @@ -1332,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, { struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + /* for non-leading queues skip directly to qdown state */ if (vfop) { vfop->args.qx.qid = qid; - bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE, - bnx2x_vfop_qdown, cmd->done); + bnx2x_vfop_opset(qid == LEADING_IDX ? 
+ BNX2X_VFOP_QTEARDOWN_RXMODE : + BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown, + cmd->done); return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown, cmd->block); } @@ -1488,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) * both known */ static void -bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) +bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { + struct vf_pf_resc_request *resc = &vf->alloc_resc; u16 vlan_count = 0; /* will be set only during VF-ACQUIRE */ resc->num_rxqs = 0; resc->num_txqs = 0; - /* no credit calculcis for macs (just yet) */ + /* no credit calculations for macs (just yet) */ resc->num_mac_filters = 1; /* divvy up vlan rules */ @@ -1508,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) resc->num_mc_filters = 0; /* num_sbs already set */ + resc->num_sbs = vf->sb_count; } /* FLR routines: */ static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) { /* reset the state variables */ - bnx2x_iov_static_resc(bp, &vf->alloc_resc); + bnx2x_iov_static_resc(bp, vf); vf->state = VF_FREE; } @@ -1734,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp) /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match * the Pf doorbell size although the 2 are independent. */ - REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, - BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); + REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); /* No security checks for now - * configure single rule (out of 16) mask = 0x1, value = 0x0, @@ -1802,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { int sb_id; u32 val; - u8 fid; + u8 fid, current_pf = 0; /* IGU in normal mode - read CAM */ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { @@ -1810,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) continue; fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); - if (!(fid & IGU_FID_ENCODE_IS_PF)) + if (fid & IGU_FID_ENCODE_IS_PF) + current_pf = fid & IGU_FID_PF_NUM_MASK; + else if (current_pf == BP_ABS_FUNC(bp)) bnx2x_vf_set_igu_info(bp, sb_id, (fid & IGU_FID_VF_NUM_MASK)); - DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), ((fid & IGU_FID_ENCODE_IS_PF) ? 
(fid & IGU_FID_PF_NUM_MASK) : (fid & IGU_FID_VF_NUM_MASK)), sb_id, GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); } + DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); } static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) @@ -1885,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) return 0; } -static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp) -{ - int i; - u8 queue_count = 0; - - if (IS_SRIOV(bp)) - for_each_vf(bp, i) - queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs); - - return queue_count; -} - /* must be called after PF bars are mapped */ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, - int num_vfs_param) + int num_vfs_param) { - int err, i, qcount; + int err, i; struct bnx2x_sriov *iov; struct pci_dev *dev = bp->pdev; @@ -1999,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ bnx2x_get_vf_igu_cam_info(bp); - /* get the total queue count and allocate the global queue arrays */ - qcount = bnx2x_iov_get_max_queue_count(bp); - /* allocate the queue arrays for all VFs */ - bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), - GFP_KERNEL); + bp->vfdb->vfqs = kzalloc( + BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), + GFP_KERNEL); + + DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); + if (!bp->vfdb->vfqs) { BNX2X_ERR("failed to allocate vf queue array\n"); err = -ENOMEM; @@ -2125,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, q_type); DP(BNX2X_MSG_IOV, - "initialized vf %d's queue object. func id set to %d\n", - vf->abs_vfid, q->sp_obj.func_id); - - /* mac/vlan objects are per queue, but only those - * that belong to the leading queue are initialized - */ - if (vfq_is_leading(q)) { - /* mac */ - bnx2x_init_mac_obj(bp, &q->mac_obj, - cl_id, q->cid, func_id, - bnx2x_vf_sp(bp, vf, mac_rdata), - bnx2x_vf_sp_map(bp, vf, mac_rdata), - BNX2X_FILTER_MAC_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX, - &bp->macs_pool); - /* vlan */ - bnx2x_init_vlan_obj(bp, &q->vlan_obj, - cl_id, q->cid, func_id, - bnx2x_vf_sp(bp, vf, vlan_rdata), - bnx2x_vf_sp_map(bp, vf, vlan_rdata), - BNX2X_FILTER_VLAN_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX, - &bp->vlans_pool); - - /* mcast */ - bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, - q->cid, func_id, func_id, - bnx2x_vf_sp(bp, vf, mcast_rdata), - bnx2x_vf_sp_map(bp, vf, mcast_rdata), - BNX2X_FILTER_MCAST_PENDING, - &vf->filter_state, - BNX2X_OBJ_TYPE_RX_TX); - - vf->leading_rss = cl_id; - } + "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", + vf->abs_vfid, q->sp_obj.func_id, q->cid); } /* called by bnx2x_nic_load */ int bnx2x_iov_nic_init(struct bnx2x *bp) { - int vfid, qcount, i; + int vfid; if (!IS_SRIOV(bp)) { DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); @@ -2196,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); /* init statically provisioned resources */ - bnx2x_iov_static_resc(bp, &vf->alloc_resc); + bnx2x_iov_static_resc(bp, vf); /* queues are initialized during VF-ACQUIRE */ @@ -2232,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) } /* Final VF init */ - qcount = 0; - for_each_vf(bp, i) { - struct bnx2x_virtf *vf = BP_VF(bp, i); + for_each_vf(bp, vfid) { + struct bnx2x_virtf *vf = BP_VF(bp, vfid); /* fill in the BDF and bars */ - vf->bus = bnx2x_vf_bus(bp, i); - vf->devfn = bnx2x_vf_devfn(bp, i); + vf->bus = bnx2x_vf_bus(bp, vfid); + vf->devfn = bnx2x_vf_devfn(bp, vfid); bnx2x_vf_set_bars(bp, vf); DP(BNX2X_MSG_IOV, @@ -2247,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) (unsigned)vf->bars[0].bar, vf->bars[0].size, (unsigned)vf->bars[1].bar, vf->bars[1].size, (unsigned)vf->bars[2].bar, vf->bars[2].size); - - /* set local queue arrays */ - vf->vfqs = &bp->vfdb->vfqs[qcount]; - qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); } return 0;
@@ -2556,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) for_each_vfq(vf, j) { struct bnx2x_vf_queue *rxq = vfq_get(vf, j); + dma_addr_t q_stats_addr = + vf->fw_stat_map + j * vf->stats_stride; + /* collect stats fro active queues only */ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == BNX2X_Q_LOGICAL_STATE_STOPPED) continue; /* create stats query entry for this queue */ cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = vfq_cl_id(vf, rxq); + cur_query_entry->index = vfq_stat_id(vf, rxq); cur_query_entry->funcID = cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); cur_query_entry->address.hi = - cpu_to_le32(U64_HI(vf->fw_stat_map)); + cpu_to_le32(U64_HI(q_stats_addr)); cur_query_entry->address.lo = - cpu_to_le32(U64_LO(vf->fw_stat_map)); + cpu_to_le32(U64_LO(q_stats_addr)); DP(BNX2X_MSG_IOV, "added address %x %x for vf %d queue %d client %d\n", cur_query_entry->address.hi, @@ -2578,6 +2542,10 @@ cur_query_entry++; cur_data_offset += sizeof(struct per_queue_stats); stats_count++; + + /* all stats are coalesced to the leading queue */ + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + break; } } bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2596,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp) for_each_vf(bp, i) { struct bnx2x_virtf *vf = BP_VF(bp, i); + if (!vf) { + BNX2X_ERR("VF was null! skipping...\n"); + continue; + } + if (!list_empty(&vf->op_list_head) && atomic_read(&vf->op_in_progress)) { DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); @@ -2743,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q = vfq_get(vf, i); if (!q) { - DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); + BNX2X_ERR("q number %d was not allocated\n", i); return -EINVAL; } @@ -2827,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) return 0; } +struct set_vf_state_cookie { + struct bnx2x_virtf *vf; + u8 state; +}; + +void bnx2x_set_vf_state(void *cookie) +{ + struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; + + p->vf->state = p->state; +} + /* VFOP close (teardown the queues, delete mcasts and close HW) */ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) { @@ -2877,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) op_err: BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); op_done: - vf->state = VF_ACQUIRED; + + /* need to make sure there are no outstanding stats ramrods which may + * cause the device to access the VF's stats buffer which it will free + * as soon as we return from the close flow. + */ + { + struct set_vf_state_cookie cookie; + + cookie.vf = vf; + cookie.state = VF_ACQUIRED; + bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); + } + DP(BNX2X_MSG_IOV, "set state to acquired\n"); bnx2x_vfop_end(bp, vf, vfop); } @@ -2947,6 +2944,43 @@ op_done: bnx2x_vfop_end(bp, vf, vfop); } +static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); + enum bnx2x_vfop_rss_state state; + + if (!vfop) { + BNX2X_ERR("vfop was null\n"); + return; + } + + state = vfop->state; + bnx2x_vfop_reset_wq(vf); + + if (vfop->rc < 0) + goto op_err; + + DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); + + switch (state) { + case BNX2X_VFOP_RSS_CONFIG: + /* next state */ + vfop->state = BNX2X_VFOP_RSS_DONE; + bnx2x_config_rss(bp, &vfop->op_p->rss); + bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); +op_err: + BNX2X_ERR("RSS error: rc %d\n", vfop->rc); +op_done: + case BNX2X_VFOP_RSS_DONE: + bnx2x_vfop_end(bp, vf, vfop); + return; + default: + bnx2x_vfop_default(state); + } +op_pending: + return; +} + int bnx2x_vfop_release_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd) @@ -2961,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, return -ENOMEM; } +int bnx2x_vfop_rss_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd) +{ + struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); + + if (vfop) { + bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, + cmd->done); + return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, + cmd->block); + } + return -ENOMEM; +} + /* VF release ~ VF close + VF release-resources * Release is the ultimate SW shutdown and is called whenever an * irrecoverable error is encountered. 
@@ -2972,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) .block = block, }; int rc; + + DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); @@ -3000,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { + /* we don't lock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(tlv)) { + BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n"); + return; + } + /* lock the channel */ mutex_lock(&vf->op_mutex); @@ -3014,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs expected_tlv) { + enum channel_tlvs current_tlv; + + if (!vf) { + BNX2X_ERR("VF was %p\n", vf); + return; + } + + current_tlv = vf->op_current; + + /* we don't unlock the channel for unsupported tlvs */ + if (!bnx2x_tlv_supported(expected_tlv)) + return; + WARN(expected_tlv != vf->op_current, "lock mismatch: expected %d found %d", expected_tlv, vf->op_current); + /* record the locking op */ + vf->op_current = CHANNEL_TLV_NONE; + /* lock the channel */ mutex_unlock(&vf->op_mutex); /* log the unlock */ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", vf->abs_vfid, vf->op_current); - - /* record the locking op */ - vf->op_current = CHANNEL_TLV_NONE; } int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) @@ -3057,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) return bnx2x_enable_sriov(bp); } } +#define IGU_ENTRY_SIZE 4 int bnx2x_enable_sriov(struct bnx2x *bp) { int rc = 0, req_vfs = bp->requested_nr_virtfn; + int vf_idx, sb_idx, vfq_idx, qcount, first_vf; + u32 igu_entry, address; + u16 num_vf_queues; + + if (req_vfs == 0) + return 0; + + first_vf = bp->vfdb->sriov.first_vf_in_pf; + + /* statically distribute vf sb pool between VFs */ + num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, + BP_VFDB(bp)->vf_sbs_pool / req_vfs); + + /* zero previous values learned from igu cam */ + for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + vf->sb_count = 0; + vf_sb_count(BP_VF(bp, vf_idx)) = 0; + } + bp->vfdb->vf_sbs_pool = 0; + + /* prepare IGU cam */ + sb_idx = BP_VFDB(bp)->first_vf_igu_entry; + address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { + igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | + vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | + IGU_REG_MAPPING_MEMORY_VALID; + DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", + sb_idx, vf_idx); + REG_WR(bp, address, igu_entry); + sb_idx++; + address += IGU_ENTRY_SIZE; + } + } + + /* Reinitialize vf database according to igu cam */ + bnx2x_get_vf_igu_cam_info(bp); + DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", + BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); + + qcount = 0; + for_each_vf(bp, vf_idx) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + + /* set local queue arrays */ + vf->vfqs = &bp->vfdb->vfqs[qcount]; + qcount += vf_sb_count(vf); + } + + /* prepare msix vectors in VF configuration space */ + for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { + bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); + 
REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, + num_vf_queues); + } + bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); + + /* enable sriov. This will probe all the VFs, and consequentially cause + * the "acquire" messages to appear on the VF PF channel. + */ + DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); + pci_disable_sriov(bp->pdev); rc = pci_enable_sriov(bp->pdev, req_vfs); if (rc) { BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); @@ -3089,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp) pci_disable_sriov(bp->pdev); } -static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, - struct bnx2x_virtf **vf, - struct pf_vf_bulletin_content **bulletin) +int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, + struct pf_vf_bulletin_content **bulletin) { if (bp->state != BNX2X_STATE_OPEN) { BNX2X_ERR("vf ndo called though PF is down\n"); @@ -3114,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, *bulletin = BP_VF_BULLETIN(bp, vfidx); if (!*vf) { - BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", + BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", + vfidx); + return -EINVAL; + } + + if (!(*vf)->vfqs) { + BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", vfidx); return -EINVAL; } @@ -3142,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); if (rc) return rc; - mac_obj = &bnx2x_vfq(vf, 0, mac_obj); - vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); + mac_obj = &bnx2x_leading_vfq(vf, mac_obj); + vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); if (!mac_obj || !vlan_obj) { BNX2X_ERR("VF partially initialized\n"); return -EINVAL; @@ -3155,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx, ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ - mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, - 0, ETH_ALEN); - vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, - 0, VLAN_HLEN); + if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) + mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, + 0, ETH_ALEN); + if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) + vlan_obj->get_n_elements(bp, vlan_obj, 1, + (u8 *)&ivi->vlan, 0, + VLAN_HLEN); } else { /* mac */ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) @@ -3226,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) return rc; } - /* is vf initialized and queue set up? */ q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the mac in device on this vf's queue */ unsigned long ramrod_flags = 0; - struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); + struct bnx2x_vlan_mac_obj *mac_obj = + &bnx2x_leading_vfq(vf, mac_obj); + + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; /* must lock vfpf channel to protect against vf flows */ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); @@ -3293,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) /* is vf initialized and queue set up? 
*/ q_logical_state = - bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); + bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); if (vf->state == VF_ENABLED && q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { /* configure the vlan in device on this vf's queue */ unsigned long ramrod_flags = 0; unsigned long vlan_mac_flags = 0; struct bnx2x_vlan_mac_obj *vlan_obj = - &bnx2x_vfq(vf, 0, vlan_obj); + &bnx2x_leading_vfq(vf, vlan_obj); struct bnx2x_vlan_mac_ramrod_params ramrod_param; struct bnx2x_queue_state_params q_params = {NULL}; struct bnx2x_queue_update_params *update_params; + rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); + if (rc) + return rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* must lock vfpf channel to protect against vf flows */ @@ -3324,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); q_params.cmd = BNX2X_Q_CMD_UPDATE; - q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); + q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); update_params = &q_params.params.update; __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, &update_params->update_flags); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index d143a7cdbbb..059f0d460af 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -81,6 +81,7 @@ struct bnx2x_vf_queue { u32 cid; u16 index; u16 sb_idx; + bool is_leading; }; /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: @@ -194,6 +195,7 @@ struct bnx2x_virtf { #define VF_CFG_INT_SIMD 0x0008 #define VF_CACHE_LINE 0x0010 #define VF_CFG_VLAN 0x0020 +#define VF_CFG_STATS_COALESCE 0x0040 u8 state; #define VF_FREE 0 /* VF ready to be acquired holds no resc */ @@ -213,6 +215,7 @@ struct bnx2x_virtf { /* dma */ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ + u16 stats_stride; dma_addr_t spq_map; dma_addr_t bulletin_map; @@ -239,7 +242,10 @@ struct bnx2x_virtf { u8 igu_base_id; /* base igu status block id */ struct bnx2x_vf_queue *vfqs; -#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define LEADING_IDX 0 +#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) +#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) +#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) u8 index; /* index in the vf array */ u8 abs_vfid; @@ -358,6 +364,10 @@ struct bnx2x_vf_sp { struct client_init_ramrod_data init_data; struct client_update_ramrod_data update_data; } q_data; + + union { + struct eth_rss_update_ramrod_data e2; + } rss_rdata; }; struct hw_dma { @@ -403,6 +413,10 @@ struct bnx2x_vfdb { #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) u32 flrd_vfs[FLRD_VFS_DWORDS]; + + /* the number of msix vectors belonging to this PF designated for VFs */ + u16 vf_sbs_pool; + u16 first_vf_igu_entry; }; /* queue access */ @@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) return &(vf->vfqs[index]); } -static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq) -{ - return (vfq->index == 0); -} - /* FW ids */ static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) { @@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) { - return vfq_cl_id(vf, q); + if (vf->cfg_flags & VF_CFG_STATS_COALESCE) + return vf->leading_rss; + else + 
return vfq_cl_id(vf, q); } static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) @@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vfop_cmd *cmd); +int bnx2x_vfop_rss_cmd(struct bnx2x *bp, + struct bnx2x_virtf *vf, + struct bnx2x_vfop_cmd *cmd); + /* VF release ~ VF close + VF release-resources * * Release is the ultimate SW shutdown and is called whenever an @@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_release(struct bnx2x *bp); int bnx2x_vfpf_init(struct bnx2x *bp); void bnx2x_vfpf_close_vf(struct bnx2x *bp); -int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx); +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading); int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx); int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params); int bnx2x_vfpf_set_mcast(struct net_device *dev); int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); @@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp); void bnx2x_disable_sriov(struct bnx2x *bp); static inline int bnx2x_vf_headroom(struct bnx2x *bp) { - return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; + return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; } void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); @@ -793,10 +812,12 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} -static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; } +static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; } static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) {return 0; } +static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) {return 0; } static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index d63d1327b05..86436c77af0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) /* should be called under stats_sema */ static void __bnx2x_stats_start(struct bnx2x *bp) { - /* vfs travel through here as part of the statistics FSM, but no action - * is required - */ - if (IS_VF(bp)) - return; - - if (bp->port.pmf) - bnx2x_port_stats_init(bp); + if (IS_PF(bp)) { + if (bp->port.pmf) + bnx2x_port_stats_init(bp); - else if (bp->func_stx) - bnx2x_func_stats_init(bp); + else if (bp->func_stx) + bnx2x_func_stats_init(bp); - bnx2x_hw_stats_post(bp); - bnx2x_storm_stats_post(bp); + bnx2x_hw_stats_post(bp); + bnx2x_storm_stats_post(bp); + } bp->stats_started = true; } @@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, estats->mac_discard); } } + +void bnx2x_stats_safe_exec(struct bnx2x 
*bp, + void (func_to_exec)(void *cookie), + void *cookie){ + if (down_timeout(&bp->stats_sema, HZ/10)) + BNX2X_ERR("Unable to acquire stats lock\n"); + bnx2x_stats_comp(bp); + func_to_exec(cookie); + __bnx2x_stats_start(bp); + up(&bp->stats_sema); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 853824d258e..f35845006cd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h @@ -539,6 +539,9 @@ struct bnx2x; void bnx2x_memset_stats(struct bnx2x *bp); void bnx2x_stats_init(struct bnx2x *bp); void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); +void bnx2x_stats_safe_exec(struct bnx2x *bp, + void (func_to_exec)(void *cookie), + void *cookie); /** * bnx2x_save_statistics - save statistics when unloading. diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 2088063151d..6cfb8873245 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) /* humble our request */ req->resc_request.num_txqs = - bp->acquire_resp.resc.num_txqs; + min(req->resc_request.num_txqs, + bp->acquire_resp.resc.num_txqs); req->resc_request.num_rxqs = - bp->acquire_resp.resc.num_rxqs; + min(req->resc_request.num_rxqs, + bp->acquire_resp.resc.num_rxqs); req->resc_request.num_sbs = - bp->acquire_resp.resc.num_sbs; + min(req->resc_request.num_sbs, + bp->acquire_resp.resc.num_sbs); req->resc_request.num_mac_filters = - bp->acquire_resp.resc.num_mac_filters; + min(req->resc_request.num_mac_filters, + bp->acquire_resp.resc.num_mac_filters); req->resc_request.num_vlan_filters = - bp->acquire_resp.resc.num_vlan_filters; + min(req->resc_request.num_vlan_filters, + bp->acquire_resp.resc.num_vlan_filters); req->resc_request.num_mc_filters = - bp->acquire_resp.resc.num_mc_filters; + min(req->resc_request.num_mc_filters, + bp->acquire_resp.resc.num_mc_filters); /* Clear response buffer */ memset(&bp->vf2pf_mbox->resp, 0, @@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count) bp->common.flash_size = 0; bp->flags |= NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG; - bp->igu_sb_cnt = 1; + bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs; bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id; strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver, sizeof(bp->fw_ver)); @@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp) req->stats_addr = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats); + req->stats_stride = sizeof(struct per_queue_stats); + /* add list termination tlv */ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -452,12 +460,60 @@ free_irq: bnx2x_free_irq(bp); } +static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_queue *q) +{ + u8 cl_id = vfq_cl_id(vf, q); + u8 func_id = FW_VF_HANDLE(vf->abs_vfid); + + /* mac */ + bnx2x_init_mac_obj(bp, &q->mac_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, mac_rdata), + bnx2x_vf_sp_map(bp, vf, mac_rdata), + BNX2X_FILTER_MAC_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->macs_pool); + /* vlan */ + bnx2x_init_vlan_obj(bp, &q->vlan_obj, + cl_id, q->cid, func_id, + bnx2x_vf_sp(bp, vf, vlan_rdata), + bnx2x_vf_sp_map(bp, vf, 
vlan_rdata), + BNX2X_FILTER_VLAN_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX, + &bp->vlans_pool); + + /* mcast */ + bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, + q->cid, func_id, func_id, + bnx2x_vf_sp(bp, vf, mcast_rdata), + bnx2x_vf_sp_map(bp, vf, mcast_rdata), + BNX2X_FILTER_MCAST_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + /* rss */ + bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid, + func_id, func_id, + bnx2x_vf_sp(bp, vf, rss_rdata), + bnx2x_vf_sp_map(bp, vf, rss_rdata), + BNX2X_FILTER_RSS_CONF_PENDING, + &vf->filter_state, + BNX2X_OBJ_TYPE_RX_TX); + + vf->leading_rss = cl_id; + q->is_leading = true; +} + /* ask the pf to open a queue for the vf */ -int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) +int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, + bool is_leading) { struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; - struct bnx2x_fastpath *fp = &bp->fp[fp_idx]; + u8 fp_idx = fp->index; u16 tpa_agg_size = 0, flags = 0; int rc; @@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) tpa_agg_size = TPA_AGG_SIZE; } + if (is_leading) + flags |= VFPF_QUEUE_FLG_LEADING_RSS; + /* calculate queue flags */ flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; @@ -646,6 +705,71 @@ out: return 0; } +/* request pf to config rss table for vf queues*/ +int bnx2x_vfpf_config_rss(struct bnx2x *bp, + struct bnx2x_config_rss_params *params) +{ + struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; + struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; + int rc = 0; + + /* clear mailbox and prep first tlv */ + bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, + sizeof(*req)); + + /* add list termination tlv */ + bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key)); + req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; + req->rss_key_size = T_ETH_RSS_KEY; + req->rss_result_mask = params->rss_result_mask; + + /* flags handled individually for backward/forward compatability */ + if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED)) + req->rss_flags |= VFPF_RSS_MODE_DISABLED; + if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR)) + req->rss_flags |= VFPF_RSS_MODE_REGULAR; + if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH)) + req->rss_flags |= VFPF_RSS_SET_SRCH; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4)) + req->rss_flags |= VFPF_RSS_IPV4; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP)) + req->rss_flags |= VFPF_RSS_IPV4_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP)) + req->rss_flags |= VFPF_RSS_IPV4_UDP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6)) + req->rss_flags |= VFPF_RSS_IPV6; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP)) + req->rss_flags |= VFPF_RSS_IPV6_TCP; + if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP)) + req->rss_flags |= VFPF_RSS_IPV6_UDP; + + DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags); + + /* output tlvs list */ + bnx2x_dp_tlv_list(bp, req); + + /* send message to pf */ + rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); + if (rc) { + BNX2X_ERR("failed to send message to pf. 
rc was %d\n", rc); + goto out; + } + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n", + resp->hdr.status); + rc = -EINVAL; + } +out: + bnx2x_vfpf_finalize(bp, &req->first_tlv); + + return 0; +} + int bnx2x_vfpf_set_mcast(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, /* fill in pfdev info */ resp->pfdev_info.chip_num = bp->common.chip_id; - resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT); + resp->pfdev_info.db_size = bp->db_size; resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); @@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, /* record ghost addresses from vf message */ vf->spq_map = init->spq_addr; vf->fw_stat_map = init->stats_addr; + vf->stats_stride = init->stats_stride; vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr); + /* set VF multiqueue statistics collection mode */ + if (init->flags & VFPF_INIT_FLG_STATS_COALESCE) + vf->cfg_flags |= VF_CFG_STATS_COALESCE; + /* response */ bnx2x_vf_mbx_resp(bp, vf); } @@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags, __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); + if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS) + __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags); /* outer vlan removal is set according to PF's multi function mode */ if (IS_MF_SD(bp)) @@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_queue_init_params *init_p; struct bnx2x_queue_setup_params *setup_p; + if (bnx2x_vfq_is_leading(q)) + bnx2x_leading_vfq_init(bp, vf, q); + /* re-init the VF operation context */ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor)); setup_p = &vf->op_params.qctor.prep_qsetup; @@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_mbx_resp(bp, vf); } +static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf, + struct bnx2x_vf_mbx *mbx) +{ + struct bnx2x_vfop_cmd cmd = { + .done = bnx2x_vf_mbx_resp, + .block = false, + }; + struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss; + struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; + + if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE || + rss_tlv->rss_key_size != T_ETH_RSS_KEY) { + BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n", + vf->index); + vf->op_rc = -EINVAL; + goto mbx_resp; + } + + /* set vfop params according to rss tlv */ + memcpy(vf_op_params->ind_table, rss_tlv->ind_table, + T_ETH_INDIRECTION_TABLE_SIZE); + memcpy(vf_op_params->rss_key, rss_tlv->rss_key, + sizeof(rss_tlv->rss_key)); + vf_op_params->rss_obj = &vf->rss_conf_obj; + vf_op_params->rss_result_mask = rss_tlv->rss_result_mask; + + /* flags handled individually for backward/forward compatability */ + if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED) + __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR) + __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH) + __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4) + __set_bit(BNX2X_RSS_IPV4, 
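Worth noting in the VF-side hunk above: bnx2x_vfpf_config_rss() sets rc = -EINVAL when the PF rejects the request, but the function ends with return 0, so callers never see the failure; return rc appears to be what was intended.

On the PF side, the size check at the top of bnx2x_vf_mbx_update_rss() treats the VF-supplied TLV fields as untrusted input: the fixed-size memcpy()s are only reached when the advertised table and key sizes match exactly, and a mismatch is rejected rather than clamped. A self-contained model of that guard (sizes are illustrative stand-ins):

#include <string.h>
#include <errno.h>
#include <stdint.h>

#define IND_TABLE_SIZE 128   /* stands in for T_ETH_INDIRECTION_TABLE_SIZE */
#define RSS_KEY_WORDS  10    /* stands in for T_ETH_RSS_KEY */

struct rss_tlv {
        uint8_t  ind_table_size;
        uint8_t  rss_key_size;
        uint8_t  ind_table[IND_TABLE_SIZE];
        uint32_t rss_key[RSS_KEY_WORDS];
};

struct rss_params {
        uint8_t  ind_table[IND_TABLE_SIZE];
        uint32_t rss_key[RSS_KEY_WORDS];
};

static int apply_rss_tlv(struct rss_params *p, const struct rss_tlv *tlv)
{
        if (tlv->ind_table_size != IND_TABLE_SIZE ||
            tlv->rss_key_size != RSS_KEY_WORDS)
                return -EINVAL;         /* reject, don't clamp */

        memcpy(p->ind_table, tlv->ind_table, IND_TABLE_SIZE);
        memcpy(p->rss_key, tlv->rss_key, sizeof(tlv->rss_key));
        return 0;
}

int main(void)
{
        struct rss_tlv tlv = { .ind_table_size = IND_TABLE_SIZE,
                               .rss_key_size = RSS_KEY_WORDS };
        struct rss_params p;

        return apply_rss_tlv(&p, &tlv);
}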
&vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) + __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) + __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6) + __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) + __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags); + if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP) + __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags); + + if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) || + (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) && + rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) { + BNX2X_ERR("about to hit a FW assert. aborting...\n"); + vf->op_rc = -EINVAL; + goto mbx_resp; + } + + vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd); + +mbx_resp: + if (vf->op_rc) + bnx2x_vf_mbx_resp(bp, vf); +} + /* dispatch request */ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, struct bnx2x_vf_mbx *mbx) @@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, case CHANNEL_TLV_RELEASE: bnx2x_vf_mbx_release_vf(bp, vf, mbx); break; + case CHANNEL_TLV_UPDATE_RSS: + bnx2x_vf_mbx_update_rss(bp, vf, mbx); + break; } } else { @@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, /* test whether we can respond to the VF (do we have an address * for it?) */ - if (vf->state == VF_ACQUIRED) { + if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) { /* mbx_resp uses the op_rc of the VF */ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h index f3ad174a3a6..1179fe06d0c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h @@ -51,6 +51,7 @@ struct hw_sb_info { #define VFPF_QUEUE_FLG_COS 0x0080 #define VFPF_QUEUE_FLG_HC 0x0100 #define VFPF_QUEUE_FLG_DHC 0x0200 +#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400 #define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0) #define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1) @@ -131,6 +132,27 @@ struct vfpf_q_op_tlv { u8 padding[3]; }; +/* receive side scaling tlv */ +struct vfpf_rss_tlv { + struct vfpf_first_tlv first_tlv; + u32 rss_flags; +#define VFPF_RSS_MODE_DISABLED (1 << 0) +#define VFPF_RSS_MODE_REGULAR (1 << 1) +#define VFPF_RSS_SET_SRCH (1 << 2) +#define VFPF_RSS_IPV4 (1 << 3) +#define VFPF_RSS_IPV4_TCP (1 << 4) +#define VFPF_RSS_IPV4_UDP (1 << 5) +#define VFPF_RSS_IPV6 (1 << 6) +#define VFPF_RSS_IPV6_TCP (1 << 7) +#define VFPF_RSS_IPV6_UDP (1 << 8) + u8 rss_result_mask; + u8 ind_table_size; + u8 rss_key_size; + u8 padding; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY]; /* hash values */ +}; + /* acquire response tlv - carries the allocated resources */ struct pfvf_acquire_resp_tlv { struct pfvf_tlv hdr; @@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv { } resc; }; +#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set the VFs queues + * stats will be coalesced on + * the leading RSS queue + */ + /* Init VF */ struct vfpf_init_tlv { struct vfpf_first_tlv first_tlv; aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */ aligned_u64 spq_addr; aligned_u64 stats_addr; + u16 stats_stride; + u32 flags; + u32 padding[2]; }; /* Setup Queue */ @@ -293,13 +323,14 @@ union vfpf_tlvs { struct vfpf_q_op_tlv q_op; struct vfpf_setup_q_tlv setup_q; struct 
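The "about to hit a FW assert" guard above encodes a cross-flag dependency: the firmware can only hash UDP flows when the matching TCP hashing bit is also set, so UDP-without-TCP is rejected up front instead of letting the firmware assert later. The rule, isolated (bit positions copied from the TLV definitions below):

#include <stdint.h>
#include <stdio.h>

#define RSS_IPV4_TCP (1u << 4)
#define RSS_IPV4_UDP (1u << 5)
#define RSS_IPV6_TCP (1u << 7)
#define RSS_IPV6_UDP (1u << 8)

static int rss_flags_valid(uint32_t f)
{
        if ((f & RSS_IPV4_UDP) && !(f & RSS_IPV4_TCP))
                return 0;               /* UDP hashing needs the TCP bit */
        if ((f & RSS_IPV6_UDP) && !(f & RSS_IPV6_TCP))
                return 0;
        return 1;
}

int main(void)
{
        printf("%d %d\n", rss_flags_valid(RSS_IPV4_TCP | RSS_IPV4_UDP),
               rss_flags_valid(RSS_IPV4_UDP));
        return 0;
}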
vfpf_set_q_filters_tlv set_q_filters; - struct vfpf_release_tlv release; - struct channel_list_end_tlv list_end; + struct vfpf_release_tlv release; + struct vfpf_rss_tlv update_rss; + struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; union pfvf_tlvs { - struct pfvf_general_resp_tlv general_resp; + struct pfvf_general_resp_tlv general_resp; struct pfvf_acquire_resp_tlv acquire_resp; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; @@ -355,14 +386,18 @@ enum channel_tlvs { CHANNEL_TLV_INIT, CHANNEL_TLV_SETUP_Q, CHANNEL_TLV_SET_Q_FILTERS, + CHANNEL_TLV_ACTIVATE_Q, + CHANNEL_TLV_DEACTIVATE_Q, CHANNEL_TLV_TEARDOWN_Q, CHANNEL_TLV_CLOSE, CHANNEL_TLV_RELEASE, + CHANNEL_TLV_UPDATE_RSS_DEPRECATED, CHANNEL_TLV_PF_RELEASE_VF, CHANNEL_TLV_LIST_END, CHANNEL_TLV_FLR, CHANNEL_TLV_PF_SET_MAC, CHANNEL_TLV_PF_SET_VLAN, + CHANNEL_TLV_UPDATE_RSS, CHANNEL_TLV_MAX }; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 2e55ee29cf1..5701f3d1a16 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp) return false; } +static bool tg3_phy_led_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5719: + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) } return; } else if (do_low_power) { - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_FORCE_LED_OFF); + if (!tg3_phy_led_bug(tp)) + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_FORCE_LED_OFF); val = MII_TG3_AUXCTL_PCTL_100TX_LPWR | MII_TG3_AUXCTL_PCTL_SPR_ISOLATE | diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index 7cb148c495c..78d6d6b970e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c @@ -353,11 +353,9 @@ struct xgmac_extra_stats { /* Receive errors */ unsigned long rx_watchdog; unsigned long rx_da_filter_fail; - unsigned long rx_sa_filter_fail; unsigned long rx_payload_error; unsigned long rx_ip_header_error; /* Tx/Rx IRQ errors */ - unsigned long tx_undeflow; unsigned long tx_process_stopped; unsigned long rx_buf_unav; unsigned long rx_process_stopped; @@ -393,6 +391,7 @@ struct xgmac_priv { char rx_pause; char tx_pause; int wolopts; + struct work_struct tx_timeout_work; }; /* XGMAC Configuration Settings */ @@ -409,6 +408,9 @@ struct xgmac_priv { #define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s) #define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s) +#define tx_dma_ring_space(p) \ + dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ) + /* XGMAC Descriptor Access Helpers */ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) { @@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz) static inline int desc_get_buf_len(struct xgmac_dma_desc *p) { - u32 len = cpu_to_le32(p->flags); + u32 len = le32_to_cpu(p->buf_size); return (len & DESC_BUFFER1_SZ_MASK) + ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET); } @@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags) p->flags = cpu_to_le32(tmpflags); } +static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p) +{ + u32 tmpflags = le32_to_cpu(p->flags); + tmpflags &= 
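The CHANNEL_TLV_UPDATE_RSS_DEPRECATED entry in the enum above is the give-away that channel_tlvs is wire ABI between PF and VF drivers of different vintages: values are append-only, and a retired opcode keeps a named placeholder so every later entry retains its numeric value for old peers. In miniature:

enum channel_tlvs_model {
        TLV_ACQUIRE,                 /* 0 */
        TLV_UPDATE_RSS_DEPRECATED,   /* 1: retired, slot stays reserved */
        TLV_LIST_END,                /* 2: unchanged for old peers */
        TLV_UPDATE_RSS,              /* 3: replacement appended at the tail */
        TLV_MAX
};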
TXDESC_END_RING; + p->flags = cpu_to_le32(tmpflags); +} + static inline int desc_get_tx_ls(struct xgmac_dma_desc *p) { return le32_to_cpu(p->flags) & TXDESC_LAST_SEG; } +static inline int desc_get_tx_fs(struct xgmac_dma_desc *p) +{ + return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG; +} + static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p) { return le32_to_cpu(p->buf1_addr); @@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr, { u32 data; - data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); - writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); - data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; - writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + if (addr) { + data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0); + writel(data, ioaddr + XGMAC_ADDR_HIGH(num)); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + XGMAC_ADDR_LOW(num)); + } else { + writel(0, ioaddr + XGMAC_ADDR_HIGH(num)); + writel(0, ioaddr + XGMAC_ADDR_LOW(num)); + } } static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, @@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv) if (unlikely(skb == NULL)) break; - priv->rx_skbuff[entry] = skb; paddr = dma_map_single(priv->device, skb->data, - bufsz, DMA_FROM_DEVICE); + priv->dma_buf_sz - NET_IP_ALIGN, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->device, paddr)) { + dev_kfree_skb_any(skb); + break; + } + priv->rx_skbuff[entry] = skb; desc_set_buf_addr(p, paddr, priv->dma_buf_sz); } @@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv) return; for (i = 0; i < DMA_RX_RING_SZ; i++) { - if (priv->rx_skbuff[i] == NULL) + struct sk_buff *skb = priv->rx_skbuff[i]; + if (skb == NULL) continue; p = priv->dma_rx + i; dma_unmap_single(priv->device, desc_get_buf_addr(p), - priv->dma_buf_sz, DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_skbuff[i]); + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); priv->rx_skbuff[i] = NULL; } } static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) { - int i, f; + int i; struct xgmac_dma_desc *p; if (!priv->tx_skbuff) @@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv) continue; p = priv->dma_tx + i; - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), DMA_TO_DEVICE); - - for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) { - p = priv->dma_tx + i++; + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); - } - dev_kfree_skb_any(priv->tx_skbuff[i]); + if (desc_get_tx_ls(p)) + dev_kfree_skb_any(priv->tx_skbuff[i]); priv->tx_skbuff[i] = NULL; } } @@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv) */ static void xgmac_tx_complete(struct xgmac_priv *priv) { - int i; - while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { unsigned int entry = priv->tx_tail; struct sk_buff *skb = priv->tx_skbuff[entry]; @@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv) if (desc_get_owner(p)) break; - /* Verify tx error by looking at the last segment */ - if (desc_get_tx_ls(p)) - desc_get_tx_status(priv, p); - netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n", priv->tx_head, priv->tx_tail); - dma_unmap_single(priv->device, desc_get_buf_addr(p), - desc_get_buf_len(p), 
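Two fixes ride together in the xgmac_rx_refill() hunk above: dma_map_single() is now checked with dma_mapping_error(), and the skb pointer is stored into rx_skbuff[] only after the mapping succeeded, so a failed refill leaves no dangling entry behind. A generic kernel-style sketch of the ordering (desc_set_buf_addr() stands in for the driver's helper; this is not the driver's code):

static int rx_refill_one(struct device *dev, struct sk_buff *skb,
                         struct xgmac_dma_desc *p, struct sk_buff **slot,
                         int bufsz)
{
        dma_addr_t paddr = dma_map_single(dev, skb->data, bufsz,
                                          DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, paddr)) {
                dev_kfree_skb_any(skb);
                return -ENOMEM;
        }
        *slot = skb;                    /* publish only after success */
        desc_set_buf_addr(p, paddr, bufsz);
        return 0;
}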
DMA_TO_DEVICE); - - priv->tx_skbuff[entry] = NULL; - priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); - - if (!skb) { - continue; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - entry = priv->tx_tail = dma_ring_incr(priv->tx_tail, - DMA_TX_RING_SZ); - p = priv->dma_tx + priv->tx_tail; - + if (desc_get_tx_fs(p)) + dma_unmap_single(priv->device, desc_get_buf_addr(p), + desc_get_buf_len(p), DMA_TO_DEVICE); + else dma_unmap_page(priv->device, desc_get_buf_addr(p), desc_get_buf_len(p), DMA_TO_DEVICE); + + /* Check tx error on the last segment */ + if (desc_get_tx_ls(p)) { + desc_get_tx_status(priv, p); + dev_kfree_skb(skb); } - dev_kfree_skb(skb); + priv->tx_skbuff[entry] = NULL; + priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ); } - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > - MAX_SKB_FRAGS) + /* Ensure tx_tail is visible to xgmac_xmit */ + smp_mb(); + if (unlikely(netif_queue_stopped(priv->dev) && + (tx_dma_ring_space(priv) > MAX_SKB_FRAGS))) netif_wake_queue(priv->dev); } -/** - * xgmac_tx_err: - * @priv: pointer to the private device structure - * Description: it cleans the descriptors and restarts the transmission - * in case of errors. - */ -static void xgmac_tx_err(struct xgmac_priv *priv) +static void xgmac_tx_timeout_work(struct work_struct *work) { - u32 reg, value, inten; + u32 reg, value; + struct xgmac_priv *priv = + container_of(work, struct xgmac_priv, tx_timeout_work); - netif_stop_queue(priv->dev); + napi_disable(&priv->napi); - inten = readl(priv->base + XGMAC_DMA_INTR_ENA); writel(0, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_lock(priv->dev); + reg = readl(priv->base + XGMAC_DMA_CONTROL); writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL); do { @@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv) writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS, priv->base + XGMAC_DMA_STATUS); - writel(inten, priv->base + XGMAC_DMA_INTR_ENA); + netif_tx_unlock(priv->dev); netif_wake_queue(priv->dev); + + napi_enable(&priv->napi); + + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); } static int xgmac_hw_init(struct net_device *dev) @@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev) DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL; writel(value, ioaddr + XGMAC_DMA_BUS_MODE); - /* Enable interrupts */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); - writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + writel(0, ioaddr + XGMAC_DMA_INTR_ENA); /* Mask power mgt interrupt */ writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); @@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev) napi_enable(&priv->napi); netif_start_queue(dev); + /* Enable interrupts */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); + return 0; } @@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, paddr)) { dev_kfree_skb(skb); - return -EIO; + return NETDEV_TX_OK; } priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); @@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) paddr = skb_frag_dma_map(priv->device, frag, 0, len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, 
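The smp_mb() pair added above implements the standard lockless stop/wake handshake between ndo_start_xmit and the completion path: the producer stops the queue, issues a full barrier, then re-checks for space; the consumer advances tx_tail, issues a barrier, then checks whether the queue is stopped. Whichever side runs second is guaranteed to see the other's update, so a wakeup cannot be lost. The protocol in outline, as kernel-style fragments where ring_space() is a stand-in for tx_dma_ring_space():

/* producer side (ndo_start_xmit) */
if (ring_space(priv) <= MAX_SKB_FRAGS) {
        netif_stop_queue(dev);
        smp_mb();                       /* order stop vs. space re-check */
        if (ring_space(priv) > MAX_SKB_FRAGS)
                netif_start_queue(dev); /* raced with a completion */
}

/* consumer side (tx completion) */
priv->tx_tail = next_tail;
smp_mb();                               /* order tail update vs. stopped check */
if (netif_queue_stopped(dev) && ring_space(priv) > MAX_SKB_FRAGS)
        netif_wake_queue(dev);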
paddr)) { - dev_kfree_skb(skb); - return -EIO; - } + if (dma_mapping_error(priv->device, paddr)) + goto dma_err; entry = dma_ring_incr(entry, DMA_TX_RING_SZ); desc = priv->dma_tx + entry; - priv->tx_skbuff[entry] = NULL; + priv->tx_skbuff[entry] = skb; desc_set_buf_addr_and_size(desc, paddr, len); if (i < (nfrags - 1)) @@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG); + writel(1, priv->base + XGMAC_DMA_TX_POLL); + priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); - writel(1, priv->base + XGMAC_DMA_TX_POLL); - if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < - MAX_SKB_FRAGS) + /* Ensure tx_head update is visible to tx completion */ + smp_mb(); + if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) { netif_stop_queue(dev); + /* Ensure netif_stop_queue is visible to tx completion */ + smp_mb(); + if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS) + netif_start_queue(dev); + } + return NETDEV_TX_OK; +dma_err: + entry = priv->tx_head; + for ( ; i > 0; i--) { + entry = dma_ring_incr(entry, DMA_TX_RING_SZ); + desc = priv->dma_tx + entry; + priv->tx_skbuff[entry] = NULL; + dma_unmap_page(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + desc_clear_tx_owner(desc); + } + desc = first; + dma_unmap_single(priv->device, desc_get_buf_addr(desc), + desc_get_buf_len(desc), DMA_TO_DEVICE); + dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) skb_put(skb, frame_len); dma_unmap_single(priv->device, desc_get_buf_addr(p), - frame_len, DMA_FROM_DEVICE); + priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, priv->dev); skb->ip_summed = ip_checksum; @@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget) static void xgmac_tx_timeout(struct net_device *dev) { struct xgmac_priv *priv = netdev_priv(dev); - - /* Clear Tx resources and restart transmitting again */ - xgmac_tx_err(priv); + schedule_work(&priv->tx_timeout_work); } /** @@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) { use_hash = true; value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF; + } else { + use_hash = false; } netdev_for_each_mc_addr(ha, dev) { if (use_hash) { @@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev) } out: + for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++) + xgmac_set_mac_addr(ioaddr, NULL, reg); for (i = 0; i < XGMAC_NUM_HASH; i++) writel(hash_filter[i], ioaddr + XGMAC_HASH(i)); @@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id) static irqreturn_t xgmac_interrupt(int irq, void *dev_id) { u32 intr_status; - bool tx_err = false; struct net_device *dev = (struct net_device *)dev_id; struct xgmac_priv *priv = netdev_priv(dev); struct xgmac_extra_stats *x = &priv->xstats; @@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id) if (intr_status & DMA_STATUS_TPS) { netdev_err(priv->dev, "transmit process stopped\n"); x->tx_process_stopped++; - tx_err = true; + schedule_work(&priv->tx_timeout_work); } if (intr_status & DMA_STATUS_FBI) { netdev_err(priv->dev, "fatal bus error\n"); x->fatal_bus_error++; - tx_err = true; } - - if (tx_err) - xgmac_tx_err(priv); } /* TX/RX NORMAL interrupts */ @@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = { 
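Replacing the inline xgmac_tx_err() call with schedule_work() in the interrupt handler above is more than refactoring: the reworked recovery path calls napi_disable() and netif_tx_lock(), which can sleep or spin for a long time and are therefore off-limits in hard-IRQ context. The handler now only schedules, and the heavy lifting runs from a workqueue. Generic shape (xpriv and the reset step are made up for the sketch):

struct xpriv {
        struct napi_struct napi;
        struct work_struct tx_timeout_work;
};

static void tx_timeout_work_fn(struct work_struct *work)
{
        struct xpriv *p = container_of(work, struct xpriv, tx_timeout_work);

        napi_disable(&p->napi);         /* may sleep: forbidden in IRQ */
        /* ... reset and re-arm the hardware ... */
        napi_enable(&p->napi);
}

static irqreturn_t isr(int irq, void *data)
{
        struct xpriv *p = data;

        schedule_work(&p->tx_timeout_work); /* defer, don't recover here */
        return IRQ_HANDLED;
}

/* at probe time: INIT_WORK(&priv->tx_timeout_work, tx_timeout_work_fn); */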
XGMAC_STAT(rx_payload_error), XGMAC_STAT(rx_ip_header_error), XGMAC_STAT(rx_da_filter_fail), - XGMAC_STAT(rx_sa_filter_fail), XGMAC_STAT(fatal_bus_error), XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG), XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME), @@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev) ndev->netdev_ops = &xgmac_netdev_ops; SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops); spin_lock_init(&priv->stats_lock); + INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work); priv->device = &pdev->dev; priv->dev = ndev; @@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev) if (device_can_wakeup(priv->device)) priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ - ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; + ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL) ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 4058b856eb7..76ae09999b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1157,7 +1157,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new, */ void *cxgb_alloc_mem(unsigned long size) { - void *p = kzalloc(size, GFP_KERNEL); + void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!p) p = vzalloc(size); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2aafb809e06..dfd1e36f575 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -576,6 +576,7 @@ struct adapter { struct l2t_data *l2t; void *uld_handle[CXGB4_ULD_MAX]; struct list_head list_node; + struct list_head rcu_node; struct tid_info tids; void **tid_release_head; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5a3256b083f..0d0665ca6f1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -60,6 +60,7 @@ #include <linux/workqueue.h> #include <net/neighbour.h> #include <net/netevent.h> +#include <net/addrconf.h> #include <asm/uaccess.h> #include "cxgb4.h" @@ -68,6 +69,11 @@ #include "t4fw_api.h" #include "l2t.h" +#include <../drivers/net/bonding/bonding.h> + +#ifdef DRV_VERSION +#undef DRV_VERSION +#endif #define DRV_VERSION "2.0.0-ko" #define DRV_DESC "Chelsio T4/T5 Network Driver" @@ -400,6 +406,9 @@ static struct dentry *cxgb4_debugfs_root; static LIST_HEAD(adapter_list); static DEFINE_MUTEX(uld_mutex); +/* Adapter list to be accessed from atomic context */ +static LIST_HEAD(adap_rcu_list); +static DEFINE_SPINLOCK(adap_rcu_lock); static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; static const char *uld_str[] = { "RDMA", "iSCSI" }; @@ -1133,7 +1142,7 @@ out: release_firmware(fw); */ void *t4_alloc_mem(size_t size) { - void *p = kzalloc(size, GFP_KERNEL); + void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); if (!p) p = vzalloc(size); @@ -3227,6 +3236,38 @@ static int tid_init(struct tid_info *t) return 0; } +static int cxgb4_clip_get(const struct net_device *dev, + const struct in6_addr *lip) +{ + struct adapter *adap; + struct fw_clip_cmd c; + + adap = netdev2adap(dev); + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + c.alloc_to_len16 = 
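cxgb_alloc_mem() and t4_alloc_mem() above both gain __GFP_NOWARN for the same reason: the kzalloc() attempt is expected to fail for large tables, vzalloc() is the planned fallback, and an allocation-failure backtrace would be pure noise. This try-kmalloc-then-vmalloc idiom is what later kernels wrap up as kvzalloc()/kvfree(); the matching free side has to dispatch on the address type. Kernel-style sketch of the pair:

void *alloc_mem(unsigned long size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);      /* large sizes land here */
        return p;
}

void free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}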
htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); + *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); + *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + +static int cxgb4_clip_release(const struct net_device *dev, + const struct in6_addr *lip) +{ + struct adapter *adap; + struct fw_clip_cmd c; + + adap = netdev2adap(dev); + memset(&c, 0, sizeof(c)); + c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); + *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr); + *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8); + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); +} + /** * cxgb4_create_server - create an IP server * @dev: the device @@ -3246,6 +3287,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, struct sk_buff *skb; struct adapter *adap; struct cpl_pass_open_req *req; + int ret; skb = alloc_skb(sizeof(*req), GFP_KERNEL); if (!skb) @@ -3263,10 +3305,78 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid, req->opt0 = cpu_to_be64(TX_CHAN(chan)); req->opt1 = cpu_to_be64(CONN_POLICY_ASK | SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); - return t4_mgmt_tx(adap, skb); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); } EXPORT_SYMBOL(cxgb4_create_server); +/* cxgb4_create_server6 - create an IPv6 server + * @dev: the device + * @stid: the server TID + * @sip: local IPv6 address to bind server to + * @sport: the server's TCP port + * @queue: queue to direct messages from this server to + * + * Create an IPv6 server for the given port and address. + * Returns <0 on error and one of the %NET_XMIT_* values on success. + */ +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue) +{ + unsigned int chan; + struct sk_buff *skb; + struct adapter *adap; + struct cpl_pass_open_req6 *req; + int ret; + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + adap = netdev2adap(dev); + req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); + req->local_port = sport; + req->peer_port = htons(0); + req->local_ip_hi = *(__be64 *)(sip->s6_addr); + req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); + req->peer_ip_hi = cpu_to_be64(0); + req->peer_ip_lo = cpu_to_be64(0); + chan = rxq_to_chan(&adap->sge, queue); + req->opt0 = cpu_to_be64(TX_CHAN(chan)); + req->opt1 = cpu_to_be64(CONN_POLICY_ASK | + SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_create_server6); + +int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6) +{ + struct sk_buff *skb; + struct adapter *adap; + struct cpl_close_listsvr_req *req; + int ret; + + adap = netdev2adap(dev); + + skb = alloc_skb(sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); + req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? 
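The return net_xmit_eval(ret) conversions above fix a subtle contract problem: t4_mgmt_tx() reports NET_XMIT_* codes, where NET_XMIT_CN merely signals congestion, not failure. net_xmit_eval() folds congestion into success while letting drops and negative errnos through, which matches the "<0 on error and one of the %NET_XMIT_* values on success" documentation. The mapping, modeled in plain C:

#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP    0x01
#define NET_XMIT_CN      0x02   /* congestion notification */

/* same mapping as the kernel's net_xmit_eval() macro */
static int net_xmit_eval_model(int e)
{
        return e == NET_XMIT_CN ? 0 : e;
}

int main(void)
{
        printf("%d %d %d\n",
               net_xmit_eval_model(NET_XMIT_SUCCESS),   /* 0 */
               net_xmit_eval_model(NET_XMIT_CN),        /* 0: not an error */
               net_xmit_eval_model(NET_XMIT_DROP));     /* 1: real failure */
        return 0;
}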
LISTSVR_IPV6(1) : + LISTSVR_IPV6(0)) | QUEUENO(queue)); + ret = t4_mgmt_tx(adap, skb); + return net_xmit_eval(ret); +} +EXPORT_SYMBOL(cxgb4_remove_server); + /** * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU * @mtus: the HW MTU table @@ -3721,6 +3831,10 @@ static void attach_ulds(struct adapter *adap) { unsigned int i; + spin_lock(&adap_rcu_lock); + list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list); + spin_unlock(&adap_rcu_lock); + mutex_lock(&uld_mutex); list_add_tail(&adap->list_node, &adapter_list); for (i = 0; i < CXGB4_ULD_MAX; i++) @@ -3746,6 +3860,10 @@ static void detach_ulds(struct adapter *adap) netevent_registered = false; } mutex_unlock(&uld_mutex); + + spin_lock(&adap_rcu_lock); + list_del_rcu(&adap->rcu_node); + spin_unlock(&adap_rcu_lock); } static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) @@ -3809,6 +3927,168 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) } EXPORT_SYMBOL(cxgb4_unregister_uld); +/* Check if netdev on which event is occured belongs to us or not. Return + * suceess (1) if it belongs otherwise failure (0). + */ +static int cxgb4_netdev(struct net_device *netdev) +{ + struct adapter *adap; + int i; + + spin_lock(&adap_rcu_lock); + list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) + for (i = 0; i < MAX_NPORTS; i++) + if (adap->port[i] == netdev) { + spin_unlock(&adap_rcu_lock); + return 1; + } + spin_unlock(&adap_rcu_lock); + return 0; +} + +static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, + unsigned long event) +{ + int ret = NOTIFY_DONE; + + rcu_read_lock(); + if (cxgb4_netdev(event_dev)) { + switch (event) { + case NETDEV_UP: + ret = cxgb4_clip_get(event_dev, + (const struct in6_addr *)ifa->addr.s6_addr); + if (ret < 0) { + rcu_read_unlock(); + return ret; + } + ret = NOTIFY_OK; + break; + case NETDEV_DOWN: + cxgb4_clip_release(event_dev, + (const struct in6_addr *)ifa->addr.s6_addr); + ret = NOTIFY_OK; + break; + default: + break; + } + } + rcu_read_unlock(); + return ret; +} + +static int cxgb4_inet6addr_handler(struct notifier_block *this, + unsigned long event, void *data) +{ + struct inet6_ifaddr *ifa = data; + struct net_device *event_dev; + int ret = NOTIFY_DONE; + struct bonding *bond = netdev_priv(ifa->idev->dev); + struct slave *slave; + struct pci_dev *first_pdev = NULL; + + if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) { + event_dev = vlan_dev_real_dev(ifa->idev->dev); + ret = clip_add(event_dev, ifa, event); + } else if (ifa->idev->dev->flags & IFF_MASTER) { + /* It is possible that two different adapters are bonded in one + * bond. We need to find such different adapters and add clip + * in all of them only once. + */ + read_lock(&bond->lock); + bond_for_each_slave(bond, slave) { + if (!first_pdev) { + ret = clip_add(slave->dev, ifa, event); + /* If clip_add is success then only initialize + * first_pdev since it means it is our device + */ + if (ret == NOTIFY_OK) + first_pdev = to_pci_dev( + slave->dev->dev.parent); + } else if (first_pdev != + to_pci_dev(slave->dev->dev.parent)) + ret = clip_add(slave->dev, ifa, event); + } + read_unlock(&bond->lock); + } else + ret = clip_add(ifa->idev->dev, ifa, event); + + return ret; +} + +static struct notifier_block cxgb4_inet6addr_notifier = { + .notifier_call = cxgb4_inet6addr_handler +}; + +/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with + * a physical device. + * The physical device reference is needed to send the actul CLIP command. 
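adap_rcu_list above exists because the existing adapter_list is walked under uld_mutex, which the inet6addr notifier cannot take in atomic context. Writers serialize on a spinlock while adding and removing entries with the _rcu list helpers; readers can then walk the list locklessly. (cxgb4_netdev() above takes the spinlock on the read side as well; the canonical reader needs only rcu_read_lock(), as sketched here, plus an RCU grace period before an adapter is freed.) Generic kernel-style sketch:

struct adapter_m {
        struct list_head rcu_node;
        struct net_device *port[MAX_NPORTS];
};

static LIST_HEAD(adap_list);
static DEFINE_SPINLOCK(adap_lock);

static void adapter_attach(struct adapter_m *adap)
{
        spin_lock(&adap_lock);          /* writers serialize */
        list_add_tail_rcu(&adap->rcu_node, &adap_list);
        spin_unlock(&adap_lock);
}

static bool adapter_owns(const struct net_device *dev)
{
        struct adapter_m *adap;
        bool found = false;
        int i;

        rcu_read_lock();                /* lockless read side */
        list_for_each_entry_rcu(adap, &adap_list, rcu_node)
                for (i = 0; i < MAX_NPORTS; i++)
                        if (adap->port[i] == dev) {
                                found = true;
                                goto out;
                        }
out:
        rcu_read_unlock();
        return found;
}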
+ */ +static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) +{ + struct inet6_dev *idev = NULL; + struct inet6_ifaddr *ifa; + int ret = 0; + + idev = __in6_dev_get(root_dev); + if (!idev) + return ret; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + ret = cxgb4_clip_get(dev, + (const struct in6_addr *)ifa->addr.s6_addr); + if (ret < 0) + break; + } + read_unlock_bh(&idev->lock); + + return ret; +} + +static int update_root_dev_clip(struct net_device *dev) +{ + struct net_device *root_dev = NULL; + int i, ret = 0; + + /* First populate the real net device's IPv6 addresses */ + ret = update_dev_clip(dev, dev); + if (ret) + return ret; + + /* Parse all bond and vlan devices layered on top of the physical dev */ + for (i = 0; i < VLAN_N_VID; i++) { + root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i); + if (!root_dev) + continue; + + ret = update_dev_clip(root_dev, dev); + if (ret) + break; + } + return ret; +} + +static void update_clip(const struct adapter *adap) +{ + int i; + struct net_device *dev; + int ret; + + rcu_read_lock(); + + for (i = 0; i < MAX_NPORTS; i++) { + dev = adap->port[i]; + ret = 0; + + if (dev) + ret = update_root_dev_clip(dev); + + if (ret < 0) + break; + } + rcu_read_unlock(); +} + /** * cxgb_up - enable the adapter * @adap: adapter being enabled @@ -3854,6 +4134,7 @@ static int cxgb_up(struct adapter *adap) t4_intr_enable(adap); adap->flags |= FULL_INIT_DONE; notify_ulds(adap, CXGB4_STATE_UP); + update_clip(adap); out: return err; irq_err: @@ -5870,11 +6151,15 @@ static int __init cxgb4_init_module(void) ret = pci_register_driver(&cxgb4_driver); if (ret < 0) debugfs_remove(cxgb4_debugfs_root); + + register_inet6addr_notifier(&cxgb4_inet6addr_notifier); + return ret; } static void __exit cxgb4_cleanup_module(void) { + unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); pci_unregister_driver(&cxgb4_driver); debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ flush_workqueue(workq); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 4faf4d067ee..6f21f2451c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -154,6 +154,11 @@ struct in6_addr; int cxgb4_create_server(const struct net_device *dev, unsigned int stid, __be32 sip, __be16 sport, __be16 vlan, unsigned int queue); +int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, + const struct in6_addr *sip, __be16 sport, + unsigned int queue); +int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, + unsigned int queue, bool ipv6); int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, __be32 sip, __be16 sport, __be16 vlan, unsigned int queue, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 01d48444120..cd6874b571e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -320,6 +320,21 @@ struct cpl_act_open_req6 { __be32 opt2; }; +struct cpl_t5_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 rsvd; + __be32 opt2; + __be64 params; +}; + struct cpl_act_open_rpl { union opcode_tid ot; __be32 atid_status; @@ -405,7 +420,7 @@ struct cpl_close_listsvr_req { WR_HDR; union opcode_tid ot; __be16 
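update_dev_clip() above shows the standard way to enumerate a device's IPv6 addresses: look up the inet6_dev with __in6_dev_get() (valid under RCU), then walk addr_list under idev->lock with BH disabled. A reusable kernel-style skeleton of that walk; the callback split is this sketch's invention, not the driver's structure:

static int for_each_v6_addr(struct net_device *dev,
                            int (*fn)(const struct in6_addr *addr, void *arg),
                            void *arg)
{
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct inet6_ifaddr *ifa;
        int ret = 0;

        if (!idev)
                return 0;               /* no IPv6 on this device */

        read_lock_bh(&idev->lock);
        list_for_each_entry(ifa, &idev->addr_list, if_list) {
                ret = fn(&ifa->addr, arg);
                if (ret < 0)
                        break;
        }
        read_unlock_bh(&idev->lock);

        return ret;
}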
reply_ctrl; -#define LISTSVR_IPV6 (1 << 14) +#define LISTSVR_IPV6(x) ((x) << 14) __be16 rsvd; }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index d1c755f78aa..6f77ac48774 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -616,6 +616,7 @@ enum fw_cmd_opcodes { FW_RSS_IND_TBL_CMD = 0x20, FW_RSS_GLB_CONFIG_CMD = 0x22, FW_RSS_VI_CONFIG_CMD = 0x23, + FW_CLIP_CMD = 0x28, FW_LASTC2E_CMD = 0x40, FW_ERROR_CMD = 0x80, FW_DEBUG_CMD = 0x81, @@ -2062,6 +2063,28 @@ struct fw_rss_vi_config_cmd { } u; }; +struct fw_clip_cmd { + __be32 op_to_write; + __be32 alloc_to_len16; + __be64 ip_hi; + __be64 ip_lo; + __be32 r4[2]; +}; + +#define S_FW_CLIP_CMD_ALLOC 31 +#define M_FW_CLIP_CMD_ALLOC 0x1 +#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC) +#define G_FW_CLIP_CMD_ALLOC(x) \ + (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC) +#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U) + +#define S_FW_CLIP_CMD_FREE 30 +#define M_FW_CLIP_CMD_FREE 0x1 +#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE) +#define G_FW_CLIP_CMD_FREE(x) \ + (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE) +#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U) + enum fw_error_type { FW_ERROR_TYPE_EXCEPTION = 0x0, FW_ERROR_TYPE_HWMODULE = 0x1, diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index be167318015..e9f7c656ddd 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -32,12 +32,12 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.1.1.43" +#define DRV_VERSION "2.1.1.50" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 -#define ENIC_WQ_MAX 1 +#define ENIC_WQ_MAX 8 #define ENIC_RQ_MAX 8 #define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index bcf15b176f4..7b756cf9474 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, completed_index, enic_wq_free_buf, opaque); - if (netif_queue_stopped(enic->netdev) && + if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && vnic_wq_desc_avail(&enic->wq[q_number]) >= (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) - netif_wake_queue(enic->netdev); + netif_wake_subqueue(enic->netdev, q_number); spin_unlock(&enic->wq_lock[q_number]); @@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data) static irqreturn_t enic_isr_msix_wq(int irq, void *data) { struct enic *enic = data; - unsigned int cq = enic_cq_wq(enic, 0); - unsigned int intr = enic_msix_wq_intr(enic, 0); + unsigned int cq; + unsigned int intr; unsigned int wq_work_to_do = -1; /* no limit */ unsigned int wq_work_done; + unsigned int wq_irq; + + wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector; + cq = enic_cq_wq(enic, wq_irq); + intr = enic_msix_wq_intr(enic, wq_irq); wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, enic_wq_service, NULL); @@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); - struct vnic_wq *wq = &enic->wq[0]; + struct vnic_wq *wq; unsigned long flags; 
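The S_/M_/V_/G_/F_ defines added for FW_CLIP_CMD above are Chelsio's stock bit-field kit: S_* is the shift, M_* the unshifted mask, V_*(x) positions a value, G_*(x) extracts one, and F_* is the convenience single-bit form. A userspace demonstration with the same 31-bit ALLOC field:

#include <stdio.h>

#define S_ALLOC 31
#define M_ALLOC 0x1U
#define V_ALLOC(x) ((x) << S_ALLOC)
#define G_ALLOC(x) (((x) >> S_ALLOC) & M_ALLOC)
#define F_ALLOC V_ALLOC(1U)

int main(void)
{
        unsigned int cmd = F_ALLOC | 0x1234;    /* set flag, keep payload */

        printf("alloc=%u payload=0x%x\n", G_ALLOC(cmd), cmd & 0xffff);
        return 0;
}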
+ unsigned int txq_map; if (skb->len <= 0) { dev_kfree_skb(skb); return NETDEV_TX_OK; } + txq_map = skb_get_queue_mapping(skb) % enic->wq_count; + wq = &enic->wq[txq_map]; + /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, * which is very likely. In the off chance it's going to take * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. @@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } - spin_lock_irqsave(&enic->wq_lock[0], flags); + spin_lock_irqsave(&enic->wq_lock[txq_map], flags); if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { - netif_stop_queue(netdev); + netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); /* This is a hard error, log it */ netdev_err(netdev, "BUG! Tx ring full when queue awake!\n"); - spin_unlock_irqrestore(&enic->wq_lock[0], flags); + spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); return NETDEV_TX_BUSY; } enic_queue_wq_skb(enic, wq, skb); if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) - netif_stop_queue(netdev); + netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map)); - spin_unlock_irqrestore(&enic->wq_lock[0], flags); + spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags); return NETDEV_TX_OK; } @@ -1025,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); + skb_record_rx_queue(skb, q_number); + if (netdev->features & NETIF_F_RXHASH) { + skb->rxhash = rss_hash; + if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4)) + skb->l4_rxhash = true; + } if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) { skb->csum = htons(checksum); @@ -1369,7 +1386,7 @@ static int enic_open(struct net_device *netdev) enic_set_rx_mode(netdev); - netif_wake_queue(netdev); + netif_tx_wake_all_queues(netdev); for (i = 0; i < enic->rq_count; i++) napi_enable(&enic->napi[i]); @@ -2032,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * instance data is initialized to zero. */ - netdev = alloc_etherdev(sizeof(struct enic)); + netdev = alloc_etherdev_mqs(sizeof(struct enic), + ENIC_RQ_MAX, ENIC_WQ_MAX); if (!netdev) return -ENOMEM; @@ -2062,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 40-bit first, and + * limitation for the device. Try 64-bit first, and * fail to 32-bit. 
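The enic transmit path above becomes multiqueue-aware in three coordinated moves: derive the WQ index from the skb's queue mapping (modulo the configured count), take that queue's lock, and stop or wake only the corresponding subqueue instead of the whole device. The skeleton, with hypothetical ring helpers (ring_has_room, queue_skb) standing in for the vnic_wq calls:

static netdev_tx_t xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct priv *p = netdev_priv(dev);
        unsigned int q = skb_get_queue_mapping(skb) % p->wq_count;
        struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

        if (!ring_has_room(&p->wq[q], MAX_SKB_FRAGS + 1)) {
                netif_tx_stop_queue(txq);   /* only this subqueue */
                return NETDEV_TX_BUSY;
        }
        queue_skb(&p->wq[q], skb);
        return NETDEV_TX_OK;
}

This pairs with alloc_etherdev_mqs() and netif_set_real_num_tx_queues() at probe time, visible further down, so the stack actually spreads flows across the queues.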
*/ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -2080,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 40); + "for consistent allocations, aborting\n", 64); goto err_out_release_regions; } using_dac = 1; @@ -2198,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_dev_close; } + netif_set_real_num_tx_queues(netdev, enic->wq_count); + netif_set_real_num_rx_queues(netdev, enic->rq_count); + /* Setup notification timer, HW reset task, and wq locks */ @@ -2246,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ENIC_SETTING(enic, TSO)) netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; + if (ENIC_SETTING(enic, RSS)) + netdev->hw_features |= NETIF_F_RXHASH; if (ENIC_SETTING(enic, RXCSUM)) netdev->hw_features |= NETIF_F_RXCSUM; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 97455c573db..69dd92598b7 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, { return vdev->res[type].count; } +EXPORT_SYMBOL(vnic_dev_get_res_count); void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, unsigned int index) @@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, return (char __iomem *)vdev->res[type].vaddr; } } +EXPORT_SYMBOL(vnic_dev_get_res); static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size) @@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev) kfree(vdev); } } +EXPORT_SYMBOL(vnic_dev_unregister); struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, @@ -969,6 +972,13 @@ err_out: vnic_dev_unregister(vdev); return NULL; } +EXPORT_SYMBOL(vnic_dev_register); + +struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev) +{ + return vdev->pdev; +} +EXPORT_SYMBOL(vnic_dev_get_pdev); int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len) { diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index f3d9b79ba77..e670029862a 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev, struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar, unsigned int num_bars); +struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev); int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len); int vnic_dev_enable2(struct vnic_dev *vdev, int active); int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index e104db7fcf2..3224d28cdad 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ 
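The enic probe change above widens the DMA mask negotiation from 40 to 64 bits; the negotiation itself is the classic widest-first fallback, keeping the streaming and coherent masks consistent. A kernel-style sketch of the shape (modern kernels collapse this into a single dma_set_mask_and_coherent() call):

static int set_dma_masks(struct pci_dev *pdev)
{
        int err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));

        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err)
                        return err;     /* no usable mask */
                return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        }
        return pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}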
-4476,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); + status = be_fw_wait_ready(adapter); + if (status) + return status; + /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index ae236009f1a..0120217a16d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -296,6 +296,9 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; + unsigned short tx_ring_size; + unsigned short rx_ring_size; + struct platform_device *pdev; int opened; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0cd5e4b8b54..f9aacf5d852 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); static int mii_cnt; -static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex) +static inline +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) { - struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; - if (is_ex) - return (struct bufdesc *)(ex + 1); + struct bufdesc *new_bd = bdp + 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? + ex_base : ex_new_bd); else - return bdp + 1; + return (new_bd >= (base + ring_size)) ? + base : new_bd; } -static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex) +static inline +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) { - struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp; - if (is_ex) - return (struct bufdesc *)(ex - 1); + struct bufdesc *new_bd = bdp - 1; + struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct bufdesc_ex *ex_base; + struct bufdesc *base; + int ring_size; + + if (bdp >= fep->tx_bd_base) { + base = fep->tx_bd_base; + ring_size = fep->tx_ring_size; + ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + } else { + base = fep->rx_bd_base; + ring_size = fep->rx_ring_size; + ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + } + + if (fep->bufdesc_ex) + return (struct bufdesc *)((ex_new_bd < ex_base) ? + (ex_new_bd + ring_size) : ex_new_bd); else - return bdp - 1; + return (new_bd < base) ? (new_bd + ring_size) : new_bd; } static void *swap_buffer(void *bufaddr, int len) @@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } - bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp_pre = fec_enet_get_prevdesc(bdp, fep); if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { fep->delay_work.trig_tx = true; @@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } /* If this was the last BD in the ring, start at the beginning again. 
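The rewritten FEC descriptor helpers above switch from a WRAP flag stored in the descriptor to wrapping on the ring bounds, computed from the base pointer and ring size. That is what lets the later hunks drop every BD_ENET_TX_WRAP/BD_ENET_RX_WRAP test, and it is what makes the ring sizes runtime-configurable. The core of the idea in plain, runnable C:

#include <stdio.h>

struct bd { unsigned short sc; };

/* advance one element and wrap on the bounds, no flag needed */
static struct bd *next_desc(struct bd *bdp, struct bd *base, int ring_size)
{
        struct bd *n = bdp + 1;

        return n >= base + ring_size ? base : n;
}

int main(void)
{
        struct bd ring[4];
        struct bd *p = &ring[3];

        p = next_desc(p, ring, 4);
        printf("wrapped to index %ld\n", (long)(p - ring));
        return 0;
}

The driver's version additionally picks the element stride (plain or extended descriptor) at run time, which the WRAP-flag scheme handled for free but pointer arithmetic must do explicitly.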
*/ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); fep->cur_tx = bdp; @@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev) /* Initialize the receive buffer descriptors. */ bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) bdp->cbd_sc = BD_ENET_RX_EMPTY; else bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; fep->cur_rx = fep->rx_bd_base; @@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev) /* ...and the same for transmit */ bdp = fep->tx_bd_base; fep->cur_tx = bdp; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < fep->tx_ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = 0; @@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev) fep->tx_skbuff[i] = NULL; } bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; fep->dirty_tx = bdp; } @@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex) writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); if (fep->bufdesc_ex) writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); else writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); + * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); for (i = 0; i <= TX_RING_MOD_MASK; i++) { @@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev) bdp = fep->dirty_tx; /* get next bdp of dirty_tx */ - if (bdp->cbd_sc & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { @@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev) fep->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - if (status & BD_ENET_TX_WRAP) - bdp = fep->tx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); /* Since we have freed up a buffer, the ring is no longer full */ @@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget) htons(ETH_P_8021Q), vlan_tag); - if (!skb_defer_rx_timestamp(skb)) - napi_gro_receive(&fep->napi, skb); + napi_gro_receive(&fep->napi, skb); } bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, @@ -993,10 +1018,8 @@ rx_processing_done: } /* Update BD pointer to next entry */ - if (status & BD_ENET_RX_WRAP) - bdp = fep->rx_bd_base; - else - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); + /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. 
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) struct bufdesc *bdp; bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { skb = fep->rx_skbuff[i]; if (bdp->cbd_bufaddr) @@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev) FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (skb) dev_kfree_skb(skb); - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) + for (i = 0; i < fep->tx_ring_size; i++) kfree(fep->tx_bounce[i]); } @@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) struct bufdesc *bdp; bdp = fep->rx_bd_base; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < fep->rx_ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) { fec_enet_free_buffers(ndev); @@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; bdp = fep->tx_bd_base; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < fep->tx_ring_size; i++) { fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); bdp->cbd_sc = 0; @@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_nextdesc(bdp, fep); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); + bdp = fec_enet_get_prevdesc(bdp, fep); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev) /* Get the Ethernet address */ fec_get_mac(ndev); + /* init the tx & rx ring size */ + fep->tx_ring_size = TX_RING_SIZE; + fep->rx_ring_size = RX_RING_SIZE; + /* Set receive and transmit descriptor base. */ fep->rx_bd_base = cbd_base; if (fep->bufdesc_ex) fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE); + (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); else - fep->tx_bd_base = cbd_base + RX_RING_SIZE; + fep->tx_bd_base = cbd_base + fep->rx_ring_size; /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index 1fde90b9668..bdf5023724e 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) /* Check to see if the NIC has been initialized via nic_open, * before trying to read statistic registers. 
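With the ring sizes moved into fec_enet_private, fec_enet_init() above carves the RX and TX descriptor regions out of one contiguous allocation: the TX base is simply the RX base advanced by rx_ring_size elements of whichever descriptor type is active. Simplified here to the non-extended layout:

#include <stddef.h>

struct bufdesc { unsigned short sc; unsigned short len; unsigned long addr; };

struct fep_model {
        struct bufdesc *rx_bd_base, *tx_bd_base;
        unsigned short rx_ring_size, tx_ring_size;
};

static void init_bases(struct fep_model *fep, struct bufdesc *cbd_base)
{
        fep->rx_ring_size = 128;        /* illustrative RX_RING_SIZE */
        fep->tx_ring_size = 16;         /* illustrative TX_RING_SIZE */
        fep->rx_bd_base = cbd_base;
        fep->tx_bd_base = cbd_base + fep->rx_ring_size; /* TX follows RX */
}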
*/ - if (!test_bit(__LINK_STATE_START, &dev->state)) + if (!netif_running(dev)) return &sp->stats; sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 7fbe6abf605..23de82a9da8 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev, jwrite32(jme, JME_APMC, apmc); } - NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) + NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT) spin_lock_init(&jme->phy_lock); spin_lock_init(&jme->macaddr_lock); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2777c70c603..e35bac7cfdf 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -138,7 +138,9 @@ #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVNETA_GMAC_AN_SPEED_EN BIT(7) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_MIB_COUNTERS_BASE 0x3080 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -948,6 +950,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); + /* Disable PHY polling in hardware, since we're using the + * kernel phylib to do this. + */ + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2340,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev) val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); + MVNETA_GMAC_CONFIG_FULL_DUPLEX | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); if (phydev->duplex) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; @@ -2473,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev) return 0; } +static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mvneta_port *pp = netdev_priv(dev); + int ret; + + if (!pp->phy_dev) + return -ENOTSUPP; + + ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd); + if (!ret) + mvneta_adjust_link(dev); + + return ret; +} + /* Ethtool methods */ /* Get settings (phy address, speed) for ethtools */ @@ -2591,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_change_mtu = mvneta_change_mtu, .ndo_tx_timeout = mvneta_tx_timeout, .ndo_get_stats64 = mvneta_get_stats64, + .ndo_do_ioctl = mvneta_ioctl, }; const struct ethtool_ops mvneta_eth_tool_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index f984a89c27d..dd687632111 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc) int log_rq_stride = qpc->rq_size_stride & 7; int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1; int rss = (be32_to_cpu(qpc->flags) >> 13) & 1; - int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1; + u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff; + int xrc = (ts == MLX4_QP_ST_XRC) ? 
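mvneta_ioctl() above is the usual phylib delegation: with the kernel PHY library owning the PHY, MII ioctls are simply forwarded via phy_mii_ioctl(), and since a successful SIOCSMIIREG can change speed or duplex, the link state is refreshed afterwards. Generic shape; priv and the refresh hook are placeholders, not mvneta's names:

static int ndo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct priv *p = netdev_priv(dev);
        int ret;

        if (!p->phy_dev)
                return -ENOTSUPP;       /* no PHY bound yet */

        ret = phy_mii_ioctl(p->phy_dev, ifr, cmd);
        if (!ret)
                adjust_link(dev);       /* hypothetical link refresh */
        return ret;
}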
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index f984a89c27d..dd687632111 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
 	int log_rq_stride = qpc->rq_size_stride & 7;
 	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
 	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
-	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
 	int sq_size;
 	int rq_size;
 	int total_pages;
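The resource-tracker fix above stops inferring XRC-ness from a bit in local_qpn and instead compares the transport service type carried in bits 23..16 of the big-endian context flags word. A standalone model of that extraction follows; the 0x6 value used for XRC here is an assumption for the demo, standing in for MLX4_QP_ST_XRC:

/* Illustrative only -- extract an 8-bit transport service type from
 * bits 23..16 of a big-endian flags word, as the fix above does. */
#include <stdio.h>
#include <stdint.h>
#include <endian.h>

#define QP_ST_XRC 0x6   /* assumed stand-in for MLX4_QP_ST_XRC */

int main(void)
{
        uint32_t flags_be = htobe32(QP_ST_XRC << 16); /* as read from the context */
        uint32_t ts = (be32toh(flags_be) >> 16) & 0xff; /* transport service type */
        int xrc = (ts == QP_ST_XRC) ? 1 : 0;

        printf("ts = 0x%x, xrc = %d\n", ts, xrc);
        return 0;
}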
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 21962828925..157fe8df2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,13 +6,3 @@ config MLX5_CORE
 	tristate
 	depends on PCI && X86
 	default n
-
-config MLX5_DEBUG
-	bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
-	depends on MLX5_CORE
-	default y
-	---help---
-	  This option causes debugging code to be compiled into the
-	  mlx5_core driver.  The output can be turned on via the
-	  debug_mask module parameter (which can also be set after
-	  the driver is loaded through sysfs).
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 3fe09ab2d7c..32675e16021 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1171,7 +1171,6 @@ typedef struct {
 
 #define NETXEN_DB_MAPSIZE_BYTES		0x1000
 
-#define NETXEN_NETDEV_WEIGHT	128
 #define NETXEN_ADAPTER_UP_MAGIC	777
 #define NETXEN_NIC_PEG_TUNE	0
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1046e946150..cbd75f97ffb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		netif_napi_add(netdev, &sds_ring->napi,
-				netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+				netxen_nic_poll, NAPI_POLL_WEIGHT);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 46159609638..88349b8fa39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -946,7 +946,6 @@ struct qlcnic_ipaddr {
 #define QLCNIC_PCI_REG_MSIX_TBL		0x44
 #define QLCNIC_MSIX_TBL_PGSIZE		4096
 
-#define QLCNIC_NETDEV_WEIGHT	128
 #define QLCNIC_ADAPTER_UP_MAGIC	777
 
 #define __QLCNIC_FW_ATTACHED		0
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a1818dae47b..3ca00e05f23 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3148,7 +3148,7 @@ int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
 	status = qlcnic_83xx_set_port_config(adapter);
 	if (status) {
 		dev_info(&adapter->pdev->dev,
-			 "Faild to Set Link Speed and autoneg.\n");
+			 "Failed to Set Link Speed and autoneg.\n");
 		adapter->ahw->port_config = config;
 	}
 	return status;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 8d06f884818..11b4bb83b93 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1458,7 +1458,7 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
 int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 			 struct net_device *netdev)
 {
-	int ring, max_sds_rings;
+	int ring;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 	struct qlcnic_host_tx_ring *tx_ring;
@@ -1466,25 +1466,22 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
 		return -ENOMEM;
 
-	max_sds_rings = adapter->max_sds_rings;
-
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (qlcnic_check_multi_tx(adapter) &&
 		    !adapter->ahw->diag_test &&
 		    (adapter->max_drv_tx_rings > 1)) {
 			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
-				       QLCNIC_NETDEV_WEIGHT * 2);
+				       NAPI_POLL_WEIGHT);
 		} else {
 			if (ring == (adapter->max_sds_rings - 1))
 				netif_napi_add(netdev, &sds_ring->napi,
 					       qlcnic_poll,
-					       QLCNIC_NETDEV_WEIGHT /
-					       max_sds_rings);
+					       NAPI_POLL_WEIGHT);
 			else
 				netif_napi_add(netdev, &sds_ring->napi,
 					       qlcnic_rx_poll,
-					       QLCNIC_NETDEV_WEIGHT * 2);
+					       NAPI_POLL_WEIGHT);
 		}
 	}
 
@@ -1497,7 +1494,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 	for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
 		netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
-			       QLCNIC_NETDEV_WEIGHT);
+			       NAPI_POLL_WEIGHT);
 	}
 }
 
@@ -1784,7 +1781,7 @@ static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
 			break;
 		default:
 			dev_info(&adapter->pdev->dev,
-				 "Unkonwn opcode: 0x%x\n", opcode);
+				 "Unknown opcode: 0x%x\n", opcode);
 			goto skip;
 		}
 
@@ -1963,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
 			 struct net_device *netdev)
 {
-	int ring, max_sds_rings, temp;
+	int ring;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1971,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
 	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
 		return -ENOMEM;
 
-	max_sds_rings = adapter->max_sds_rings;
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
 				netif_napi_add(netdev, &sds_ring->napi,
 					       qlcnic_83xx_rx_poll,
-					       QLCNIC_NETDEV_WEIGHT * 2);
-			} else {
-				temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
+					       NAPI_POLL_WEIGHT);
+			else
 				netif_napi_add(netdev, &sds_ring->napi,
 					       qlcnic_83xx_msix_sriov_vf_poll,
-					       temp);
-			}
+					       NAPI_POLL_WEIGHT);
 		} else {
 			netif_napi_add(netdev, &sds_ring->napi,
 				       qlcnic_83xx_poll,
-				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+				       NAPI_POLL_WEIGHT);
 		}
 	}
 
@@ -2004,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
 		tx_ring = &adapter->tx_ring[ring];
 		netif_napi_add(netdev, &tx_ring->napi,
 			       qlcnic_83xx_msix_tx_poll,
-			       QLCNIC_NETDEV_WEIGHT);
+			       NAPI_POLL_WEIGHT);
 	}
 }
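The jme, netxen, qlcnic, and (below) gelic changes all drop driver-private polling weights in favor of the kernel-wide NAPI_POLL_WEIGHT default of 64. A standalone model of the budget contract that weight feeds into, illustrative only:

/* Illustrative only -- a userspace model of NAPI budgeting: a poll
 * callback handles at most `budget` packets per round and signals
 * completion by returning less than the budget. */
#include <stdio.h>

#define NAPI_POLL_WEIGHT 64     /* the kernel's standard default weight */

static int pending = 150;       /* pretend packets queued in the ring */

/* Process up to `budget` packets; return how many were handled. */
static int fake_poll(int budget)
{
        int done = pending < budget ? pending : budget;

        pending -= done;
        return done;
}

int main(void)
{
        int rounds = 0, done;

        do {
                done = fake_poll(NAPI_POLL_WEIGHT);
                rounds++;
                printf("round %d: processed %d\n", rounds, done);
        } while (done == NAPI_POLL_WEIGHT);     /* budget exhausted: repoll */
        return 0;
}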
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 474c8a86a2a..5cd831ebfa8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1348,7 +1348,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 					       DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 			ndev->stats.rx_packets++;
 			ndev->stats.rx_bytes += pkt_len;
 		}
@@ -1906,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
 
 	pm_runtime_get_sync(&mdp->pdev->dev);
 
+	napi_enable(&mdp->napi);
+
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
 			  mdp->cd->irq_flags, ndev->name, ndev);
 	if (ret) {
 		dev_err(&ndev->dev, "Can not assign IRQ number\n");
-		return ret;
+		goto out_napi_off;
 	}
 
 	/* Descriptor set */
@@ -1928,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
 	if (ret)
 		goto out_free_irq;
 
-	napi_enable(&mdp->napi);
-
 	return ret;
 
 out_free_irq:
 	free_irq(ndev->irq, ndev);
+out_napi_off:
+	napi_disable(&mdp->napi);
 	pm_runtime_put_sync(&mdp->pdev->dev);
 	return ret;
 }
@@ -2025,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 
-	napi_disable(&mdp->napi);
-
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
@@ -2044,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
 
 	free_irq(ndev->irq, ndev);
 
+	napi_disable(&mdp->napi);
+
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
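The sh_eth reordering above enables NAPI before request_irq() and tears it down last on close, so the error path gains a matching out_napi_off label that undoes the enable. A standalone sketch of this acquire/release discipline, with illustrative names only:

/* Illustrative only -- goto-based error unwinding in reverse order of
 * acquisition: since NAPI is now enabled before the IRQ is requested,
 * a request_irq() failure must disable NAPI again on the way out. */
#include <stdio.h>

static int acquire(const char *what, int ok)
{
        printf("acquire %s\n", what);
        return ok ? 0 : -1;
}

static void release(const char *what)
{
        printf("release %s\n", what);
}

static int open_device(void)
{
        int ret;

        ret = acquire("napi", 1);       /* napi_enable() */
        if (ret)
                return ret;

        ret = acquire("irq", 0);        /* request_irq(), failing here */
        if (ret)
                goto out_napi_off;

        return 0;

out_napi_off:
        release("napi");                /* napi_disable() */
        return ret;
}

int main(void)
{
        return open_device() ? 1 : 0;
}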
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index b7a39305472..975dc2d8e54 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1709,7 +1709,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
 
 	if(netif_msg_intr(sis_priv))
 		printk(KERN_DEBUG "%s: exiting interrupt, "
-		       "interrupt status = 0x%#8.8x.\n",
+		       "interrupt status = %#8.8x\n",
 		       net_dev->name, sr32(isr));
 
 	spin_unlock (&sis_priv->lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 623ebc50fe6..51c9069ef40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 		plat->force_sf_dma_mode = 1;
 	}
 
-	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
-	if (!dma_cfg)
-		return -ENOMEM;
-
-	plat->dma_cfg = dma_cfg;
-	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
-	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
-	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+	if (of_find_property(np, "snps,pbl", NULL)) {
+		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+				       GFP_KERNEL);
+		if (!dma_cfg)
+			return -ENOMEM;
+		plat->dma_cfg = dma_cfg;
+		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+		dma_cfg->fixed_burst =
+			of_property_read_bool(np, "snps,fixed-burst");
+		dma_cfg->mixed_burst =
+			of_property_read_bool(np, "snps,mixed-burst");
+	}
 	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
 	if (plat->force_thresh_dma_mode) {
 		plat->force_sf_dma_mode = 0;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 269c08bf399..f28460ce24a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9478,7 +9478,7 @@ static struct niu_parent *niu_new_parent(struct niu *np,
 	if (IS_ERR(plat_dev))
 		return NULL;
 
-	for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
+	for (i = 0; niu_parent_attributes[i].attr.name; i++) {
 		int err = device_create_file(&plat_dev->dev,
 					     &niu_parent_attributes[i]);
 		if (err)
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ad32af67e61..9c805e0c0ca 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
 {
 	netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
 	/* NAPI */
-	netif_napi_add(netdev, napi,
-		       gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
+	netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
 	netdev->ethtool_ops = &gelic_ether_ethtool_ops;
 	netdev->netdev_ops = &gelic_netdevice_ops;
 }
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index a93df6ac190..309abb472aa 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -37,7 +37,6 @@
 #define GELIC_NET_RXBUF_ALIGN		128
 #define GELIC_CARD_RX_CSUM_DEFAULT	1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT	5*HZ
-#define GELIC_NET_NAPI_WEIGHT		(GELIC_NET_RX_DESCRIPTORS)
 #define GELIC_NET_BROADCAST_ADDR	0xffffffffffffL
 
 #define GELIC_NET_MC_COUNT_MAX		32 /* multicast address list */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index e90e1f46121..64b4639f43b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 		printk(KERN_WARNING "Setting MDIO clock divisor to "
 		       "default %d\n", DEFAULT_CLOCK_DIVISOR);
 		clk_div = DEFAULT_CLOCK_DIVISOR;
+		of_node_put(np1);
 		goto issue;
 	}
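The axienet fix above adds the missing of_node_put(np1) on the early bail-out path: nodes returned by the OF lookup helpers carry a reference that the caller must drop exactly once on every path, including error handling. A standalone refcount model of that rule, illustrative only, not kernel code:

/* Illustrative only -- a get/put refcount model of why every exit
 * path must drop the reference taken by an OF lookup helper. */
#include <stdio.h>

struct node {
        int refcount;
};

static struct node *node_get(struct node *n)
{
        n->refcount++;
        return n;
}

static void node_put(struct node *n)
{
        n->refcount--;
        printf("refcount now %d\n", n->refcount);
}

int main(void)
{
        struct node child = { .refcount = 1 };  /* held by the tree */
        struct node *np1 = node_get(&child);    /* like an of_find_*() call */
        int bad_clock_divisor = 1;

        if (bad_clock_divisor) {
                node_put(np1);  /* without this put, the node would leak */
                goto issue;
        }

        node_put(np1);
issue:
        return 0;
}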