-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig                9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/Makefile         3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h         20
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c     92
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h     18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c   729
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c  208
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h   97
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c   618
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h     3
10 files changed, 953 insertions, 844 deletions
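
In short, this patch makes the bnx2x VF (SR-IOV) support a compile-time option: a new CONFIG_BNX2X_SRIOV Kconfig symbol gates bnx2x_vfpf.o and bnx2x_sriov.o in the Makefile, the VF-to-PF channel code moves out of bnx2x_main.c into bnx2x_vfpf.c and bnx2x_sriov.c, and bnx2x_sriov.h gains static inline no-op stubs so common code can keep calling the VF hooks without #ifdefs. A minimal sketch of that header pattern, using made-up names rather than the driver's real symbols:

/* foo_vf.h -- illustration only; hypothetical API, not the bnx2x symbols */
struct foo;

#ifdef CONFIG_FOO_SRIOV
int foo_vfpf_acquire(struct foo *dev, int rxqs, int txqs);	/* real code lives in foo_vf.c */
#else
/* With SR-IOV compiled out, the inline stub keeps callers free of #ifdefs
 * and compiles away to nothing.
 */
static inline int foo_vfpf_acquire(struct foo *dev, int rxqs, int txqs)
{
	return 0;
}
#endif
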
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f55267363f3..3b3bf0dd0f1 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -121,4 +121,13 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config BNX2X_SRIOV
+ bool "Broadcom 578xx and 57712 SR-IOV support"
+ depends on BNX2X && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support in the 578xx and 57712 products. This
+ allows for virtual function acceleration in virtual environments.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
index 2ef6803a0fa..116762daae0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/Makefile
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o bnx2x_vfpf.o bnx2x_sriov.o
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 85969170eb0..02c93e308df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1266,6 +1266,7 @@ struct bnx2x {
(vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+#ifdef CONFIG_BNX2X_SRIOV
/* vf pf channel mailbox contains request and response buffers */
struct bnx2x_vf_mbx_msg *vf2pf_mbox;
dma_addr_t vf2pf_mbox_mapping;
@@ -1278,6 +1279,7 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
+#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
struct pci_dev *pdev;
@@ -1379,8 +1381,14 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp) false
+#define IS_PF(bp) true
+#endif
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -2275,18 +2283,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
(!((me_reg) & ME_REG_VF_ERR)))
-int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id);
-int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping);
-int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
-int bnx2x_vfpf_release(struct bnx2x *bp);
-int bnx2x_vfpf_init(struct bnx2x *bp);
-void bnx2x_vfpf_close_vf(struct bnx2x *bp);
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
-int bnx2x_vfpf_set_mac(struct bnx2x *bp);
-int bnx2x_vfpf_set_mcast(struct net_device *dev);
-int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
-
int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d17fa5ab15a..00706c4b090 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -27,7 +27,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
-#include "bnx2x_sriov.h"
/**
* bnx2x_move_fp - move content of the fastpath structure.
@@ -109,7 +108,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
} else {
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ bnx2x_vf_fill_fw_str(bp, buf, buf_len);
}
}
@@ -2048,7 +2047,7 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
* request struct
*/
if (IS_SRIOV(bp))
- vf_headroom = bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+ vf_headroom = bnx2x_vf_headroom(bp);
/* Request is built from stats_query_header and an array of
* stats_query_cmd_group each of which contains
@@ -3793,93 +3792,6 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return 0;
}
-/* New mac for VF. Consider these cases:
- * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
- * supply at acquire.
- * 2. VF has already been acquired but has not yet initialized - store in local
- * bulletin board. mac will be posted on VF bulletin board after VF init. VF
- * will configure this mac when it is ready.
- * 3. VF has already initialized but has not yet setup a queue - post the new
- * mac on VF's bulletin board right now. VF will configure this mac when it
- * is ready.
- * 4. VF has already set a queue - delete any macs already configured for this
- * queue and manually config the new mac.
- * In any event, once this function has been called refuse any attempts by the
- * VF to configure any mac for itself except for this mac. In case of a race
- * where the VF fails to see the new post on its bulletin board before sending a
- * mac configuration request, the PF will simply fail the request and VF can try
- * again after consulting its bulletin board
- */
-int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state, vfidx = queue;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
-
- /* if SRIOV is disabled there is nothing to do (and somewhere, someone
- * has erred).
- */
- if (!IS_SRIOV(bp)) {
- BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
- return -EINVAL;
- }
-
- if (!is_valid_ether_addr(mac)) {
- BNX2X_ERR("mac address invalid\n");
- return -EINVAL;
- }
-
- /* update PF's copy of the VF's bulletin. will no longer accept mac
- * configuration requests from vf unless match this mac
- */
- bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
- memcpy(bulletin->mac, mac, ETH_ALEN);
-
- /* Post update on VF's bulletin board */
- rc = bnx2x_post_vf_bulletin(bp, vfidx);
- if (rc) {
- BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
- return rc;
- }
-
- /* is vf initialized and queue set up? */
- q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
- if (vf->state == VF_ENABLED &&
- q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- /* configure the mac in device on this vf's queue */
- unsigned long flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-
- /* must lock vfpf channel to protect against vf flows */
- bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
-
- /* remove existing eth macs */
- rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
- if (rc) {
- BNX2X_ERR("failed to delete eth macs\n");
- return -EINVAL;
- }
-
- /* remove existing uc list macs */
- rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
- if (rc) {
- BNX2X_ERR("failed to delete uc_list macs\n");
- return -EINVAL;
- }
-
- /* configure the new mac to device */
- __set_bit(RAMROD_COMP_WAIT, &flags);
- bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
- BNX2X_ETH_MAC, &flags);
-
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
- }
-
- return rc;
-}
-
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 23a1fa9a4cb..6667ec52e1c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -24,6 +24,7 @@
#include "bnx2x.h"
+#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -1128,22 +1129,7 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
- u32 offset = BAR_USTRORM_INTMEM;
-
- if (IS_VF(bp))
- return PXP_VF_ADDR_USDM_QUEUES_START +
- bp->acquire_resp.resc.hw_qid[fp->index] *
- sizeof(struct ustorm_queue_zone_data);
- else if (!CHIP_IS_E1x(bp))
- offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
- else
- offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
-
- return offset;
-}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2b6a919b6bc..5fcaee193e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -60,7 +60,6 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
-#include "bnx2x_sriov.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
@@ -5269,62 +5268,6 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-/* crc is the first field in the bulletin board. compute the crc over the
- * entire bulletin board excluding the crc field itself
- */
-u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
- struct pf_vf_bulletin_content *bulletin)
-{
- return crc32(BULLETIN_CRC_SEED,
- ((u8 *)bulletin) + sizeof(bulletin->crc),
- BULLETIN_CONTENT_SIZE - sizeof(bulletin->crc));
-}
-
-/* Check for new posts on the bulletin board */
-enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
-{
- struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
- int attempts;
-
- /* bulletin board hasn't changed since last sample */
- if (bp->old_bulletin.version == bulletin.version)
- return PFVF_BULLETIN_UNCHANGED;
-
- /* validate crc of new bulletin board */
- if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
- /* sampling structure in mid post may result with corrupted data
- * validate crc to ensure coherency.
- */
- for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
- bulletin = bp->pf2vf_bulletin->content;
- if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
- &bulletin))
- break;
-
- BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
- bulletin.crc,
- bnx2x_crc_vf_bulletin(bp, &bulletin));
- }
- if (attempts >= BULLETIN_ATTEMPTS) {
- BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
- attempts);
- return PFVF_BULLETIN_CRC_ERR;
- }
- }
-
- /* the mac address in bulletin board is valid and is new */
- if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
- memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
- /* update new mac to net device */
- memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
- }
-
- /* copy new bulletin board to bp */
- bp->old_bulletin = bulletin;
-
- return PFVF_BULLETIN_UPDATED;
-}
-
static void bnx2x_timer(unsigned long data)
{
struct bnx2x *bp = (struct bnx2x *) data;
@@ -9525,29 +9468,14 @@ sp_rtnl_not_reset:
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
-
rtnl_unlock();
+ /* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- &bp->sp_rtnl_state)) {
- int rc = 0;
-
- /* disbale sriov in case it is still enabled */
- pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
-
- /* enable sriov */
- DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
- rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
- if (rc)
- BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
- else
- DP(BNX2X_MSG_IOV, "sriov enabled\n");
- }
+ &bp->sp_rtnl_state))
+ bnx2x_enable_sriov(bp);
}
-/* end of nic load/unload */
-
static void bnx2x_period_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
@@ -11790,7 +11718,9 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = bnx2x_setup_tc,
+#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
+#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
@@ -12445,17 +12375,10 @@ static int bnx2x_init_one(struct pci_dev *pdev,
* l2 connections.
*/
if (IS_VF(bp)) {
- /* vf doorbells are embedded within the regview */
- bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
-
- /* allocate vf2pf mailbox for vf to pf channel */
- BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
-
- /* allocate pf 2 vf bulletin board */
- BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
- sizeof(union pf_vf_bulletin));
-
+ bnx2x_vf_map_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_exit;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
@@ -12552,11 +12475,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
-alloc_mem_err:
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
- rc = -ENOMEM;
-
init_one_exit:
if (bp->regview)
iounmap(bp->regview);
@@ -13419,619 +13337,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
-int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
-{
- struct cstorm_vf_zone_data __iomem *zone_data =
- REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
- int tout = 600, interval = 100; /* wait for 60 seconds */
-
- if (*done) {
- BNX2X_ERR("done was non zero before message to pf was sent\n");
- WARN_ON(true);
- return -EINVAL;
- }
-
- /* Write message address */
- writel(U64_LO(msg_mapping),
- &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
- writel(U64_HI(msg_mapping),
- &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
-
- /* make sure the address is written before FW accesses it */
- wmb();
-
- /* Trigger the PF FW */
- writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
-
- /* Wait for PF to complete */
- while ((tout >= 0) && (!*done)) {
- msleep(interval);
- tout -= 1;
-
- /* progress indicator - HV can take its own sweet time in
- * answering VFs...
- */
- DP_CONT(BNX2X_MSG_IOV, ".");
- }
-
- if (!*done) {
- BNX2X_ERR("PF response has timed out\n");
- return -EAGAIN;
- }
- DP(BNX2X_MSG_SP, "Got a response from PF\n");
- return 0;
-}
-
-int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
-{
- u32 me_reg;
- int tout = 10, interval = 100; /* Wait for 1 sec */
-
- do {
- /* pxp traps vf read of doorbells and returns me reg value */
- me_reg = readl(bp->doorbells);
- if (GOOD_ME_REG(me_reg))
- break;
-
- msleep(interval);
-
- BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
- me_reg);
- } while (tout-- > 0);
-
- if (!GOOD_ME_REG(me_reg)) {
- BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
- return -EINVAL;
- }
-
- BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
-
- *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
-
- return 0;
-}
-
-int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
-{
- int rc = 0, attempts = 0;
- struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
- struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
- u32 vf_id;
- bool resources_acquired = false;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
-
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
-
- req->vfdev_info.vf_id = vf_id;
- req->vfdev_info.vf_os = 0;
-
- req->resc_request.num_rxqs = rx_count;
- req->resc_request.num_txqs = tx_count;
- req->resc_request.num_sbs = bp->igu_sb_cnt;
- req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
- req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
-
- /* pf 2 vf bulletin board address */
- req->bulletin_addr = bp->pf2vf_bulletin_mapping;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- while (!resources_acquired) {
- DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
-
- /* send acquire request */
- rc = bnx2x_send_msg2pf(bp,
- &resp->hdr.status,
- bp->vf2pf_mbox_mapping);
-
- /* PF timeout */
- if (rc)
- return rc;
-
- /* copy acquire response from buffer to bp */
- memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
-
- attempts++;
-
- /* test whether the PF accepted our request. If not, humble the
- * the request and try again.
- */
- if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
- DP(BNX2X_MSG_SP, "resources acquired\n");
- resources_acquired = true;
- } else if (bp->acquire_resp.hdr.status ==
- PFVF_STATUS_NO_RESOURCE &&
- attempts < VF_ACQUIRE_THRESH) {
- DP(BNX2X_MSG_SP,
- "PF unwilling to fulfill resource request. Try PF recommended amount\n");
-
- /* humble our request */
- req->resc_request.num_txqs =
- bp->acquire_resp.resc.num_txqs;
- req->resc_request.num_rxqs =
- bp->acquire_resp.resc.num_rxqs;
- req->resc_request.num_sbs =
- bp->acquire_resp.resc.num_sbs;
- req->resc_request.num_mac_filters =
- bp->acquire_resp.resc.num_mac_filters;
- req->resc_request.num_vlan_filters =
- bp->acquire_resp.resc.num_vlan_filters;
- req->resc_request.num_mc_filters =
- bp->acquire_resp.resc.num_mc_filters;
-
- /* Clear response buffer */
- memset(&bp->vf2pf_mbox->resp, 0,
- sizeof(union pfvf_tlvs));
- } else {
- /* PF reports error */
- BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
- bp->acquire_resp.hdr.status);
- return -EAGAIN;
- }
- }
-
- /* get HW info */
- bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
- bp->link_params.chip_id = bp->common.chip_id;
- bp->db_size = bp->acquire_resp.pfdev_info.db_size;
- bp->common.int_block = INT_BLOCK_IGU;
- bp->common.chip_port_mode = CHIP_2_PORT_MODE;
- bp->igu_dsb_id = -1;
- bp->mf_ov = 0;
- bp->mf_mode = 0;
- bp->common.flash_size = 0;
- bp->flags |=
- NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
- bp->igu_sb_cnt = 1;
- bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
- sizeof(bp->fw_ver));
-
- if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
- memcpy(bp->dev->dev_addr,
- bp->acquire_resp.resc.current_mac_addr,
- ETH_ALEN);
-
- return 0;
-}
-
-int bnx2x_vfpf_release(struct bnx2x *bp)
-{
- struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- u32 rc = 0, vf_id;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
-
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
-
- req->vf_id = vf_id;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- /* send release request */
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
-
- if (rc)
- /* PF timeout */
- return rc;
- if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
- /* PF released us */
- DP(BNX2X_MSG_SP, "vf released\n");
- } else {
- /* PF reports error */
- BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
- resp->hdr.status);
- return -EAGAIN;
- }
-
- return 0;
-}
-
-/* Tell PF about SB addresses */
-int bnx2x_vfpf_init(struct bnx2x *bp)
-{
- struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc, i;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
-
- /* status blocks */
- for_each_eth_queue(bp, i)
- req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
- status_blk_mapping);
-
- /* statistics - requests only supports single queue for now */
- req->stats_addr = bp->fw_stats_data_mapping +
- offsetof(struct bnx2x_fw_stats_data, queue_stats);
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
- if (rc)
- return rc;
-
- if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
- resp->hdr.status);
- return -EAGAIN;
- }
-
- DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
- return 0;
-}
-
-/* CLOSE VF - opposite to INIT_VF */
-void bnx2x_vfpf_close_vf(struct bnx2x *bp)
-{
- struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int i, rc;
- u32 vf_id;
-
- /* If we haven't got a valid VF id, there is no sense to
- * continue with sending messages
- */
- if (bnx2x_get_vf_id(bp, &vf_id))
- goto free_irq;
-
- /* Close the queues */
- for_each_queue(bp, i)
- bnx2x_vfpf_teardown_queue(bp, i);
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
-
- req->vf_id = vf_id;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
-
- if (rc)
- BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
-
- else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
- resp->hdr.status);
-
-free_irq:
- /* Disable HW interrupts, NAPI */
- bnx2x_netif_stop(bp, 0);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
-}
-
-/* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
-{
- struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
- u16 tpa_agg_size = 0, flags = 0;
- int rc;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
-
- /* select tpa mode to request */
- if (!fp->disable_tpa) {
- flags |= VFPF_QUEUE_FLG_TPA;
- flags |= VFPF_QUEUE_FLG_TPA_IPV6;
- if (fp->mode == TPA_MODE_GRO)
- flags |= VFPF_QUEUE_FLG_TPA_GRO;
- tpa_agg_size = TPA_AGG_SIZE;
- }
-
- /* calculate queue flags */
- flags |= VFPF_QUEUE_FLG_STATS;
- flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
- flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
- flags |= VFPF_QUEUE_FLG_VLAN;
- DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
-
- /* Common */
- req->vf_qid = fp_idx;
- req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
-
- /* Rx */
- req->rxq.rcq_addr = fp->rx_comp_mapping;
- req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
- req->rxq.rxq_addr = fp->rx_desc_mapping;
- req->rxq.sge_addr = fp->rx_sge_mapping;
- req->rxq.vf_sb = fp_idx;
- req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
- req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
- req->rxq.mtu = bp->dev->mtu;
- req->rxq.buf_sz = fp->rx_buf_size;
- req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
- req->rxq.tpa_agg_sz = tpa_agg_size;
- req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
- req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
- (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
- req->rxq.flags = flags;
- req->rxq.drop_flags = 0;
- req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
- req->rxq.stat_id = -1; /* No stats at the moment */
-
- /* Tx */
- req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
- req->txq.vf_sb = fp_idx;
- req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
- req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
- req->txq.flags = flags;
- req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
- if (rc)
- BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
- fp_idx);
-
- if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
- fp_idx, resp->hdr.status);
- return -EINVAL;
- }
- return rc;
-}
-
-int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
- struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
- sizeof(*req));
-
- req->vf_qid = qidx;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
-
- if (rc) {
- BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
- rc);
- return rc;
- }
-
- /* PF failed the transaction */
- if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
- resp->hdr.status);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* request pf to add a mac for the vf */
-int bnx2x_vfpf_set_mac(struct bnx2x *bp)
-{
- struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
- sizeof(*req));
-
- req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
- req->vf_qid = 0;
- req->n_mac_vlan_filters = 1;
- req->filters[0].flags =
- VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
-
- /* sample bulletin board for new mac */
- bnx2x_sample_bulletin(bp);
-
- /* copy mac from device to request */
- memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- /* send message to pf */
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
- if (rc) {
- BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
- return rc;
- }
-
- /* failure may mean PF was configured with a new mac for us */
- while (resp->hdr.status == PFVF_STATUS_FAILURE) {
- DP(BNX2X_MSG_IOV,
- "vfpf SET MAC failed. Check bulletin board for new posts\n");
-
- /* check if bulletin board was updated */
- if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
- /* copy mac from device to request */
- memcpy(req->filters[0].mac, bp->dev->dev_addr,
- ETH_ALEN);
-
- /* send message to pf */
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
- bp->vf2pf_mbox_mapping);
- } else {
- /* no new info in bulletin */
- break;
- }
- }
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
- return -EINVAL;
- }
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
- return 0;
+ return offset;
}
-int bnx2x_vfpf_set_mcast(struct net_device *dev)
+/* called only on E1H or E2.
+ * When pretending to be PF, the pretend value is the function number 0...7
+ * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
+ * combination
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
- struct bnx2x *bp = netdev_priv(dev);
- struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc, i = 0;
- struct netdev_hw_addr *ha;
+ u32 pretend_reg;
- if (bp->state != BNX2X_STATE_OPEN) {
- DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
- return -EINVAL;
- }
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
- sizeof(*req));
-
- /* Get Rx mode requested */
- DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
-
- netdev_for_each_mc_addr(ha, dev) {
- DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- bnx2x_mc_addr(ha));
- memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
- i++;
- }
-
- /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
- * addresses tops
- */
- if (i >= PFVF_MAX_MULTICAST_PER_VF) {
- DP(NETIF_MSG_IFUP,
- "VF supports not more than %d multicast MAC addresses\n",
- PFVF_MAX_MULTICAST_PER_VF);
- return -EINVAL;
- }
-
- req->n_multicast = i;
- req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
- req->vf_qid = 0;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
- if (rc) {
- BNX2X_ERR("Sending a message failed: %d\n", rc);
- return rc;
- }
-
- if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
- resp->hdr.status);
- return -EINVAL;
- }
+ if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
+ return -1;
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
return 0;
}
-
-int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
-{
- int mode = bp->rx_mode;
- struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
- struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
-
- /* clear mailbox and prep first tlv */
- bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
- sizeof(*req));
-
- DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
-
- switch (mode) {
- case BNX2X_RX_MODE_NONE: /* no Rx */
- req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
- break;
- case BNX2X_RX_MODE_NORMAL:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_ALLMULTI:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- case BNX2X_RX_MODE_PROMISC:
- req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
- req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
- break;
- default:
- BNX2X_ERR("BAD rx mode (%d)\n", mode);
- return -EINVAL;
- }
-
- req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
- req->vf_qid = 0;
-
- /* add list termination tlv */
- bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- /* output tlvs list */
- bnx2x_dp_tlv_list(bp, req);
-
- rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
- if (rc)
- BNX2X_ERR("Sending a message failed: %d\n", rc);
-
- if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
- BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
- return -EINVAL;
- }
-
- return rc;
-}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 71fcef0d407..f2f5c8ecfd7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,7 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
-#include "bnx2x_sriov.h"
+#include <linux/crc32.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -1334,25 +1334,6 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
* for calling pretend prior to calling these routines
*/
-/* called only on E1H or E2.
- * When pretending to be PF, the pretend value is the function number 0...7
- * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
- * combination
- */
-int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
-{
- u32 pretend_reg;
-
- if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
- return -1;
-
- /* get my own pretend register */
- pretend_reg = bnx2x_get_pretend_reg(bp);
- REG_WR(bp, pretend_reg, pretend_func_val);
- REG_RD(bp, pretend_reg);
- return 0;
-}
-
/* internal vf enable - until vf is enabled internally all transactions
* are blocked. this routine should always be called last with pretend.
*/
@@ -1800,7 +1781,7 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
- do_div(size, iov->total);
+ size /= iov->total;
vf->bars[n].bar = start + size * vf->abs_vfid;
vf->bars[n].size = size;
}
@@ -3031,3 +3012,188 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* record the locking op */
vf->op_current = CHANNEL_TLV_NONE;
}
+
+void bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0;
+
+ /* disbale sriov in case it is still enabled */
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* enable sriov */
+ DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
+ rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ if (rc)
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ else
+ DP(BNX2X_MSG_IOV, "sriov enabled\n");
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ * supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ * bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ * will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet setup a queue - post the new
+ * mac on VF's bulletin board right now. VF will configure this mac when it
+ * is ready.
+ * 4. VF has already set a queue - delete any macs already configured for this
+ * queue and manually config the new mac.
+ * In any event, once this function has been called refuse any attempts by the
+ * VF to configure any mac for itself except for this mac. In case of a race
+ * where the VF fails to see the new post on its bulletin board before sending a
+ * mac configuration request, the PF will simply fail the request and VF can try
+ * again after consulting its bulletin board
+ */
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state, vfidx = queue;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* if SRIOV is disabled there is nothing to do (and somewhere, someone
+ * has erred).
+ */
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ BNX2X_ERR("mac address invalid\n");
+ return -EINVAL;
+ }
+
+ /* update PF's copy of the VF's bulletin. will no longer accept mac
+ * configuration requests from vf unless match this mac
+ */
+ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+ memcpy(bulletin->mac, mac, ETH_ALEN);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ return rc;
+ }
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the mac in device on this vf's queue */
+ unsigned long flags = 0;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+ /* remove existing eth macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete eth macs\n");
+ return -EINVAL;
+ }
+
+ /* remove existing uc list macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete uc_list macs\n");
+ return -EINVAL;
+ }
+
+ /* configure the new mac to device */
+ __set_bit(RAMROD_COMP_WAIT, &flags);
+ bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+ BNX2X_ETH_MAC, &flags);
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+ }
+
+ return rc;
+}
+
+/* crc is the first field in the bulletin board. compute the crc over the
+ * entire bulletin board excluding the crc field itself
+ */
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin)
+{
+ return crc32(BULLETIN_CRC_SEED,
+ ((u8 *)bulletin) + sizeof(bulletin->crc),
+ BULLETIN_CONTENT_SIZE - sizeof(bulletin->crc));
+}
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int attempts;
+
+ /* bulletin board hasn't changed since last sample */
+ if (bp->old_bulletin.version == bulletin.version)
+ return PFVF_BULLETIN_UNCHANGED;
+
+ /* validate crc of new bulletin board */
+ if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
+ /* sampling structure in mid post may result with corrupted data
+ * validate crc to ensure coherency.
+ */
+ for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+ bulletin = bp->pf2vf_bulletin->content;
+ if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
+ &bulletin))
+ break;
+ BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
+ bulletin.crc,
+ bnx2x_crc_vf_bulletin(bp, &bulletin));
+ }
+ if (attempts >= BULLETIN_ATTEMPTS) {
+ BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+ attempts);
+ return PFVF_BULLETIN_CRC_ERR;
+ }
+ }
+
+ /* the mac address in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
+ memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ /* update new mac to net device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ }
+
+ /* copy new bulletin board to bp */
+ bp->old_bulletin = bulletin;
+
+ return PFVF_BULLETIN_UPDATED;
+}
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+{
+ /* vf doorbells are embedded within the regview */
+ bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+ /* allocate vf2pf mailbox for vf to pf channel */
+ BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* allocate pf 2 vf bulletin board */
+ BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(union pf_vf_bulletin));
+ return -ENOMEM;
+}
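
The bulletin-board helpers moved into this file (bnx2x_crc_vf_bulletin and bnx2x_sample_bulletin) rely on a simple integrity idiom: the crc is the first field of the structure and is computed over everything after it, so a VF that samples the board mid-update can detect the torn copy and retry. A small userspace sketch of the same idiom, using zlib's crc32() and a made-up layout rather than the driver's pf_vf_bulletin_content:

/* Illustration only; link with -lz for zlib's crc32() */
#include <zlib.h>

struct bulletin {
	unsigned long crc;	/* must stay the first field */
	unsigned long version;
	unsigned char mac[6];
};

static unsigned long bulletin_crc(const struct bulletin *b)
{
	/* CRC over the whole structure except the crc field itself */
	return crc32(0L, (const unsigned char *)b + sizeof(b->crc),
		     sizeof(*b) - sizeof(b->crc));
}

/* Writer: fill the fields, then store bulletin_crc() into b->crc last.
 * Reader: copy the structure, recompute the CRC, and retry on mismatch.
 */
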
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index aab2a0525c2..df4ae743401 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -20,7 +20,15 @@
#define BNX2X_SRIOV_H
#include "bnx2x_vfpf.h"
-#include "bnx2x_cmn.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+ PFVF_BULLETIN_UNCHANGED,
+ PFVF_BULLETIN_UPDATED,
+ PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
/* The bnx2x device structure holds vfdb structure described below.
* The VF array is indexed by the relative vfid.
@@ -712,12 +720,89 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
struct pf_vf_bulletin_content *bulletin);
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
-enum sample_bulletin_result {
- PFVF_BULLETIN_UNCHANGED,
- PFVF_BULLETIN_UPDATED,
- PFVF_BULLETIN_CRC_ERR
-};
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+int bnx2x_vfpf_set_mac(struct bnx2x *bp);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len)
+{
+ strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ bp->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data);
+}
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+void bnx2x_enable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+}
+
+#else /* CONFIG_BNX2X_SRIOV */
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
+ bool queue_work) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+ union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+ u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
+static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ return PFVF_BULLETIN_UNCHANGED;
+}
+
+static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+
+#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
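
Together with the bnx2x.h change earlier (IS_VF(bp) becomes the constant false when CONFIG_BNX2X_SRIOV is unset), these stubs mean the VF-only branches in common code are folded away at compile time, so a PF-only build carries no VF overhead. A self-contained sketch of that effect, with hypothetical names standing in for the driver's:

#include <stdbool.h>

/* Mirrors the pattern above, with SR-IOV compiled out in this illustration */
#define IS_VF(dev)	false
static inline int foo_vfpf_setup_q(void *dev, int idx) { return 0; }

static int foo_setup_queue(void *dev, int idx)
{
	if (IS_VF(dev))			/* constant false: branch is eliminated */
		return foo_vfpf_setup_q(dev, idx);
	return 0;			/* PF-side queue setup would go here */
}
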
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index b410b9fff20..850aad3a8c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -18,7 +18,7 @@
*/
#include "bnx2x.h"
-#include "bnx2x_sriov.h"
+#include "bnx2x_cmn.h"
#include <linux/crc32.h>
/* place a given tlv on the tlv buffer at a given offset */
@@ -98,6 +98,622 @@ static inline int bnx2x_pfvf_status_codes(int rc)
}
}
+int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+ struct cstorm_vf_zone_data __iomem *zone_data =
+ REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+ int tout = 600, interval = 100; /* wait for 60 seconds */
+
+ if (*done) {
+ BNX2X_ERR("done was non zero before message to pf was sent\n");
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ /* Write message address */
+ writel(U64_LO(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+ writel(U64_HI(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+ /* make sure the address is written before FW accesses it */
+ wmb();
+
+ /* Trigger the PF FW */
+ writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ /* Wait for PF to complete */
+ while ((tout >= 0) && (!*done)) {
+ msleep(interval);
+ tout -= 1;
+
+ /* progress indicator - HV can take its own sweet time in
+ * answering VFs...
+ */
+ DP_CONT(BNX2X_MSG_IOV, ".");
+ }
+
+ if (!*done) {
+ BNX2X_ERR("PF response has timed out\n");
+ return -EAGAIN;
+ }
+ DP(BNX2X_MSG_SP, "Got a response from PF\n");
+ return 0;
+}
+
+int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+ u32 me_reg;
+ int tout = 10, interval = 100; /* Wait for 1 sec */
+
+ do {
+ /* pxp traps vf read of doorbells and returns me reg value */
+ me_reg = readl(bp->doorbells);
+ if (GOOD_ME_REG(me_reg))
+ break;
+
+ msleep(interval);
+
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
+ me_reg);
+ } while (tout-- > 0);
+
+ if (!GOOD_ME_REG(me_reg)) {
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+ return -EINVAL;
+ }
+
+ BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+
+ *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+ return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+ int rc = 0, attempts = 0;
+ struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+ struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ u32 vf_id;
+ bool resources_acquired = false;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vfdev_info.vf_id = vf_id;
+ req->vfdev_info.vf_os = 0;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = bp->igu_sb_cnt;
+ req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+ req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ while (!resources_acquired) {
+ DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = bnx2x_send_msg2pf(bp,
+ &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+
+ /* PF timeout */
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to bp */
+ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+ attempts++;
+
+ /* test whether the PF accepted our request. If not, humble the
+ * the request and try again.
+ */
+ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+ DP(BNX2X_MSG_SP, "resources acquired\n");
+ resources_acquired = true;
+ } else if (bp->acquire_resp.hdr.status ==
+ PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP(BNX2X_MSG_SP,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs =
+ bp->acquire_resp.resc.num_txqs;
+ req->resc_request.num_rxqs =
+ bp->acquire_resp.resc.num_rxqs;
+ req->resc_request.num_sbs =
+ bp->acquire_resp.resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ bp->acquire_resp.resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ bp->acquire_resp.resc.num_vlan_filters;
+ req->resc_request.num_mc_filters =
+ bp->acquire_resp.resc.num_mc_filters;
+
+ /* Clear response buffer */
+ memset(&bp->vf2pf_mbox->resp, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* get HW info */
+ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+ bp->link_params.chip_id = bp->common.chip_id;
+ bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+ bp->common.int_block = INT_BLOCK_IGU;
+ bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+ bp->igu_dsb_id = -1;
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->common.flash_size = 0;
+ bp->flags |=
+ NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+ bp->igu_sb_cnt = 1;
+ bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ sizeof(bp->fw_ver));
+
+ if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+ memcpy(bp->dev->dev_addr,
+ bp->acquire_resp.resc.current_mac_addr,
+ ETH_ALEN);
+
+ return 0;
+}
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+ struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u32 rc = 0, vf_id;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send release request */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ /* PF timeout */
+ return rc;
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF released us */
+ DP(BNX2X_MSG_SP, "vf released\n");
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+ struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+ /* status blocks */
+ for_each_eth_queue(bp, i)
+ req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+ status_blk_mapping);
+
+ /* statistics - requests only supports single queue for now */
+ req->stats_addr = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+ return 0;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+ struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int i, rc;
+ u32 vf_id;
+
+ /* If we haven't got a valid VF id, there is no sense to
+ * continue with sending messages
+ */
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ goto free_irq;
+
+ /* Close the queues */
+ for_each_queue(bp, i)
+ bnx2x_vfpf_teardown_queue(bp, i);
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+ else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+ resp->hdr.status);
+
+free_irq:
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+{
+ struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u16 tpa_agg_size = 0, flags = 0;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+ /* select tpa mode to request */
+ if (!fp->disable_tpa) {
+ flags |= VFPF_QUEUE_FLG_TPA;
+ flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+ if (fp->mode == TPA_MODE_GRO)
+ flags |= VFPF_QUEUE_FLG_TPA_GRO;
+ tpa_agg_size = TPA_AGG_SIZE;
+ }
+
+ /* calculate queue flags */
+ flags |= VFPF_QUEUE_FLG_STATS;
+ flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+ flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
+ flags |= VFPF_QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+
+ /* Common */
+ req->vf_qid = fp_idx;
+ req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+ /* Rx */
+ req->rxq.rcq_addr = fp->rx_comp_mapping;
+ req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ req->rxq.rxq_addr = fp->rx_desc_mapping;
+ req->rxq.sge_addr = fp->rx_sge_mapping;
+ req->rxq.vf_sb = fp_idx;
+ req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
+ req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+ req->rxq.mtu = bp->dev->mtu;
+ req->rxq.buf_sz = fp->rx_buf_size;
+ req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+ req->rxq.tpa_agg_sz = tpa_agg_size;
+ req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+ req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ req->rxq.flags = flags;
+ req->rxq.drop_flags = 0;
+ req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ req->rxq.stat_id = -1; /* No stats at the moment */
+
+ /* Tx */
+ req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+ req->txq.vf_sb = fp_idx;
+ req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+ req->txq.flags = flags;
+ req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed: %d\n",
+ fp_idx, rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+ fp_idx, resp->hdr.status);
+ return -EINVAL;
+ }
+ return rc;
+}
+
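+/* ask the pf to tear down the queue previously opened for the vf */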
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+ struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+ sizeof(*req));
+
+ req->vf_qid = qidx;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc) {
+ BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+ rc);
+ return rc;
+ }
+
+ /* PF failed the transaction */
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_set_mac(struct bnx2x *bp)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
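+ /* the mac filter is configured on the vf's leading queue (qid 0) */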
+ req->vf_qid = 0;
+ req->n_mac_vlan_filters = 1;
+ req->filters[0].flags =
+ VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
+
+ /* sample bulletin board for new mac */
+ bnx2x_sample_bulletin(bp);
+
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ return rc;
+ }
+
+ /* failure may mean PF was configured with a new mac for us */
+ while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+ DP(BNX2X_MSG_IOV,
+ "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+ /* check if bulletin board was updated */
+ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+ } else {
+ /* no new info in bulletin */
+ break;
+ }
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
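+/* request pf to configure the vf's multicast address list */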
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i = 0;
+ struct netdev_hw_addr *ha;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return -EINVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ /* Get Rx mode requested */
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+ /* the request can carry no more than PFVF_MAX_MULTICAST_PER_VF
+  * multicast addresses; reject longer lists up front so the copy
+  * below cannot overflow req->multicast[]
+  */
+ if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports no more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ return -EINVAL;
+ }
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+ memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+ i++;
+ }
+
+ req->n_multicast = i;
+ req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+ return rc;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
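+/* request pf to configure the vf's rx mode (accepted unicast/multicast/broadcast traffic) */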
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+ int mode = bp->rx_mode;
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+ switch (mode) {
+ case BNX2X_RX_MODE_NONE: /* no Rx */
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ default:
+ BNX2X_ERR("BAD rx mode (%d)\n", mode);
+ return -EINVAL;
+ }
+
+ req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 9f07adaf06f..21d29855657 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -18,6 +18,8 @@
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
+#ifdef CONFIG_BNX2X_SRIOV
+
/* Common definitions for all HVs */
struct vf_pf_resc_request {
u8 num_rxqs;
@@ -353,4 +355,5 @@ enum channel_tlvs {
CHANNEL_TLV_MAX
};
+#endif /* CONFIG_BNX2X_SRIOV */
#endif /* VF_PF_IF_H */