Diffstat (limited to 'drivers/net/cnic.c')
-rw-r--r-- | drivers/net/cnic.c | 782 |
1 files changed, 663 insertions, 119 deletions
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60..263a2944566 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -59,6 +59,7 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CNIC_MODULE_VERSION);
 
+/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
 static LIST_HEAD(cnic_dev_list);
 static LIST_HEAD(cnic_udev_list);
 static DEFINE_RWLOCK(cnic_dev_lock);
@@ -278,6 +279,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
 	struct cnic_ulp_ops *ulp_ops;
 	struct cnic_uio_dev *udev = cp->udev;
+	int rc = 0, retry = 0;
 
 	if (!udev || udev->uio_dev == -1)
 		return -ENODEV;
@@ -302,14 +304,26 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 		path_req.pmtu = csk->mtu;
 	}
 
-	rcu_read_lock();
-	ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
-	if (ulp_ops)
-		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
-	rcu_read_unlock();
+	while (retry < 3) {
+		rc = 0;
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+		if (ulp_ops)
+			rc = ulp_ops->iscsi_nl_send_msg(
+				cp->ulp_handle[CNIC_ULP_ISCSI],
+				msg_type, buf, len);
+		rcu_read_unlock();
+		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
+			break;
+
+		msleep(100);
+		retry++;
+	}
 	return 0;
 }
 
+static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
+
 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 				  char *buf, u16 len)
 {
@@ -339,7 +353,9 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 		}
 		csk = &cp->csk_tbl[l5_cid];
 		csk_hold(csk);
-		if (cnic_in_use(csk)) {
+		if (cnic_in_use(csk) &&
+		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
+
 			memcpy(csk->ha, path_resp->mac_addr, 6);
 			if (test_bit(SK_F_IPV6, &csk->flags))
 				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
@@ -347,8 +363,16 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 			else
 				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
 				       sizeof(struct in_addr));
-			if (is_valid_ether_addr(csk->ha))
+
+			if (is_valid_ether_addr(csk->ha)) {
 				cnic_cm_set_pg(csk);
+			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
+				   !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+
+				cnic_cm_upcall(cp, csk,
+					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+				clear_bit(SK_F_CONNECT_START, &csk->flags);
+			}
 		}
 		csk_put(csk);
 		rcu_read_unlock();
@@ -402,19 +426,6 @@ static int cnic_abort_prep(struct cnic_sock *csk)
 	return 0;
 }
 
-static void cnic_uio_stop(void)
-{
-	struct cnic_dev *dev;
-
-	read_lock(&cnic_dev_lock);
-	list_for_each_entry(dev, &cnic_dev_list, list) {
-		struct cnic_local *cp = dev->cnic_priv;
-
-		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
-	}
-	read_unlock(&cnic_dev_lock);
-}
-
 int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 {
 	struct cnic_dev *dev;
@@ -445,14 +456,12 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 
 	/* Prevent race conditions with netdev_event */
 	rtnl_lock();
-	read_lock(&cnic_dev_lock);
 	list_for_each_entry(dev, &cnic_dev_list, list) {
 		struct cnic_local *cp = dev->cnic_priv;
 
 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
 			ulp_ops->cnic_init(dev);
 	}
-	read_unlock(&cnic_dev_lock);
 	rtnl_unlock();
 
 	return 0;
@@ -488,9 +497,6 @@ int cnic_unregister_driver(int ulp_type)
 	}
 	read_unlock(&cnic_dev_lock);
 
-	if (ulp_type == CNIC_ULP_ISCSI)
-		cnic_uio_stop();
-
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
 
 	mutex_unlock(&cnic_lock);
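
Example: the reworked cnic_send_nlmsg() above retries ISCSI_KEVENT_PATH_REQ delivery up to three times with a 100 ms pause, since the iSCSI ULP may not have registered its netlink handler yet. A compilable userspace sketch of the same bounded-retry pattern, with send_fn standing in for ulp_ops->iscsi_nl_send_msg() (all names here are illustrative, not driver API):

#include <time.h>

/* Retry a possibly-failing send up to three times, pausing 100 ms
 * between attempts, mirroring the loop added to cnic_send_nlmsg().
 */
static int send_with_retry(int (*send_fn)(const void *buf, int len),
			   const void *buf, int len)
{
	struct timespec pause = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	int rc = 0, retry = 0;

	while (retry < 3) {
		rc = send_fn(buf, len);		/* 0 means delivered */
		if (rc == 0)
			break;
		nanosleep(&pause, NULL);	/* stands in for msleep(100) */
		retry++;
	}
	return rc;
}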
@@ -574,6 +580,9 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 	}
 	mutex_unlock(&cnic_lock);
 
+	if (ulp_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
 	synchronize_rcu();
 
 	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
@@ -821,12 +830,14 @@ static void cnic_free_resc(struct cnic_dev *dev)
 	cnic_free_dma(dev, &cp->conn_buf_info);
 	cnic_free_dma(dev, &cp->kwq_info);
 	cnic_free_dma(dev, &cp->kwq_16_data_info);
+	cnic_free_dma(dev, &cp->kcq2.dma);
 	cnic_free_dma(dev, &cp->kcq1.dma);
 	kfree(cp->iscsi_tbl);
 	cp->iscsi_tbl = NULL;
 	kfree(cp->ctx_tbl);
 	cp->ctx_tbl = NULL;
 
+	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
 	cnic_free_id_tbl(&cp->cid_tbl);
 }
 
@@ -940,7 +951,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
 	if (!udev->l2_ring)
-		return -ENOMEM;
+		goto err_udev;
 
 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
 	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
@@ -948,7 +959,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 					  &udev->l2_buf_map,
 					  GFP_KERNEL | __GFP_COMP);
 	if (!udev->l2_buf)
-		return -ENOMEM;
+		goto err_dma;
 
 	write_lock(&cnic_dev_lock);
 	list_add(&udev->list, &cnic_udev_list);
@@ -959,6 +970,12 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
 	cp->udev = udev;
 
 	return 0;
+ err_dma:
+	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+			  udev->l2_ring, udev->l2_ring_map);
+ err_udev:
+	kfree(udev);
+	return -ENOMEM;
 }
 
 static int cnic_init_uio(struct cnic_dev *dev)
@@ -1114,12 +1131,22 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 
 	cp->iro_arr = ethdev->iro_arr;
 
-	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
 	cp->iscsi_start_cid = start_cid;
+	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
+		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+		if (!cp->fcoe_init_cid)
+			cp->fcoe_init_cid = 0x10;
+	}
+
 	if (start_cid < BNX2X_ISCSI_START_CID) {
 		u32 delta = BNX2X_ISCSI_START_CID - start_cid;
 
 		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
+		cp->fcoe_start_cid += delta;
 		cp->max_cid_space += delta;
 	}
 
@@ -1138,6 +1165,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
 	}
 
+	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
 	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
 		PAGE_SIZE;
 
@@ -1161,6 +1191,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	if (ret)
 		goto error;
 
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		ret = cnic_alloc_kcq(dev, &cp->kcq2);
+		if (ret)
+			goto error;
+	}
+
 	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
 			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
@@ -1254,12 +1290,18 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
 	struct cnic_local *cp = dev->cnic_priv;
 	struct l5cm_spe kwqe;
 	struct kwqe_16 *kwq[1];
+	u16 type_16;
 	int ret;
 
 	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));
-	kwqe.hdr.type = cpu_to_le16(type);
+
+	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+		   SPE_HDR_FUNCTION_ID;
+
+	kwqe.hdr.type = cpu_to_le16(type_16);
 	kwqe.hdr.reserved1 = 0;
 	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
 	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
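
Example: with the change above, cnic_submit_kwqe_16() builds the 16-bit SPE header word itself from the connection type and the PF id, so callers (see the CFC_DEL and ring hunks later in this diff) now pass a bare connection type. A userspace sketch of that bit packing; the shift and mask values below are made up for illustration, the real ones being the bnx2x SPE_HDR_* definitions:

#include <stdint.h>

#define EX_CONN_TYPE_SHIFT	0	/* illustrative, not SPE_HDR_* */
#define EX_CONN_TYPE_MASK	0x00ffu
#define EX_FUNC_ID_SHIFT	8
#define EX_FUNC_ID_MASK		0xff00u

/* Pack the connection type and PF id into one 16-bit header word. */
static uint16_t spe_hdr_type(uint8_t conn_type, uint8_t pfid)
{
	uint16_t type_16;

	type_16 = ((uint16_t)conn_type << EX_CONN_TYPE_SHIFT) & EX_CONN_TYPE_MASK;
	type_16 |= ((uint16_t)pfid << EX_FUNC_ID_SHIFT) & EX_FUNC_ID_MASK;
	return type_16;
}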
@@ -1425,8 +1467,11 @@ static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 		cnic_free_dma(dev, &iscsi->hq_info);
 		cnic_free_dma(dev, &iscsi->r2tq_info);
 		cnic_free_dma(dev, &iscsi->task_array_info);
+		cnic_free_id(&cp->cid_tbl, ctx->cid);
+	} else {
+		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
 	}
-	cnic_free_id(&cp->cid_tbl, ctx->cid);
+
 	ctx->cid = 0;
 }
 
@@ -1438,6 +1483,16 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
 
+	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+		if (cid == -1) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ctx->cid = cid;
+		return 0;
+	}
+
 	cid = cnic_alloc_new_id(&cp->cid_tbl);
 	if (cid == -1) {
 		ret = -ENOMEM;
@@ -1695,7 +1750,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
 		*work = num;
 		return -EINVAL;
 	}
-	*work = 2 + req2->num_additional_wqes;;
+	*work = 2 + req2->num_additional_wqes;
 
 	l5_cid = req1->iscsi_conn_id;
 	if (l5_cid >= MAX_ISCSI_TBL_SZ)
@@ -1770,19 +1825,15 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
 	union l5cm_specific_data l5_data;
 	int ret;
-	u32 hw_cid, type;
+	u32 hw_cid;
 
 	init_waitqueue_head(&ctx->waitq);
 	ctx->wait_cond = 0;
 	memset(&l5_data, 0, sizeof(l5_data));
 	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
-	type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-		& SPE_HDR_CONN_TYPE;
-	type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
-		 SPE_HDR_FUNCTION_ID);
 
 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
-				  hw_cid, type, &l5_data);
+				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
 
 	if (ret == 0)
 		wait_event(ctx->waitq, ctx->wait_cond);
@@ -2078,8 +2129,306 @@ static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
 	return 0;
 }
 
-static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
-				   u32 num_wqes)
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_stat *req;
+	struct fcoe_stat_ramrod_params *fcoe_stat;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_stat *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_stat)
+		return -ENOMEM;
+
+	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
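
Example: the FCoE handlers in this hunk consume one or more chained KWQEs and report how many they consumed through the *work out-parameter, even on failure, so the submission loop can always advance (cnic_bnx2x_fcoe_init1() below needs three: INIT1, INIT2, INIT3). A self-contained sketch of that contract, with all types and names hypothetical:

#include <stdio.h>

struct ex_kwqe { int opcode; };

/* A handler that needs a chain of three entries; *work always reports
 * how many entries the caller should skip, success or not.
 */
static int handle_init_chain(struct ex_kwqe **wqes, int num, int *work)
{
	if (num < 3) {
		*work = num;	/* consume the remainder, abort the batch */
		return -1;
	}
	/* ... validate wqes[1] and wqes[2] opcodes here ... */
	*work = 3;		/* all three entries consumed */
	return 0;
}

static void dispatch(struct ex_kwqe **wqes, int num)
{
	int i, work;

	for (i = 0; i < num; ) {
		if (handle_init_chain(&wqes[i], num - i, &work) < 0)
			fprintf(stderr, "KWQE at %d failed\n", i);
		i += work;	/* skip everything the handler consumed */
	}
}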
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid;
+	struct fcoe_init_ramrod_params *fcoe_init;
+	struct fcoe_kwqe_init1 *req1;
+	struct fcoe_kwqe_init2 *req2;
+	struct fcoe_kwqe_init3 *req3;
+	union l5cm_specific_data l5_data;
+
+	if (num < 3) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+		*work = 1;
+		return -EINVAL;
+	}
+	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+		*work = 2;
+		return -EINVAL;
+	}
+
+	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_init size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_init)
+		return -ENOMEM;
+
+	memset(fcoe_init, 0, sizeof(*fcoe_init));
+	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
+	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
+	fcoe_init->eq_next_page_addr.lo =
+		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
+	fcoe_init->eq_next_page_addr.hi =
+		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
+
+	fcoe_init->sb_num = cp->status_blk_num;
+	fcoe_init->eq_prod = MAX_KCQ_IDX;
+	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+	cp->kcq2.sw_prod_idx = 0;
+
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	*work = 3;
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret = 0;
+	u32 cid = -1, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kwqe_conn_offload1 *req1;
+	struct fcoe_kwqe_conn_offload2 *req2;
+	struct fcoe_kwqe_conn_offload3 *req3;
+	struct fcoe_kwqe_conn_offload4 *req4;
+	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+	struct cnic_context *ctx;
+	struct fcoe_context *fctx;
+	struct regpair ctx_addr;
+	union l5cm_specific_data l5_data;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (num < 4) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+	*work = 4;
+
+	l5_cid = req1->fcoe_conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		goto err_reply;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto err_reply;
+
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		ret = 0;
+		goto err_reply;
+	}
+	cid = ctx->cid;
+
+	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+	if (fctx) {
+		u32 hw_cid = BNX2X_HW_CID(cp, cid);
+		u32 val;
+
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->xstorm_ag_context.cdu_reserved = val;
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->ustorm_ag_context.cdu_usage = val;
+	}
+	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_offload size too big\n");
+		goto err_reply;
+	}
+	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_offload)
+		goto err_reply;
+
+	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+	cid = BNX2X_HW_CID(cp, cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+
+err_reply:
+	if (cid != -1)
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
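
Example: when offload setup fails, cnic_bnx2x_fcoe_ofld1() above does not fail silently; its err_reply path fabricates a completion KCQE carrying a context-allocation-failure status and feeds it up the normal completion path, so the ULP always sees an answer to its request. A sketch of that pattern with an illustrative structure and status value:

#include <string.h>
#include <stdint.h>

struct ex_kcqe {
	uint8_t  op_code;
	uint32_t conn_id;
	uint32_t completion_status;
};

#define EX_OP_OFFLOAD_CONN	0x10	/* illustrative opcode */
#define EX_STATUS_ALLOC_FAIL	0x3	/* illustrative status */

/* Deliver a synthesized failure completion through the same callback
 * used for real firmware completions.
 */
static void reply_offload_failure(uint32_t conn_id,
				  void (*deliver)(struct ex_kcqe *))
{
	struct ex_kcqe kcqe;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = EX_OP_OFFLOAD_CONN;
	kcqe.conn_id = conn_id;
	kcqe.completion_status = EX_STATUS_ALLOC_FAIL;
	deliver(&kcqe);
}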
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_enable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_enable)
+		return -ENOMEM;
+
+	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_disable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_disable)
+		return -ENOMEM;
+
+	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_destroy *req;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (ret == 0) {
+		wait_event(ctx->waitq, ctx->wait_cond);
+		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task,
+				   msecs_to_jiffies(2000));
+	}
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.fcoe_conn_id = req->conn_id;
+	kcqe.fcoe_conn_context_id = cid;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
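
Example: cnic_bnx2x_fcoe_destroy() above submits a TERMINATE ramrod, sleeps on ctx->waitq until the completion handler (cnic_process_fcoe_term_conn(), later in this diff) sets wait_cond, and then defers the final CID release to delayed work. A userspace model of that handshake, with the kernel's wait_event()/wake_up() pair played by a pthread condition variable (fields must be initialized with the usual PTHREAD_*_INITIALIZER macros):

#include <pthread.h>

struct ex_conn_ctx {
	pthread_mutex_t lock;
	pthread_cond_t waitq;
	int wait_cond;
};

/* Submitter side: block until the completion side flips wait_cond. */
static void wait_for_terminate(struct ex_conn_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (!ctx->wait_cond)		/* wait_event(ctx->waitq, ...) */
		pthread_cond_wait(&ctx->waitq, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);
	/* the driver then queues delayed work to release the CID */
}

/* Completion side: the equivalent of the KCQE handler's wake_up(). */
static void terminate_completed(struct ex_conn_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->wait_cond = 1;
	pthread_cond_signal(&ctx->waitq);
	pthread_mutex_unlock(&ctx->lock);
}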
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_destroy *req;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_destroy *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+					 struct kwqe *wqes[], u32 num_wqes)
 {
 	int i, work, ret;
 	u32 opcode;
@@ -2143,6 +2492,98 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
 	return 0;
 }
 
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+					struct kwqe *wqes[], u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
+		return -EINVAL;
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case FCOE_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_ENABLE_CONN:
+			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DISABLE_CONN:
+			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY:
+			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_STAT:
+			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0)
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+		i += work;
+	}
+	return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num_wqes)
+{
+	int ret = -EINVAL;
+	u32 layer_code;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!num_wqes)
+		return 0;
+
+	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+	switch (layer_code) {
+	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+	case KWQE_FLAGS_LAYER_MASK_L4:
+	case KWQE_FLAGS_LAYER_MASK_L2:
+		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+		break;
+
+	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+		break;
+	}
+	return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+		return KCQE_FLAGS_LAYER_MASK_L4;
+
+	return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -2154,7 +2595,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 		struct cnic_ulp_ops *ulp_ops;
 		int ulp_type;
 		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
-		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
 
 		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
 			comp++;
@@ -2162,7 +2603,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 		while (j < num_cqes) {
 			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
 
-			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
 				break;
 
 			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
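
Example: cnic_submit_bnx2x_kwqes() above is now a thin router: the layer bits of the first KWQE select the iSCSI/L4/L2 path or the new FCoE path for the whole batch, and cnic_get_kcqe_layer_mask() special-cases the FCoE TERMINATE completion into the L4 layer so the connection-manager code handles it. A sketch of the routing idea with illustrative mask and layer encodings:

#include <stdint.h>

#define EX_LAYER_MASK		0x70000000u	/* illustrative */
#define EX_LAYER_L5_ISCSI	0x30000000u
#define EX_LAYER_L5_FCOE	0x40000000u

enum ex_path { EX_PATH_ISCSI, EX_PATH_FCOE, EX_PATH_INVALID };

/* The first entry's layer bits decide where the whole batch goes. */
static enum ex_path pick_path(uint32_t first_op_flag)
{
	switch (first_op_flag & EX_LAYER_MASK) {
	case EX_LAYER_L5_ISCSI:
		return EX_PATH_ISCSI;	/* L4 and L2 ride this path too */
	case EX_LAYER_L5_FCOE:
		return EX_PATH_FCOE;
	default:
		return EX_PATH_INVALID;	/* caller returns -EINVAL */
	}
}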
@@ -2174,6 +2615,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
 			ulp_type = CNIC_ULP_RDMA;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
 			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+			ulp_type = CNIC_ULP_FCOE;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
 			ulp_type = CNIC_ULP_L4;
 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
@@ -2342,11 +2785,12 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
 static int cnic_service_bnx2(void *data, void *status_blk)
 {
 	struct cnic_dev *dev = data;
-	struct cnic_local *cp = dev->cnic_priv;
-	u32 status_idx = *cp->kcq1.status_idx_ptr;
 
-	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
-		return status_idx;
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		struct status_block *sblk = status_blk;
+
+		return sblk->status_idx;
+	}
 
 	return cnic_service_bnx2_queues(dev);
 }
@@ -2365,9 +2809,10 @@ static void cnic_service_bnx2_msix(unsigned long data)
 static void cnic_doirq(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
-	u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
 
 	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
 		prefetch(cp->status_blk.gen);
 		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
 
@@ -2469,12 +2914,19 @@ static void cnic_service_bnx2x_bh(unsigned long data)
 	status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
 
 	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
-	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
+			  MAX_KCQ_IDX);
+
 		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
 				status_idx, IGU_INT_ENABLE, 1);
-	else
+	} else {
 		cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
 				   status_idx, IGU_INT_ENABLE, 1);
+	}
 }
 
 static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -2883,7 +3335,7 @@ static void cnic_cm_cleanup(struct cnic_sock *csk)
 		struct cnic_dev *dev = csk->dev;
 		struct cnic_local *cp = dev->cnic_priv;
 
-		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
 		csk->src_port = 0;
 	}
 }
@@ -3014,7 +3466,8 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
 	int is_v6, rc = 0;
 	struct dst_entry *dst = NULL;
 	struct net_device *realdev;
-	u32 local_port;
+	__be16 local_port;
+	u32 port_id;
 
 	if (saddr->local.v6.sin6_family == AF_INET6 &&
 	    saddr->remote.v6.sin6_family == AF_INET6)
@@ -3054,19 +3507,21 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
 		}
 	}
 
-	if (local_port >= CNIC_LOCAL_PORT_MIN &&
-	    local_port < CNIC_LOCAL_PORT_MAX) {
-		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
-			local_port = 0;
+	port_id = be16_to_cpu(local_port);
+	if (port_id >= CNIC_LOCAL_PORT_MIN &&
+	    port_id < CNIC_LOCAL_PORT_MAX) {
+		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
+			port_id = 0;
 	} else
-		local_port = 0;
+		port_id = 0;
 
-	if (!local_port) {
-		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
-		if (local_port == -1) {
+	if (!port_id) {
+		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
+		if (port_id == -1) {
 			rc = -ENOMEM;
 			goto err_out;
 		}
+		local_port = cpu_to_be16(port_id);
 	}
 	csk->src_port = local_port;
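
Example: the cnic_get_route() hunk above fixes a byte-order bug: csk->src_port travels in network (big-endian) order, while the port ID table is indexed in host order, so every crossing now converts explicitly. A userspace illustration with htons()/ntohs() standing in for cpu_to_be16()/be16_to_cpu(); alloc_id() is a stub for cnic_alloc_id():

#include <stdint.h>
#include <arpa/inet.h>

static int alloc_id(uint32_t id)
{
	(void)id;	/* stub for cnic_alloc_id(&cp->csk_port_tbl, id) */
	return 0;
}

/* The allocator works on host-order IDs... */
static int reserve_local_port(uint16_t src_port_be)
{
	uint32_t port_id = ntohs(src_port_be);	/* be16_to_cpu() */

	return alloc_id(port_id);
}

/* ...while the socket field stays in wire order. */
static uint16_t store_local_port(uint32_t port_id)
{
	return htons((uint16_t)port_id);	/* cpu_to_be16() */
}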
@@ -3208,6 +3663,18 @@ done:
 	csk_put(csk);
 }
 
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	ctx->timestamp = jiffies;
+	ctx->wait_cond = 1;
+	wake_up(&ctx->waitq);
+}
+
 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -3216,6 +3683,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
 	u32 l5_cid;
 	struct cnic_sock *csk;
 
+	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+		cnic_process_fcoe_term_conn(dev, kcqe);
+		return;
+	}
 	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
 	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
 		cnic_cm_process_offld_pg(dev, l4kcqe);
@@ -3852,7 +4323,7 @@ static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
 	memset(&l2kwqe, 0, sizeof(l2kwqe));
 	wqes[0] = &l2kwqe;
-	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
 			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
 			       KWQE_OPCODE_SHIFT) | 2;
 	dev->submit_kwqes(dev, wqes, 1);
@@ -4106,7 +4577,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int port = CNIC_PORT(cp);
 	int i;
-	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
 
 	memset(txbd, 0, BCM_PAGE_SIZE);
@@ -4167,7 +4638,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	int port = CNIC_PORT(cp);
-	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
 	u32 val;
 	dma_addr_t ring_map = udev->l2_ring_map;
@@ -4231,12 +4702,39 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
+	cp->rx_cons = *cp->rx_cons_ptr;
+}
+
+static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
+				     u32 lower_addr)
+{
+	u32 val;
+	u8 mac[6];
+
+	val = CNIC_RD(dev, upper_addr);
+
+	mac[0] = (u8) (val >> 8);
+	mac[1] = (u8) val;
+
+	val = CNIC_RD(dev, lower_addr);
+
+	mac[2] = (u8) (val >> 24);
+	mac[3] = (u8) (val >> 16);
+	mac[4] = (u8) (val >> 8);
+	mac[5] = (u8) val;
+
+	if (is_valid_ether_addr(mac)) {
+		memcpy(dev->mac_addr, mac, 6);
+		return 0;
+	} else {
+		return -EINVAL;
+	}
 }
 
 static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
-	u32 base, base2, addr, val;
+	u32 base, base2, addr, addr1, val;
 	int port = CNIC_PORT(cp);
 
 	dev->max_iscsi_conn = 0;
@@ -4249,20 +4747,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 
 	addr = BNX2X_SHMEM_ADDR(base,
			dev_info.port_hw_config[port].iscsi_mac_upper);
-	val = CNIC_RD(dev, addr);
-
-	dev->mac_addr[0] = (u8) (val >> 8);
-	dev->mac_addr[1] = (u8) val;
-
-	addr = BNX2X_SHMEM_ADDR(base,
+	addr1 = BNX2X_SHMEM_ADDR(base,
			dev_info.port_hw_config[port].iscsi_mac_lower);
-	val = CNIC_RD(dev, addr);
-
-	dev->mac_addr[2] = (u8) (val >> 24);
-	dev->mac_addr[3] = (u8) (val >> 16);
-	dev->mac_addr[4] = (u8) (val >> 8);
-	dev->mac_addr[5] = (u8) val;
+
+	cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
 
 	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
 	val = CNIC_RD(dev, addr);
@@ -4278,6 +4766,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 		val16 ^= 0x1e1e;
 		dev->max_iscsi_conn = val16;
 	}
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id))
+		dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
+
 	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
 		int func = CNIC_FUNC(cp);
 		u32 mf_cfg_addr;
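
Example: cnic_read_bnx2x_iscsi_mac() above rebuilds the 6-byte station address from two 32-bit shared-memory words: byte 0 sits in bits 15:8 of the upper word, byte 1 in bits 7:0, and bytes 2-5 fill the lower word most-significant first. The same unpacking as a standalone helper:

#include <stdint.h>

/* Assemble mac[] from the upper/lower register words; the driver then
 * keeps the result only if is_valid_ether_addr() accepts it.
 */
static void mac_from_words(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	mac[0] = (uint8_t)(upper >> 8);
	mac[1] = (uint8_t)upper;
	mac[2] = (uint8_t)(lower >> 24);
	mac[3] = (uint8_t)(lower >> 16);
	mac[4] = (uint8_t)(lower >> 8);
	mac[5] = (uint8_t)lower;
}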
@@ -4288,21 +4780,90 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
 		else
 			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
 
-		addr = mf_cfg_addr +
-		       offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag);
+		if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+			/* Must determine if the MF is SD vs SI mode */
+			addr = BNX2X_SHMEM_ADDR(base,
					dev_info.shared_feature_config.config);
+			val = CNIC_RD(dev, addr);
+			if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
+			    SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
+				int rc;
+
+				/* MULTI_FUNCTION_SI mode */
+				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].func_cfg);
+				val = CNIC_RD(dev, addr);
+				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
+					dev->max_iscsi_conn = 0;
+
+				if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
+					dev->max_fcoe_conn = 0;
+
+				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_upper);
+				addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
					func_ext_config[func].
					iscsi_mac_addr_lower);
+				rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
								addr1);
+				if (rc && func > 1)
+					dev->max_iscsi_conn = 0;
+
+				return;
+			}
+		}
+
+		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
			func_mf_config[func].e1hov_tag);
 
 		val = CNIC_RD(dev, addr);
 		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
 		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-			addr = mf_cfg_addr +
-			       offsetof(struct mf_cfg,
-					func_mf_config[func].config);
-			val = CNIC_RD(dev, addr);
-			val &= FUNC_MF_CFG_PROTOCOL_MASK;
-			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
-				dev->max_iscsi_conn = 0;
+			dev->max_fcoe_conn = 0;
+			dev->max_iscsi_conn = 0;
 		}
 	}
+	if (!is_valid_ether_addr(dev->mac_addr))
+		dev->max_iscsi_conn = 0;
+}
+
+static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 pfid = cp->pfid;
+
+	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
+			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
+	cp->kcq1.sw_prod_idx = 0;
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	} else {
+		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+
+	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
+				   USTORM_FCOE_EQ_PROD_OFFSET(pfid);
+		cp->kcq2.sw_prod_idx = 0;
+		cp->kcq2.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
+		cp->kcq2.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
 }
 
 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
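
Example: cnic_init_bnx2x_kcq() above points kcq2 at the FCoE event queue on E2 chips, and cnic_bnx2x_fcoe_init1() earlier in this diff programs the queue's page addresses as lo/hi register pairs (eq_addr, eq_next_page_addr). A minimal sketch of that 64-bit address split:

#include <stdint.h>

struct ex_regpair { uint32_t lo; uint32_t hi; };

/* Split a 64-bit DMA address the way fcoe_init->eq_addr is filled. */
static struct ex_regpair dma_to_regpair(uint64_t dma_addr)
{
	struct ex_regpair rp;

	rp.lo = (uint32_t)(dma_addr & 0xffffffffu);
	rp.hi = (uint32_t)(dma_addr >> 32);
	return rp;
}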
@@ -4335,28 +4896,19 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
 	if (ret)
 		return -ENOMEM;
 
-	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
-
-	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
-			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
-	cp->kcq1.sw_prod_idx = 0;
-
 	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
-
-		cp->kcq1.hw_prod_idx_ptr =
-			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
-		cp->kcq1.status_idx_ptr =
-			&sb->sb.running_index[SM_RX_ID];
-	} else {
-		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
+				       BNX2X_FCOE_NUM_CONNECTIONS,
+				       cp->fcoe_start_cid);
 
-		cp->kcq1.hw_prod_idx_ptr =
-			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
-		cp->kcq1.status_idx_ptr =
-			&sb->sb.running_index[SM_RX_ID];
+		if (ret)
+			return -ENOMEM;
 	}
 
+	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
+	cnic_init_bnx2x_kcq(dev);
+
 	cnic_get_bnx2x_iscsi_info(dev);
 
 	/* Only 1 EQ */
@@ -4424,8 +4976,9 @@ static void cnic_init_rings(struct cnic_dev *dev)
 		cnic_init_bnx2_rx_ring(dev);
 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
-		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
-		u32 cl_qzone_id, type;
+		u32 cli = cp->ethdev->iscsi_l2_client_id;
+		u32 cid = cp->ethdev->iscsi_l2_cid;
+		u32 cl_qzone_id;
 		struct client_init_ramrod_data *data;
 		union l5cm_specific_data l5_data;
 		struct ustorm_eth_rx_producers rx_prods = {0};
@@ -4457,15 +5010,10 @@ static void cnic_init_rings(struct cnic_dev *dev)
 
 		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
 		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
-		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-			& SPE_HDR_CONN_TYPE;
-		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
-			 SPE_HDR_FUNCTION_ID);
-
 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
 
 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
-				    BNX2X_ISCSI_L2_CID, type, &l5_data);
+				    cid, ETH_CONNECTION_TYPE, &l5_data);
 
 		i = 0;
 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
@@ -4476,7 +5024,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
 			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
-		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
+		cnic_ring_ctl(dev, cid, cli, 1);
 	}
 }
 
@@ -4491,19 +5039,19 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		cnic_shutdown_bnx2_rx_ring(dev);
 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
 		struct cnic_local *cp = dev->cnic_priv;
-		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
+		u32 cli = cp->ethdev->iscsi_l2_client_id;
+		u32 cid = cp->ethdev->iscsi_l2_cid;
 		union l5cm_specific_data l5_data;
 		int i;
-		u32 type;
 
-		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
+		cnic_ring_ctl(dev, cid, cli, 0);
 
 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
 
 		l5_data.phy_address.lo = cli;
 		l5_data.phy_address.hi = 0;
 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
-				    BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
+				    cid, ETH_CONNECTION_TYPE, &l5_data);
 		i = 0;
 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
@@ -4515,12 +5063,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
 
 		memset(&l5_data, 0, sizeof(l5_data));
-		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
-			& SPE_HDR_CONN_TYPE;
-		type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
-			 SPE_HDR_FUNCTION_ID);
 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
-				    BNX2X_ISCSI_L2_CID, type, &l5_data);
+				    cid, NONE_CONNECTION_TYPE, &l5_data);
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
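
Example: the ring setup and shutdown paths above wait for the CLIENT_SETUP and HALT ramrods by polling a flag a bounded number of times rather than blocking indefinitely. A userspace sketch of that bounded poll (names illustrative; nanosleep() stands in for msleep()):

#include <stdio.h>
#include <time.h>

/* Poll at most ten times for *flag to clear, then give up loudly. */
static int wait_flag_cleared(volatile int *flag)
{
	struct timespec pause = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
	int i = 0;

	while (*flag && ++i < 10)
		nanosleep(&pause, NULL);

	if (*flag) {
		fprintf(stderr, "ring transition did not complete\n");
		return -1;
	}
	return 0;
}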