author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-31 15:31:23 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-31 15:31:23 -0800
commit     4e13c5d0212f25d69a97606b9d5a85edb52a7737 (patch)
tree       002f59b9151f42a6388656762f0e7963d08b89ef /drivers/infiniband
parent     deb2a1d29bf0168ff2575e714e5c1f156be663fb (diff)
parent     5259a06ef97068b710f45d092a587e8d740f750f (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target updates from Nicholas Bellinger:
"The highlights this round include:
- add support for SCSI Referrals (Hannes)
- add support for T10 DIF into target core (nab + mkp)
- add support for T10 DIF emulation in FILEIO + RAMDISK backends (Sagi + nab)
- add support for T10 DIF -> bio_integrity passthrough in IBLOCK backend (nab)
- prep changes to iser-target for >= v3.15 T10 DIF support (Sagi)
- add support for qla2xxx N_Port ID Virtualization - NPIV (Saurav + Quinn)
- allow percpu_ida_alloc() to receive task state bitmask (Kent)
- fix >= v3.12 iscsi-target session reset hung task regression (nab)
- fix >= v3.13 percpu_ref se_lun->lun_ref_active race (nab)
- fix a long-standing network portal creation race (Andy)"
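[Editor's note] The percpu_ida change called out above is visible in the isert hunks further down: allocation paths stop passing a gfp_t and instead pass the task state they are prepared to sleep in. A minimal caller-side sketch of that pattern, assuming the post-series signatures of iscsit_allocate_cmd()/percpu_ida_alloc(); the wrapper name below is invented for illustration:

/*
 * Sketch only (kernel context assumed). The caller now expresses blocking
 * behaviour as a task state instead of a gfp_t:
 *   TASK_RUNNING        - do not sleep, fail fast if no tag is free
 *   TASK_INTERRUPTIBLE  - may sleep for a free tag, signal-interruptible
 */
static struct iscsi_cmd *example_allocate_cmd(struct iscsi_conn *conn, bool can_sleep)
{
	int state = can_sleep ? TASK_INTERRUPTIBLE : TASK_RUNNING;

	/* iscsit_allocate_cmd() forwards 'state' down to percpu_ida_alloc() */
	return iscsit_allocate_cmd(conn, state);
}

As the isert diff below shows, the GFP_KERNEL call sites become TASK_INTERRUPTIBLE, which is what lets the connection-reset hang fix work without an uninterruptible wait on the tag pool.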
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
target: Fix percpu_ref_put race in transport_lun_remove_cmd
target/iscsi: Fix network portal creation race
target: Report bad sector in sense data for DIF errors
iscsi-target: Convert gfp_t parameter to task state bitmask
iscsi-target: Fix connection reset hang with percpu_ida_alloc
percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask
iscsi-target: Pre-allocate more tags to avoid ack starvation
qla2xxx: Configure NPIV fc_vport via tcm_qla2xxx_npiv_make_lport
qla2xxx: Enhancements to enable NPIV support for QLOGIC ISPs with TCM/LIO.
qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine
IB/isert: Move fastreg descriptor creation to a function
IB/isert: Avoid frwr notation, user fastreg
IB/isert: seperate connection protection domains and dma MRs
tcm_loop: Enable DIF/DIX modes in SCSI host LLD
target/rd: Add DIF protection into rd_execute_rw
target/rd: Add support for protection SGL setup + release
target/rd: Refactor rd_build_device_space + rd_release_device_space
target/file: Add DIF protection support to fd_execute_rw
target/file: Add DIF protection init/format support
...
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/ulp/isert/ib_isert.c   222
-rw-r--r--   drivers/infiniband/ulp/isert/ib_isert.h    10
2 files changed, 121 insertions, 111 deletions
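[Editor's note] Three related changes land together in the isert portion of the diff below: the "frwr" naming is replaced with "fastreg", fast-registration descriptor setup is factored into a helper, and the protection domain plus DMA MR move from the shared isert_device into each isert_conn. A plain (non-diff) sketch of the per-connection PD/MR setup, using only the verbs that appear in the diff itself; the wrapper function name is invented for illustration:

/*
 * Sketch of the per-connection PD/MR allocation that replaces the old
 * per-device dev_pd/dev_mr.
 */
static int example_conn_alloc_pd_mr(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_device->ib_device;

	isert_conn->conn_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(isert_conn->conn_pd))
		return PTR_ERR(isert_conn->conn_pd);

	/* DMA MR for local access, now tied to the connection's own PD */
	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
					    IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(isert_conn->conn_mr)) {
		int ret = PTR_ERR(isert_conn->conn_mr);

		ib_dealloc_pd(isert_conn->conn_pd);
		return ret;
	}
	return 0;
}

Teardown happens in reverse order in isert_connect_release(): ib_dereg_mr(isert_conn->conn_mr) followed by ib_dealloc_pd(isert_conn->conn_pd).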
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 9804fca6bf0..2b161be3c1a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -47,10 +47,10 @@ static int
 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	       struct isert_rdma_wr *wr);
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr);
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -227,11 +227,11 @@ isert_create_device_ib_res(struct isert_device *device)
 
 	/* asign function handlers */
 	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-		device->use_frwr = 1;
-		device->reg_rdma_mem = isert_reg_rdma_frwr;
-		device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+		device->use_fastreg = 1;
+		device->reg_rdma_mem = isert_reg_rdma;
+		device->unreg_rdma_mem = isert_unreg_rdma;
 	} else {
-		device->use_frwr = 0;
+		device->use_fastreg = 0;
 		device->reg_rdma_mem = isert_map_rdma;
 		device->unreg_rdma_mem = isert_unmap_cmd;
 	}
@@ -239,9 +239,10 @@ isert_create_device_ib_res(struct isert_device *device)
 	device->cqs_used = min_t(int, num_online_cpus(),
 				 device->ib_device->num_comp_vectors);
 	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-	pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+	pr_debug("Using %d CQs, device %s supports %d vectors support "
+		 "Fast registration %d\n",
 		 device->cqs_used, device->ib_device->name,
-		 device->ib_device->num_comp_vectors, device->use_frwr);
+		 device->ib_device->num_comp_vectors, device->use_fastreg);
 	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
 				device->cqs_used, GFP_KERNEL);
 	if (!device->cq_desc) {
@@ -250,13 +251,6 @@ isert_create_device_ib_res(struct isert_device *device)
 	}
 	cq_desc = device->cq_desc;
 
-	device->dev_pd = ib_alloc_pd(ib_dev);
-	if (IS_ERR(device->dev_pd)) {
-		ret = PTR_ERR(device->dev_pd);
-		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
-		goto out_cq_desc;
-	}
-
 	for (i = 0; i < device->cqs_used; i++) {
 		cq_desc[i].device = device;
 		cq_desc[i].cq_index = i;
@@ -294,13 +288,6 @@ isert_create_device_ib_res(struct isert_device *device)
 			goto out_cq;
 	}
 
-	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(device->dev_mr)) {
-		ret = PTR_ERR(device->dev_mr);
-		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
-		goto out_cq;
-	}
-
 	return 0;
 
 out_cq:
@@ -316,9 +303,6 @@ out_cq:
 			ib_destroy_cq(device->dev_tx_cq[j]);
 		}
 	}
-	ib_dealloc_pd(device->dev_pd);
-
-out_cq_desc:
 	kfree(device->cq_desc);
 
 	return ret;
@@ -341,8 +325,6 @@ isert_free_device_ib_res(struct isert_device *device)
 		device->dev_tx_cq[i] = NULL;
 	}
 
-	ib_dereg_mr(device->dev_mr);
-	ib_dealloc_pd(device->dev_pd);
 	kfree(device->cq_desc);
 }
@@ -398,18 +380,18 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 }
 
 static void
-isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc, *tmp;
 	int i = 0;
 
-	if (list_empty(&isert_conn->conn_frwr_pool))
+	if (list_empty(&isert_conn->conn_fr_pool))
 		return;
 
-	pr_debug("Freeing conn %p frwr pool", isert_conn);
+	pr_debug("Freeing conn %p fastreg pool", isert_conn);
 
 	list_for_each_entry_safe(fr_desc, tmp,
-				 &isert_conn->conn_frwr_pool, list) {
+				 &isert_conn->conn_fr_pool, list) {
 		list_del(&fr_desc->list);
 		ib_free_fast_reg_page_list(fr_desc->data_frpl);
 		ib_dereg_mr(fr_desc->data_mr);
@@ -417,20 +399,47 @@ isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
 		++i;
 	}
 
-	if (i < isert_conn->conn_frwr_pool_size)
+	if (i < isert_conn->conn_fr_pool_size)
 		pr_warn("Pool still has %d regions registered\n",
-			isert_conn->conn_frwr_pool_size - i);
+			isert_conn->conn_fr_pool_size - i);
+}
+
+static int
+isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+		     struct fast_reg_descriptor *fr_desc)
+{
+	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+							 ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_frpl)) {
+		pr_err("Failed to allocate data frpl err=%ld\n",
+		       PTR_ERR(fr_desc->data_frpl));
+		return PTR_ERR(fr_desc->data_frpl);
+	}
+
+	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+	if (IS_ERR(fr_desc->data_mr)) {
+		pr_err("Failed to allocate data frmr err=%ld\n",
+		       PTR_ERR(fr_desc->data_mr));
+		ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		return PTR_ERR(fr_desc->data_mr);
+	}
+	pr_debug("Create fr_desc %p page_list %p\n",
+		 fr_desc, fr_desc->data_frpl->page_list);
+
+	fr_desc->valid = true;
+
+	return 0;
 }
 
 static int
-isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
 	struct fast_reg_descriptor *fr_desc;
 	struct isert_device *device = isert_conn->conn_device;
 	int i, ret;
 
-	INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
-	isert_conn->conn_frwr_pool_size = 0;
+	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+	isert_conn->conn_fr_pool_size = 0;
 	for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
 		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
 		if (!fr_desc) {
@@ -439,40 +448,25 @@ isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
 			goto err;
 		}
 
-		fr_desc->data_frpl =
-			ib_alloc_fast_reg_page_list(device->ib_device,
-						    ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_frpl)) {
-			pr_err("Failed to allocate fr_pg_list err=%ld\n",
-			       PTR_ERR(fr_desc->data_frpl));
-			ret = PTR_ERR(fr_desc->data_frpl);
-			goto err;
-		}
-
-		fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
-							ISCSI_ISER_SG_TABLESIZE);
-		if (IS_ERR(fr_desc->data_mr)) {
-			pr_err("Failed to allocate frmr err=%ld\n",
-			       PTR_ERR(fr_desc->data_mr));
-			ret = PTR_ERR(fr_desc->data_mr);
-			ib_free_fast_reg_page_list(fr_desc->data_frpl);
+		ret = isert_create_fr_desc(device->ib_device,
+					   isert_conn->conn_pd, fr_desc);
+		if (ret) {
+			pr_err("Failed to create fastreg descriptor err=%d\n",
+			       ret);
 			goto err;
 		}
 
-		pr_debug("Create fr_desc %p page_list %p\n",
-			 fr_desc, fr_desc->data_frpl->page_list);
-		fr_desc->valid = true;
-
-		list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-		isert_conn->conn_frwr_pool_size++;
+		list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+		isert_conn->conn_fr_pool_size++;
 	}
 
-	pr_debug("Creating conn %p frwr pool size=%d",
-		 isert_conn, isert_conn->conn_frwr_pool_size);
+	pr_debug("Creating conn %p fastreg pool size=%d",
+		 isert_conn, isert_conn->conn_fr_pool_size);
 
 	return 0;
 
 err:
-	isert_conn_free_frwr_pool(isert_conn);
+	isert_conn_free_fastreg_pool(isert_conn);
 	return ret;
 }
@@ -558,14 +552,29 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	}
 
 	isert_conn->conn_device = device;
-	isert_conn->conn_pd = device->dev_pd;
-	isert_conn->conn_mr = device->dev_mr;
+	isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+	if (IS_ERR(isert_conn->conn_pd)) {
+		ret = PTR_ERR(isert_conn->conn_pd);
+		pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_pd;
+	}
 
-	if (device->use_frwr) {
-		ret = isert_conn_create_frwr_pool(isert_conn);
+	isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+					    IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(isert_conn->conn_mr)) {
+		ret = PTR_ERR(isert_conn->conn_mr);
+		pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+		       isert_conn, ret);
+		goto out_mr;
+	}
+
+	if (device->use_fastreg) {
+		ret = isert_conn_create_fastreg_pool(isert_conn);
 		if (ret) {
-			pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
-			goto out_frwr;
+			pr_err("Conn: %p failed to create fastreg pool\n",
+			       isert_conn);
+			goto out_fastreg;
 		}
 	}
 
@@ -582,9 +591,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 	return 0;
 
 out_conn_dev:
-	if (device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
-out_frwr:
+	if (device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
+out_fastreg:
+	ib_dereg_mr(isert_conn->conn_mr);
+out_mr:
+	ib_dealloc_pd(isert_conn->conn_pd);
+out_pd:
 	isert_device_try_release(device);
 out_rsp_dma_map:
 	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@ -608,8 +621,8 @@ isert_connect_release(struct isert_conn *isert_conn)
 
 	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
-	if (device && device->use_frwr)
-		isert_conn_free_frwr_pool(isert_conn);
+	if (device && device->use_fastreg)
+		isert_conn_free_fastreg_pool(isert_conn);
 
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
@@ -623,6 +636,9 @@ isert_connect_release(struct isert_conn *isert_conn)
 	isert_free_rx_descriptors(isert_conn);
 	rdma_destroy_id(isert_conn->conn_cm_id);
 
+	ib_dereg_mr(isert_conn->conn_mr);
+	ib_dealloc_pd(isert_conn->conn_pd);
+
 	if (isert_conn->login_buf) {
 		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
 				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -1024,13 +1040,13 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+*isert_allocate_cmd(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
 	struct isert_cmd *isert_cmd;
 	struct iscsi_cmd *cmd;
 
-	cmd = iscsit_allocate_cmd(conn, gfp);
+	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
 	if (!cmd) {
 		pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
 		return NULL;
@@ -1219,7 +1235,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
@@ -1233,7 +1249,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
@@ -1246,7 +1262,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
@@ -1254,7 +1270,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
@@ -1265,7 +1281,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 						    HZ);
 		break;
 	case ISCSI_OP_TEXT:
-		cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+		cmd = isert_allocate_cmd(conn);
 		if (!cmd)
 			break;
@@ -1404,25 +1420,25 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
 	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 	LIST_HEAD(unmap_list);
 
-	pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+	pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
 
 	if (wr->fr_desc) {
-		pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+		pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
 			 isert_cmd, wr->fr_desc);
 		spin_lock_bh(&isert_conn->conn_lock);
-		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+		list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
 		spin_unlock_bh(&isert_conn->conn_lock);
 		wr->fr_desc = NULL;
 	}
 
 	if (wr->sge) {
-		pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+		pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
 		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
 				(wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
 				DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@ -2163,26 +2179,22 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 
 static int
 isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-		  struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
-		  struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+		  struct isert_conn *isert_conn, struct scatterlist *sg_start,
+		  struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+		  unsigned int data_len)
 {
-	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-	struct scatterlist *sg_start;
-	u32 sg_off, page_off;
 	struct ib_send_wr fr_wr, inv_wr;
 	struct ib_send_wr *bad_wr, *wr = NULL;
+	int ret, pagelist_len;
+	u32 page_off;
 	u8 key;
-	int ret, sg_nents, pagelist_len;
 
-	sg_off = offset / PAGE_SIZE;
-	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-	sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
-			 ISCSI_ISER_SG_TABLESIZE);
+	sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
 	page_off = offset % PAGE_SIZE;
 
-	pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
-		 isert_cmd, fr_desc, sg_nents, sg_off, offset);
+	pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+		 fr_desc, sg_nents, offset);
 
 	pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
 					     &fr_desc->data_frpl->page_list[0]);
@@ -2232,8 +2244,8 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
 }
 
 static int
-isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-		    struct isert_rdma_wr *wr)
+isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+	       struct isert_rdma_wr *wr)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
@@ -2251,9 +2263,9 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
 		data_left = se_cmd->data_length;
 	} else {
-		sg_off = cmd->write_data_done / PAGE_SIZE;
-		data_left = se_cmd->data_length - cmd->write_data_done;
 		offset = cmd->write_data_done;
+		sg_off = offset / PAGE_SIZE;
+		data_left = se_cmd->data_length - cmd->write_data_done;
 		isert_cmd->tx_desc.isert_cmd = isert_cmd;
 	}
 
@@ -2311,16 +2323,16 @@ isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 		wr->fr_desc = NULL;
 	} else {
 		spin_lock_irqsave(&isert_conn->conn_lock, flags);
-		fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+		fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
 					   struct fast_reg_descriptor, list);
 		list_del(&fr_desc->list);
 		spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
 		wr->fr_desc = fr_desc;
 
-		ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-					ib_sge, offset, data_len);
+		ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+					ib_sge, sg_nents, offset, data_len);
 		if (ret) {
-			list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+			list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
 			goto unmap_sg;
 		}
 	}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 691f90ff2d8..708a069002f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -119,9 +119,9 @@ struct isert_conn {
 	wait_queue_head_t	conn_wait;
 	wait_queue_head_t	conn_wait_comp_err;
 	struct kref		conn_kref;
-	struct list_head	conn_frwr_pool;
-	int			conn_frwr_pool_size;
-	/* lock to protect frwr_pool */
+	struct list_head	conn_fr_pool;
+	int			conn_fr_pool_size;
+	/* lock to protect fastreg pool */
 	spinlock_t		conn_lock;
 #define ISERT_COMP_BATCH_COUNT	8
 	int			conn_comp_batch;
@@ -139,13 +139,11 @@ struct isert_cq_desc {
 };
 
 struct isert_device {
-	int			use_frwr;
+	int			use_fastreg;
 	int			cqs_used;
 	int			refcount;
 	int			cq_active_qps[ISERT_MAX_CQ];
 	struct ib_device	*ib_device;
-	struct ib_pd		*dev_pd;
-	struct ib_mr		*dev_mr;
 	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
 	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
 	struct isert_cq_desc	*cq_desc;
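[Editor's note] For readability, the isert_create_fr_desc() helper introduced in the diff above, restated outside diff form (logging omitted, everything else follows the patch): each fast-registration descriptor pairs a page list with a fast-reg MR, both sized to ISCSI_ISER_SG_TABLESIZE and allocated against the connection's PD.

/*
 * Sketch restating the helper from the diff above; illustrative only.
 */
static int example_create_fr_desc(struct ib_device *ib_dev, struct ib_pd *pd,
				  struct fast_reg_descriptor *fr_desc)
{
	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_dev,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl))
		return PTR_ERR(fr_desc->data_frpl);

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		/* unwind the page list if the MR allocation fails */
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		return PTR_ERR(fr_desc->data_mr);
	}
	fr_desc->valid = true;
	return 0;
}

isert_conn_create_fastreg_pool() calls this once per descriptor up to ISCSI_DEF_XMIT_CMDS_MAX, and isert_conn_free_fastreg_pool() releases the page list and MR of every descriptor still on conn_fr_pool.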