Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_provider.c	|  6
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_qp.c	|  1
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_classes.h	|  4
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_irq.c	| 44
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_main.c	|  3
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_qp.c	| 31
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_reqs.c	| 51
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_ruc.c	| 10
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c	|  5
-rw-r--r--	drivers/infiniband/hw/mlx4/mr.c	|  1
-rw-r--r--	drivers/infiniband/hw/nes/nes.c	| 16
-rw-r--r--	drivers/infiniband/hw/nes/nes_hw.h	|  1
-rw-r--r--	drivers/infiniband/hw/nes/nes_verbs.c	| 64
13 files changed, 158 insertions(+), 79 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ecff9804358..160ef482712 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1102,9 +1102,7 @@ static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
 	char *cp, *next;
 	unsigned fw_maj, fw_min, fw_mic;
 
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 
 	next = info.fw_version + 1;
 	cp = strsep(&next, ".");
@@ -1192,9 +1190,7 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.fw_version);
 }
 
@@ -1207,9 +1203,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.driver);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3e4585c2318..19661b2f040 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -745,7 +745,6 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
 	wqe->read.rdmaop = T3_READ_REQ;
 	wqe->read.reserved[0] = 0;
 	wqe->read.reserved[1] = 0;
-	wqe->read.reserved[2] = 0;
 	wqe->read.rem_stag = cpu_to_be32(1);
 	wqe->read.rem_to = cpu_to_be64(1);
 	wqe->read.local_stag = cpu_to_be32(1);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 4df887af66a..7fc35cf0cdd 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -163,7 +163,8 @@ struct ehca_mod_qp_parm {
 /* struct for tracking if cqes have been reported to the application */
 struct ehca_qmap_entry {
 	u16 app_wr_id;
-	u16 reported;
+	u8 reported;
+	u8 cqe_req;
 };
 
 struct ehca_queue_map {
@@ -171,6 +172,7 @@ struct ehca_queue_map {
 	unsigned int entries;
 	unsigned int tail;
 	unsigned int left_to_poll;
+	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
 struct ehca_qp {
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index cb55be04442..757035ea246 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -359,36 +359,48 @@ static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
 	*old_attr = new_attr;
 }
 
+/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
+static int replay_modify_qp(struct ehca_sport *sport)
+{
+	int aqp1_destroyed;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+
+	aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
+
+	if (sport->ibqp_sqp[IB_QPT_SMI])
+		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+	if (!aqp1_destroyed)
+		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+
+	spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+	return aqp1_destroyed;
+}
+
 static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
 	u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
 	u8 spec_event;
 	struct ehca_sport *sport = &shca->sport[port - 1];
-	unsigned long flags;
 
 	switch (ec) {
 	case 0x30: /* port availability change */
 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-			int suppress_event;
-			/* replay modify_qp for sqps */
-			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
-			if (sport->ibqp_sqp[IB_QPT_SMI])
-				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-			if (!suppress_event)
-				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-			/* AQP1 was destroyed, ignore this event */
-			if (suppress_event)
-				break;
+			/* only replay modify_qp calls in autodetect mode;
+			 * if AQP1 was destroyed, the port is already down
+			 * again and we can drop the event.
+			 */
+			if (ehca_nr_ports < 0)
+				if (replay_modify_qp(sport))
+					break;
 
 			sport->port_state = IB_PORT_ACTIVE;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
 					    "is active");
-			ehca_query_sma_attr(shca, port,
-					    &sport->saved_attr);
+			ehca_query_sma_attr(shca, port, &sport->saved_attr);
 		} else {
 			sport->port_state = IB_PORT_DOWN;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index bb02a86aa52..bec7e024935 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -994,8 +994,7 @@ static int ehca_mem_notifier(struct notifier_block *nb,
 			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
 						   30 * 1000))
 				ehca_gen_err("DMEM operations are not allowed"
-					     "as long as an ehca adapter is"
-					     "attached to the LPAR");
+					     "in conjunction with eHCA");
 			return NOTIFY_BAD;
 		}
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4d54b9f6456..cadbf0cdd91 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -435,9 +435,13 @@ static void reset_queue_map(struct ehca_queue_map *qmap)
 {
 	int i;
 
-	qmap->tail = 0;
-	for (i = 0; i < qmap->entries; i++)
+	qmap->tail = qmap->entries - 1;
+	qmap->left_to_poll = 0;
+	qmap->next_wqe_idx = 0;
+	for (i = 0; i < qmap->entries; i++) {
 		qmap->map[i].reported = 1;
+		qmap->map[i].cqe_req = 0;
+	}
 }
 
 /*
@@ -860,6 +864,11 @@ static struct ehca_qp *internal_create_qp(
 	if (qp_type == IB_QPT_GSI) {
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
+			kfree(my_qp->mod_qp_parm);
+			my_qp->mod_qp_parm = NULL;
+			/* the QP pointer is no longer valid */
+			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+				NULL;
 			ret = ehca2ib_return_code(h_ret);
 			goto create_qp_exit6;
 		}
 	}
@@ -1116,6 +1125,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 	void *wqe_v;
 	u64 q_ofs;
 	u32 wqe_idx;
+	unsigned int tail_idx;
 
 	/* convert real to abs address */
 	wqe_p = wqe_p & (~(1UL << 63));
@@ -1128,12 +1138,17 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 		return -EFAULT;
 	}
 
+	tail_idx = (qmap->tail + 1) % qmap->entries;
 	wqe_idx = q_ofs / ipz_queue->qe_size;
-	if (wqe_idx < qmap->tail)
-		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
-	else
-		qmap->left_to_poll = wqe_idx - qmap->tail;
 
+	/* check all processed wqes, whether a cqe is requested or not */
+	while (tail_idx != wqe_idx) {
+		if (qmap->map[tail_idx].cqe_req)
+			qmap->left_to_poll++;
+		tail_idx = (tail_idx + 1) % qmap->entries;
+	}
+	/* save index in queue, where we have to start flushing */
+	qmap->next_wqe_idx = wqe_idx;
 	return 0;
 }
 
@@ -1180,10 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 	} else {
 		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
 		my_qp->sq_map.left_to_poll = 0;
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+							my_qp->sq_map.entries;
 		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
 		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
 		my_qp->rq_map.left_to_poll = 0;
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+							my_qp->rq_map.entries;
 		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 64928079eaf..00a648f4316 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -179,6 +179,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
 	qmap_entry->reported = 0;
+	qmap_entry->cqe_req = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -203,8 +204,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
 	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-	    && !hidden)
+	    && !hidden) {
 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+		qmap_entry->cqe_req = 1;
+	}
 
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
 	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
@@ -569,6 +572,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
 		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
 		qmap_entry->reported = 0;
+		qmap_entry->cqe_req = 1;
 
 		wqe_cnt++;
 	} /* eof for cur_recv_wr */
@@ -706,27 +710,34 @@ repoll:
 		goto repoll;
 	wc->qp = &my_qp->ib_qp;
 
+	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+		/* We got a send completion. */
+		qmap = &my_qp->sq_map;
+	else
+		/* We got a receive completion. */
+		qmap = &my_qp->rq_map;
+
+	/* advance the tail pointer */
+	qmap->tail = qmap_tail_idx;
+
 	if (is_error) {
 		/*
 		 * set left_to_poll to 0 because in error state, we will not
 		 * get any additional CQEs
 		 */
-		ehca_add_to_err_list(my_qp, 1);
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+							my_qp->sq_map.entries;
 		my_qp->sq_map.left_to_poll = 0;
+		ehca_add_to_err_list(my_qp, 1);
+
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+							my_qp->rq_map.entries;
+		my_qp->rq_map.left_to_poll = 0;
 
 		if (HAS_RQ(my_qp))
 			ehca_add_to_err_list(my_qp, 0);
-		my_qp->rq_map.left_to_poll = 0;
 	}
 
-	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-		/* We got a send completion. */
-		qmap = &my_qp->sq_map;
-	else
-		/* We got a receive completion. */
-		qmap = &my_qp->rq_map;
-
 	qmap_entry = &qmap->map[qmap_tail_idx];
 	if (qmap_entry->reported) {
 		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
 				my_qp->real_qp_num);
@@ -738,10 +749,6 @@ repoll:
 	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
 	qmap_entry->reported = 1;
 
-	/* this is a proper completion, we need to advance the tail pointer */
-	if (++qmap->tail == qmap->entries)
-		qmap->tail = 0;
-
 	/* if left_to_poll is decremented to 0, add the QP to the error list */
 	if (qmap->left_to_poll > 0) {
 		qmap->left_to_poll--;
@@ -805,13 +812,14 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 	else
 		qmap = &my_qp->rq_map;
 
-	qmap_entry = &qmap->map[qmap->tail];
+	qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
 		/* generate flush CQE */
+
 		memset(wc, 0, sizeof(*wc));
 
-		offset = qmap->tail * ipz_queue->qe_size;
+		offset = qmap->next_wqe_idx * ipz_queue->qe_size;
 		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
 		if (!wqe) {
 			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
@@ -850,11 +858,12 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 
 		wc->qp = &my_qp->ib_qp;
 
-		/* mark as reported and advance tail pointer */
+		/* mark as reported and advance next_wqe pointer */
 		qmap_entry->reported = 1;
-		if (++qmap->tail == qmap->entries)
-			qmap->tail = 0;
-		qmap_entry = &qmap->map[qmap->tail];
+		qmap->next_wqe_idx++;
+		if (qmap->next_wqe_idx == qmap->entries)
+			qmap->next_wqe_idx = 0;
+		qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 		wc++; nr++;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index fc0f6d9e603..2296832f94d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -156,7 +156,7 @@ bail:
 /**
  * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
- * @wr_id_only: update wr_id only, not SGEs
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
@@ -173,8 +173,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 	u32 tail;
 	int ret;
 
-	qp->r_sge.sg_list = qp->r_sg_list;
-
 	if (qp->ibqp.srq) {
 		srq = to_isrq(qp->ibqp.srq);
 		handler = srq->ibsrq.event_handler;
@@ -206,8 +204,10 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		wqe = get_rwqe_ptr(rq, tail);
 		if (++tail >= rq->size)
 			tail = 0;
-	} while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
-						&qp->r_sge));
+		if (wr_id_only)
+			break;
+		qp->r_sge.sg_list = qp->r_sg_list;
+	} while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
 	qp->r_wr_id = wqe->wr_id;
 	wq->tail = tail;
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d0866a3636e..18308494a19 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -343,6 +343,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
 	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	struct mlx4_mtt mtt;
 	int outst_cqe;
 	int err;
 
@@ -376,10 +377,13 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			goto out;
 	}
 
+	mtt = cq->buf.mtt;
+
 	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
 	if (err)
 		goto err_buf;
 
+	mlx4_mtt_cleanup(dev->dev, &mtt);
 	if (ibcq->uobject) {
 		cq->buf      = cq->resize_buf->buf;
 		cq->ibcq.cqe = cq->resize_buf->cqe;
@@ -406,6 +410,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	goto out;
 
 err_buf:
+	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
 	if (!ibcq->uobject)
 		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
 				    cq->resize_buf->cqe);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 87f5c5a87b9..8e4d26d56a9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -205,6 +205,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 		goto err_mr;
 
 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+	mr->umem = NULL;
 
 	return &mr->ibmr;
 
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a2b04d62b1a..aa1dc41f04c 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -95,6 +95,10 @@ unsigned int wqm_quanta = 0x10000;
 module_param(wqm_quanta, int, 0644);
 MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
 
+static unsigned int limit_maxrdreqsz;
+module_param(limit_maxrdreqsz, bool, 0644);
+MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
@@ -588,6 +592,18 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 			nesdev->nesadapter->port_count;
 	}
 
+	if ((limit_maxrdreqsz ||
+	     ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
+	      (hw_rev == NE020_REV1))) &&
+	    (pcie_get_readrq(pcidev) > 256)) {
+		if (pcie_set_readrq(pcidev, 256))
+			printk(KERN_ERR PFX "Unable to set max read request"
+				" to 256 bytes\n");
+		else
+			nes_debug(NES_DBG_INIT, "Max read request size set"
+				" to 256 bytes\n");
+	}
+
 	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 
 	/* bring up the Control QP */
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 610b9d85959..bc0b4de0445 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -40,6 +40,7 @@
 #define NES_PHY_TYPE_ARGUS    4
 #define NES_PHY_TYPE_PUMA_1G  5
 #define NES_PHY_TYPE_PUMA_10G 6
+#define NES_PHY_TYPE_GLADIUS  7
 
 #define NES_MULTICAST_PF_MAX 8
 
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 932e56fcf77..d36c9a0bf1b 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -220,14 +220,14 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
-	spin_lock_irqsave(&nesqp->lock, flags);
+	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.sq_head;
 	qsize = nesqp->hwqp.sq_tail;
 
 	/* Check for SQ overflow */
 	if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
-		spin_unlock_irqrestore(&nesqp->lock, flags);
+		spin_unlock_irqrestore(&nesqp->lock, flags);
 		return -EINVAL;
 	}
 
@@ -269,7 +269,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
 	nes_write32(nesdev->regs+NES_WQE_ALLOC,
 			(1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
 
-	spin_unlock_irqrestore(&nesqp->lock, flags);
+	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	return 0;
 }
@@ -349,7 +349,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 		if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
-			goto failed_vpbl_alloc;
+			goto failed_vpbl_avail;
 		} else {
 			nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
 		}
@@ -357,7 +357,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 		if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
-			goto failed_vpbl_alloc;
+			goto failed_vpbl_avail;
 		} else {
 			nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
 		}
@@ -391,14 +391,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			goto failed_vpbl_alloc;
 		}
 
-		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
 		if (!nesfmr->root_vpbl.leaf_vpbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
 			goto failed_leaf_vpbl_alloc;
 		}
 
-		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
 		nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
 				" leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
 				nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
@@ -519,6 +519,16 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			nesfmr->root_vpbl.pbl_pbase);
 
 failed_vpbl_alloc:
+	if (nesfmr->nesmr.pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesfmr->nesmr.pbl_4k)
+			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+		else
+			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+
+failed_vpbl_avail:
 	kfree(nesfmr);
 
 failed_fmr_alloc:
@@ -534,18 +544,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
  */
 static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 {
+	unsigned long flags;
 	struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
 	struct nes_fmr *nesfmr = to_nesfmr(nesmr);
 	struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
 	struct nes_device *nesdev = nesvnic->nesdev;
-	struct nes_mr temp_nesmr = *nesmr;
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i = 0;
 
-	temp_nesmr.ibmw.device = ibfmr->device;
-	temp_nesmr.ibmw.pd = ibfmr->pd;
-	temp_nesmr.ibmw.rkey = ibfmr->rkey;
-	temp_nesmr.ibmw.uobject = NULL;
-
 	/* free the resources */
 	if (nesfmr->leaf_pbl_cnt == 0) {
 		/* single PBL case */
@@ -561,8 +567,24 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 		pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
 				nesfmr->root_vpbl.pbl_pbase);
 	}
+	nesmr->ibmw.device = ibfmr->device;
+	nesmr->ibmw.pd = ibfmr->pd;
+	nesmr->ibmw.rkey = ibfmr->rkey;
+	nesmr->ibmw.uobject = NULL;
 
-	return nes_dealloc_mw(&temp_nesmr.ibmw);
+	if (nesfmr->nesmr.pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesfmr->nesmr.pbl_4k) {
+			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+			WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
+		} else {
+			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+			WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
+		}
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+
+	return nes_dealloc_mw(&nesmr->ibmw);
 }
 
 
@@ -1595,7 +1617,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
 			nes_ucontext->mcrqf = req.mcrqf;
 			if (nes_ucontext->mcrqf) {
 				if (nes_ucontext->mcrqf & 0x80000000)
-					nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+					nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
 				else if (nes_ucontext->mcrqf & 0x40000000)
 					nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
 				else
@@ -3212,7 +3234,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
-	spin_lock_irqsave(&nesqp->lock, flags);
+	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.sq_head;
 
@@ -3337,7 +3359,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 				(counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
 	}
 
-	spin_unlock_irqrestore(&nesqp->lock, flags);
+	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3368,7 +3390,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
-	spin_lock_irqsave(&nesqp->lock, flags);
+	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.rq_head;
 
@@ -3421,7 +3443,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 		nes_write32(nesdev->regs+NES_WQE_ALLOC,
 				(counter<<24) | nesqp->hwqp.qp_id);
 	}
 
-	spin_unlock_irqrestore(&nesqp->lock, flags);
+	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3453,7 +3475,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
 	nes_debug(NES_DBG_CQ, "\n");
 
-	spin_lock_irqsave(&nescq->lock, flags);
+	spin_lock_irqsave(&nescq->lock, flags);
 
 	head = nescq->hw_cq.cq_head;
 	cq_size = nescq->hw_cq.cq_size;
@@ -3562,7 +3584,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 	nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
 			cqe_count, nescq->hw_cq.cq_number);
 
-	spin_unlock_irqrestore(&nescq->lock, flags);
+	spin_unlock_irqrestore(&nescq->lock, flags);
 
 	return cqe_count;
 }