author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 09:52:46 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 09:52:46 -0800
commit    488823f114a3cef58e7eb932cac977440a2a59cb (patch)
tree      f6cb0840c927ba0234949b8f8728600a474cb1b2 /drivers/infiniband/hw/mthca/mthca_qp.c
parent    827b3f6abc21246928e38480bb308936701a2ad4 (diff)
parent    3c2d774cad5bf4fad576363da77870e9e6530b7a (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/nes: Add a driver for NetEffect RNICs
  IB/mthca: Return proper error codes from mthca_fmr_alloc()
  IB: Avoid marking __devinitdata as const
  IB/mlx4: Actually print out the driver version
  IB/ib_mthca: Pre-link receive WQEs in Tavor mode
  IB/mthca: Remove checks for srq->first_free < 0
  IB/fmr_pool: Allocate page list for pool FMRs only when caching enabled
  IB/srp: Retry stale connections
  mlx4_core: Don't read reserved fields in mlx4_QUERY_ADAPTER()
  IB/mthca: Don't read reserved fields in mthca_QUERY_ADAPTER()
  IPoIB: Remove a misleading debug print
  IPoIB: Handle bonding failover race for connected neighbours too
  IB/mthca: Fix and simplify page size calculation in mthca_reg_phys_mr()
  IB/ehca: Add PMA support
  IB/ehca: Update sma_attr also in case of disruptive config change
  IB/ehca: Prevent sending UD packets to QP0
  IB/cm: Add interim support for routed paths
  mlx4_core: Fix more section mismatches
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 13 ++++++++-----
1 file changed, 8 insertions(+), 5 deletions(-)
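
The mthca_qp.c part of this merge is the "Pre-link receive WQEs in Tavor mode" change: mthca_alloc_qp_common() now walks the receive queue once at QP creation and writes each WQE's nda_op to point at the next WQE (wrapping at qp->rq.max), so mthca_tavor_post_receive() no longer has to rewrite nda_op, and issue the accompanying wmb(), for every posted receive. What follows is a minimal, standalone userspace sketch of that pre-linking idea, not driver code; the ring and header names are hypothetical stand-ins for the driver's structures, and only the ((next_index << wqe_shift) | 1) encoding is taken from the patch itself.

/*
 * Sketch: pre-link fixed-size receive WQEs in a ring, once, at creation
 * time.  struct wqe_hdr stands in for struct mthca_next_seg; "ring",
 * "max" and "wqe_shift" mirror qp->rq.* only by analogy.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct wqe_hdr {
	uint32_t nda_op;	/* offset of the next WQE in the ring, low bit set */
	uint32_t ee_nds;
};

int main(void)
{
	int max = 8;		/* number of receive WQEs (cf. qp->rq.max) */
	int wqe_shift = 6;	/* log2 of the WQE stride (cf. qp->rq.wqe_shift) */
	char *ring = calloc(max, 1 << wqe_shift);
	int i;

	if (!ring)
		return 1;

	/* Link every WQE to its successor, wrapping at the end, exactly
	 * once instead of on every post_receive. */
	for (i = 0; i < max; ++i) {
		struct wqe_hdr *hdr =
			(struct wqe_hdr *) (ring + (i << wqe_shift));
		hdr->nda_op = htonl((((i + 1) % max) << wqe_shift) | 1);
	}

	for (i = 0; i < max; ++i) {
		struct wqe_hdr *hdr =
			(struct wqe_hdr *) (ring + (i << wqe_shift));
		printf("WQE %d -> next at offset 0x%x\n",
		       i, (unsigned) (ntohl(hdr->nda_op) & ~1u));
	}

	free(ring);
	return 0;
}

The memfree (Arbel) path already set up its links at creation time, which is why the struct mthca_next_seg *next declaration simply moves up to function scope in the first hunk; the new else branch gives the Tavor path the same one-time setup.
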
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c6573..db5595bbf7f 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 {
 	int ret;
 	int i;
+	struct mthca_next_seg *next;
 
 	qp->refcount = 1;
 	init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	}
 
 	if (mthca_is_memfree(dev)) {
-		struct mthca_next_seg *next;
 		struct mthca_data_seg *scatter;
 		int size = (sizeof (struct mthca_next_seg) +
 			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 						    qp->sq.wqe_shift) +
 						   qp->send_wqe_offset);
 		}
+	} else {
+		for (i = 0; i < qp->rq.max; ++i) {
+			next = get_recv_wqe(qp, i);
+			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
+					      qp->rq.wqe_shift) | 1);
+		}
+
 	}
 
 	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		prev_wqe = qp->rq.last;
 		qp->rq.last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
 		((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		qp->wrid[ind] = wr->wr_id;
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD | size);