author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 09:52:46 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-05 09:52:46 -0800
commit	488823f114a3cef58e7eb932cac977440a2a59cb (patch)
tree	f6cb0840c927ba0234949b8f8728600a474cb1b2	/drivers/infiniband/hw/mthca/mthca_srq.c
parent	827b3f6abc21246928e38480bb308936701a2ad4 (diff)
parent	3c2d774cad5bf4fad576363da77870e9e6530b7a (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/nes: Add a driver for NetEffect RNICs
  IB/mthca: Return proper error codes from mthca_fmr_alloc()
  IB: Avoid marking __devinitdata as const
  IB/mlx4: Actually print out the driver version
  IB/ib_mthca: Pre-link receive WQEs in Tavor mode
  IB/mthca: Remove checks for srq->first_free < 0
  IB/fmr_pool: Allocate page list for pool FMRs only when caching enabled
  IB/srp: Retry stale connections
  mlx4_core: Don't read reserved fields in mlx4_QUERY_ADAPTER()
  IB/mthca: Don't read reserved fields in mthca_QUERY_ADAPTER()
  IPoIB: Remove a misleading debug print
  IPoIB: Handle bonding failover race for connected neighbours too
  IB/mthca: Fix and simplify page size calculation in mthca_reg_phys_mr()
  IB/ehca: Add PMA support
  IB/ehca: Update sma_attr also in case of disruptive config change
  IB/ehca: Prevent sending UD packets to QP0
  IB/cm: Add interim support for routed paths
  mlx4_core: Fix more section mismatches
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_srq.c')
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_srq.c	47
1 file changed, 16 insertions, 31 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 553d681f681..a5ffff6e102 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 	 * scatter list L_Keys to the sentry value of 0x100.
 	 */
 	for (i = 0; i < srq->max; ++i) {
-		wqe = get_wqe(srq, i);
+		struct mthca_next_seg *next;
 
-		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+		next = wqe = get_wqe(srq, i);
+
+		if (i < srq->max - 1) {
+			*wqe_to_link(wqe) = i + 1;
+			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
+		} else {
+			*wqe_to_link(wqe) = -1;
+			next->nda_op = 0;
+		}
 
 		for (scatter = wqe + sizeof (struct mthca_next_seg);
 		     (void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ out:
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
 {
 	int ind;
+	struct mthca_next_seg *last_free;
 
 	ind = wqe_addr >> srq->wqe_shift;
 
 	spin_lock(&srq->lock);
 
-	if (likely(srq->first_free >= 0))
-		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
-	else
-		srq->first_free = ind;
-
+	last_free = get_wqe(srq, srq->last_free);
+	*wqe_to_link(last_free) = ind;
+	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
 	*wqe_to_link(get_wqe(srq, ind)) = -1;
 	srq->last_free = ind;
 
@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	first_ind = srq->first_free;
 
 	for (nreq = 0; wr; wr = wr->next) {
-		ind = srq->first_free;
-
-		if (unlikely(ind < 0)) {
-			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
-			err = -ENOMEM;
-			*bad_wr = wr;
-			break;
-		}
-
+		ind       = srq->first_free;
 		wqe       = get_wqe(srq, ind);
 		next_ind  = *wqe_to_link(wqe);
 
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		prev_wqe  = srq->last;
 		srq->last = wqe;
 
-		((struct mthca_next_seg *) wqe)->nda_op = 0;
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */
 
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (i < srq->max_gs)
 			mthca_set_data_seg_inval(wqe);
 
-		((struct mthca_next_seg *) prev_wqe)->nda_op =
-			cpu_to_be32((ind << srq->wqe_shift) | 1);
-		wmb();
 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
 			cpu_to_be32(MTHCA_NEXT_DBD);
 
@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&srq->lock, flags);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
-		ind = srq->first_free;
-
-		if (unlikely(ind < 0)) {
-			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
-			err = -ENOMEM;
-			*bad_wr = wr;
-			break;
-		}
-
+		ind       = srq->first_free;
 		wqe       = get_wqe(srq, ind);
 		next_ind  = *wqe_to_link(wqe);
 
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			break;
 		}
 
-		((struct mthca_next_seg *) wqe)->nda_op =
-			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
 		/* flags field will always remain 0 */
 
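
Note: the two mthca commits above ("Pre-link receive WQEs in Tavor mode" and
"Remove checks for srq->first_free < 0") boil down to one idea: the SRQ's free
list of receive WQEs is linked once at creation time, and the SRQ is sized with
a spare WQE so the free list never empties, which is why the first_free < 0
checks disappear from the post paths. The following stand-alone C program is a
hypothetical sketch of that free-list shape, not mthca code; names such as
pool_take()/pool_put() and the NSLOTS constant are invented for illustration.

#include <stdio.h>

#define NSLOTS 8	/* pool size; the driver allocates max_wr + 1 WQEs */

static int next_slot[NSLOTS];	/* stand-in for the per-WQE link/nda_op field */
static int first_free, last_free;

static void pool_init(void)
{
	int i;

	/* Pre-link every slot to its successor exactly once, at init. */
	for (i = 0; i < NSLOTS; ++i)
		next_slot[i] = i < NSLOTS - 1 ? i + 1 : -1;

	first_free = 0;
	last_free  = NSLOTS - 1;
}

static int pool_take(void)
{
	int ind = first_free;

	/*
	 * No emptiness check: callers never hold more than NSLOTS - 1
	 * slots at once, so first_free stays valid (the driver enforces
	 * this by sizing the SRQ one WQE larger than max_wr).
	 */
	first_free = next_slot[ind];
	return ind;
}

static void pool_put(int ind)
{
	/* Re-link the freed slot at the tail, as mthca_free_srq_wqe() does. */
	next_slot[last_free] = ind;
	next_slot[ind] = -1;
	last_free = ind;
}

int main(void)
{
	int a, b;

	pool_init();
	a = pool_take();
	b = pool_take();
	pool_put(a);
	printf("took %d and %d, freed %d, next free slot is %d\n", a, b, a, first_free);
	return 0;
}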