| author | Mike Marciniszyn <mike.marciniszyn@qlogic.com> | 2011-01-10 17:42:21 -0800 |
|---|---|---|
| committer | Roland Dreier <rolandd@cisco.com> | 2011-01-10 17:42:21 -0800 |
| commit | 2528ea60f94ef9e1e1cd82066d55f62a1d19fde1 (patch) | |
| tree | f2e3494c3b1ac698efb1ba3ba5675d1f21d03d06 /drivers/infiniband/hw/qib/qib_qp.c | |
| parent | 19ede2e422496b2a064b9b22823c6afb66ff927b (diff) | |
IB/qib: Change receive queue/QPN selection
The basic idea is that on SusieQ, the difficult part of mapping a QPN to a
receive context is handled by the mapping registers, so the generic QPN
allocation doesn't need to worry about chip specifics. For Monty and
Linda, there is no mapping table, so qpt->mask (the same value as
dd->qpn_mask) is used to check that the context selected by the QPN
falls within [0..dd->n_krcv_queues).
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
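
To make the new rule concrete, here is a small standalone sketch (not driver code) of the check this patch switches to: a QPN is acceptable when the context index encoded in its masked bits, (qpn & mask) >> 1, is below dd->n_krcv_queues; otherwise the allocator jumps past the whole out-of-range block with (qpn | mask) + 2. The mask value, queue count, and the old_next/new_next helper names below are illustrative assumptions for the sketch, not values or functions taken from the driver; the old per-CPU variant is included only for contrast with the removed code.

```c
/*
 * Standalone sketch (not driver code) of the QPN selection change.
 * QPN_MASK and N_KRCVQS are assumed values for illustration only:
 * here (qpn & QPN_MASK) >> 1 selects a receive context, as in the
 * qib allocator's qpt->mask handling.
 */
#include <stdio.h>

#define QPN_MASK	0x3f	/* assumed qpt->mask; QPN bits 5..1 give the context */
#define N_KRCVQS	4	/* assumed dd->n_krcv_queues */

/* Old scheme: every candidate QPN was forced into the single context
 * 'r' derived from smp_processor_id() at QP-creation time. */
static unsigned old_next(unsigned qpn, unsigned r)
{
	if (((qpn & QPN_MASK) >> 1) != r)
		qpn = ((qpn & QPN_MASK) ?
			(qpn | QPN_MASK) + 1 : qpn) | (r << 1);
	return qpn;
}

/* New scheme: any context below n_krcv_queues is acceptable; only
 * QPNs whose context index is out of range are skipped, landing on
 * context 0 of the next block. */
static unsigned new_next(unsigned qpn)
{
	if (((qpn & QPN_MASK) >> 1) >= N_KRCVQS)
		qpn = (qpn | QPN_MASK) + 2;
	return qpn;
}

int main(void)
{
	for (unsigned qpn = 2; qpn <= 12; qpn++)
		printf("qpn %2u: ctxt %2u  old(r=1) -> %2u  new -> %2u\n",
		       qpn, (qpn & QPN_MASK) >> 1,
		       old_next(qpn, 1), new_next(qpn));
	return 0;
}
```

Running the sketch shows the design change: the old check pins every allocation to the one context matching the creating CPU, skipping most QPNs in each block, while the new check accepts any QPN whose context falls below the number of kernel receive contexts.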
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_qp.c')
| -rw-r--r-- | drivers/infiniband/hw/qib/qib_qp.c | 26 |
1 files changed, 10 insertions, 16 deletions
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 32dacd44415..eaab008466c 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 					struct qpn_map *map, unsigned off,
-					unsigned r)
+					unsigned n)
 {
 	if (qpt->mask) {
 		off++;
-		if ((off & qpt->mask) >> 1 != r)
-			off = ((off & qpt->mask) ?
-				(off | qpt->mask) + 1 : off) | (r << 1);
+		if (((off & qpt->mask) >> 1) >= n)
+			off = (off | qpt->mask) + 2;
 	} else
 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
 	return off;
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 	u32 i, offset, max_scan, qpn;
 	struct qpn_map *map;
 	u32 ret;
-	int r;
 
 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 		goto bail;
 	}
 
-	r = smp_processor_id();
-	if (r >= dd->n_krcv_queues)
-		r %= dd->n_krcv_queues;
 	qpn = qpt->last + 1;
 	if (qpn >= QPN_MAX)
 		qpn = 2;
-	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
-		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
-			(r << 1);
+	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+		qpn = (qpn | qpt->mask) + 2;
 	offset = qpn & BITS_PER_PAGE_MASK;
 	map = &qpt->map[qpn / BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 				ret = qpn;
 				goto bail;
 			}
-			offset = find_next_offset(qpt, map, offset, r);
+			offset = find_next_offset(qpt, map, offset,
+				dd->n_krcv_queues);
 			qpn = mk_qpn(qpt, map, offset);
 			/*
 			 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 			if (qpt->nmaps == QPNMAP_ENTRIES)
 				break;
 			map = &qpt->map[qpt->nmaps++];
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else if (map < &qpt->map[qpt->nmaps]) {
 			++map;
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else {
 			map = &qpt->map[0];
-			offset = qpt->mask ? (r << 1) : 2;
+			offset = 2;
 		}
 		qpn = mk_qpn(qpt, map, offset);
 	}
@@ -1065,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 	}
 	qp->ibqp.qp_num = err;
 	qp->port_num = init_attr->port_num;
-	qp->processor_id = smp_processor_id();
 	qib_reset_qp(qp, init_attr->qp_type);
 	break;