path: root/drivers/infiniband/hw/qib/qib_verbs.c
author	Mike Marciniszyn <mike.marciniszyn@intel.com>	2012-07-19 13:03:56 +0000
committer	Roland Dreier <roland@purestorage.com>	2012-07-19 11:19:58 -0700
commit	551ace124d0ef471e8a5fee3ef9e5bb7460251be (patch)
tree	d04b3700a959bc1d87f27ba4cc0752b6dd1725b9 /drivers/infiniband/hw/qib/qib_verbs.c
parent	f3331f88a4b97530b7acd3112902524d9dc0688c (diff)
IB/qib: Reduce sdma_lock contention
Profiling has shown that sdma_lock is proving a bottleneck for performance. The situations include:

- RDMA reads when krcvqs > 1
- post sends from multiple threads

For RDMA reads, the current global qib_wq mechanism runs on all CPUs and contends for the sdma_lock when multiple RDMA read requests are fielded on different CPUs. For post sends, the direct call to qib_do_send() from multiple threads causes the contention.

Since the sdma mechanism is per port, this fix converts the existing workqueue to a per-port single-thread workqueue to reduce the lock contention in the RDMA read case, and in any other case where the QP is scheduled via the workqueue mechanism from more than one CPU.

For the post send case, this patch modifies the post send code to test for a non-empty sdma engine. If the sdma is not idle, the (now single-thread) workqueue will be used to trigger the send engine instead of the direct call to qib_do_send().

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
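The per-port workqueue conversion itself lands outside this file (the diffstat below is limited to qib_verbs.c). A minimal sketch of the idea, in which the helper name and the wq_name format are illustrative assumptions, not the patch's actual init code:

/*
 * Hypothetical sketch: allocate one single-threaded workqueue per
 * port.  Serializing each port's QP work items on one thread is what
 * removes the cross-CPU contention on that port's sdma_lock.
 */
static int example_create_port_wq(struct qib_devdata *dd,
				  struct qib_pportdata *ppd, int port)
{
	char wq_name[16];

	snprintf(wq_name, sizeof(wq_name), "qib%d_%d", dd->unit, port);
	ppd->qib_wq = create_singlethread_workqueue(wq_name);
	return ppd->qib_wq ? 0 : -ENOMEM;
}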
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_verbs.c')
-rw-r--r--	drivers/infiniband/hw/qib/qib_verbs.c	| 29 ++++++++++++++++++++++++++---
1 file changed, 26 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 03ace0650a8..fc9b205c241 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
* @qp: the QP to post on
* @wr: the work request to send
*/
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
+ int *scheduled)
{
struct qib_swqe *wqe;
u32 next;
@@ -440,6 +441,12 @@ bail_inval_free:
bail_inval:
ret = -EINVAL;
bail:
+ if (!ret && !wr->next &&
+ !qib_sdma_empty(
+ dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
+ qib_schedule_send(qp);
+ *scheduled = 1;
+ }
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
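The qib_sdma_empty() test above is defined in qib_sdma.c, which is outside this diff. A plausible shape, assuming the per-port descriptor-ring counters the sdma code keeps (an assumption, not the verbatim source):

/*
 * Assumed shape of the idle test: the engine is idle once every
 * descriptor added to the ring has been retired.  The counter field
 * names here are assumptions about the per-port sdma state.
 */
int qib_sdma_empty(struct qib_pportdata *ppd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = (ppd->sdma_descq_added == ppd->sdma_descq_removed);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
	return ret;
}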
@@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct qib_qp *qp = to_iqp(ibqp);
int err = 0;
+ int scheduled = 0;
 
 	for (; wr; wr = wr->next) {
- err = qib_post_one_send(qp, wr);
+ err = qib_post_one_send(qp, wr, &scheduled);
if (err) {
*bad_wr = wr;
goto bail;
@@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
 
 	/* Try to do the send work in the caller's context. */
- qib_do_send(&qp->s_work);
+ if (!scheduled)
+ qib_do_send(&qp->s_work);
 
 bail:
return err;
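From a verbs consumer's point of view nothing changes: ib_post_send() is called exactly as before, and the direct-call-vs-workqueue decision happens inside qib_post_send(). A usage sketch against the 3.5-era verbs API, with QP and SGE setup assumed done elsewhere:

#include <rdma/ib_verbs.h>

/*
 * Usage sketch only: qp is an established RC QP and sge describes a
 * registered local buffer.  Whether the send work runs here or on the
 * per-port workqueue is the driver's decision, per the hunk above.
 */
static int example_rdma_read(struct ib_qp *qp, struct ib_sge *sge,
			     u64 remote_addr, u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode     = IB_WR_RDMA_READ,
		.sg_list    = sge,
		.num_sge    = 1,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	wr.wr.rdma.remote_addr = remote_addr;
	wr.wr.rdma.rkey = rkey;

	return ib_post_send(qp, &wr, &bad_wr);
}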
@@ -2308,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
get_order(lk_tab_size));
kfree(dev->qp_table);
}
+
+/*
+ * This must be called with s_lock held.
+ */
+void qib_schedule_send(struct qib_qp *qp)
+{
+ if (qib_send_ok(qp)) {
+ struct qib_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ queue_work(ppd->qib_wq, &qp->s_work);
+ }
+}
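For context, qp->s_work is bound to qib_do_send() at QP creation time, in qib_qp.c rather than in this file (approximate):

	INIT_WORK(&qp->s_work, qib_do_send);

Since queue_work() is a no-op for an item that is already pending, repeated qib_schedule_send() calls for the same QP collapse into a single pass of qib_do_send() on the port's single-threaded workqueue.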