| | | |
|---|---|---|
| author | Stefan Roscher <ossrosch@linux.vnet.ibm.com> | 2008-05-07 11:35:06 -0700 |
| committer | Roland Dreier <rolandd@cisco.com> | 2008-05-07 11:35:06 -0700 |
| commit | 12137c593d127c6c1a3eb050674da047682badaf (patch) | |
| tree | f615c55e363b550c7489aa0aceeb8d62c201fcad /drivers/infiniband/hw/ehca/ehca_qp.c | |
| parent | ab69b3cf1219e0d07bb4ea373f36b1de38af531c (diff) | |
IB/ehca: Wait for async events to finish before destroying QP
This is necessary because, in a multicore environment, a race between the
uverbs async event handler and destroy QP could otherwise occur.
Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_qp.c')
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 5 +++++
 1 file changed, 5 insertions(+), 0 deletions(-)
```diff
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 18fba92fa7a..3f59587338e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp(
 		return ERR_PTR(-ENOMEM);
 	}
 
+	atomic_set(&my_qp->nr_events, 0);
+	init_waitqueue_head(&my_qp->wait_completion);
 	spin_lock_init(&my_qp->spinlock_s);
 	spin_lock_init(&my_qp->spinlock_r);
 	my_qp->qp_type = qp_type;
@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	idr_remove(&ehca_qp_idr, my_qp->token);
 	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
+	/* now wait until all pending events have completed */
+	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
+
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
```
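The hunks above only show the destroy-side wait and the counter initialization; the matching increment and wake-up live in the ehca async event path, which is not part of this file's diff. Below is a minimal sketch of that complementary side, assuming a hypothetical ehca_handle_qp_event() helper; only the nr_events counter, the wait_completion waitqueue, and the wait in internal_destroy_qp() are taken from the patch itself.

```c
/*
 * Sketch only -- not from this diff.  Illustrates the event-counting
 * pattern the patch relies on; the function name and surrounding locking
 * are assumptions.
 */
static void ehca_handle_qp_event(struct ehca_qp *my_qp)
{
	/*
	 * Take a reference while the QP is still guaranteed to be valid
	 * (e.g. while it can still be found via ehca_qp_idr), so that
	 * internal_destroy_qp() cannot free it underneath us.
	 */
	atomic_inc(&my_qp->nr_events);

	/* ... dispatch the async event to the uverbs handler ... */

	/* Drop the reference and wake a waiting internal_destroy_qp(). */
	if (atomic_dec_and_test(&my_qp->nr_events))
		wake_up(&my_qp->wait_completion);
}
```

With this in place, the wait_event() added to internal_destroy_qp() returns only once every in-flight async event has dropped its reference, closing the race described in the commit message.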