author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-11 08:58:32 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-09-11 08:58:32 -0700
commit    | 2490138cb785d299d898b579fa2874a59a3d321a (patch)
tree      | fc9acc19ce1e089e383f901e4eaa406a739f4837 /drivers/net/mlx4/main.c
parent    | f6f79190866d5b2d06a2114d673f91f54e7c7ce4 (diff)
parent    | 73f526da0260db5376951373c267596993dc13a8 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (48 commits)
RDMA/iwcm: Reject the connection when the cm_id is destroyed
RDMA/cxgb3: Clean up properly on FW mismatch failures
RDMA/cxgb3: Don't ignore insert_handle() failures
MAINTAINERS: InfiniBand/RDMA mailing list transition to vger
IB/mad: Allow tuning of QP0 and QP1 sizes
IB/mad: Fix possible lock-lock-timer deadlock
RDMA/nes: Map MTU to IB_MTU_* and correctly report link state
RDMA/nes: Rework the disconn routine for terminate and flushing
RDMA/nes: Use the flush code to fill in cqe error
RDMA/nes: Make poll_cq return correct number of wqes during flush
RDMA/nes: Use flush mechanism to set status for wqe in error
RDMA/nes: Implement Terminate Packet
RDMA/nes: Add CQ error handling
RDMA/nes: Clean out CQ completions when QP is destroyed
RDMA/nes: Change memory allocation for cqp request to GFP_ATOMIC
RDMA/nes: Allocate work item for disconnect event handling
RDMA/nes: Update refcnt during disconnect
IB/mthca: Don't allow userspace open while recovering from catastrophic error
IB/mthca: Distinguish multiple devices in /proc/interrupts
IB/mthca: Annotate CQ locking
...
Diffstat (limited to 'drivers/net/mlx4/main.c')
-rw-r--r-- | drivers/net/mlx4/main.c | 37
1 file changed, 15 insertions(+), 22 deletions(-)
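
Both ICM hunks in the diff below follow the kernel's standard unwind idiom: acquire resources in order, and on failure jump to a label that releases everything acquired so far, in reverse order. That is why the EQ table's cleanup call slots directly into the existing err_unmap_eq label and into mlx4_free_icms(). A standalone C sketch of the idiom, with hypothetical resource names (qp/cq/eq are illustrative only, not the driver's code):

#include <stdlib.h>

/* Acquire three resources in order; on any failure, fall through the
 * labels below, which release only what was already acquired, in
 * reverse order of acquisition. */
static int setup_contexts(void)
{
	char *qp_ctx, *cq_ctx, *eq_ctx;

	qp_ctx = malloc(64);		/* first resource acquired */
	if (!qp_ctx)
		return -1;

	cq_ctx = malloc(64);
	if (!cq_ctx)
		goto err_free_qp;

	eq_ctx = malloc(64);		/* last resource acquired */
	if (!eq_ctx)
		goto err_free_cq;

	/* ... use the contexts, then release in reverse order ... */
	free(eq_ctx);
	free(cq_ctx);
	free(qp_ctx);
	return 0;

err_free_cq:
	free(cq_ctx);
err_free_qp:
	free(qp_ctx);
	return -1;
}

int main(void)
{
	return setup_contexts() ? EXIT_FAILURE : EXIT_SUCCESS;
}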
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index dac621b1e9f..3dd481e77f9 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 		goto err_unmap_aux;
 	}
 
-	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
+				  dev->caps.num_eqs, dev->caps.num_eqs,
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
 		goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
 
 err_unmap_eq:
-	mlx4_unmap_eq_icm(dev);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 
 err_unmap_cmpt:
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-	mlx4_unmap_eq_icm(dev);
 
 	mlx4_UNMAP_ICM_AUX(dev);
 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	return 0;
 
 err_close:
-	mlx4_close_hca(dev);
+	mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_disable_pdev;
 	}
 
-	err = pci_request_region(pdev, 0, DRV_NAME);
+	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
 		goto err_disable_pdev;
 	}
 
-	err = pci_request_region(pdev, 2, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
-		goto err_release_bar0;
-	}
-
 	pci_set_master(pdev);
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
-			goto err_release_bar2;
+			goto err_release_regions;
 		}
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		if (err) {
 			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
 				"aborting.\n");
-			goto err_release_bar2;
+			goto err_release_regions;
 		}
 	}
 
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		dev_err(&pdev->dev, "Device struct alloc failed, "
 			"aborting.\n");
 		err = -ENOMEM;
-		goto err_release_bar2;
+		goto err_release_regions;
 	}
 
 	dev = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
 err_free_dev:
 	kfree(priv);
 
-err_release_bar2:
-	pci_release_region(pdev, 2);
-
-err_release_bar0:
-	pci_release_region(pdev, 0);
+err_release_regions:
+	pci_release_regions(pdev);
 
 err_disable_pdev:
 	pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 		pci_disable_msix(pdev);
 
 	kfree(priv);
-	pci_release_region(pdev, 2);
-	pci_release_region(pdev, 0);
+	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
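
For context, the probe-path change above replaces the per-BAR pci_request_region() calls with a single pci_request_regions(), which claims every BAR at once; teardown mirrors it with pci_release_regions(), so the two unwind labels collapse into one. A minimal PCI driver skeleton using that consolidated pattern, assuming current kernel module APIs (module_pci_driver()) and placeholder driver name and device IDs, not mlx4's actual code:

#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_DRV_NAME "demo_pci"

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* One call instead of pci_request_region(pdev, 0, ...) and
	 * pci_request_region(pdev, 2, ...): all BARs or nothing, and
	 * only one unwind label is needed on failure. */
	err = pci_request_regions(pdev, DEMO_DRV_NAME);
	if (err)
		goto err_disable_pdev;

	pci_set_master(pdev);
	return 0;

err_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

static void demo_remove(struct pci_dev *pdev)
{
	/* Teardown in reverse order of probe. */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device ID */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_driver = {
	.name     = DEMO_DRV_NAME,
	.id_table = demo_ids,
	.probe    = demo_probe,
	.remove   = demo_remove,
};
module_pci_driver(demo_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sketch of pci_request_regions()/pci_release_regions() usage");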