author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-12-02 11:36:24 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2011-02-18 17:27:49 -0500
commit		e0138c26cdeee8c033256ccd9e07d66db3c998be (patch)
tree		2d354ce89abb54bf9ae36e6ca287e12d84c2ed5b /drivers
parent		c39d35161e87f1d7c0628af6907ac66a8c77f63f (diff)
nouveau/ttm/PCIe: Use dma_addr if TTM has set it.
If the TTM layer has used the DMA API to set up pages that are
TTM_PAGE_FLAG_DMA32 (see the patch titled "ttm: Utilize the DMA API for
pages that have TTM_PAGE_FLAG_DMA32 set"), use those addresses when
programming the GART in the PCIe-type cards.

This patch skips the pci_map_page (and pci_unmap_page) calls when a DMA
address has been passed in for a page. If the dma_address is zero (or
DMA_ERROR_CODE), we fall back to the old behaviour.

[v2: Added a Reviewed-by tag]

Reviewed-by: Thomas Hellstrom <thomas@shipmail.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Ian Campbell <ian.campbell@citrix.com>
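In short: the populate path now prefers a caller-supplied DMA address over
mapping the page itself, and records which pages it did not map so that the
clear path knows not to unmap them. A minimal sketch of that per-page
decision, with the loop body pulled out into a hypothetical helper
(populate_one_page() and its argument list are illustrative only, not code
from the driver):

	/*
	 * Illustrative sketch, not driver code: the per-page choice that
	 * nouveau_sgdma_populate() makes after this patch.
	 */
	static int populate_one_page(struct nouveau_sgdma_be *nvbe,
				     struct pci_dev *pdev, struct page *page,
				     dma_addr_t ttm_dma_addr, unsigned i)
	{
		if (ttm_dma_addr != DMA_ERROR_CODE) {
			/* TTM already mapped this page through the DMA API;
			 * reuse its address and remember not to unmap it. */
			nvbe->pages[i] = ttm_dma_addr;
			nvbe->ttm_alloced[i] = true;
		} else {
			/* Old behaviour: map the page here;
			 * nouveau_sgdma_clear() will unmap it later. */
			nvbe->pages[i] = pci_map_page(pdev, page, 0, PAGE_SIZE,
						      PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(pdev, nvbe->pages[i]))
				return -EFAULT;
			nvbe->ttm_alloced[i] = false;
		}
		return 0;
	}

The bool array is the key design point: mapping ownership is now split
between TTM and the backend, so each page must carry a flag saying who is
responsible for unmapping it.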
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index edc140ab4df..bbdd982cbb3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
+	bool *ttm_alloced;
 	unsigned nr_pages;
 
 	unsigned pte_start;
@@ -35,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 	if (!nvbe->pages)
 		return -ENOMEM;
 
+	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+	if (!nvbe->ttm_alloced)
+		return -ENOMEM;
+
 	nvbe->nr_pages = 0;
 	while (num_pages--) {
-		nvbe->pages[nvbe->nr_pages] =
-			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+		if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+			nvbe->pages[nvbe->nr_pages] =
+					dma_addrs[nvbe->nr_pages];
+			nvbe->ttm_alloced[nvbe->nr_pages] = true;
+		} else {
+			nvbe->pages[nvbe->nr_pages] =
+				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
 				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev,
-					  nvbe->pages[nvbe->nr_pages])) {
-			be->func->clear(be);
-			return -EFAULT;
+			if (pci_dma_mapping_error(dev->pdev,
+						  nvbe->pages[nvbe->nr_pages])) {
+				be->func->clear(be);
+				return -EFAULT;
+			}
 		}
 
 		nvbe->nr_pages++;
@@ -66,11 +77,14 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			be->func->unbind(be);
 
 		while (nvbe->nr_pages--) {
-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			if (!nvbe->ttm_alloced[nvbe->nr_pages])
+				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
 
 		kfree(nvbe->pages);
+		kfree(nvbe->ttm_alloced);
 		nvbe->pages = NULL;
+		nvbe->ttm_alloced = NULL;
 		nvbe->nr_pages = 0;
 	}
 }
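For context, TTM invokes this populate hook with the per-page DMA addresses
it gathered, using DMA_ERROR_CODE as the placeholder for pages it did not
map itself. A hedged sketch of the caller side (the ttm->dma_address field
name comes from the companion TTM patch and is an assumption here):

	/* Assumed shape of the TTM-side call after the companion patch;
	 * ttm->dma_address holds DMA_ERROR_CODE for unmapped pages. */
	ret = ttm->be->func->populate(ttm->be, ttm->num_pages, ttm->pages,
				      ttm->dummy_read_page, ttm->dma_address);
	if (ret)
		return ret;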