commit 32faa34dc5ed121a1e927753dcbf0ad395d3a956 (patch)
author:    Dave Airlie <airlied@redhat.com>  2011-12-06 11:21:36 +0000
committer: Dave Airlie <airlied@redhat.com>  2011-12-06 11:21:49 +0000
tree:      afad2825ec463545dda7477be40ddb437686b58d /drivers/gpu/drm/radeon/radeon_gart.c
parent:    bcdd6b2fd603340fbb0ed9984b32a9a066f1b806 (diff)
parent:    dc97b3409a790d2a21aac6e5cdb99558b5944119 (diff)
drm/Merge branch 'drm-ttm-glisse' of ../drm-radeon-next into drm-core-next
Merge topic branch containing Jerome's TTM changes, contains one change from
Konrad to swiotlb export.
* 'drm-ttm-glisse' of ../drm-radeon-next:
drm/ttm: callback move_notify any time bo placement change v4
drm/ttm: simplify memory accounting for ttm user v2
drm/ttm: isolate dma data from ttm_tt V4
drm/nouveau: enable the ttm dma pool when swiotlb is active V3
drm/radeon/kms: enable the ttm dma pool if swiotlb is on V4
drm/ttm: provide dma aware ttm page pool code V9
drm/ttm: introduce callback for ttm_tt populate & unpopulate V4
drm/ttm: merge ttm_backend and ttm_tt V5
drm/ttm: page allocation use page array instead of list
drm/ttm: test for dma_address array allocation failure
drm/ttm: use ttm put pages function to properly restore cache attribute
drm/ttm: remove unused backend flags field
drm/ttm: remove split btw highmen and lowmem page
drm/ttm: remove userspace backed ttm object support
swiotlb: Expose swiotlb_nr_tlb function to modules
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_gart.c | 29 |
1 files changed, 1 insertions, 28 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index ba7ab79e12c..a4d98160858 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			if (!rdev->gart.ttm_alloced[p])
-				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-						PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we reverted the patch using dma_addr in TTM for now but this
-		 * code stops building on alpha so just comment it out for now */
-		if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
-			rdev->gart.ttm_alloced[p] = true;
-			rdev->gart.pages_addr[p] = dma_addr[i];
-		} else {
-			/* we need to support large memory configurations */
-			/* assume that unbind have already been call on the range */
-			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
-							0, PAGE_SIZE,
-							PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-				/* FIXME: failed to map page (return -ENOMEM?) */
-				radeon_gart_unbind(rdev, offset, pages);
-				return -ENOMEM;
-			}
-		}
+		rdev->gart.pages_addr[p] = dma_addr[i];
 		rdev->gart.pages[p] = pagelist[i];
 		if (rdev->gart.ptr) {
 			page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
-					 rdev->gart.num_cpu_pages, GFP_KERNEL);
-	if (rdev->gart.ttm_alloced == NULL) {
-		radeon_gart_fini(rdev);
-		return -ENOMEM;
-	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
-	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
-	rdev->gart.ttm_alloced = NULL;
 	radeon_dummy_page_fini(rdev);
 }