author    | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-17 09:11:39 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-17 09:11:39 -0700
commit    | 40c7f2112ce18fa5eb6dc209c50dd0f046790191 (patch)
tree      | 2da6636f3a1d005902d3f2afb533cb6acafce606 /drivers/gpu/drm/radeon/radeon_gart.c
parent    | b04d0a90908cdb733e490486287e1ba8c568ffb0 (diff)
parent    | c87a8d8dcd2587c203f3dd8a3c5c15d1e128ec0d (diff)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (177 commits)
drm/radeon: fixup refcounts in radeon dumb create ioctl.
drm: radeon: *_cs_packet_parse_vline() cleanup
radeon: merge list_del()/list_add_tail() to list_move_tail()
drm: Retry i2c transfer of EDID block after failure
drm/radeon/kms: fix typo in atom overscan setup
drm: Hold the mode mutex whilst probing for sysfs status
drm/nouveau: fix __nouveau_fence_wait performance
drm/nv40: attempt to reserve just enough vram for all 32 channels
drm/nv50: check for vm traps on every gr irq
drm/nv50: decode vm faults some more
drm/nouveau: add nouveau_enum_find() util function
drm/nouveau: properly handle pushbuffer check failures
drm/nvc0: remove vm hack forcing large/small pages to not share a PDE
drm/i915: disable opregion lid detection for now.
drm/i915: Only wait on a pending flip if we intend to write to the buffer
drm/i915/dp: Sanity check eDP existence
drm: add cap bit to denote if dumb ioctl is available or not.
drm/core: add ioctl to query device/driver capabilities
drm/radeon/kms: allow max clock of 340 Mhz on hdmi 1.3+
drm/radeon/kms: add cayman pci ids
...
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_gart.c | 38
1 file changed, 27 insertions, 11 deletions
```diff
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95..f0534ef2f33 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
-		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
 				     &rdev->gart.table.vram.robj);
 		if (r) {
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			if (!rdev->gart.ttm_alloced[p])
+				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we need to support large memory configurations */
-		/* assume that unbind have already been call on the range */
-		rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
+			/* we need to support large memory configurations */
+			/* assume that unbind have already been call on the range */
+			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
 							0, PAGE_SIZE,
 							PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-			/* FIXME: failed to map page (return -ENOMEM?) */
-			radeon_gart_unbind(rdev, offset, pages);
-			return -ENOMEM;
+			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+				/* FIXME: failed to map page (return -ENOMEM?) */
+				radeon_gart_unbind(rdev, offset, pages);
+				return -ENOMEM;
+			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }
```
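The net effect of the radeon_gart.c part of this merge: radeon_gart_bind() now takes a dma_addr_t array from TTM, records entries that already hold a valid DMA address and flags them in gart.ttm_alloced[], and falls back to pci_map_page() only for the rest; radeon_gart_unbind() then skips pci_unmap_page() for the TTM-owned entries. The following is a minimal userspace sketch of that dispatch logic under stated assumptions, not the driver code: gart_table, gart_bind, fake_map_page, and INVALID_DMA are illustrative stand-ins for the kernel's structures, pci_map_page, and DMA_ERROR_CODE.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;            /* stand-in for the kernel type */
#define INVALID_DMA ((dma_addr_t)0)     /* stand-in for DMA_ERROR_CODE */

struct gart_table {
	dma_addr_t *pages_addr;   /* per-page bus address used for GART entries */
	bool       *ttm_alloced;  /* true: address came from TTM, do not unmap it */
	unsigned    num_pages;
};

/* Stand-in for pci_map_page(): pretend to map and return a bus address. */
static dma_addr_t fake_map_page(unsigned page)
{
	return 0x100000ULL + (dma_addr_t)page * 4096;
}

/* Mirrors the new radeon_gart_bind() flow: prefer a DMA address supplied by
 * the caller (TTM), otherwise map the page ourselves and remember ownership
 * so that unbind knows whether to unmap. */
static int gart_bind(struct gart_table *gart, unsigned offset_page,
		     unsigned pages, const dma_addr_t *dma_addr)
{
	for (unsigned i = 0, p = offset_page; i < pages; i++, p++) {
		if (dma_addr[i] != INVALID_DMA) {
			gart->ttm_alloced[p] = true;
			gart->pages_addr[p] = dma_addr[i];
		} else {
			gart->ttm_alloced[p] = false;
			gart->pages_addr[p] = fake_map_page(p);
		}
	}
	return 0;
}

int main(void)
{
	struct gart_table gart = {
		.pages_addr  = calloc(4, sizeof(dma_addr_t)),
		.ttm_alloced = calloc(4, sizeof(bool)),
		.num_pages   = 4,
	};
	/* Two pages arrive pre-mapped by "TTM", two take the fallback path. */
	dma_addr_t from_ttm[4] = { 0xdead0000, 0xdead1000, INVALID_DMA, INVALID_DMA };

	gart_bind(&gart, 0, 4, from_ttm);
	for (unsigned p = 0; p < gart.num_pages; p++)
		printf("page %u: addr=%#llx ttm_alloced=%d\n", p,
		       (unsigned long long)gart.pages_addr[p], gart.ttm_alloced[p]);

	free(gart.pages_addr);
	free(gart.ttm_alloced);
	return 0;
}
```

The design point the hunks encode is that mapping ownership must be tracked per page, which is why radeon_gart_init() grows the ttm_alloced bookkeeping array and radeon_gart_fini() frees it: teardown should only unmap what this path mapped itself.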