author     Alex Deucher <alexander.deucher@amd.com>  2013-02-01 17:32:42 +0100
committer  Alex Deucher <alexander.deucher@amd.com>  2013-02-01 13:57:10 -0500
commit     43f1214aa094e46efdfc0255d9601be0e5ea0f62 (patch)
tree       f68b6472afdc8d460b73f67958aad4b21e06ee85 /drivers/gpu/drm/radeon/radeon_gart.c
parent     24178ec42b0985d485886bc43b97e54ff173627e (diff)
drm/radeon: use IBs for VM page table updates v2
For very large page table updates, we can exceed the
size of the ring. To avoid this, use an IB to perform
the page table update.
v2 (ck): clean up the IB infrastructure and then use it instead
of filling the struct ourselves.
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
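
For orientation, the new submission path condenses to the pattern below. This is a sketch assembled from the hunks in this patch, not a verbatim excerpt: rdev, vm, ridx and ndw are assumed to be set up as in radeon_vm_bo_update_pte(), and the page table fill itself is abbreviated to a comment.

    struct radeon_ib ib;
    int r;

    /* The IB is sized up front (ndw dwords -> ndw * 4 bytes), so a large
     * update no longer has to fit into the free space of the shared ring.
     * Note that the patch itself does not check the return value here. */
    r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
    ib.length_dw = 0;

    /* ... fill the IB via the per-ASIC callback, which now takes the IB:
     * radeon_asic_vm_set_page(rdev, &ib, pe, addr, count, incr, flags); */

    /* Replaces the explicit semaphore handling: the IB is made to wait
     * for the VM's previous fence before it executes. */
    radeon_ib_sync_to(&ib, vm->fence);

    r = radeon_ib_schedule(rdev, &ib, NULL);
    if (r) {
        radeon_ib_free(rdev, &ib);
        return r;
    }

    /* The IB's fence now represents the page table update. */
    radeon_fence_unref(&vm->fence);
    vm->fence = radeon_fence_ref(ib.fence);
    radeon_ib_free(rdev, &ib);

Synchronization and ring-space management collapse into the IB API, which is why the diffstat below comes out net negative.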
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gart.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c  | 60
1 file changed, 24 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6e24f84755b..2c1341f63dc 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
  */
 static int radeon_vm_update_pdes(struct radeon_device *rdev,
 				 struct radeon_vm *vm,
+				 struct radeon_ib *ib,
 				 uint64_t start, uint64_t end)
 {
 	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				radeon_asic_vm_set_page(rdev, last_pde,
+				radeon_asic_vm_set_page(rdev, ib, last_pde,
 							last_pt, count, incr,
 							RADEON_VM_PAGE_VALID);
 			}
@@ -985,7 +986,7 @@ retry:
 	}
 
 	if (count) {
-		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
 					incr, RADEON_VM_PAGE_VALID);
 	}
 
@@ -1009,6 +1010,7 @@ retry:
  */
 static void radeon_vm_update_ptes(struct radeon_device *rdev,
 				  struct radeon_vm *vm,
+				  struct radeon_ib *ib,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint32_t flags)
 {
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 		if ((last_pte + 8 * count) != pte) {
 
 			if (count) {
-				radeon_asic_vm_set_page(rdev, last_pte,
+				radeon_asic_vm_set_page(rdev, ib, last_pte,
 							last_dst, count,
 							RADEON_GPU_PAGE_SIZE,
 							flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 	}
 
 	if (count) {
-		radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+		radeon_asic_vm_set_page(rdev, ib, last_pte,
+					last_dst, count,
 					RADEON_GPU_PAGE_SIZE, flags);
 	}
 }
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 			    struct ttm_mem_reg *mem)
 {
 	unsigned ridx = rdev->asic->vm.pt_ring_index;
-	struct radeon_ring *ring = &rdev->ring[ridx];
-	struct radeon_semaphore *sem = NULL;
+	struct radeon_ib ib;
 	struct radeon_bo_va *bo_va;
 	unsigned nptes, npdes, ndw;
 	uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		bo_va->valid = false;
 	}
 
-	if (vm->fence && radeon_fence_signaled(vm->fence)) {
-		radeon_fence_unref(&vm->fence);
-	}
-
-	if (vm->fence && vm->fence->ring != ridx) {
-		r = radeon_semaphore_create(rdev, &sem);
-		if (r) {
-			return r;
-		}
-	}
-
 	nptes = radeon_bo_ngpu_pages(bo);
 
 	/* assume two extra pdes in case the mapping overlaps the borders */
 	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
-	/* estimate number of dw needed */
-	/* semaphore, fence and padding */
-	ndw = 32;
+	/* padding, etc. */
+	ndw = 64;
 
 	if (RADEON_VM_BLOCK_SIZE > 11)
 		/* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
 
-	r = radeon_ring_lock(rdev, ring, ndw);
-	if (r) {
-		return r;
-	}
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
 
-	if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
-		radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
-		radeon_fence_note_sync(vm->fence, ridx);
-	}
+	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+	ib.length_dw = 0;
 
-	r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
 	if (r) {
-		radeon_ring_unlock_undo(rdev, ring);
+		radeon_ib_free(rdev, &ib);
 		return r;
 	}
 
-	radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
 			      addr, bo_va->flags);
 
-	radeon_fence_unref(&vm->fence);
-	r = radeon_fence_emit(rdev, &vm->fence, ridx);
+	radeon_ib_sync_to(&ib, vm->fence);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
-		radeon_ring_unlock_undo(rdev, ring);
+		radeon_ib_free(rdev, &ib);
 		return r;
 	}
-	radeon_ring_unlock_commit(rdev, ring);
-	radeon_semaphore_free(rdev, &sem, vm->fence);
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
 	radeon_fence_unref(&vm->last_flush);
 
 	return 0;
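
To put numbers on the problem this patch solves: the command-size estimate is dominated by two dwords per PTE, so even a modest mapping dwarfs the 32 dwords the old path reserved for semaphore, fence and padding. Below is a rough, self-contained back-of-the-envelope calculation; the constant values and the per-PTE terms (which live in unchanged code between the hunks above) are assumptions for illustration, not verbatim from this patch.

    #include <stdio.h>

    /* assumed values for illustration; the real constants live in radeon.h */
    #define RADEON_GPU_PAGE_SIZE 4096
    #define RADEON_VM_BLOCK_SIZE 9

    int main(void)
    {
        /* hypothetical 64 MiB mapping */
        unsigned nptes = (64u << 20) / RADEON_GPU_PAGE_SIZE;  /* 16384 PTEs */
        unsigned npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; /* 34 PDEs */
        unsigned ndw   = 64;                                  /* padding, etc. */

        /* per-PTE terms, reconstructed from the unchanged estimate code
         * between the hunks (treat as an approximation) */
        ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; /* write headers */
        ndw += nptes * 2;                           /* two dwords per PTE */

        /* per-PDE terms, visible in the second hunk above */
        ndw += (npdes >> 11) * 4;
        ndw += npdes * 2;

        printf("ndw = %u dwords (%u bytes)\n", ndw, ndw * 4);
        return 0;
    }

At roughly 33k dwords for a 64 MiB mapping, a fixed ring reservation was never going to scale, while the 0xfffff-dword IB cap checked above only bites for mappings in the multi-GiB range.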