Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_bo.c     |  3
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_gem.c    |  5
-rw-r--r--   drivers/gpu/drm/radeon/radeon_cs.c       |  1
-rw-r--r--   drivers/gpu/drm/radeon/radeon_object.c   |  2
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c             | 97
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_util.c        | 26
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_vm.c          |  2
-rw-r--r--   drivers/gpu/drm/ttm/ttm_execbuf_util.c   | 19
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  2
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  1
10 files changed, 38 insertions, 120 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 424dff5d0ab..7226f419e17 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1104,8 +1104,7 @@ nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 	if (vma->node) {
 		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
 			spin_lock(&nvbo->bo.bdev->fence_lock);
-			ttm_bo_wait(&nvbo->bo, false, false, false,
-				    TTM_USAGE_READWRITE);
+			ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.bdev->fence_lock);
 			nouveau_vm_unmap(vma);
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 322bf62a064..5f0bc57fdaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -589,8 +589,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		}
 
 		spin_lock(&nvbo->bo.bdev->fence_lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, false,
-				  TTM_USAGE_READWRITE);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
@@ -826,7 +825,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	nvbo = nouveau_gem_object(gem);
 
 	spin_lock(&nvbo->bo.bdev->fence_lock);
-	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 14e85315110..fae00c0d75a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -80,7 +80,6 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			p->relocs[i].lobj.wdomain = r->write_domain;
 			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
-			p->relocs[i].lobj.tv.usage = TTM_USAGE_READWRITE;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
 			radeon_bo_list_add_object(&p->relocs[i].lobj,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b8f75f5d344..1c851521f45 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -527,7 +527,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
-		r = ttm_bo_wait(&bo->tbo, true, true, no_wait, false);
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
 	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 50fc8e4c9a3..617b64678fc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -499,7 +499,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&bdev->fence_lock);
-	(void) ttm_bo_wait(bo, false, false, true, TTM_USAGE_READWRITE);
+	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
@@ -567,8 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
 retry:
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
-			  TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
@@ -727,8 +726,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
-			  TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
@@ -1075,8 +1073,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
-			  TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
@@ -1697,83 +1694,34 @@ out_unlock:
 	return ret;
 }
 
-static void ttm_bo_unref_sync_obj_locked(struct ttm_buffer_object *bo,
-					 void *sync_obj,
-					 void **extra_sync_obj)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
-	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;
-
-	/* We must unref the sync obj wherever it's ref'd.
-	 * Note that if we unref bo->sync_obj, we can unref both the read
-	 * and write sync objs too, because they can't be newer than
-	 * bo->sync_obj, so they are no longer relevant. */
-	if (sync_obj == bo->sync_obj ||
-	    sync_obj == bo->sync_obj_read) {
-		tmp_obj_read = bo->sync_obj_read;
-		bo->sync_obj_read = NULL;
-	}
-	if (sync_obj == bo->sync_obj ||
-	    sync_obj == bo->sync_obj_write) {
-		tmp_obj_write = bo->sync_obj_write;
-		bo->sync_obj_write = NULL;
-	}
-	if (sync_obj == bo->sync_obj) {
-		tmp_obj = bo->sync_obj;
-		bo->sync_obj = NULL;
-	}
-
-	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	spin_unlock(&bdev->fence_lock);
-	if (tmp_obj)
-		driver->sync_obj_unref(&tmp_obj);
-	if (tmp_obj_read)
-		driver->sync_obj_unref(&tmp_obj_read);
-	if (tmp_obj_write)
-		driver->sync_obj_unref(&tmp_obj_write);
-	if (extra_sync_obj)
-		driver->sync_obj_unref(extra_sync_obj);
-	spin_lock(&bdev->fence_lock);
-}
-
 int ttm_bo_wait(struct ttm_buffer_object *bo,
-		bool lazy, bool interruptible, bool no_wait,
-		enum ttm_buffer_usage usage)
+		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
 	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
-	void **bo_sync_obj;
 
-	switch (usage) {
-	case TTM_USAGE_READ:
-		bo_sync_obj = &bo->sync_obj_read;
-		break;
-	case TTM_USAGE_WRITE:
-		bo_sync_obj = &bo->sync_obj_write;
-		break;
-	case TTM_USAGE_READWRITE:
-	default:
-		bo_sync_obj = &bo->sync_obj;
-	}
-
-	if (likely(*bo_sync_obj == NULL))
+	if (likely(bo->sync_obj == NULL))
 		return 0;
 
-	while (*bo_sync_obj) {
+	while (bo->sync_obj) {
 
-		if (driver->sync_obj_signaled(*bo_sync_obj, bo->sync_obj_arg)) {
-			ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, NULL);
+		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
 			continue;
 		}
 
 		if (no_wait)
 			return -EBUSY;
 
-		sync_obj = driver->sync_obj_ref(*bo_sync_obj);
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
 		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
@@ -1784,9 +1732,16 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 			return ret;
 		}
 		spin_lock(&bdev->fence_lock);
-		if (likely(*bo_sync_obj == sync_obj &&
+		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
-			ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, &sync_obj);
+			void *tmp_obj = bo->sync_obj;
+			bo->sync_obj = NULL;
+			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+				  &bo->priv_flags);
+			spin_unlock(&bdev->fence_lock);
+			driver->sync_obj_unref(&sync_obj);
+			driver->sync_obj_unref(&tmp_obj);
+			spin_lock(&bdev->fence_lock);
 		} else {
 			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
@@ -1810,7 +1765,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	if (unlikely(ret != 0))
 		return ret;
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, true, no_wait, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, true, no_wait);
 	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
@@ -1884,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 */
 
 	spin_lock(&bo->bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, false, false);
 	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6135f58169c..ae3c6f5dd2b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -436,8 +436,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	atomic_set(&fbo->cpu_writers, 0);
 
 	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	fbo->sync_obj_read = driver->sync_obj_ref(bo->sync_obj_read);
-	fbo->sync_obj_write = driver->sync_obj_ref(bo->sync_obj_write);
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
@@ -620,30 +618,20 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 	struct ttm_buffer_object *ghost_obj;
-	void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;
+	void *tmp_obj = NULL;
 
 	spin_lock(&bdev->fence_lock);
-	if (bo->sync_obj)
+	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
-	if (bo->sync_obj_read)
-		tmp_obj_read = bo->sync_obj_read;
-	if (bo->sync_obj_write)
-		tmp_obj_write = bo->sync_obj_write;
-
+		bo->sync_obj = NULL;
+	}
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
-	bo->sync_obj_read = driver->sync_obj_ref(sync_obj);
-	bo->sync_obj_write = driver->sync_obj_ref(sync_obj);
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
-		ret = ttm_bo_wait(bo, false, false, false,
-				  TTM_USAGE_READWRITE);
+		ret = ttm_bo_wait(bo, false, false, false);
 		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
-		if (tmp_obj_read)
-			driver->sync_obj_unref(&tmp_obj_read);
-		if (tmp_obj_write)
-			driver->sync_obj_unref(&tmp_obj_write);
 		if (ret)
 			return ret;
 
@@ -667,10 +655,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
-		if (tmp_obj_read)
-			driver->sync_obj_unref(&tmp_obj_read);
-		if (tmp_obj_write)
-			driver->sync_obj_unref(&tmp_obj_write);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ff1e26f4b09..221b924aceb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -122,7 +122,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
-		ret = ttm_bo_wait(bo, false, true, false, TTM_USAGE_READWRITE);
+		ret = ttm_bo_wait(bo, false, true, false);
 		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 36d111a8823..3832fe10b4d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -221,18 +221,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		entry->old_sync_obj_read = NULL;
-		entry->old_sync_obj_write = NULL;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		if (entry->usage & TTM_USAGE_READ) {
-			entry->old_sync_obj_read = bo->sync_obj_read;
-			bo->sync_obj_read = driver->sync_obj_ref(sync_obj);
-		}
-		if (entry->usage & TTM_USAGE_WRITE) {
-			entry->old_sync_obj_write = bo->sync_obj_write;
-			bo->sync_obj_write = driver->sync_obj_ref(sync_obj);
-		}
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
 		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
@@ -241,15 +231,8 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 	spin_unlock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
-		if (entry->old_sync_obj) {
+		if (entry->old_sync_obj)
 			driver->sync_obj_unref(&entry->old_sync_obj);
-		}
-		if (entry->old_sync_obj_read) {
-			driver->sync_obj_unref(&entry->old_sync_obj_read);
-		}
-		if (entry->old_sync_obj_write) {
-			driver->sync_obj_unref(&entry->old_sync_obj_write);
-		}
 	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b8eb8cdcfb7..13afddc1f03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -244,7 +244,7 @@ static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
 
 	ttm_bo_reserve(bo, false, false, false, 0);
 	spin_lock(&bdev->fence_lock);
-	ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
+	ret = ttm_bo_wait(bo, false, false, false);
 	spin_unlock(&bdev->fence_lock);
 	if (unlikely(ret != 0))
 		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 28e1c35aec6..40932fbdac0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -89,7 +89,6 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 	if (unlikely(val_node == sw_context->cur_val_buf)) {
 		val_buf->new_sync_obj_arg = NULL;
 		val_buf->bo = ttm_bo_reference(bo);
-		val_buf->usage = TTM_USAGE_READWRITE;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
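Note: with the usage argument gone, every caller in the hunks above follows the same pattern: take bdev->fence_lock, call the four-argument ttm_bo_wait(), drop the lock. A minimal sketch of that call pattern follows; the helper name example_bo_idle() is hypothetical and not part of this patch, and the header paths are assumed to match the in-tree drivers of this era.

/* Hypothetical helper sketching the restored four-argument ttm_bo_wait()
 * call convention shown in the hunks above; not part of the patch itself.
 */
#include <linux/spinlock.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>

static int example_bo_idle(struct ttm_buffer_object *bo,
			   bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	/* ttm_bo_wait() expects bdev->fence_lock to be held by the caller. */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false /* lazy, as in most callers above */,
			  interruptible, no_wait);
	spin_unlock(&bdev->fence_lock);

	/* 0 when idle, -EBUSY if no_wait and still busy, or the driver's
	 * sync_obj_wait() error (e.g. -ERESTARTSYS when interruptible). */
	return ret;
}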