Diffstat (limited to 'drivers')
-rw-r--r--   drivers/gpu/drm/i915/intel_fb.c             |  2
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_fbcon.c     |  1
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_notifier.c  |  1
-rw-r--r--   drivers/gpu/drm/radeon/radeon_fb.c          |  1
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c                | 83
5 files changed, 72 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 56ad9df2ccb..b61966c126d 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -238,8 +238,8 @@ int intel_fbdev_destroy(struct drm_device *dev,
         drm_framebuffer_cleanup(&ifb->base);
 
         if (ifb->obj) {
-                drm_gem_object_handle_unreference(ifb->obj);
                 drm_gem_object_unreference(ifb->obj);
+                ifb->obj = NULL;
         }
 
         return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index d2047713dc5..dbd30b2e43f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -352,7 +352,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 
         if (nouveau_fb->nvbo) {
                 nouveau_bo_unmap(nouveau_fb->nvbo);
-                drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
                 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
                 nouveau_fb->nvbo = NULL;
         }
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 3c9964a8fba..3ec181ff50c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -79,7 +79,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
         mutex_lock(&dev->struct_mutex);
         nouveau_bo_unpin(chan->notifier_bo);
         mutex_unlock(&dev->struct_mutex);
-        drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
         drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
         drm_mm_takedown(&chan->notifier_heap);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 9cdf6a35bc2..40b0c087b59 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -97,7 +97,6 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
                 radeon_bo_unpin(rbo);
                 radeon_bo_unreserve(rbo);
         }
-        drm_gem_object_handle_unreference(gobj);
         drm_gem_object_unreference_unlocked(gobj);
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb4cf7ef4d1..db809e034cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -442,6 +442,43 @@ out_err:
 }
 
 /**
+ * Call bo::reserved and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+
+        if (bo->ttm) {
+
+                /**
+                 * Release the lru_lock, since we don't want to have
+                 * an atomic requirement on ttm_tt[unbind|destroy].
+                 */
+
+                spin_unlock(&glob->lru_lock);
+                ttm_tt_unbind(bo->ttm);
+                ttm_tt_destroy(bo->ttm);
+                bo->ttm = NULL;
+                spin_lock(&glob->lru_lock);
+        }
+
+        if (bo->mem.mm_node) {
+                drm_mm_put_block(bo->mem.mm_node);
+                bo->mem.mm_node = NULL;
+        }
+
+        atomic_set(&bo->reserved, 0);
+        wake_up_all(&bo->event_queue);
+        spin_unlock(&glob->lru_lock);
+}
+
+
+/**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
         int ret;
 
         spin_lock(&bo->lock);
+retry:
         (void) ttm_bo_wait(bo, false, false, !remove_all);
 
         if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                 spin_unlock(&bo->lock);
 
                 spin_lock(&glob->lru_lock);
-                put_count = ttm_bo_del_from_lru(bo);
+                ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+                /**
+                 * Someone else has the object reserved. Bail and retry.
+                 */
 
-                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-                BUG_ON(ret);
-                if (bo->ttm)
-                        ttm_tt_unbind(bo->ttm);
+                if (unlikely(ret == -EBUSY)) {
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        goto requeue;
+                }
+
+                /**
+                 * We can re-check for sync object without taking
+                 * the bo::lock since setting the sync object requires
+                 * also bo::reserved. A busy object at this point may
+                 * be caused by another thread starting an accelerated
+                 * eviction.
+                 */
+
+                if (unlikely(bo->sync_obj)) {
+                        atomic_set(&bo->reserved, 0);
+                        wake_up_all(&bo->event_queue);
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        if (remove_all)
+                                goto retry;
+                        else
+                                goto requeue;
+                }
+
+                put_count = ttm_bo_del_from_lru(bo);
 
                 if (!list_empty(&bo->ddestroy)) {
                         list_del_init(&bo->ddestroy);
                         ++put_count;
                 }
 
-                if (bo->mem.mm_node) {
-                        drm_mm_put_block(bo->mem.mm_node);
-                        bo->mem.mm_node = NULL;
-                }
-                spin_unlock(&glob->lru_lock);
-                atomic_set(&bo->reserved, 0);
+                ttm_bo_cleanup_memtype_use(bo);
 
                 while (put_count--)
                         kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
                 return 0;
         }
-
+requeue:
         spin_lock(&glob->lru_lock);
         if (list_empty(&bo->ddestroy)) {
                 void *sync_obj = bo->sync_obj;
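The i915, nouveau and radeon hunks above all follow the same pattern: the teardown path no longer drops the GEM handle reference (that reference goes away when the handle itself is destroyed), it drops only the object reference the driver took, and then clears its pointer. What follows is an illustrative sketch only, not part of the patch; it assumes the DRM GEM helpers of this kernel era and uses a hypothetical example_fb wrapper struct and example_fb_destroy() function.

/*
 * Sketch of the teardown pattern the three driver hunks converge on.
 * "example_fb" and "example_fb_destroy" are hypothetical names used
 * purely for illustration.
 */
#include "drmP.h"       /* drm_gem_object_unreference_unlocked() */

struct example_fb {
        struct drm_gem_object *obj;     /* scanout object, if allocated */
};

static void example_fb_destroy(struct example_fb *fb)
{
        if (fb->obj) {
                /* was: drm_gem_object_handle_unreference(fb->obj); */
                drm_gem_object_unreference_unlocked(fb->obj);
                fb->obj = NULL; /* guard against a double unreference */
        }
}

Clearing the pointer after the unreference mirrors what the i915 and nouveau hunks do and protects the destroy path if it is ever entered twice.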