author		Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 14:57:10 +0100
committer	Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-01-15 14:57:10 +0100
commit		f2d476a110bc24fde008698ae9018c99e803e25c
tree		2d33f2036764ac5a6d0ee4c45104f1d90530f5ba
parent		5e45d7dfd74100d622f9cdc70bfd1f9fae1671de
drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers, v2
This requires re-use of the seqno, which increases fairness slightly. Instead of spinning with a new seqno on every retry, we keep the current one, but still drop all other reservations we hold. Only when we succeed do we try to take back our other reservations.

This should increase fairness slightly as well.

Changes since v1:
- Increase val_seq before calling ttm_bo_reserve_slowpath_nolru and
  retrying to take all entries, to prevent a race.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	23
1 file changed, 21 insertions(+), 2 deletions(-)
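The try/backoff/slowpath/retry shape the commit message describes can be illustrated with a small userspace sketch. Everything below (struct buf, reserve_nolru(), reserve_slowpath(), backoff_all()) is a hypothetical stand-in built on plain pthread mutexes, not the real TTM API; it only mirrors the control flow of the patched ttm_eu_reserve_buffers():

/* Minimal userspace sketch of the reserve pattern; all names here are
 * illustrative stand-ins for the TTM helpers, not the real API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
	pthread_mutex_t lock;	/* stands in for the bo reservation */
	bool reserved;		/* mirrors entry->reserved */
};

static unsigned long val_seq;	/* stands in for bdev->val_seq */

/* Fast path: trylock, like ttm_bo_reserve_nolru() with no_wait set. */
static int reserve_nolru(struct buf *b)
{
	return pthread_mutex_trylock(&b->lock);
}

/* Slow path: block until the contended buffer becomes free. */
static int reserve_slowpath(struct buf *b, unsigned long seq)
{
	(void)seq;	/* the real helper records the seqno in the bo */
	return pthread_mutex_lock(&b->lock);
}

/* Drop every reservation we hold, like ttm_eu_backoff_reservation_locked(). */
static void backoff_all(struct buf *bufs, int n)
{
	for (int i = 0; i < n; i++) {
		if (bufs[i].reserved) {
			pthread_mutex_unlock(&bufs[i].lock);
			bufs[i].reserved = false;
		}
	}
}

/* Mirrors the patched ttm_eu_reserve_buffers() control flow. */
static int reserve_all(struct buf *bufs, int n)
{
	unsigned long seq = val_seq++;
retry:
	for (int i = 0; i < n; i++) {
		if (bufs[i].reserved)	/* already slowpath reserved? */
			continue;
		if (reserve_nolru(&bufs[i]) == 0) {
			bufs[i].reserved = true;
			continue;
		}
		/* Contended: drop everything we hold ... */
		backoff_all(bufs, n);
		/* ... bump the sequence number so nobody (including us)
		 * mistakes our stale seqno for a live reservation (v2 fix) ... */
		seq = val_seq++;
		/* ... block on the loser alone and keep it reserved ... */
		if (reserve_slowpath(&bufs[i], seq) != 0)
			return -1;
		bufs[i].reserved = true;
		/* ... then retry the whole list, skipping what we kept. */
		goto retry;
	}
	return 0;
}

int main(void)
{
	struct buf bufs[3];

	for (int i = 0; i < 3; i++) {
		pthread_mutex_init(&bufs[i].lock, NULL);
		bufs[i].reserved = false;
	}
	if (reserve_all(bufs, 3) == 0)
		printf("all buffers reserved\n");
	backoff_all(bufs, 3);
	return 0;
}

The property worth noting is that the thread only ever blocks (in the slow path) while holding no reservations at all, since it backs everything off first; that is what keeps the scheme deadlock-free while still making forward progress on the contended buffer.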
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d32365779..7b90def1567 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ retry:
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;
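The comment in the second hunk is the v2 race fix. One possible interleaving it guards against, reconstructed from that comment (thread names and exact timing are illustrative, not taken from the source):

thread A: reserves bo with seqno S; bo->val_seq = S, bo->seq_valid = true
thread A: hits -EAGAIN on another buffer and backs off; bo keeps the stale S
thread B: reserves bo, but has not yet updated bo->val_seq / bo->seq_valid
thread A: retries bo with the same S and sees seq_valid set with
          bo->val_seq == S, i.e. its own stale sequence, not B's state

Bumping val_seq before the slowpath call guarantees the retry can never match the stale value.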