author:    Daniel Vetter <daniel.vetter@ffwll.ch>  2012-05-08 13:39:59 +0200
committer: Daniel Vetter <daniel.vetter@ffwll.ch>  2012-05-08 13:39:59 +0200
commit:    5e13a0c5ec05d382b488a691dfb8af015b1dea1e (patch)
tree:      7a06dfa1f7661f8908193f2437b32452520221d3  /drivers/gpu/drm/radeon/radeon_semaphore.c
parent:    b615b57a124a4af7b68196bc2fb8acc236041fa2 (diff)
parent:    4f256e8aa3eda15c11c3cec3ec5336e1fc579cbd (diff)
Merge remote-tracking branch 'airlied/drm-core-next' into drm-intel-next-queued
Backmerge of drm-next to resolve a few ugly conflicts and to get a few fixes from 3.4-rc6 (which drm-next has already merged).

Note that this merge also restricts the stencil cache LRA evict policy workaround to snb (as it should) - I had to frob the code anyway because the CM0_MASK_SHIFT define died in the masked bit cleanups.

We need the backmerge to get Paulo Zanoni's infoframe regression fix for gm45 - further bugfixes from him touch the same area and would needlessly conflict.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
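As background for the masked-bit remark above: gen6 "masked bit" registers carry a write-enable mask in bits 31:16, so a single bit can be set or cleared without a read-modify-write of the whole register. A rough sketch of the snb-restricted workaround, assuming the i915 names IS_GEN6, I915_WRITE, CACHE_MODE_0, _MASKED_BIT_DISABLE and CM0_STC_EVICT_DISABLE_LRA_SNB from that era - illustrative, not the literal resolution carried by this merge:

	/* Illustrative sketch only, not the literal code of this merge:
	 * the SNB PRM says the STC LRA evict policy bit must be reset,
	 * and the workaround should apply to snb only.
	 */
	if (IS_GEN6(dev))
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));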
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_semaphore.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3c920..930a08af900 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -149,6 +149,62 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
 }
 
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+				struct radeon_semaphore *semaphore,
+				bool sync_to[RADEON_NUM_RINGS],
+				int dst_ring)
+{
+	int i, r;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		unsigned num_ops = i == dst_ring ? RADEON_NUM_RINGS : 1;
+
+		/* don't lock unused rings */
+		if (!sync_to[i] && i != dst_ring)
+			continue;
+
+		/* prevent GPU deadlocks */
+		if (!rdev->ring[i].ready) {
+			dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+			r = -EINVAL;
+			goto error;
+		}
+
+		r = radeon_ring_lock(rdev, &rdev->ring[i], num_ops * 8);
+		if (r)
+			goto error;
+	}
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		/* no need to sync to our own or unused rings */
+		if (!sync_to[i] || i == dst_ring)
+			continue;
+
+		radeon_semaphore_emit_signal(rdev, i, semaphore);
+		radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+	}
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+
+		/* don't unlock unused rings */
+		if (!sync_to[i] && i != dst_ring)
+			continue;
+
+		radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+	}
+
+	return 0;
+
+error:
+	/* unlock all locks taken so far */
+	for (--i; i >= 0; --i) {
+		if (sync_to[i] || i == dst_ring) {
+			radeon_ring_unlock_undo(rdev, &rdev->ring[i]);
+		}
+	}
+	return r;
+}
+
 void radeon_semaphore_free(struct radeon_device *rdev,
 			   struct radeon_semaphore *semaphore)
 {
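
For context, a minimal sketch of how a caller might drive the new helper follows. The wrapper function, its fences array, and the radeon_fence_signaled() check are illustrative assumptions, not part of this patch; the only call taken from the patch itself is radeon_semaphore_sync_rings().

	/* Hypothetical caller sketch - illustrative only, not part of this patch. */
	static int example_sync_before_emit(struct radeon_device *rdev,
					    struct radeon_semaphore *sem,
					    struct radeon_fence *fences[RADEON_NUM_RINGS],
					    int dst_ring)
	{
		bool sync_to[RADEON_NUM_RINGS];
		int i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			/* wait only on rings that still have unsignaled work */
			sync_to[i] = fences[i] && !radeon_fence_signaled(fences[i]);
		}

		/* locks every ring involved, emits a signal on each source ring
		 * plus a matching wait on dst_ring, and commits; on failure the
		 * helper undoes all ring locks taken so far */
		return radeon_semaphore_sync_rings(rdev, sem, sync_to, dst_ring);
	}

The signal/wait pairing is the point of the helper: each source ring signals the semaphore once and dst_ring waits on it once per source, so commands emitted on dst_ring afterwards cannot run ahead of work still in flight on the other rings.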