| field | value | date |
|---|---|---|
| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-05-08 13:39:59 +0200 |
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-05-08 13:39:59 +0200 |
| commit | 5e13a0c5ec05d382b488a691dfb8af015b1dea1e (patch) | |
| tree | 7a06dfa1f7661f8908193f2437b32452520221d3 /drivers/gpu/drm/radeon/radeon_gem.c | |
| parent | b615b57a124a4af7b68196bc2fb8acc236041fa2 (diff) | |
| parent | 4f256e8aa3eda15c11c3cec3ec5336e1fc579cbd (diff) | |
Merge remote-tracking branch 'airlied/drm-core-next' into drm-intel-next-queued
Backmerge of drm-next to resolve a few ugly conflicts and to get a few
fixes from 3.4-rc6 (which drm-next has already merged). Note that this
merge also restricts the stencil cache lra evict policy workaround to
snb (as it should) - I had to frob the code anyway because the
CM0_MASK_SHIFT define died in the masked bit cleanups.
We need the backmerge to get Paulo Zanoni's infoframe regression fix
for gm45 - further bugfixes from him touch the same area and would
needlessly conflict.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
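A note on the "masked bit cleanups" mentioned in the message above: many i915 registers only latch the low 16 bits whose corresponding high 16 "mask" bits are set in the same write, and the cleanup replaced open-coded `(bit << CM0_MASK_SHIFT) | bit` constructions with small helper macros (named `_MASKED_BIT_ENABLE()`/`_MASKED_BIT_DISABLE()` in i915_reg.h of this era, if memory serves). The standalone sketch below only illustrates the encoding; the macro names, the example bit, and the printed values are illustrative and are not taken from this merge.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of the "masked bit" register encoding: the upper
 * 16 bits of the written value select which of the lower 16 bits the
 * hardware actually updates.  The old code built this by hand with a
 * 16-bit shift (the late CM0_MASK_SHIFT); the cleanup wrapped the
 * pattern in helper macros along these lines.
 */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))  /* unmask the bit and set it   */
#define MASKED_BIT_DISABLE(a) ((a) << 16)          /* unmask the bit and clear it */

int main(void)
{
	uint32_t bit = UINT32_C(1) << 5;  /* stand-in for a chicken-register bit */

	/* The snb-only stencil cache LRA evict policy workaround then becomes a
	 * single masked write of the appropriate form to the relevant CACHE_MODE
	 * chicken register, guarded by a gen6 check. */
	printf("enable write:  0x%08" PRIx32 "\n", MASKED_BIT_ENABLE(bit));   /* 0x00200020 */
	printf("disable write: 0x%08" PRIx32 "\n", MASKED_BIT_DISABLE(bit));  /* 0x00200000 */
	return 0;
}
```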
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_gem.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_gem.c | 16 |
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c7008b5210f..e15cb1fe2c3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -154,6 +154,17 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
         radeon_bo_unreserve(rbo);
 }
 
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+        if (r == -EDEADLK) {
+                radeon_mutex_lock(&rdev->cs_mutex);
+                r = radeon_gpu_reset(rdev);
+                if (!r)
+                        r = -EAGAIN;
+                radeon_mutex_unlock(&rdev->cs_mutex);
+        }
+        return r;
+}
 
 /*
  * GEM ioctls.
@@ -210,12 +221,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                                 args->initial_domain, false,
                                 false, &gobj);
         if (r) {
+                r = radeon_gem_handle_lockup(rdev, r);
                 return r;
         }
         r = drm_gem_handle_create(filp, gobj, &handle);
         /* drop reference from allocate - handle holds it now */
         drm_gem_object_unreference_unlocked(gobj);
         if (r) {
+                r = radeon_gem_handle_lockup(rdev, r);
                 return r;
         }
         args->handle = handle;
@@ -245,6 +258,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
         drm_gem_object_unreference_unlocked(gobj);
+        r = radeon_gem_handle_lockup(robj->rdev, r);
         return r;
 }
 
@@ -301,6 +315,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                 break;
         }
         drm_gem_object_unreference_unlocked(gobj);
+        r = radeon_gem_handle_lockup(robj->rdev, r);
         return r;
 }
 
@@ -322,6 +337,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
         if (robj->rdev->asic->ioctl_wait_idle)
                 robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
         drm_gem_object_unreference_unlocked(gobj);
+        r = radeon_gem_handle_lockup(robj->rdev, r);
         return r;
 }
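For context on why radeon_gem_handle_lockup() converts a successful reset into -EAGAIN rather than 0: userspace normally issues these GEM ioctls through libdrm's drmIoctl() wrapper, which, as far as I recall its xf86drm.c implementation of this era, simply retries the ioctl while errno is EINTR or EAGAIN. A request that ran into a lockup therefore gets re-issued transparently against the freshly reset GPU. The function below is a hypothetical paraphrase of that wrapper for illustration, not code from this merge or libdrm verbatim.

```c
#include <errno.h>
#include <sys/ioctl.h>

/*
 * Hypothetical paraphrase of libdrm's drmIoctl() retry loop.  Because the
 * wrapper re-issues the ioctl on EAGAIN, the kernel side can reset a hung
 * GPU and return -EAGAIN, and the original GEM request is retried without
 * the application ever seeing the failure.
 */
int retrying_ioctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));

	return ret;
}
```

The in-kernel half of that contract is the -EDEADLK to radeon_gpu_reset() to -EAGAIN conversion added in the hunks above.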