author:    Alex Deucher <alexander.deucher@amd.com>  2013-06-21 15:12:57 -0400
committer: Alex Deucher <alexander.deucher@amd.com>  2013-06-27 19:15:49 -0400
commit:    8a227555a8e9826a518878a366c007931304a0a8 (patch)
tree:      12b21c69a6d2c58986d7d110c147b5c1ebcac077 /drivers/gpu/drm/radeon/radeon_uvd.c
parent:    0c4aaeae441495b21b9c7d306098ee4911bfa16a (diff)
drm/radeon/kms: enable UVD as needed (v9)
When using UVD, the driver must switch to a special UVD power state.
In the CS ioctl, switch to the power state and schedule work to change
the power state back. When the work comes up, check if UVD is still
busy and, if not, switch back to the user state; otherwise, reschedule
the work.

Note: We really need some better way to decide when to switch out of
the UVD power state. Switching power states while playback is active
makes UVD angry.

v2: fix locking
v3: switch from timer to delayed work
v4: check fence driver for UVD jobs, reduce timeout to 1 second and
    rearm timeout on activity
v5: rebase on new dpm tree
v6: rebase on interim uvd on demand changes
v7: fix UVD when DPM is disabled
v8: unify non-DPM and DPM UVD handling
v9: remove leftover idle work struct

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
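Before the diff, it may help to see the rearming idle-work idiom the
message describes in isolation. This is a minimal sketch against the
generic workqueue API, not radeon code: IDLE_TIMEOUT_MS, engine_busy(),
engine_power_down(), and engine_init() are illustrative placeholders,
while INIT_DELAYED_WORK, schedule_delayed_work, and msecs_to_jiffies
are the real kernel calls the patch relies on.

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define IDLE_TIMEOUT_MS 1000	/* v4 reduced the timeout to 1 second */

	static struct delayed_work idle_work;

	/* Placeholders for the engine-specific pieces (radeon uses the
	 * emitted-fence count and the UVD clock/power-state calls here). */
	static bool engine_busy(void)
	{
		return false;	/* e.g. emitted UVD fence count != 0 */
	}

	static void engine_power_down(void)
	{
		/* e.g. drop UVD clocks in the non-DPM case */
	}

	/* Fires IDLE_TIMEOUT_MS after the last (re)arm. If jobs are still
	 * in flight, rearm rather than power down: switching power states
	 * while playback is active is what the note above warns about. */
	static void idle_work_handler(struct work_struct *work)
	{
		if (engine_busy())
			schedule_delayed_work(&idle_work,
					      msecs_to_jiffies(IDLE_TIMEOUT_MS));
		else
			engine_power_down();
	}

	static void engine_init(void)
	{
		INIT_DELAYED_WORK(&idle_work, idle_work_handler);
	}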
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_uvd.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index fdc77d1eaac..ce5a10c8d33 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -699,11 +699,19 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
 	struct radeon_device *rdev =
 		container_of(work, struct radeon_device, uvd.idle_work.work);
 
-	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
-		radeon_set_uvd_clocks(rdev, 0, 0);
-	else
+	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			mutex_lock(&rdev->pm.mutex);
+			rdev->pm.dpm.uvd_active = false;
+			mutex_unlock(&rdev->pm.mutex);
+			radeon_pm_compute_clocks(rdev);
+		} else {
+			radeon_set_uvd_clocks(rdev, 0, 0);
+		}
+	} else {
 		schedule_delayed_work(&rdev->uvd.idle_work,
 				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+	}
 }
 
 void radeon_uvd_note_usage(struct radeon_device *rdev)
@@ -711,8 +719,14 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
 	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
 	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
 					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
-	if (set_clocks)
-		radeon_set_uvd_clocks(rdev, 53300, 40000);
+	if (set_clocks) {
+		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+			/* XXX pick SD/HD/MVC */
+			radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
+		} else {
+			radeon_set_uvd_clocks(rdev, 53300, 40000);
+		}
+	}
 }
 
 static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
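A subtlety in radeon_uvd_note_usage() above is the pair of workqueue
return values it combines into set_clocks. A minimal sketch of just
that contract, with a hypothetical helper name (the return-value
semantics are the workqueue API's; the wrapper itself is not radeon
code):

	#include <linux/workqueue.h>

	/* Hypothetical wrapper isolating the idiom: returns true exactly
	 * when the engine had gone idle (the idle work already fired, so
	 * nothing was pending to cancel) and a fresh timeout was armed,
	 * i.e. when the caller must raise clocks / switch power state. */
	static bool note_usage_needs_power_up(struct delayed_work *idle_work,
					      unsigned long timeout)
	{
		/* cancel_delayed_work_sync() returns true iff the work
		 * was still pending */
		bool powered_down = !cancel_delayed_work_sync(idle_work);

		/* schedule_delayed_work() returns true iff the work was
		 * not already queued */
		powered_down &= schedule_delayed_work(idle_work, timeout);

		return powered_down;
	}

In other words, clocks are raised only on the transition from idle back
to busy; back-to-back submissions inside the timeout window just keep
rearming the timer.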