Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_uvd.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 58
1 file changed, 43 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 30a94609672..0312a7f4d76 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -36,6 +36,9 @@
#include "radeon.h"
#include "r600d.h"
+/* 1 second timeout */
+#define UVD_IDLE_TIMEOUT_MS 1000
+
/* Firmware Names */
#define FIRMWARE_RV710 "radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS "radeon/CYPRESS_uvd.bin"
@@ -47,6 +50,8 @@ MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
+static void radeon_uvd_idle_work_handler(struct work_struct *work);
+
int radeon_uvd_init(struct radeon_device *rdev)
{
struct platform_device *pdev;
@@ -54,6 +59,8 @@ int radeon_uvd_init(struct radeon_device *rdev)
const char *fw_name;
int i, r;
+ INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
+
pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
r = IS_ERR(pdev);
if (r) {
@@ -107,7 +114,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
platform_device_unregister(pdev);
- bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) +
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -188,8 +195,6 @@ int radeon_uvd_resume(struct radeon_device *rdev)
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- radeon_set_uvd_clocks(rdev, 53300, 40000);
-
return 0;
}
@@ -415,24 +420,26 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- if (cmd == 0) {
- if (end & 0xFFFFFFFFF0000000) {
- DRM_ERROR("msg buffer %LX-%LX out of 256MB segment!\n",
- start, end);
- return -EINVAL;
- }
-
- r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
- if (r)
- return r;
+ if ((start >> 28) != (end >> 28)) {
+ DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ start, end);
+ return -EINVAL;
}
- if ((start & 0xFFFFFFFFF0000000) != (end & 0xFFFFFFFFF0000000)) {
- DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ /* TODO: is this still necessary on NI+ ? */
+ if ((cmd == 0 || cmd == 0x3) &&
+ (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
+ DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end);
return -EINVAL;
}
+ if (cmd == 0) {
+ r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -664,3 +671,24 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
+
+static void radeon_uvd_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev =
+ container_of(work, struct radeon_device, uvd.idle_work.work);
+
+ if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ else
+ schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+}
+
+void radeon_uvd_note_usage(struct radeon_device *rdev)
+{
+ bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
+ set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
+ msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+ if (set_clocks)
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+}
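
For context, the idle work handler added above drops the UVD clocks to 0 once no UVD fences remain outstanding, and radeon_uvd_note_usage() is meant to be called from the submission path so the first job after an idle period raises the clocks again and every job re-arms the one-second timer. A minimal call-site sketch follows; the function name example_uvd_submit() and its exact placement are assumptions for illustration, not part of this patch.

/*
 * Hypothetical call site (not part of this patch), assuming the
 * struct radeon_device and UVD helpers declared in radeon.h.
 */
static int example_uvd_submit(struct radeon_device *rdev, int ring)
{
	/*
	 * Re-arm the idle work before queueing UVD commands: if the
	 * delayed work was not already pending, the block was idle and
	 * radeon_uvd_note_usage() switches the UVD clocks back on.
	 */
	radeon_uvd_note_usage(rdev);

	/* ... emit the UVD command stream on `ring` here ... */

	return 0;
}

The reworked relocation checks above rely on addr >> 28 identifying a buffer's 256MB window: the start and end of a relocation must land in the same window, and message/feedback buffers (cmd 0 and 0x3) must additionally share the window of the UVD VCPU buffer at rdev->uvd.gpu_addr.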