author	Chris Wilson <chris@chris-wilson.co.uk>	2011-01-09 21:05:44 +0000
committer	Chris Wilson <chris@chris-wilson.co.uk>	2011-01-11 22:15:30 +0000
commit	0a58705b2fc3fa29525cf2fdae3d4276a5771280 (patch)
tree	86286dbcc87b93c411adc22aae9e4ace1975b480 /drivers/gpu/drm/i915
parent	d9126400580e2caada85fa68799952956a6062fd (diff)
drm/i915: Periodically flush the active lists and requests
In order to retire active buffers whilst no client is active, we need to insert our own flush requests onto the ring.

This is useful for servers that queue up some rendering and then go to sleep, as it allows us to complete the processing of those requests, potentially making that memory available again much earlier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
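For context, the patch builds on the kernel's self-rearming delayed-work pattern: the handler runs roughly once a second, does its bookkeeping, and re-queues itself only while there is still outstanding work. Below is a minimal sketch of that pattern; the names my_driver, my_retire_handler and rings_idle are hypothetical stand-ins, whereas the real code uses drm_i915_private_t and i915_gem_retire_work_handler, shown in the diff that follows.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical driver state; stands in for drm_i915_private_t. */
struct my_driver {
	struct workqueue_struct *wq;
	struct delayed_work retire_work;
	bool suspended;
};

/* Stand-in for checking every ring's request_list. */
static bool rings_idle(struct my_driver *drv)
{
	return true;
}

static void my_retire_handler(struct work_struct *work)
{
	/* Recover the containing structure from the embedded work item,
	 * just as the real handler does with container_of(). */
	struct my_driver *drv =
		container_of(work, struct my_driver, retire_work.work);

	/* ... retire completed requests, flush pending writes ... */

	/* Re-arm ourselves one second out while there is still work. */
	if (!drv->suspended && !rings_idle(drv))
		queue_delayed_work(drv->wq, &drv->retire_work, HZ);
}

static void my_driver_start(struct my_driver *drv)
{
	INIT_DELAYED_WORK(&drv->retire_work, my_retire_handler);
	queue_delayed_work(drv->wq, &drv->retire_work, HZ);
}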
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	30
1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2873d068eb1..87c2df714f6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1935,6 +1935,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	bool idle;
+	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
 				mm.retire_work.work);
@@ -1948,11 +1950,31 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	}
 
 	i915_gem_retire_requests(dev);
 
-	if (!dev_priv->mm.suspended &&
-	    (!list_empty(&dev_priv->ring[RCS].request_list) ||
-	     !list_empty(&dev_priv->ring[VCS].request_list) ||
-	     !list_empty(&dev_priv->ring[BCS].request_list)))
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (!list_empty(&ring->gpu_write_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			ret = i915_gem_flush_ring(dev, ring, 0,
+						  I915_GEM_GPU_DOMAINS);
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (ret || request == NULL ||
+			    i915_add_request(dev, NULL, request, ring))
+				kfree(request);
+		}
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
 	mutex_unlock(&dev->struct_mutex);
 }
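Two details of the new loop are worth noting. The request is allocated with kzalloc() before i915_add_request() is attempted; on any failure path (a failed flush, a failed allocation, or a failed add) the request is simply kfree()d, which is safe even when the pointer is NULL since kfree(NULL) is a no-op, and on success i915_add_request() takes ownership of the allocation. Meanwhile idle is AND-accumulated across all rings, so the work re-queues itself every second (HZ) until every ring's request_list has drained, at which point the handler stops re-arming.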