Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 109
1 file changed, 82 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f6b9baa6a63..445f27efe67 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
#include "i915_trace.h"
#include "intel_drv.h"
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+ int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+ if (space < 0)
+ space += ring->size;
+ return space;
+}
+
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
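
[Note] The new ring_space() helper centralizes a free-space calculation that was
previously open-coded at each call site: the distance from tail to head around
the ring, minus an 8-byte guard so that tail never catches head exactly (a full
ring would otherwise look identical to an empty one). The helper masks the
stored head with HEAD_ADDR itself, which is why init_ring_common() below can
now keep the raw register value in ring->head. A minimal standalone model of
the arithmetic, assuming the HEAD_ADDR low-bits mask from i915_reg.h:

/* Standalone model of ring_space() above; HEAD_ADDR is assumed to be
 * the low-bits head mask from i915_reg.h (value illustrative).
 */
#include <assert.h>
#include <stdio.h>

#define HEAD_ADDR 0x001FFFFC

struct model_ring {
	int head;	/* raw HEAD register value */
	int tail;
	int size;
};

static int model_ring_space(const struct model_ring *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

int main(void)
{
	struct model_ring ring = { .size = 4096 };

	/* head ahead of tail: the gap minus the 8-byte guard */
	ring.head = 1024; ring.tail = 0;
	assert(model_ring_space(&ring) == 1016);

	/* tail ahead of head: the negative gap wraps around the ring */
	ring.head = 0; ring.tail = 1024;
	assert(model_ring_space(&ring) == 3064);

	printf("ring_space model ok\n");
	return 0;
}
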
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
i915_kernel_lost_context(ring->dev);
else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
+ ring->space = ring_space(ring);
}
return 0;
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
}
ring->tail = 0;
- ring->space = ring->head - 8;
+ ring->space = ring_space(ring);
return 0;
}
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
- int reread = 0;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
u32 head;
+ /* If the reported head position has wrapped or hasn't advanced,
+ * fallback to the slow and accurate path.
+ */
+ head = intel_read_status_page(ring, 4);
+ if (head > ring->head) {
+ ring->head = head;
+ ring->space = ring_space(ring);
+ if (ring->space >= n)
+ return 0;
+ }
+
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- /* If the reported head position has wrapped or hasn't advanced,
- * fallback to the slow and accurate path.
- */
- head = intel_read_status_page(ring, 4);
- if (reread)
- head = I915_READ_HEAD(ring);
- ring->head = head & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
+ ring->head = I915_READ_HEAD(ring);
+ ring->space = ring_space(ring);
if (ring->space >= n) {
trace_i915_ring_wait_end(dev);
return 0;
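
[Note] The intel_wrap_ring_buffer() hunk above fixes a real bug, not just a
duplication: after the wrap sets ring->tail = 0, the old open-coded
"ring->space = ring->head - 8" neither masked the head nor handled underflow,
so a head offset below 8 produced a negative space. A short standalone
illustration of the difference (HEAD_ADDR value illustrative, as before):

#include <stdio.h>

#define HEAD_ADDR 0x001FFFFC

int main(void)
{
	int size = 4096;
	int head = 4;		/* head offset just past the ring start */

	/* old wrap path: ring->tail = 0, space = head - 8 */
	int old_space = head - 8;

	/* new wrap path: the same inputs through ring_space() */
	int new_space = (head & HEAD_ADDR) - (0 + 8);
	if (new_space < 0)
		new_space += size;

	printf("old=%d new=%d\n", old_space, new_space);	/* old=-4 new=4092 */
	return 0;
}
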
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
msleep(1);
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
- reread = 1;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
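
[Note] The intel_wait_ring_buffer() rework hoists the autoreported-head check
out of the polling loop. The head value the GPU periodically writes into the
status page (dword 4) is a cheap cached read, but it can be stale or can have
wrapped, so it is only trusted when it has advanced past the last value seen
(head > ring->head); otherwise the code falls back to polling the accurate
HEAD register over MMIO for up to 3 * HZ. A userspace model of the same
two-tier wait, with hypothetical stand-ins for the two read paths:

/* Userspace model of the two-tier wait above. read_cached_head() and
 * read_precise_head() are hypothetical stand-ins for the status-page
 * autoreport and the MMIO HEAD register read; the "hardware" here
 * consumes 16 bytes between precise polls to model GPU progress.
 */
#include <stdio.h>
#include <time.h>

static int hw_head;

static int read_cached_head(void)  { return hw_head; }       /* cheap, may lag */
static int read_precise_head(void) { return hw_head += 16; } /* slow, accurate */

static int space_for(int head, int tail, int size)
{
	int space = head - (tail + 8);	/* as in ring_space() */
	if (space < 0)
		space += size;
	return space;
}

static int wait_for_space(int tail, int size, int n)
{
	time_t end = time(NULL) + 3;	/* 3 * HZ budget in the kernel */

	/* Fast path: one cheap read of the cached head. The kernel only
	 * trusts it when it has advanced past the last value seen.
	 */
	if (space_for(read_cached_head(), tail, size) >= n)
		return 0;

	/* Slow path: poll the accurate source until the deadline. The
	 * kernel msleep(1)s here and bails out early if the GPU wedged.
	 */
	do {
		if (space_for(read_precise_head(), tail, size) >= n)
			return 0;
	} while (time(NULL) < end);

	return -1;			/* -EBUSY in the kernel */
}

int main(void)
{
	hw_head = 8;	/* nearly full ring: head barely ahead of tail */
	printf("waited: %d\n", wait_for_space(0, 4096, 256));
	return 0;
}
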
@@ -1052,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ u32 invalidate, u32 flush)
{
+ uint32_t cmd;
int ret;
- if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
return 0;
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH_DW);
- intel_ring_emit(ring, 0);
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_GPU_DOMAINS)
+ cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+ intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
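
[Note] gen6_ring_flush() previously gated only on the render flush domain and
always emitted a plain MI_FLUSH_DW. It now acts whenever any GPU domain is
being invalidated or flushed and, when invalidation was requested, sets the
TLB and BSD invalidate bits in the command dword; the trailing zero dword
becomes an explicit MI_NOOP (MI_NOOP encodes as zero, so the packet is
unchanged, only clearer). blt_ring_flush below gets the same treatment, gated
on the render domain and setting only MI_INVALIDATE_TLB. A sketch of the
resulting 4-dword packet, with illustrative opcode and bit values (the real
definitions live in i915_reg.h):

/* Model of the 4-dword MI_FLUSH_DW packet built above. The opcode and
 * bit values below are illustrative placeholders; the real definitions
 * live in i915_reg.h.
 */
#include <stdint.h>
#include <stdio.h>

#define MI_NOOP            0u
#define MI_FLUSH_DW        (0x26u << 23)	/* illustrative encoding */
#define MI_INVALIDATE_TLB  (1u << 18)		/* illustrative bit */
#define MI_INVALIDATE_BSD  (1u << 7)		/* illustrative bit */
#define GPU_DOMAINS        0x3eu		/* stand-in for I915_GEM_GPU_DOMAINS */

int main(void)
{
	uint32_t invalidate = GPU_DOMAINS;	/* caller asked for a full invalidate */
	uint32_t cmd = MI_FLUSH_DW;
	uint32_t packet[4];
	int i;

	if (invalidate & GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	packet[0] = cmd;
	packet[1] = 0;		/* post-sync address slot, unused here */
	packet[2] = 0;		/* post-sync data slot, unused here */
	packet[3] = MI_NOOP;	/* explicit no-op in the last slot */

	for (i = 0; i < 4; i++)
		printf("dw%d: 0x%08x\n", i, (unsigned int)packet[i]);
	return 0;
}
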
@@ -1223,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
}
static int blt_ring_flush(struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+ u32 invalidate, u32 flush)
{
+ uint32_t cmd;
int ret;
- if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
return 0;
ret = blt_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH_DW);
- intel_ring_emit(ring, 0);
+ cmd = MI_FLUSH_DW;
+ if (invalidate & I915_GEM_DOMAIN_RENDER)
+ cmd |= MI_INVALIDATE_TLB;
+ intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
}
@@ -1292,6 +1305,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return intel_init_ring_buffer(dev, ring);
}
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ ring->irq_get = gen6_render_ring_get_irq;
+ ring->irq_put = gen6_render_ring_put_irq;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&ring->gpu_write_list);
+
+ ring->size = size;
+ ring->effective_size = ring->size;
+ if (IS_I830(ring->dev))
+ ring->effective_size -= 128;
+
+ ring->map.offset = start;
+ ring->map.size = size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
+
+ drm_core_ioremap_wc(&ring->map, dev);
+ if (ring->map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ ring->virtual_start = (void __force __iomem *)ring->map.handle;
+ return 0;
+}
+
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
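
[Note] The new intel_render_ring_init_dri() entry point serves the legacy
DRI1/UMS path: instead of letting intel_init_ring_buffer() allocate a GEM
object for the ring, it copies the static render_ring template, patches in the
generation-specific hooks (the gen6 request/irq functions, or Ironlake's
pipe-control request/seqno pair on gen5), and ioremaps a caller-supplied range
write-combined. The 128-byte reduction of effective_size on i830 apparently
keeps the tail out of the last two cachelines of the buffer, working around an
erratum on that chipset. The copy-then-patch pattern in isolation, with
hypothetical names:

/* Sketch of the copy-a-template-then-patch pattern used by
 * intel_render_ring_init_dri() above: start from a static ops
 * template, then override per-generation hooks. All names here
 * are hypothetical, for illustration only.
 */
#include <stdio.h>

struct ring_ops {
	const char *name;
	void (*add_request)(void);
	void (*irq_get)(void);
};

static void legacy_add_request(void) { puts("legacy add_request"); }
static void gen6_add_request(void)   { puts("gen6 add_request"); }
static void legacy_irq_get(void)     { puts("legacy irq_get"); }
static void gen6_irq_get(void)       { puts("gen6 irq_get"); }

static const struct ring_ops render_template = {
	.name = "render ring",
	.add_request = legacy_add_request,
	.irq_get = legacy_irq_get,
};

int main(void)
{
	int gen = 6;				/* chipset generation probed at init */
	struct ring_ops ring = render_template;	/* struct copy, like *ring = render_ring */

	if (gen >= 6) {				/* patch newer hooks over the template */
		ring.add_request = gen6_add_request;
		ring.irq_get = gen6_irq_get;
	}

	ring.add_request();
	ring.irq_get();
	return 0;
}
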