Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  92
1 file changed, 62 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b84ff89ca..bf0195a96d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -219,19 +219,28 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- intel_emit_post_sync_nonzero_flush(ring);
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * Ensure that any following seqno writes only happen when the render
+ * cache is indeed flushed (but only if the caller actually wants that).
+ */
+ if (flush_domains)
+ flags |= PIPE_CONTROL_CS_STALL;
ret = intel_ring_begin(ring, 6);
if (ret)
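
For context, the flags assembled above are handed to the hardware as a
PIPE_CONTROL command using the ring helpers defined in this file. The rest of
the function is not shown in this hunk; the following is only a hedged sketch
of that emission pattern, with the exact dwords and the scratch_addr value
treated as assumptions rather than the elided code.

	/* Hedged sketch, not the elided remainder of the hunk above: a
	 * gen6-style PIPE_CONTROL carrying the accumulated flags, emitted
	 * with the intel_ring_begin/emit/advance helpers from this file.
	 * scratch_addr is assumed to point at the ring's scratch page.
	 */
	static int emit_pipe_control_sketch(struct intel_ring_buffer *ring,
					    u32 flags, u32 scratch_addr)
	{
		int ret;

		ret = intel_ring_begin(ring, 6);	/* reserve six dwords */
		if (ret)
			return ret;

		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
		intel_ring_emit(ring, flags);
		intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
		intel_ring_emit(ring, 0);	/* post-sync write, lower dword */
		intel_ring_emit(ring, 0);	/* post-sync write, upper dword */
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		return 0;
	}
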
@@ -433,11 +442,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ /* This is not explicitly set for GEN6, so read the register.
+ * see intel_ring_mi_set_context() for why we care.
+ * TODO: consider explicitly setting the bit for GEN5
+ */
+ ring->itlb_before_ctx_switch =
+ !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
}
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
return ret;
}
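
The itlb_before_ctx_switch flag cached in the hunk above is consumed by the
context-switch path outside this file: when it is set, a flush is emitted so
the TLBs are invalidated before MI_SET_CONTEXT. A hedged sketch of that idea
follows; the helper name and the exact domains passed to ring->flush() are
assumptions, not the upstream code.

	/* Hypothetical illustration of the consumer of itlb_before_ctx_switch:
	 * if the flag read at ring init is set, emit a flush so the TLBs are
	 * invalidated before the context switch. The flush arguments here are
	 * an assumption.
	 */
	static int flush_itlb_before_switch_sketch(struct intel_ring_buffer *ring)
	{
		if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch)
			return ring->flush(ring, 0, 0);

		return 0;
	}
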
@@ -825,7 +844,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+ GEN6_RENDER_L3_PARITY_ERROR));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -844,7 +867,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~0);
+ if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+ else
+ I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
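
The two IMR hunks above, together with the init_render_ring change earlier,
implement a single rule: on Ivybridge the render ring keeps the
GEN6_RENDER_L3_PARITY_ERROR interrupt unmasked whether or not user interrupts
are currently enabled. A sketch of that rule as a hypothetical helper, not
present in the patch:

	/* Hypothetical helper, not part of the patch: the IMR value written by
	 * the get_irq/put_irq paths above. On IVB the render ring leaves the
	 * L3 parity error interrupt unmasked even while user interrupts are
	 * disabled.
	 */
	static u32 render_ring_imr_sketch(struct drm_device *dev,
					  struct intel_ring_buffer *ring,
					  bool user_irq_enabled)
	{
		u32 imr = user_irq_enabled ? ~ring->irq_enable_mask : ~0;

		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
			imr &= ~GEN6_RENDER_L3_PARITY_ERROR;

		return imr;
	}
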
@@ -946,6 +972,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
+ ret = -ENOMEM;
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -969,6 +996,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ring->dev = dev;
@@ -1002,8 +1030,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unpin;
- ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
- ring->size);
+ ring->virtual_start =
+ ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+ ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
@@ -1089,20 +1118,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- bool was_interruptible;
int ret;
- /* XXX As we have not yet audited all the paths to check that
- * they are ready for ERESTARTSYS from intel_ring_begin, do not
- * allow us to be interruptible by a signal.
- */
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- ret = i915_wait_request(ring, seqno);
-
- dev_priv->mm.interruptible = was_interruptible;
+ ret = i915_wait_seqno(ring, seqno);
if (!ret)
i915_gem_retire_requests_ring(ring);
@@ -1200,8 +1218,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
}
msleep(1);
- if (atomic_read(&dev_priv->mm.wedged))
- return -EAGAIN;
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end(ring);
return -EBUSY;
@@ -1210,12 +1230,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
- if (unlikely(atomic_read(&dev_priv->mm.wedged)))
- return -EIO;
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
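
Both hunks above replace the raw atomic_read() of mm.wedged with a call to
i915_gem_check_wedge(), which lives in i915_gem.c and is not part of this
diff. A hedged sketch of the contract these call sites rely on, not the
actual implementation:

	/* Hedged sketch of the contract only, not the real
	 * i915_gem_check_wedge(): return 0 while the GPU is healthy; once it
	 * is wedged, interruptible callers get -EAGAIN so they can back off
	 * and retry after a reset, while non-interruptible callers get -EIO.
	 */
	static int check_wedge_sketch(struct drm_i915_private *dev_priv,
				      bool interruptible)
	{
		if (!atomic_read(&dev_priv->mm.wedged))
			return 0;

		return interruptible ? -EAGAIN : -EIO;
	}
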
@@ -1250,20 +1271,31 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
- I915_WRITE(GEN6_BSD_RNCID, 0x0);
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
- 50))
- DRM_ERROR("timed out waiting for IDLE Indicator\n");
+ GEN6_BSD_SLEEP_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+ /* Now that the ring is fully powered up, update the tail */
I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
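
The rewritten tail-write sequence relies on the masked-bit register helpers
rather than hand-rolled mask-and-value pairs. For reference, their
definitions as found in i915_reg.h: the upper 16 bits of these registers act
as a per-bit write-enable mask, so each write changes only the named bit.

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)
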
static int gen6_ring_flush(struct intel_ring_buffer *ring,