Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 101
 1 file changed, 49 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32407e..d9e2208cfe9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
+	if (obj->ring != ring && obj->last_write_seqno) {
+		/* Keep the seqno relative to the current ring */
+		obj->last_write_seqno = seqno;
+	}
 	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
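Reviewer note on the hunk above: seqnos are per-ring counters, so a last_write_seqno issued on one ring is meaningless when checked against another ring's progress. The helper the driver uses for those checks is wrap-safe but assumes both values came from the same ring, which is why the stale value must be rewritten on migration. For illustration, the existing comparison helper from i915_drv.h (reproduced, not part of this patch):

    /* Wrap-safe via signed subtraction, but only meaningful when both
     * seqnos were issued on the same ring. */
    static inline bool
    i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }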
@@ -2254,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-		i915_gem_write_fence(dev, i, reg->obj);
+
+		/*
+		 * Commit delayed tiling changes if we have an object still
+		 * attached to the fence, otherwise just clear the fence.
+		 */
+		if (reg->obj) {
+			i915_gem_object_update_fence(reg->obj, reg,
+						     reg->obj->tiling_mode);
+		} else {
+			i915_gem_write_fence(dev, i, NULL);
+		}
 	}
 }
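The "delayed tiling changes" the comment refers to are the driver's lazy fence updates: a tiling change may only mark the fence dirty and defer the register write until the object is next used, so a restore path must replay the object's current state rather than the register's stale contents. A reduced sketch of that pattern, with hypothetical names (not i915 structures):

    #include <linux/types.h>

    /* Hypothetical reduction of the lazy-update pattern; not i915 code. */
    struct buffer {
            int tiling;             /* desired state, owned by software */
            bool fence_dirty;       /* hardware register lags behind */
    };

    struct fence {
            struct buffer *obj;     /* NULL when the fence is unused */
    };

    void program_fence(int id, struct buffer *obj);  /* writes HW from obj */
    void clear_fence(int id);                        /* disables the HW fence */

    static void change_tiling(struct buffer *buf, int tiling)
    {
            buf->tiling = tiling;
            buf->fence_dirty = true;        /* defer the MMIO write */
    }

    static void restore_fence(struct fence *f, int id)
    {
            if (f->obj)
                    program_fence(id, f->obj);      /* from current obj state */
            else
                    clear_fence(id);                /* don't replay stale data */
    }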
@@ -2653,7 +2667,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int fence_reg;
 	int fence_pitch_shift;
-	uint64_t val;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2676,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
 	}
 
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
 	if (obj) {
 		u32 size = obj->gtt_space->size;
+		uint64_t val;
 
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
@@ -2673,12 +2701,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
 
-	fence_reg += reg * 8;
-	I915_WRITE64(fence_reg, val);
-	POSTING_READ(fence_reg);
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
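The workaround comment is the heart of this change: the old I915_WRITE64 is not atomic on the bus, so the hardware could sample a half-updated fence between the two halves. The same disable-first / enable-last discipline applies to any 64-bit register whose low word carries the valid bit. A generic sketch of the sequence (writel/readl and the *_32_bits helpers are standard kernel APIs; the register layout here is an assumption for illustration):

    #include <linux/io.h>
    #include <linux/kernel.h>

    /* Assumed layout: low dword at reg (bit 0 = VALID), high dword at reg + 4. */
    static void write_fence64(void __iomem *reg, u64 val)
    {
            writel(0, reg);         /* step 1: turn the fence off */
            readl(reg);             /* post the disable before continuing */

            writel(upper_32_bits(val), reg + 4);    /* step 2: high half, fence off */
            readl(reg + 4);

            writel(lower_32_bits(val), reg);        /* step 3: VALID bit lands last */
            readl(reg);
    }

Each posting read forces the previous write out to the device, so no intermediate state is ever observable as a valid fence.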
@@ -2773,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
 		mb();
 
+	WARN(obj && (!obj->stride || !obj->tiling_mode),
+	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+	     obj->stride, obj->tiling_mode);
+
 	switch (INTEL_INFO(dev)->gen) {
 	case 7:
 	case 6:
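One subtlety worth calling out in review: the new WARN dereferences obj in its format arguments, which is safe only because the condition guards it. WARN evaluates the printk arguments solely when the condition is true, so obj is known non-NULL there. Roughly the macro's shape from include/asm-generic/bug.h (paraphrased, not verbatim):

    #define WARN(condition, format...) ({                   \
            int __ret_warn_on = !!(condition);              \
            if (unlikely(__ret_warn_on))                    \
                    __WARN_printf(format);  /* args evaluated only here */ \
            unlikely(__ret_warn_on);                        \
    })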
@@ -2796,56 +2832,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 	return fence - dev_priv->fence_regs;
 }
 
-struct write_fence {
-	struct drm_device *dev;
-	struct drm_i915_gem_object *obj;
-	int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
-	struct write_fence *args = data;
-
-	/* Required for SNB+ with LLC */
-	wbinvd();
-
-	/* Required for VLV */
-	i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                          struct drm_i915_fence_reg *fence,
                                          bool enable)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct write_fence args = {
-		.dev = obj->base.dev,
-		.fence = fence_number(dev_priv, fence),
-		.obj = enable ? obj : NULL,
-	};
-
-	/* In order to fully serialize access to the fenced region and
-	 * the update to the fence register we need to take extreme
-	 * measures on SNB+. In theory, the write to the fence register
-	 * flushes all memory transactions before, and coupled with the
-	 * mb() placed around the register write we serialise all memory
-	 * operations with respect to the changes in the tiler. Yet, on
-	 * SNB+ we need to take a step further and emit an explicit wbinvd()
-	 * on each processor in order to manually flush all memory
-	 * transactions before updating the fence register.
-	 *
-	 * However, Valleyview complicates matter. There the wbinvd is
-	 * insufficient and unlike SNB/IVB requires the serialising
-	 * register write. (Note that that register write by itself is
-	 * conversely not sufficient for SNB+.) To compromise, we do both.
-	 */
-	if (INTEL_INFO(args.dev)->gen >= 6)
-		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
-	else
-		i915_gem_write_fence(args.dev, args.fence, args.obj);
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
 	if (enable) {
-		obj->fence_reg = args.fence;
+		obj->fence_reg = reg;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
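For anyone wondering what the deleted workaround cost: on_each_cpu() runs a function on every online CPU via IPIs and, with the last argument set, waits for all of them to finish, while wbinvd() writes back and invalidates the entire local cache. Every fence update therefore stalled the whole machine. A minimal sketch of that pattern (generic payload, not the removed i915 code verbatim):

    #include <linux/smp.h>
    #include <asm/special_insns.h>

    static void flush_local_cache(void *unused)
    {
            wbinvd();       /* full writeback + invalidate of this CPU's caches */
    }

    static void flush_all_cpus(void)
    {
            /* IPIs every online CPU and waits (third argument) until
             * all of them have run flush_local_cache. */
            on_each_cpu(flush_local_cache, NULL, 1);
    }

The split 32-bit fence writes earlier in this patch make the incoherency window unobservable, so this heavyweight serialisation can be dropped.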
@@ -2853,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 		fence->obj = NULL;
 		list_del_init(&fence->lru_list);
 	}
+	obj->fence_dirty = false;
 }
 
 static int
@@ -2982,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 		return 0;
 
 	i915_gem_object_update_fence(obj, reg, enable);
-	obj->fence_dirty = false;
 
 	return 0;
 }
@@ -4611,7 +4608,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
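The final one-liner fixes an iteration over the wrong list_head member: an object sits on several lists at once, each threaded through its own embedded list_head, and the third argument of list_for_each_entry must name the member that the particular list uses. Iterating inactive_list via global_list applies container_of() with the wrong offset and walks garbage. A compile-ready illustration with hypothetical names:

    #include <linux/list.h>

    struct object {
            struct list_head global_list;   /* link for the driver-wide list */
            struct list_head mm_list;       /* link for a memory-manager list */
            unsigned long size;
    };

    static unsigned long count_inactive(struct list_head *inactive)
    {
            struct object *obj;
            unsigned long cnt = 0;

            /* "inactive" is threaded through obj->mm_list, so that is the
             * member we must iterate with; naming global_list here would
             * compute object pointers from the wrong embedded offset. */
            list_for_each_entry(obj, inactive, mm_list)
                    cnt += obj->size;

            return cnt;
    }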