Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 266
1 file changed, 193 insertions, 73 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2c9d9cbaf65..3a30133f93e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,9 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
+
+#define BATCH_OFFSET_BIAS (256*1024)
 
 struct eb_vmas {
 	struct list_head vmas;
@@ -262,10 +265,12 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 
 static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
-		   struct drm_i915_gem_relocation_entry *reloc)
+		   struct drm_i915_gem_relocation_entry *reloc,
+		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
+	uint64_t delta = reloc->delta + target_offset;
 	char *vaddr;
 	int ret;
 
@@ -275,7 +280,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 
 	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
 				reloc->offset >> PAGE_SHIFT));
-	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		page_offset = offset_in_page(page_offset + sizeof(uint32_t));
@@ -286,7 +291,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 				(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
 		}
 
-		*(uint32_t *)(vaddr + page_offset) = 0;
+		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
 	}
 
 	kunmap_atomic(vaddr);
@@ -296,10 +301,12 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 
 static int
 relocate_entry_gtt(struct drm_i915_gem_object *obj,
-		   struct drm_i915_gem_relocation_entry *reloc)
+		   struct drm_i915_gem_relocation_entry *reloc,
+		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint64_t delta = reloc->delta + target_offset;
 	uint32_t __iomem *reloc_entry;
 	void __iomem *reloc_page;
 	int ret;
@@ -318,7 +325,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 			reloc->offset & PAGE_MASK);
 	reloc_entry = (uint32_t __iomem *)
 		(reloc_page + offset_in_page(reloc->offset));
-	iowrite32(reloc->delta, reloc_entry);
+	iowrite32(lower_32_bits(delta), reloc_entry);
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		reloc_entry += 1;
@@ -331,7 +338,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 			reloc_entry = reloc_page;
 		}
 
-		iowrite32(0, reloc_entry);
+		iowrite32(upper_32_bits(delta), reloc_entry);
 	}
 
 	io_mapping_unmap_atomic(reloc_page);
@@ -348,7 +355,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
 	struct i915_vma *target_vma;
-	uint32_t target_offset;
+	uint64_t target_offset;
 	int ret;
 
 	/* we've already hold a reference to all valid objects */
@@ -426,11 +433,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (obj->active && in_atomic())
 		return -EFAULT;
 
-	reloc->delta += target_offset;
 	if (use_cpu_reloc(obj))
-		ret = relocate_entry_cpu(obj, reloc);
+		ret = relocate_entry_cpu(obj, reloc, target_offset);
 	else
-		ret = relocate_entry_gtt(obj, reloc);
+		ret = relocate_entry_gtt(obj, reloc, target_offset);
 
 	if (ret)
 		return ret;
@@ -538,14 +544,14 @@ need_reloc_mappable(struct i915_vma *vma)
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_ring_buffer *ring,
+				struct intel_engine_cs *ring,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence;
-	unsigned flags;
+	uint64_t flags;
 	int ret;
 
 	flags = 0;
@@ -559,6 +565,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
 
 	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
@@ -592,8 +600,38 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
+static bool
+eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+	bool need_fence, need_mappable;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable = need_fence || need_reloc_mappable(vma);
+
+	WARN_ON((need_mappable || need_fence) &&
+		!i915_is_ggtt(vma->vm));
+
+	if (entry->alignment &&
+	    vma->node.start & (entry->alignment - 1))
+		return true;
+
+	if (need_mappable && !obj->map_and_fenceable)
+		return true;
+
+	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
+	    vma->node.start < BATCH_OFFSET_BIAS)
+		return true;
+
+	return false;
+}
+
 static int
-i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			    struct list_head *vmas,
 			    bool *need_relocs)
 {
@@ -607,6 +645,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	if (list_empty(vmas))
 		return 0;
 
+	i915_gem_retire_requests_ring(ring);
+
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
 	INIT_LIST_HEAD(&ordered_vmas);
@@ -653,26 +693,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(vma, vmas, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-			bool need_fence, need_mappable;
-
-			obj = vma->obj;
-
 			if (!drm_mm_node_allocated(&vma->node))
 				continue;
 
-			need_fence =
-				has_fenced_gpu_access &&
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-			need_mappable = need_fence || need_reloc_mappable(vma);
-
-			WARN_ON((need_mappable || need_fence) &&
-				!i915_is_ggtt(vma->vm));
-
-			if ((entry->alignment &&
-			     vma->node.start & (entry->alignment - 1)) ||
-			    (need_mappable && !obj->map_and_fenceable))
+			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
 				ret = i915_vma_unbind(vma);
 			else
 				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -708,7 +732,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_ring_buffer *ring,
+				  struct intel_engine_cs *ring,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec)
 {
@@ -773,9 +797,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		 * relocations were valid.
 		 */
 		for (j = 0; j < exec[i].relocation_count; j++) {
-			if (copy_to_user(&user_relocs[j].presumed_offset,
-					 &invalid_offset,
-					 sizeof(invalid_offset))) {
+			if (__copy_to_user(&user_relocs[j].presumed_offset,
+					   &invalid_offset,
+					   sizeof(invalid_offset))) {
 				ret = -EFAULT;
 				mutex_lock(&dev->struct_mutex);
 				goto err;
@@ -824,7 +848,7 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 				struct list_head *vmas)
 {
 	struct i915_vma *vma;
@@ -907,11 +931,11 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 	return 0;
 }
 
-static struct i915_hw_context *
+static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_ring_buffer *ring, const u32 ctx_id)
+			  struct intel_engine_cs *ring, const u32 ctx_id)
 {
-	struct i915_hw_context *ctx = NULL;
+	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
 	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
@@ -932,7 +956,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-				   struct intel_ring_buffer *ring)
+				   struct intel_engine_cs *ring)
 {
 	struct i915_vma *vma;
 
@@ -955,6 +979,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			if (i915_gem_obj_ggtt_bound(obj) &&
 			    i915_gem_obj_to_ggtt(obj)->pin_count)
 				intel_mark_fb_busy(obj, ring);
+
+			/* update for the implicit flush after a batch */
+			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 		}
 
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
@@ -964,7 +991,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static void
 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 				    struct drm_file *file,
-				    struct intel_ring_buffer *ring,
+				    struct intel_engine_cs *ring,
 				    struct drm_i915_gem_object *obj)
 {
 	/* Unconditionally force add_request to emit a full flush. */
@@ -976,13 +1003,15 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
 
 static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+			    struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
-		return 0;
+	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+		DRM_DEBUG("sol reset is gen7/rcs only\n");
+		return -EINVAL;
+	}
 
 	ret = intel_ring_begin(ring, 4 * 3);
 	if (ret)
@@ -999,6 +1028,56 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+/**
+ * Find one BSD ring to dispatch the corresponding BSD command.
+ * The Ring ID is returned.
+ */
+static int gen8_dispatch_bsd_ring(struct drm_device *dev,
+				  struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	/* Check whether the file_priv is using one ring */
+	if (file_priv->bsd_ring)
+		return file_priv->bsd_ring->id;
+	else {
+		/* If no, use the ping-pong mechanism to select one ring */
+		int ring_id;
+
+		mutex_lock(&dev->struct_mutex);
+		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
+			ring_id = VCS;
+			dev_priv->mm.bsd_ring_dispatch_index = 1;
+		} else {
+			ring_id = VCS2;
+			dev_priv->mm.bsd_ring_dispatch_index = 0;
+		}
+		file_priv->bsd_ring = &dev_priv->ring[ring_id];
+		mutex_unlock(&dev->struct_mutex);
+		return ring_id;
+	}
+}
+
+static struct drm_i915_gem_object *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma->obj;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1009,11 +1088,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_clip_rect *cliprects = NULL;
-	struct intel_ring_buffer *ring;
-	struct i915_hw_context *ctx;
+	struct intel_engine_cs *ring;
+	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-	u32 exec_start = args->batch_start_offset, exec_len;
+	u64 exec_start = args->batch_start_offset, exec_len;
 	u32 mask, flags;
 	int ret, mode, i;
 	bool need_relocs;
@@ -1035,7 +1114,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->flags & I915_EXEC_IS_PINNED)
 		flags |= I915_DISPATCH_PINNED;
 
-	if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
+	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
 			  (int)(args->flags & I915_EXEC_RING_MASK));
 		return -EINVAL;
@@ -1043,7 +1122,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
 		ring = &dev_priv->ring[RCS];
-	else
+	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
+		if (HAS_BSD2(dev)) {
+			int ring_id;
+			ring_id = gen8_dispatch_bsd_ring(dev, file);
+			ring = &dev_priv->ring[ring_id];
+		} else
+			ring = &dev_priv->ring[VCS];
+	} else
 		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
 
 	if (!intel_ring_initialized(ring)) {
@@ -1058,14 +1144,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (ring == &dev_priv->ring[RCS] &&
-		    mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4)
+		if (mode != 0 && ring != &dev_priv->ring[RCS]) {
+			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+			return -EINVAL;
+		}
+
+		if (mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4) {
+				DRM_DEBUG("no rel constants on pre-gen4\n");
 				return -EINVAL;
+			}
 
 			if (INTEL_INFO(dev)->gen > 5 &&
-			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+			    mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
+			}
 
 			/* The HW changed the meaning on this bit on gen6 */
 			if (INTEL_INFO(dev)->gen >= 6)
@@ -1113,6 +1207,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			ret = -EFAULT;
 			goto pre_mutex_err;
 		}
+	} else {
+		if (args->DR4 == 0xffffffff) {
+			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+			args->DR4 = 0;
+		}
+
+		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+			return -EINVAL;
+		}
 	}
 
 	intel_runtime_pm_get(dev_priv);
@@ -1132,7 +1236,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		mutex_unlock(&dev->struct_mutex);
 		ret = PTR_ERR(ctx);
 		goto pre_mutex_err;
-	} 
+	}
 
 	i915_gem_context_reference(ctx);
 
@@ -1142,6 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	eb = eb_create(args);
 	if (eb == NULL) {
+		i915_gem_context_unreference(ctx);
 		mutex_unlock(&dev->struct_mutex);
 		ret = -ENOMEM;
 		goto pre_mutex_err;
@@ -1153,7 +1258,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
+	batch_obj = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1355,18 +1460,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
 	if (!ret) {
+		struct drm_i915_gem_exec_object __user *user_exec_list =
+			to_user_ptr(args->buffers_ptr);
+
 		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
+					  args->buffer_count, ret);
+				break;
+			}
 		}
 	}
 
@@ -1389,6 +1497,11 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->rsvd2 != 0) {
+		DRM_DEBUG("dirty rvsd2 field\n");
+		return -EINVAL;
+	}
+
 	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
 			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	if (exec2_list == NULL)
@@ -1412,14 +1525,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 
 	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
 	if (!ret) {
 		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_DEBUG("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+		struct drm_i915_gem_exec_object2 *user_exec_list =
+				   to_user_ptr(args->buffers_ptr);
+		int i;
+
+		for (i = 0; i < args->buffer_count; i++) {
+			ret = __copy_to_user(&user_exec_list[i].offset,
+					     &exec2_list[i].offset,
+					     sizeof(user_exec_list[i].offset));
+			if (ret) {
+				ret = -EFAULT;
+				DRM_DEBUG("failed to copy %d exec entries "
+					  "back to user\n",
+					  args->buffer_count);
+				break;
+			}
 		}
 	}