Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  300
1 file changed, 240 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050..0c67924ca80 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
+ /* release the fence reg _after_ flushing */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
bool retry_alloc = false;
int ret;
- if (dev_priv->mm.suspended)
- return -EBUSY;
-
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
@@ -2839,6 +2837,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
return 0;
}
+/*
+ * Prepare a buffer for use as a display plane. Use an uninterruptible
+ * wait for any needed flush, since the modesetting process is not
+ * supposed to be interrupted.
+ */
+int
+i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ uint32_t old_write_domain, old_read_domains;
+ int ret;
+
+ /* Not valid to be called on unbound objects. */
+ if (obj_priv->gtt_space == NULL)
+ return -EINVAL;
+
+ i915_gem_object_flush_gpu_write_domain(obj);
+
+ /* Wait on any GPU rendering and flushing to occur. */
+ if (obj_priv->active) {
+#if WATCH_BUF
+ DRM_INFO("%s: object %p wait for seqno %08x\n",
+ __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+ ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+ if (ret != 0)
+ return ret;
+ }
+
+ old_write_domain = obj->write_domain;
+ old_read_domains = obj->read_domains;
+
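+ /* Invalidate all read domains except GTT, so the buffer is
+ * re-validated for scanout access through the GTT.
+ */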
+ obj->read_domains &= I915_GEM_DOMAIN_GTT;
+
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ obj->write_domain = I915_GEM_DOMAIN_GTT;
+ obj_priv->dirty = 1;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+
+ return 0;
+}
+
/**
* Moves a single object to the CPU read, and possibly write, domain.
*
@@ -3198,7 +3247,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
@@ -3206,12 +3255,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
+ bool need_fence;
+
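+ /* A fence register is needed only when userspace requested one
+ * (EXEC_OBJECT_NEEDS_FENCE) and the object is actually tiled.
+ */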
+ need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence && !i915_obj_fenceable(dev, obj))
+ i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
if (ret)
return ret;
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ if (ret != -EBUSY && ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to install fence: %d\n",
+ ret);
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
entry->offset = obj_priv->gtt_offset;
/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3445,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
+ struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
@@ -3463,7 +3535,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
}
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry **relocs)
{
@@ -3478,8 +3550,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL)
+ if (*relocs == NULL) {
+ DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
return -ENOMEM;
+ }
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,7 +3577,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry *relocs)
{
@@ -3536,7 +3610,7 @@ err:
}
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3589,18 +3663,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
}
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec_list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs;
- int ret, ret2, i, pinned = 0;
+ int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
@@ -3614,25 +3688,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
+ if (object_list == NULL) {
+ DRM_ERROR("Failed to allocate object list for %d buffers\n",
args->buffer_count);
ret = -ENOMEM;
goto pre_mutex_err;
}
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- goto pre_mutex_err;
- }
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3946,6 @@ err:
mutex_unlock(&dev->struct_mutex);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
-
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3962,156 @@ err:
pre_mutex_err:
drm_free_large(object_list);
- drm_free_large(exec_list);
kfree(cliprects);
return ret;
}
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_gem_exec_object __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
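+ /* Translate each legacy exec entry into an exec2 entry. Pre-965
+ * chips need a fence register for any tiled access, so flag every
+ * buffer; 965+ can access tiled surfaces without a fence.
+ */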
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (!IS_I965G(dev))
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
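+ /* Fill out an execbuffer2 request from the legacy arguments. */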
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = 0;
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_gem_exec_object2 __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_gem_exec_object2 __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
+
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
@@ -3933,19 +4125,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (ret)
return ret;
}
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle tiled surfaces.
- */
- if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
- return ret;
- }
- }
+
obj_priv->pin_count++;
/* If the object is not active and not pending a flush,
@@ -4708,7 +4888,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;