author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-30 10:00:37 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-30 10:00:37 -0800
commit     4a490b78cb7e0e5efa44425df72a9fedc1c36366 (patch)
tree       8a867e39c4e555e4ba10772748b0bde8fe789e20 /drivers/gpu/drm/i915/i915_gem.c
parent     8d91a42e54eebc43f4d8f6064751ccba73528275 (diff)
parent     d5757dbe79870d825d0dec30074d48683e1d7e9a (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM update from Dave Airlie:
 "This is a bit larger due to me not bothering to do anything since before
  Xmas, and other people working too hard after I had clearly given up. It's
  got the 3 main x86 driver fixes pulls, and a bunch of tegra fixes; it
  doesn't fix the Ironlake bug yet, but that does seem to be getting closer.

   - radeon: gpu reset fixes and userspace packet support
   - i915: watermark fixes, workarounds, i830/845 fix
   - nouveau: nvd9/kepler microcode fixes, accel is now enabled and
     working, gk106 support
   - tegra: misc fixes."

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (34 commits)
  Revert "drm: tegra: protect DC register access with mutex"
  drm: tegra: program only one window during modeset
  drm: tegra: clean out old gem prototypes
  drm: tegra: remove redundant tegra2_tmds_config entry
  drm: tegra: protect DC register access with mutex
  drm: tegra: don't leave clients host1x member uninitialized
  drm: tegra: fix front_porch <-> back_porch mixup
  drm/nve0/graph: fix fuc, and enable acceleration on all known chipsets
  drm/nvc0/graph: fix fuc, and enable acceleration on GF119
  drm/nouveau/bios: cache ramcfg strap on later chipsets
  drm/nouveau/mxm: silence output if no bios data
  drm/nouveau/bios: parse/display extra version component
  drm/nouveau/bios: implement opcode 0xa9
  drm/nouveau/bios: update gpio parsing apis to match current design
  drm/nouveau: initial support for GK106
  drm/radeon: add WAIT_UNTIL to evergreen VM safe reg list
  drm/i915: disable shrinker lock stealing for create_mmap_offset
  drm/i915: optionally disable shrinker lock stealing
  drm/i915: fix flags in dma buf exporting
  drm/radeon: add support for MEM_WRITE packet
  ...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  77
1 file changed, 37 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e4510..da3c82e301b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	if (obj->base.map_list.map)
 		return 0;
 
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	/* Badly fragmented mmap space? The only way we can recover
 	 * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	i915_gem_shrink_all(dev_priv);
-	return drm_gem_create_mmap_offset(&obj->base);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2890,7 +2896,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_mm_node *free_space;
+	struct drm_mm_node *node;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2936,66 +2942,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		i915_gem_object_unpin_pages(obj);
+		return -ENOMEM;
+	}
+
 search_free:
 	if (map_and_fenceable)
-		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
+		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+							  size, alignment, obj->cache_level,
+							  0, dev_priv->mm.gtt_mappable_end);
 	else
-		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-						      size, alignment, obj->cache_level,
-						      false);
-
-	if (free_space != NULL) {
-		if (map_and_fenceable)
-			free_space =
-				drm_mm_get_block_range_generic(free_space,
-							       size, alignment, obj->cache_level,
-							       0, dev_priv->mm.gtt_mappable_end,
-							       false);
-		else
-			free_space =
-				drm_mm_get_block_generic(free_space,
-							 size, alignment, obj->cache_level,
-							 false);
-	}
-	if (free_space == NULL) {
+		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+						 size, alignment, obj->cache_level);
+	if (ret) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret == 0)
+			goto search_free;
 
-		goto search_free;
+		i915_gem_object_unpin_pages(obj);
+		kfree(node);
+		return ret;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      free_space,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return -EINVAL;
 	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
 		i915_gem_object_unpin_pages(obj);
-		drm_mm_put_block(free_space);
+		drm_mm_put_block(node);
 		return ret;
 	}
 
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_space = free_space;
-	obj->gtt_offset = free_space->start;
+	obj->gtt_space = node;
+	obj->gtt_offset = node->start;
 
 	fenceable =
-		free_space->size == fence_size &&
-		(free_space->start & (fence_alignment - 1)) == 0;
+		node->size == fence_size &&
+		(node->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -4392,6 +4386,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 			return 0;
 
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
 		unlock = false;
 	}
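
The create_mmap_offset and shrinker hunks work together: the allocation path raises dev_priv->mm.shrinker_no_lock_stealing around all three drm_gem_create_mmap_offset() attempts, and i915_gem_inactive_shrink() now backs off while the flag is set rather than "stealing" struct_mutex from the thread that is mid-allocation. Below is a minimal user-space C sketch of that guard shape only; device_mm, shrink() and alloc_mmap_offset() are illustrative names, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the driver's per-device mm state. */
struct device_mm {
	bool shrinker_no_lock_stealing;	/* set while the mmap offset is allocated */
};

/* Shrinker callback: refuse to proceed while the flag is set, instead of
 * reusing the lock already held by the allocating thread. */
static int shrink(struct device_mm *mm)
{
	if (mm->shrinker_no_lock_stealing)
		return 0;	/* report nothing freeable; reclaim moves on */
	printf("shrinking object caches\n");
	return 1;
}

/* Allocation path: forbid lock stealing for its whole duration and restore
 * the flag on the single exit path, the same bracket shape the patch adds. */
static int alloc_mmap_offset(struct device_mm *mm)
{
	int freed;

	mm->shrinker_no_lock_stealing = true;
	freed = shrink(mm);	/* an allocation here may recurse into shrink() */
	mm->shrinker_no_lock_stealing = false;

	return freed;
}

int main(void)
{
	struct device_mm mm = { .shrinker_no_lock_stealing = false };

	printf("freed during alloc: %d\n", alloc_mmap_offset(&mm));
	printf("freed normally:     %d\n", shrink(&mm));
	return 0;
}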
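The bind_to_gtt hunks replace the old search-free/get-block pair with a caller-allocated drm_mm_node handed to drm_mm_insert_node_generic()/drm_mm_insert_node_in_range_generic(), retrying via i915_gem_evict_something() when insertion fails and freeing the node on every error path. A compressed user-space sketch of that insert/evict/retry shape, with a toy allocator standing in for drm_mm and eviction assumed to always reclaim enough space:

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long start, size;
};

/* Toy address space: insertion fails (like -ENOSPC) until evict frees room. */
static unsigned long space_left = 0;

static int insert_node(struct node *node, unsigned long size)
{
	if (size > space_left)
		return -1;		/* stands in for -ENOSPC */
	node->start = 0;		/* a real allocator picks a free hole */
	node->size = size;
	space_left -= size;
	return 0;
}

static int evict_something(unsigned long size)
{
	space_left += size;		/* assume eviction reclaims enough */
	return 0;
}

/* The post-patch pattern: caller allocates the node, tries the insert,
 * loops back through eviction on failure, frees the node on error. */
static int bind(struct node **out, unsigned long size)
{
	struct node *node;
	int ret;

	node = calloc(1, sizeof(*node));
	if (node == NULL)
		return -1;

search_free:
	ret = insert_node(node, size);
	if (ret) {
		ret = evict_something(size);
		if (ret == 0)
			goto search_free;	/* room was made, retry */

		free(node);			/* nothing left to evict */
		return ret;
	}

	*out = node;
	return 0;
}

int main(void)
{
	struct node *node = NULL;

	if (bind(&node, 4096) == 0) {
		printf("bound at %lu, size %lu\n", node->start, node->size);
		free(node);
	}
	return 0;
}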