author	Carl Worth <cworth@cworth.org>	2009-03-20 11:54:25 -0700
committer	Eric Anholt <eric@anholt.net>	2009-04-01 15:22:07 -0700
commit	5e118f4139feafe97e913df67b1f7c1e5083e535 (patch)
tree	a4d73fb1bb51083ab95b6167c6a8c621f6245a63
parent	7026d4ac1fc134566c2c946e6c0d849fc03ba7b7 (diff)
drm/i915: Add a spinlock to protect the active_list
This is a baby-step in the direction of having finer-grained locking than the
struct_mutex. Specifically, this will enable new debugging code to read the
active list for printing out GPU state when the GPU is wedged, (while the
struct_mutex is held, of course).

Signed-off-by: Carl Worth <cworth@cworth.org>
[anholt: indentation fix]
Signed-off-by: Eric Anholt <eric@anholt.net>
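The pattern the patch introduces is the usual spinlock critical section around
list manipulation, plus lock initialization at load time. A minimal standalone
sketch of that pattern (hypothetical mm_lists type and function names; the real
driver operates on drm_i915_private.mm while struct_mutex is still held):

	/* Simplified illustration of the locking pattern; not the driver code. */
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct mm_lists {
		spinlock_t active_list_lock;	/* protects active_list only */
		struct list_head active_list;
	};

	static void lists_init(struct mm_lists *mm)
	{
		spin_lock_init(&mm->active_list_lock);
		INIT_LIST_HEAD(&mm->active_list);
	}

	static void move_to_active(struct mm_lists *mm, struct list_head *entry)
	{
		/* Take the finer-grained lock only around the list update. */
		spin_lock(&mm->active_list_lock);
		list_move_tail(entry, &mm->active_list);
		spin_unlock(&mm->active_list_lock);
	}

Readers of the list, such as the dump code in i915_gem_debug.c and
i915_gem_debugfs.c, take the same lock before walking active_list, which is
exactly what the hunks below add.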
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	24
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debug.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debugfs.c	6
4 files changed, 29 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f48bb366b..317b1223e09 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -300,6 +300,7 @@ typedef struct drm_i915_private {
*
* A reference is held on the buffer while on this list.
*/
+ spinlock_t active_list_lock;
struct list_head active_list;
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9f4eceb8093..1449b452cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1325,8 +1325,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
obj_priv->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
+ spin_lock(&dev_priv->mm.active_list_lock);
list_move_tail(&obj_priv->list,
&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
obj_priv->last_rendering_seqno = seqno;
}
@@ -1468,6 +1470,7 @@ i915_gem_retire_request(struct drm_device *dev,
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
+ spin_lock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.active_list)) {
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
@@ -1482,7 +1485,7 @@ i915_gem_retire_request(struct drm_device *dev,
* this seqno.
*/
if (obj_priv->last_rendering_seqno != request->seqno)
- return;
+ goto out;
#if WATCH_LRU
DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1494,6 +1497,8 @@ i915_gem_retire_request(struct drm_device *dev,
else
i915_gem_object_move_to_inactive(obj);
}
+out:
+ spin_unlock(&dev_priv->mm.active_list_lock);
}
/**
@@ -2215,15 +2220,20 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
}
if (obj_priv->gtt_space == NULL) {
+ bool lists_empty;
+
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- if (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list)) {
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+ list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ if (lists_empty) {
DRM_ERROR("GTT full, but LRU list empty\n");
return -ENOMEM;
}
@@ -3679,6 +3689,7 @@ i915_gem_idle(struct drm_device *dev)
i915_gem_retire_requests(dev);
+ spin_lock(&dev_priv->mm.active_list_lock);
if (!dev_priv->mm.wedged) {
/* Active and flushing should now be empty as we've
* waited for a sequence higher than any pending execbuffer
@@ -3705,6 +3716,7 @@ i915_gem_idle(struct drm_device *dev)
obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
i915_gem_object_move_to_inactive(obj_priv->obj);
}
+ spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
struct drm_i915_gem_object *obj_priv;
@@ -3953,7 +3965,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
return ret;
+ spin_lock(&dev_priv->mm.active_list_lock);
BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -3997,6 +4012,7 @@ i915_gem_load(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 131c088f8c8..8d0b943e2c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -105,12 +105,14 @@ i915_dump_lru(struct drm_device *dev, const char *where)
struct drm_i915_gem_object *obj_priv;
DRM_INFO("active list %s {\n", where);
+ spin_lock(&dev_priv->mm.active_list_lock);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
+ spin_unlock(&dev_priv->mm.active_list_lock);
DRM_INFO("}\n");
DRM_INFO("flushing list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 455ec970b38..a1ac0c5e730 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -69,10 +69,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
+ spinlock_t *lock = NULL;
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
+ lock = &dev_priv->mm.active_list_lock;
+ spin_lock(lock);
head = &dev_priv->mm.active_list;
break;
case INACTIVE_LIST:
@@ -104,6 +107,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
seq_printf(m, "\n");
}
+
+ if (lock)
+ spin_unlock(lock);
return 0;
}