author    Ben Skeggs <bskeggs@redhat.com>  2010-07-23 09:06:52 +1000
committer Ben Skeggs <bskeggs@redhat.com>  2010-08-17 11:58:56 +1000
commit    415e6186f17136075f7cc825ba3835d005773637 (patch)
tree      f6f837734b05a7006cab9b6badcda8590e1e4d24 /drivers/gpu/drm/nouveau/nouveau_gem.c
parent    56dfc58ea094e7a8607786f4762c65b09cd85738 (diff)
drm/nouveau: fix race condition when under memory pressure
When VRAM is running out it's possible that the client's push buffers get evicted to main memory. When they're validated back in, the GPU may be used for the copy back to VRAM, but the existing synchronisation code only deals with inter-channel sync, not sync between PFIFO and PGRAPH on the same channel. This leads to PFIFO fetching from command buffers that haven't quite been copied by PGRAPH yet.

This patch marks push buffers as such, and forces any GPU-assisted buffer moves to be done on a different channel, which triggers the correct synchronisation to happen before we submit them.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
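The fix replaces the open-coded cross-channel wait in validate_list() with a new nouveau_bo_sync_gpu() helper. The helper's body lives in nouveau_bo.c and is not part of this file's diff; judging purely from the lines it replaces below, a minimal sketch of it, assuming it simply factors out the same wait, would be:

    int
    nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
    {
    	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
    	int ret = 0;

    	/* Sketch only: wait when the buffer's last fence belongs to a
    	 * different channel, exactly as the removed code below did. */
    	if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
    		spin_lock(&nvbo->bo.lock);
    		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
    		spin_unlock(&nvbo->bo.lock);
    	}
    	return ret;
    }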
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 547f2c24c1e..a915dcdd9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-			spin_lock(&nvbo->bo.lock);
-			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret)) {
-				NV_ERROR(dev, "fail wait other chan\n");
-				return ret;
-			}
+		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail pre-validate sync\n");
+			return ret;
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 		}
 
-		nvbo->channel = chan;
+		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false, false);
 		nvbo->channel = NULL;
@@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 		}
 
+		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail post-validate sync\n");
+			return ret;
+		}
+
 		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -615,6 +616,21 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
+	/* Mark push buffers as being used on PFIFO, the validation code
+	 * will then make sure that if the pushbuf bo moves, that they
+	 * happen on the kernel channel, which will in turn cause a sync
+	 * to happen before we try and submit the push buffer.
+	 */
+	for (i = 0; i < req->nr_push; i++) {
+		if (push[i].bo_index >= req->nr_buffers) {
+			NV_ERROR(dev, "push %d buffer not in list\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		bo[push[i].bo_index].read_domains |= (1 << 31);
+	}
+
 	/* Validate buffer list */
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
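
The (1 << 31) trick reserves the top bit of the userspace-visible read_domains field as a kernel-internal "this is a push buffer" marker; userspace never sets it, and the hunk above rejects pushes whose buffers aren't in the list before marking them. Its consumer is the nvbo->channel assignment in the first hunk: marked buffers are validated with channel == NULL, so a GPU-assisted move falls back to the kernel's own channel. A hypothetical sketch of that fallback on the move path (the actual code is in nouveau_bo.c; the dev_priv->channel name is an assumption):

    /* Hypothetical sketch of the move path's channel choice.  Pushbuf
     * BOs were validated with nvbo->channel == NULL, so their copies
     * run on the kernel channel; the resulting fence then belongs to a
     * foreign channel, making the post-validate nouveau_bo_sync_gpu()
     * call above actually wait before the pushbuf is submitted. */
    static struct nouveau_channel *
    move_channel(struct drm_nouveau_private *dev_priv, struct nouveau_bo *nvbo)
    {
    	return nvbo->channel ? nvbo->channel : dev_priv->channel;
    }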