author | Ben Skeggs <bskeggs@redhat.com> | 2012-04-30 13:55:29 +1000
---|---|---
committer | Ben Skeggs <bskeggs@redhat.com> | 2012-05-24 16:55:53 +1000
commit | 5e120f6e4b3f35b741c5445dfc755f50128c3c44 (patch) |
tree | 210b2bb8f5dccfcb4a6c134341fa31a633ba5243 /drivers/gpu/drm/nouveau/nouveau_fence.c |
parent | d375e7d56dffa564a6c337d2ed3217fb94826100 (diff) |
drm/nouveau/fence: convert to exec engine, and improve channel sync
Now have a somewhat simpler semaphore sync implementation for nv17:nv84,
and have switched to using semaphores as fences on nv84+, making use of
the hardware's >= acquire operation.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
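
The improvement the message refers to hinges on the semaphore acquire comparing for "greater than or equal" rather than strict equality: each fence on a channel takes the next value of a monotonically increasing per-channel sequence counter, the hardware writes back the last-completed sequence, and a fence is known signalled once that write-back reaches its number. Below is a minimal host-side sketch of that test in plain C; every type and helper in it is an illustrative stand-in, not nouveau code:

```c
/*
 * Sketch (not driver code) of the ">=" fence test: one write-back word
 * per channel answers "done?" for every fence emitted on that channel.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct channel {
	uint32_t seq_emitted;   /* last sequence handed out to a fence */
	uint32_t seq_completed; /* word the GPU (here: the caller) writes back */
};

struct fence {
	struct channel *chan;
	uint32_t sequence;
};

static void fence_emit(struct channel *chan, struct fence *fence)
{
	fence->chan = chan;
	fence->sequence = ++chan->seq_emitted;
}

/* One ">=" comparison suffices for any fence on the channel. */
static bool fence_done(const struct fence *fence)
{
	return fence->chan->seq_completed >= fence->sequence;
}

int main(void)
{
	struct channel chan = { 0 };
	struct fence a, b;

	fence_emit(&chan, &a);	/* sequence 1 */
	fence_emit(&chan, &b);	/* sequence 2 */

	chan.seq_completed = 1;	/* pretend the GPU retired the first fence */
	printf("a done: %d, b done: %d\n", fence_done(&a), fence_done(&b));
	return 0;
}
```

A real driver additionally has to care about 32-bit sequence wraparound, which this sketch ignores.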
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_fence.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_fence.c | 474
1 file changed, 62 insertions, 412 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 2c10d54fc49..4ba41a45114 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -36,85 +36,71 @@
 #include "nouveau_software.h"
 #include "nouveau_dma.h"
 
-#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+        struct nouveau_fence *fence, *fnext;
+        spin_lock(&fctx->lock);
+        list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+                if (fence->work)
+                        fence->work(fence->priv, false);
+                fence->channel = NULL;
+                list_del(&fence->head);
+                nouveau_fence_unref(&fence);
+        }
+        spin_unlock(&fctx->lock);
+}
+
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+{
+        INIT_LIST_HEAD(&fctx->pending);
+        spin_lock_init(&fctx->lock);
+}
 
 void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
         struct drm_device *dev = chan->dev;
-        struct nouveau_fence *tmp, *fence;
-        uint32_t sequence;
-
-        spin_lock(&chan->fence.lock);
-
-        /* Fetch the last sequence if the channel is still up and running */
-        if (likely(!list_empty(&chan->fence.pending))) {
-                if (USE_REFCNT(dev))
-                        sequence = nvchan_rd32(chan, 0x48);
-                else
-                        sequence = atomic_read(&chan->fence.last_sequence_irq);
-
-                if (chan->fence.sequence_ack == sequence)
-                        goto out;
-                chan->fence.sequence_ack = sequence;
-        }
+        struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+        struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+        struct nouveau_fence *fence, *fnext;
 
-        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
-                if (fence->sequence > chan->fence.sequence_ack)
+        spin_lock(&fctx->lock);
+        list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+                if (priv->read(chan) < fence->sequence)
                         break;
 
-                fence->channel = NULL;
-                list_del(&fence->head);
                 if (fence->work)
                         fence->work(fence->priv, true);
-
+                fence->channel = NULL;
+                list_del(&fence->head);
                 nouveau_fence_unref(&fence);
         }
-
-out:
-        spin_unlock(&chan->fence.lock);
+        spin_unlock(&fctx->lock);
 }
 
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
         struct drm_device *dev = chan->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+        struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
         int ret;
 
-        ret = RING_SPACE(chan, 2);
-        if (ret)
-                return ret;
-
-        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
-                nouveau_fence_update(chan);
-
-                BUG_ON(chan->fence.sequence ==
-                       chan->fence.sequence_ack - 1);
-        }
-
-        fence->sequence = ++chan->fence.sequence;
-        fence->channel = chan;
-
-        kref_get(&fence->kref);
-        spin_lock(&chan->fence.lock);
-        list_add_tail(&fence->head, &chan->fence.pending);
-        spin_unlock(&chan->fence.lock);
+        fence->channel = chan;
+        fence->timeout = jiffies + (3 * DRM_HZ);
+        fence->sequence = ++fctx->sequence;
 
-        if (USE_REFCNT(dev)) {
-                if (dev_priv->card_type < NV_C0)
-                        BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-                else
-                        BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-        } else {
-                BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+        ret = priv->emit(fence);
+        if (!ret) {
+                kref_get(&fence->kref);
+                spin_lock(&fctx->lock);
+                list_add_tail(&fence->head, &fctx->pending);
+                spin_unlock(&fctx->lock);
         }
-        OUT_RING (chan, fence->sequence);
-        FIRE_RING(chan);
-        fence->timeout = jiffies + 3 * DRM_HZ;
 
-        return 0;
+        return ret;
 }
 
 bool
@@ -158,6 +144,23 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
         return ret;
 }
 
+int
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+        struct nouveau_channel *prev = fence ? fence->channel : NULL;
+        struct drm_device *dev = chan->dev;
+        struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+        int ret = 0;
+
+        if (unlikely(prev && prev != chan && !nouveau_fence_done(fence))) {
+                ret = priv->sync(fence, chan);
+                if (unlikely(ret))
+                        ret = nouveau_fence_wait(fence, true, false);
+        }
+
+        return ret;
+}
+
 static void
 nouveau_fence_del(struct kref *kref)
 {
@@ -186,6 +189,9 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
         struct nouveau_fence *fence;
         int ret = 0;
 
+        if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+                return -ENODEV;
+
         fence = kzalloc(sizeof(*fence), GFP_KERNEL);
         if (!fence)
                 return -ENOMEM;
@@ -200,359 +206,3 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
         *pfence = fence;
         return ret;
 }
-
-struct nouveau_semaphore {
-        struct kref ref;
-        struct drm_device *dev;
-        struct drm_mm_node *mem;
-};
-
-void
-nouveau_fence_work(struct nouveau_fence *fence,
-                   void (*work)(void *priv, bool signalled),
-                   void *priv)
-{
-        if (!fence->channel) {
-                work(priv, true);
-        } else {
-                fence->work = work;
-                fence->priv = priv;
-        }
-}
-
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_semaphore *sema;
-        int size = (dev_priv->chipset < 0x84) ? 4 : 16;
-        int ret, i;
-
-        if (!USE_SEMA(dev))
-                return NULL;
-
-        sema = kmalloc(sizeof(*sema), GFP_KERNEL);
-        if (!sema)
-                goto fail;
-
-        ret = drm_mm_pre_get(&dev_priv->fence.heap);
-        if (ret)
-                goto fail;
-
-        spin_lock(&dev_priv->fence.lock);
-        sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
-        if (sema->mem)
-                sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
-        spin_unlock(&dev_priv->fence.lock);
-
-        if (!sema->mem)
-                goto fail;
-
-        kref_init(&sema->ref);
-        sema->dev = dev;
-        for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
-                nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
-        return sema;
-fail:
-        kfree(sema);
-        return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
-        struct nouveau_semaphore *sema =
-                container_of(ref, struct nouveau_semaphore, ref);
-        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-        spin_lock(&dev_priv->fence.lock);
-        drm_mm_put_block(sema->mem);
-        spin_unlock(&dev_priv->fence.lock);
-
-        kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
-        struct nouveau_semaphore *sema = priv;
-        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-        if (unlikely(!signalled))
-                nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
-        kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-        struct nouveau_fence *fence = NULL;
-        u64 offset = chan->fence.vma.offset + sema->mem->start;
-        int ret;
-
-        if (dev_priv->chipset < 0x84) {
-                ret = RING_SPACE(chan, 4);
-                if (ret)
-                        return ret;
-
-                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
-                OUT_RING (chan, NvSema);
-                OUT_RING (chan, offset);
-                OUT_RING (chan, 1);
-        } else
-        if (dev_priv->chipset < 0xc0) {
-                ret = RING_SPACE(chan, 7);
-                if (ret)
-                        return ret;
-
-                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-                OUT_RING (chan, chan->vram_handle);
-                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                OUT_RING (chan, upper_32_bits(offset));
-                OUT_RING (chan, lower_32_bits(offset));
-                OUT_RING (chan, 1);
-                OUT_RING (chan, 1); /* ACQUIRE_EQ */
-        } else {
-                ret = RING_SPACE(chan, 5);
-                if (ret)
-                        return ret;
-
-                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                OUT_RING (chan, upper_32_bits(offset));
-                OUT_RING (chan, lower_32_bits(offset));
-                OUT_RING (chan, 1);
-                OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
-        }
-
-        /* Delay semaphore destruction until its work is done */
-        ret = nouveau_fence_new(chan, &fence);
-        if (ret)
-                return ret;
-
-        kref_get(&sema->ref);
-        nouveau_fence_work(fence, semaphore_work, sema);
-        nouveau_fence_unref(&fence);
-        return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-        struct nouveau_fence *fence = NULL;
-        u64 offset = chan->fence.vma.offset + sema->mem->start;
-        int ret;
-
-        if (dev_priv->chipset < 0x84) {
-                ret = RING_SPACE(chan, 5);
-                if (ret)
-                        return ret;
-
-                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-                OUT_RING (chan, NvSema);
-                OUT_RING (chan, offset);
-                BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-                OUT_RING (chan, 1);
-        } else
-        if (dev_priv->chipset < 0xc0) {
-                ret = RING_SPACE(chan, 7);
-                if (ret)
-                        return ret;
-
-                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-                OUT_RING (chan, chan->vram_handle);
-                BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                OUT_RING (chan, upper_32_bits(offset));
-                OUT_RING (chan, lower_32_bits(offset));
-                OUT_RING (chan, 1);
-                OUT_RING (chan, 2); /* RELEASE */
-        } else {
-                ret = RING_SPACE(chan, 5);
-                if (ret)
-                        return ret;
-
-                BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-                OUT_RING (chan, upper_32_bits(offset));
-                OUT_RING (chan, lower_32_bits(offset));
-                OUT_RING (chan, 1);
-                OUT_RING (chan, 0x1002); /* RELEASE */
-        }
-
-        /* Delay semaphore destruction until its work is done */
-        ret = nouveau_fence_new(chan, &fence);
-        if (ret)
-                return ret;
-
-        kref_get(&sema->ref);
-        nouveau_fence_work(fence, semaphore_work, sema);
-        nouveau_fence_unref(&fence);
-        return 0;
-}
-
-int
-nouveau_fence_sync(struct nouveau_fence *fence,
-                   struct nouveau_channel *wchan)
-{
-        struct nouveau_channel *chan;
-        struct drm_device *dev = wchan->dev;
-        struct nouveau_semaphore *sema;
-        int ret = 0;
-
-        chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
-        if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
-                goto out;
-
-        sema = semaphore_alloc(dev);
-        if (!sema) {
-                /* Early card or broken userspace, fall back to
-                 * software sync. */
-                ret = nouveau_fence_wait(fence, true, false);
-                goto out;
-        }
-
-        /* try to take chan's mutex, if we can't take it right away
-         * we have to fallback to software sync to prevent locking
-         * order issues
-         */
-        if (!mutex_trylock(&chan->mutex)) {
-                ret = nouveau_fence_wait(fence, true, false);
-                goto out_unref;
-        }
-
-        /* Make wchan wait until it gets signalled */
-        ret = semaphore_acquire(wchan, sema);
-        if (ret)
-                goto out_unlock;
-
-        /* Signal the semaphore from chan */
-        ret = semaphore_release(chan, sema);
-
-out_unlock:
-        mutex_unlock(&chan->mutex);
-out_unref:
-        kref_put(&sema->ref, semaphore_free);
-out:
-        if (chan)
-                nouveau_channel_put_unlocked(&chan);
-        return ret;
-}
-
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
-{
-        struct drm_device *dev = chan->dev;
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_gpuobj *obj = NULL;
-        int ret;
-
-        if (dev_priv->card_type < NV_C0) {
-                ret = RING_SPACE(chan, 2);
-                if (ret)
-                        return ret;
-
-                BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
-                OUT_RING (chan, NvSw);
-                FIRE_RING (chan);
-        }
-
-        /* Setup area of memory shared between all channels for x-chan sync */
-        if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
-                struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
-                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-                                             mem->start << PAGE_SHIFT,
-                                             mem->size, NV_MEM_ACCESS_RW,
-                                             NV_MEM_TARGET_VRAM, &obj);
-                if (ret)
-                        return ret;
-
-                ret = nouveau_ramht_insert(chan, NvSema, obj);
-                nouveau_gpuobj_ref(NULL, &obj);
-                if (ret)
-                        return ret;
-        } else
-        if (USE_SEMA(dev)) {
-                /* map fence bo into channel's vm */
-                ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
-                                         &chan->fence.vma);
-                if (ret)
-                        return ret;
-        }
-
-        atomic_set(&chan->fence.last_sequence_irq, 0);
-        return 0;
-}
-
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
-{
-        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-        struct nouveau_fence *tmp, *fence;
-
-        spin_lock(&chan->fence.lock);
-        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
-                fence->channel = NULL;
-                list_del(&fence->head);
-
-                if (unlikely(fence->work))
-                        fence->work(fence->priv, false);
-
-                kref_put(&fence->kref, nouveau_fence_del);
-        }
-        spin_unlock(&chan->fence.lock);
-
-        nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
-}
-
-int
-nouveau_fence_init(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
-        int ret;
-
-        /* Create a shared VRAM heap for cross-channel sync. */
-        if (USE_SEMA(dev)) {
-                ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
-                                     0, 0, NULL, &dev_priv->fence.bo);
-                if (ret)
-                        return ret;
-
-                ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
-                if (ret)
-                        goto fail;
-
-                ret = nouveau_bo_map(dev_priv->fence.bo);
-                if (ret)
-                        goto fail;
-
-                ret = drm_mm_init(&dev_priv->fence.heap, 0,
-                                  dev_priv->fence.bo->bo.mem.size);
-                if (ret)
-                        goto fail;
-
-                spin_lock_init(&dev_priv->fence.lock);
-        }
-
-        return 0;
-fail:
-        nouveau_bo_unmap(dev_priv->fence.bo);
-        nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-        return ret;
-}
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        if (USE_SEMA(dev)) {
-                drm_mm_takedown(&dev_priv->fence.heap);
-                nouveau_bo_unmap(dev_priv->fence.bo);
-                nouveau_bo_unpin(dev_priv->fence.bo);
-                nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-        }
-}
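
The "convert to exec engine" half of the change is visible in the rewritten common code above: the chipset if/else ladders (USE_REFCNT/USE_SEMA, the NV84/NVC0 branches) are gone, and nouveau_fence_update/emit/sync now call through emit/read/sync hooks on the fence engine context, so each chipset family supplies its own backend. A standalone sketch of that dispatch shape, assuming hypothetical stand-in types (this struct fence_priv is not nouveau's real nouveau_fence_priv):

```c
/*
 * Sketch of per-chipset fence hooks behind one ops table.  The common
 * code only ever calls through priv->emit/read/sync; a trivial software
 * backend stands in for a real chipset implementation here.
 */
#include <stdint.h>
#include <stdio.h>

struct chan; /* opaque channel; illustrative only */

struct fence_priv {
	int      (*emit)(struct chan *chan, uint32_t sequence);
	uint32_t (*read)(struct chan *chan);
	int      (*sync)(struct chan *from, struct chan *to);
};

static uint32_t sw_seq;

static int sw_emit(struct chan *chan, uint32_t sequence)
{
	(void)chan;
	sw_seq = sequence; /* pretend the GPU completed it immediately */
	return 0;
}

static uint32_t sw_read(struct chan *chan)
{
	(void)chan;
	return sw_seq;
}

static int sw_sync(struct chan *from, struct chan *to)
{
	(void)from;
	(void)to;
	return 0; /* nothing to order in a single-threaded sketch */
}

static const struct fence_priv sw_fence = {
	.emit = sw_emit,
	.read = sw_read,
	.sync = sw_sync,
};

int main(void)
{
	const struct fence_priv *priv = &sw_fence;

	priv->emit(NULL, 42);
	printf("last completed sequence: %u\n", priv->read(NULL));
	return priv->sync(NULL, NULL);
}
```

Keeping the hooks behind one priv pointer is what lets the simpler nv17:nv84 semaphore path and the nv84+ semaphores-as-fences path coexist without the common code caring which one is in use.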