From ebb945a94bba2ce8dff7b0942ff2b3f2a52a0a69 Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Fri, 20 Jul 2012 08:17:34 +1000
Subject: drm/nouveau: port all engines to new engine module format

This is a HUGE commit, but it's not nearly as bad as it looks - any
problems can be isolated to a particular chipset and engine combination.
It was simply too difficult to port each one at a time; the compat
layers are *already* ridiculous.

Most of the changes here are simply to the glue; the process for each of
the engine modules was to start with a standard skeleton and copy+paste
the old code into the appropriate places, fixing up variable names etc.
as needed.

v2: Marcin Slusarz
- fix find/replace bug in license header

v3: Ben Skeggs
- bump indirect pushbuf size to 8KiB; 4KiB was barely enough for userspace
  and left no space for the kernel's requirements during GEM pushbuf
  submission
- fix duplicate assignments noticed by clang

v4: Marcin Slusarz
- add sparse annotations to nv04_fifo_pause/nv04_fifo_start
- use ioread32_native/iowrite32_native for fifo control registers

v5: Ben Skeggs
- rebase on v3.6-rc4, modified to keep copy engine fix intact
- nv10/fence: unmap fence bo before destroying
- fixed fermi regression when using nvidia gr fuc
- fixed typo in supported dma_mask checking

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_bo.c | 347 ++++++++++++++++-------------------
 1 file changed, 160 insertions(+), 187 deletions(-)

(limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c3e66ae04c8..3465df32722 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,66 +27,57 @@
  * Jeremy Kolb
  */

-#include "drmP.h"
-#include "ttm/ttm_page_alloc.h"
+#include <core/engine.h>

-#include
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include
 #include "nouveau_fence.h"
-#include
-#include
-#include
-#include
+#include "nouveau_bo.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"

 /*
  * NV10-NV40 tiling helpers
  */

 static void
-nv10_bo_update_tile_region(struct drm_device *dev,
-			   struct nouveau_tile_reg *tilereg, uint32_t addr,
-			   uint32_t size, uint32_t pitch, uint32_t flags)
+nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
+			   u32 addr, u32 size, u32 pitch, u32 flags)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i = tilereg - dev_priv->tile.reg, j;
-	struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
-	unsigned long save;
+	struct nouveau_drm *drm = nouveau_newpriv(dev);
+	int i = reg - drm->tile.reg;
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
+	struct nouveau_engine *engine;

-	nouveau_fence_unref(&tilereg->fence);
+	nouveau_fence_unref(&reg->fence);

 	if (tile->pitch)
-		nvfb_tile_fini(dev, i);
+		pfb->tile.fini(pfb, i, tile);

 	if (pitch)
-		nvfb_tile_init(dev, i, addr, size, pitch, flags);
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-	nv04_fifo_cache_pull(dev, false);
+		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

-	nouveau_wait_for_idle(dev);
-
-	nvfb_tile_prog(dev, i);
-	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
-		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
-			dev_priv->eng[j]->set_tile_region(dev, i);
-	}
+	pfb->tile.prog(pfb, i, tile);

-	nv04_fifo_cache_pull(dev, true);
-	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+		engine->tile_prog(engine, i);
+	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+		engine->tile_prog(engine, i);
 }

-static struct nouveau_tile_reg *
+static struct nouveau_drm_tile *
 nv10_bo_get_tile_region(struct drm_device *dev, int i)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	struct nouveau_drm *drm = nouveau_newpriv(dev);
+	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

-	spin_lock(&dev_priv->tile.lock);
+	spin_lock(&drm->tile.lock);

 	if (!tile->used &&
 	    (!tile->fence || nouveau_fence_done(tile->fence))
@@ -94,18 +85,18 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 	else
 		tile = NULL;

-	spin_unlock(&dev_priv->tile.lock);
+	spin_unlock(&drm->tile.lock);
 	return tile;
 }

 static void
-nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
-			struct nouveau_fence *fence)
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
+			struct nouveau_fence *fence)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_newpriv(dev);

 	if (tile) {
-		spin_lock(&dev_priv->tile.lock);
+		spin_lock(&drm->tile.lock);
 		if (fence) {
 			/* Mark it as pending. */
 			tile->fence = fence;
@@ -113,25 +104,27 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
 		}

 		tile->used = false;
-		spin_unlock(&dev_priv->tile.lock);
+		spin_unlock(&drm->tile.lock);
 	}
 }

-static struct nouveau_tile_reg *
-nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		   uint32_t pitch, uint32_t flags)
+static struct nouveau_drm_tile *
+nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
+		   u32 size, u32 pitch, u32 flags)
 {
-	struct nouveau_tile_reg *tile, *found = NULL;
+	struct nouveau_drm *drm = nouveau_newpriv(dev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;

-	for (i = 0; i < nvfb_tile_nr(dev); i++) {
+	for (i = 0; i < pfb->tile.regions; i++) {
 		tile = nv10_bo_get_tile_region(dev, i);

 		if (pitch && !found) {
 			found = tile;
 			continue;

-		} else if (tile && nvfb_tile(dev, i)->pitch) {
+		} else if (tile && pfb->tile.region[i].pitch) {
 			/* Kill an unused tile region. */
 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
@@ -148,13 +141,12 @@ nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);

 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
-
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 	kfree(nvbo);
 }
@@ -163,23 +155,24 @@ static void
 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 		       int *align, int *size)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_device *device = nv_device(drm->device);

-	if (dev_priv->card_type < NV_50) {
+	if (device->card_type < NV_50) {
 		if (nvbo->tile_mode) {
-			if (dev_priv->chipset >= 0x40) {
+			if (device->chipset >= 0x40) {
 				*align = 65536;
 				*size = roundup(*size, 64 * nvbo->tile_mode);

-			} else if (dev_priv->chipset >= 0x30) {
+			} else if (device->chipset >= 0x30) {
 				*align = 32768;
 				*size = roundup(*size, 64 * nvbo->tile_mode);

-			} else if (dev_priv->chipset >= 0x20) {
+			} else if (device->chipset >= 0x20) {
 				*align = 16384;
 				*size = roundup(*size, 64 * nvbo->tile_mode);

-			} else if (dev_priv->chipset >= 0x10) {
+			} else if (device->chipset >= 0x10) {
 				*align = 16384;
 				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
@@ -198,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	       struct sg_table *sg,
 	       struct nouveau_bo **pnvbo)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_newpriv(dev);
 	struct nouveau_bo *nvbo;
 	size_t acc_size;
 	int ret;
@@ -215,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	INIT_LIST_HEAD(&nvbo->vma_list);
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
-	nvbo->bo.bdev = &dev_priv->ttm.bdev;
+	nvbo->bo.bdev = &drm->ttm.bdev;

 	nvbo->page_shift = 12;
-	if (dev_priv->chan_vm) {
+	if (drm->client.base.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
+			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
 	}

 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);

-	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
 				       sizeof(struct nouveau_bo));

-	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
 			  type, &nvbo->placement,
 			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
 			  nouveau_bo_del_ttm);
@@ -259,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
 static void
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
-	int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT;
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

-	if (dev_priv->card_type == NV_10 &&
+	if (nv_device(drm->device)->card_type == NV_10 &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
@@ -302,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
 int
 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret;

 	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
-		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
-			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
 			 1 << bo->mem.mem_type, memtype);
 		return -EINVAL;
 	}
@@ -326,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
-			dev_priv->fb_aper_free -= bo->mem.size;
+			drm->gem.vram_available -= bo->mem.size;
 			break;
 		case TTM_PL_TT:
-			dev_priv->gart_info.aper_free -= bo->mem.size;
+			drm->gem.gart_available -= bo->mem.size;
 			break;
 		default:
 			break;
@@ -345,7 +338,7 @@ out:
 int
 nouveau_bo_unpin(struct nouveau_bo *nvbo)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	int ret;
@@ -362,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
-			dev_priv->fb_aper_free += bo->mem.size;
+			drm->gem.vram_available += bo->mem.size;
 			break;
 		case TTM_PL_TT:
-			dev_priv->gart_info.aper_free += bo->mem.size;
+			drm->gem.gart_available += bo->mem.size;
 			break;
 		default:
 			break;
@@ -460,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 }

 static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
-		      unsigned long size, uint32_t page_flags,
-		      struct page *dummy_read_page)
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+		      uint32_t page_flags, struct page *dummy_read)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct drm_device *dev = drm->dev;

-	switch (dev_priv->gart_info.type) {
-#if __OS_HAS_AGP
-	case NOUVEAU_GART_AGP:
-		return ttm_agp_tt_create(bdev, dev->agp->bridge,
-					 size, page_flags, dummy_read_page);
-#endif
-	case NOUVEAU_GART_PDMA:
-	case NOUVEAU_GART_HW:
-		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
-						dummy_read_page);
-	default:
-		NV_ERROR(dev, "Unknown GART type %d\n",
-			 dev_priv->gart_info.type);
-		break;
+	if (drm->agp.stat == ENABLED) {
+		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+					 page_flags, dummy_read);
 	}

-	return NULL;
+	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
 }

 static int
@@ -497,8 +478,7 @@ static int
 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 			 struct ttm_mem_type_manager *man)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);

 	switch (type) {
 	case TTM_PL_SYSTEM:
@@ -507,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		if (dev_priv->card_type >= NV_50) {
+		if (nv_device(drm->device)->card_type >= NV_50) {
 			man->func = &nouveau_vram_manager;
 			man->io_reserve_fastpath = false;
 			man->use_io_reserve_lru = true;
@@ -521,35 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_WC;
 		break;
 	case TTM_PL_TT:
-		if (dev_priv->card_type >= NV_50)
+		if (nv_device(drm->device)->card_type >= NV_50)
 			man->func = &nouveau_gart_manager;
 		else
-		if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
+		if (drm->agp.stat != ENABLED)
 			man->func = &nv04_gart_manager;
 		else
 			man->func = &ttm_bo_manager_func;
-		switch (dev_priv->gart_info.type) {
-		case NOUVEAU_GART_AGP:
+
+		if (drm->agp.stat == ENABLED) {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_WC;
 			man->default_caching = TTM_PL_FLAG_WC;
-			break;
-		case NOUVEAU_GART_PDMA:
-		case NOUVEAU_GART_HW:
+		} else {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
-			break;
-		default:
-			NV_ERROR(dev, "Unknown GART type: %d\n",
-				 dev_priv->gart_info.type);
-			return -EINVAL;
 		}
+
 		break;
 	default:
-		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
 		return -EINVAL;
 	}
 	return 0;
@@ -783,20 +756,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
-	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					 &chan->m2mf_ntfy);
+	int ret = RING_SPACE(chan, 6);
 	if (ret == 0) {
-		ret = RING_SPACE(chan, 6);
-		if (ret == 0) {
-			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-			OUT_RING (chan, handle);
-			BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-			OUT_RING (chan, NvNotify0);
-			OUT_RING (chan, NvDmaFB);
-			OUT_RING (chan, NvDmaFB);
-		} else {
-			nouveau_ramht_remove(chan, NvNotify0);
-		}
+		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING (chan, handle);
+		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+		OUT_RING (chan, NvNotify0);
+		OUT_RING (chan, NvDmaFB);
+		OUT_RING (chan, NvDmaFB);
 	}

 	return ret;
@@ -895,16 +862,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 static int
 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
 {
-	int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
-					 &chan->m2mf_ntfy);
+	int ret = RING_SPACE(chan, 4);
 	if (ret == 0) {
-		ret = RING_SPACE(chan, 4);
-		if (ret == 0) {
-			BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-			OUT_RING (chan, handle);
-			BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-			OUT_RING (chan, NvNotify0);
-		}
+		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+		OUT_RING (chan, handle);
+		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+		OUT_RING (chan, NvNotify0);
 	}

 	return ret;
@@ -915,8 +878,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
 		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
 	if (mem->mem_type == TTM_PL_TT)
-		return chan->gart_handle;
-	return chan->vram_handle;
+		return NvDmaTT;
+	return NvDmaFB;
 }

 static int
@@ -972,8 +935,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
 	struct nouveau_mem *node = mem->mm_node;
 	int ret;

-	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
-			     node->page_shift, NV_MEM_ACCESS_RO, vma);
+	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
+			     PAGE_SHIFT, node->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
 	if (ret)
 		return ret;
@@ -990,19 +954,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     bool no_wait_reserve, bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_channel *chan = chan = drm->channel;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;

-	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+	mutex_lock(&chan->cli->mutex);

 	/* create temporary vmas for the transfer and attach them to the
 	 * old nouveau_mem node, these will get cleaned up after ttm has
 	 * destroyed the ttm_mem_reg
 	 */
-	if (dev_priv->card_type >= NV_50) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
 		struct nouveau_mem *node = old_mem->mm_node;

 		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -1014,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 			goto out;
 	}

-	ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
+	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 	if (ret == 0) {
 		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
 						    no_wait_reserve,
@@ -1022,14 +986,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	}

out:
-	mutex_unlock(&chan->mutex);
+	mutex_unlock(&chan->cli->mutex);
 	return ret;
 }

 void
 nouveau_bo_move_init(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_cli *cli = chan->cli;
+	struct nouveau_drm *drm = chan->drm;
 	static const struct {
 		const char *name;
 		int engine;
@@ -1054,19 +1019,26 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
 	int ret;

 	do {
+		struct nouveau_object *object;
 		u32 handle = (mthd->engine << 16) | mthd->oclass;
-		ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+
+		ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
+					 mthd->oclass, NULL, 0, &object);
 		if (ret == 0) {
 			ret = mthd->init(chan, handle);
-			if (ret == 0) {
-				dev_priv->ttm.move = mthd->exec;
-				name = mthd->name;
-				break;
+			if (ret) {
+				nouveau_object_del(nv_object(cli),
+						   chan->handle, handle);
+				continue;
 			}
+
+			drm->ttm.move = mthd->exec;
+			name = mthd->name;
+			break;
 		}
 	} while ((++mthd)->exec);

-	NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
 }

 static int
@@ -1151,7 +1123,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 			nouveau_vm_map(vma, new_mem->mm_node);
 		} else if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-			   nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
+			   nvbo->page_shift == vma->vm->vmm->spg_shift) {
 			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
 				nouveau_vm_map_sg_table(vma, 0, new_mem->
 						  num_pages << PAGE_SHIFT,
@@ -1168,10 +1140,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)

 static int
 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
-		   struct nouveau_tile_reg **new_tile)
+		   struct nouveau_drm_tile **new_tile)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 offset = new_mem->start << PAGE_SHIFT;
@@ -1179,7 +1151,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	if (new_mem->mem_type != TTM_PL_VRAM)
 		return 0;

-	if (dev_priv->card_type >= NV_10) {
+	if (nv_device(drm->device)->card_type >= NV_10) {
 		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
 						nvbo->tile_mode,
 						nvbo->tile_flags);
@@ -1190,11 +1162,11 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,

 static void
 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
-		      struct nouveau_tile_reg *new_tile,
-		      struct nouveau_tile_reg **old_tile)
+		      struct nouveau_drm_tile *new_tile,
+		      struct nouveau_drm_tile **old_tile)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = drm->dev;

 	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
 	*old_tile = new_tile;
@@ -1205,13 +1177,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		bool no_wait_reserve, bool no_wait_gpu,
 		struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct nouveau_tile_reg *new_tile = NULL;
+	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;

-	if (dev_priv->card_type < NV_50) {
+	if (nv_device(drm->device)->card_type < NV_50) {
 		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
 		if (ret)
 			return ret;
@@ -1226,7 +1198,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	}

 	/* CPU copy if we have no accelerated method available */
-	if (!dev_priv->ttm.move) {
+	if (!drm->ttm.move) {
 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
@@ -1246,7 +1218,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
-	if (dev_priv->card_type < NV_50) {
+	if (nv_device(drm->device)->card_type < NV_50) {
 		if (ret)
 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
 		else
@@ -1266,8 +1238,8 @@ static int
 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
-	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct drm_device *dev = drm->dev;
 	int ret;

 	mem->bus.addr = NULL;
@@ -1283,9 +1255,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		return 0;
 	case TTM_PL_TT:
#if __OS_HAS_AGP
-		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+		if (drm->agp.stat == ENABLED) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
-			mem->bus.base = dev_priv->gart_info.aper_base;
+			mem->bus.base = drm->agp.base;
 			mem->bus.is_iomem = true;
 		}
#endif
@@ -1294,10 +1266,11 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
-		if (dev_priv->card_type >= NV_50) {
+		if (nv_device(drm->device)->card_type >= NV_50) {
+			struct nouveau_bar *bar = nouveau_bar(drm->device);
 			struct nouveau_mem *node = mem->mm_node;

-			ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
+			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
 					&node->bar_vma);
 			if (ret)
 				return ret;
@@ -1314,40 +1287,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nouveau_bar *bar = nouveau_bar(drm->device);
 	struct nouveau_mem *node = mem->mm_node;

-	if (mem->mem_type != TTM_PL_VRAM)
-		return;
-
 	if (!node->bar_vma.node)
 		return;

-	nvbar_unmap(dev_priv->dev, &node->bar_vma);
+	bar->unmap(bar, &node->bar_vma);
 }

 static int
 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_device *device = nv_device(drm->device);
+	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
 	if (bo->mem.mem_type != TTM_PL_VRAM) {
-		if (dev_priv->card_type < NV_50 ||
+		if (nv_device(drm->device)->card_type < NV_50 ||
 		    !nouveau_bo_tile_layout(nvbo))
 			return 0;
 	}

 	/* make sure bo is in mappable vram */
-	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+	if (bo->mem.start + bo->mem.num_pages < mappable)
 		return 0;

 	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+	nvbo->placement.lpfn = mappable;
 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, true, false);
 }
@@ -1356,7 +1329,7 @@ static int
 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
-	struct drm_nouveau_private *dev_priv;
+	struct nouveau_drm *drm;
 	struct drm_device *dev;
 	unsigned i;
 	int r;
@@ -1373,11 +1346,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 	}

-	dev_priv = nouveau_bdev(ttm->bdev);
-	dev = dev_priv->dev;
+	drm = nouveau_bdev(ttm->bdev);
+	dev = drm->dev;

#if __OS_HAS_AGP
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+	if (drm->agp.stat == ENABLED) {
 		return ttm_agp_tt_populate(ttm);
 	}
#endif
@@ -1414,7 +1387,7 @@ static void
 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
-	struct drm_nouveau_private *dev_priv;
+	struct nouveau_drm *drm;
 	struct drm_device *dev;
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1422,11 +1395,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;

-	dev_priv = nouveau_bdev(ttm->bdev);
-	dev = dev_priv->dev;
+	drm = nouveau_bdev(ttm->bdev);
+	dev = drm->dev;

#if __OS_HAS_AGP
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+	if (drm->agp.stat == ENABLED) {
 		ttm_agp_tt_unpopulate(ttm);
 		return;
 	}
--
cgit v1.2.3-70-g09d2
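
The recurring pattern in this diff is mechanical: code that used to reach
into the monolithic drm_nouveau_private ("dev_priv->...") or call nvfb_*()/
nvbar_*() compat wrappers now asks the per-device nouveau_drm handle for the
engine or subdev object it needs (nouveau_fb(drm->device),
nouveau_bar(drm->device), nouveau_engine(...)) and invokes that object's own
state and function pointers (pfb->tile.prog(), bar->umap()). The stand-alone
C sketch below illustrates only the shape of that lookup; the struct layouts
and the nouveau_fb_get() helper are simplified stand-ins invented for this
example, not the real nouveau headers.

/*
 * Minimal sketch of the accessor pattern applied throughout this patch.
 * All types here are simplified stand-ins (assumptions), not the actual
 * driver structures.
 */
#include <stdio.h>

struct nouveau_fb {                     /* stand-in for the PFB subdev */
	int regions;                    /* number of tile regions */
};

struct nouveau_device {                 /* stand-in for the core device */
	int card_type;
	struct nouveau_fb fb;
};

struct nouveau_drm {                    /* stand-in per-drm_device state */
	struct nouveau_device *device;
};

/* hypothetical helper mirroring the nouveau_fb(drm->device) lookup */
static struct nouveau_fb *
nouveau_fb_get(struct nouveau_device *device)
{
	return &device->fb;
}

int main(void)
{
	struct nouveau_device dev = { .card_type = 0x50,
				      .fb = { .regions = 15 } };
	struct nouveau_drm drm = { .device = &dev };

	/* old style (removed by the patch): dev_priv->tile fields and
	 * nvfb_*() wrappers; new style: fetch the subdev object first,
	 * then use its own state. */
	struct nouveau_fb *pfb = nouveau_fb_get(drm.device);
	printf("card_type=0x%x, tile regions=%d\n",
	       dev.card_type, pfb->regions);
	return 0;
}

The payoff, visible all over nouveau_bo.c above, is that the TTM glue no
longer needs chipset-specific knowledge of tiling or BAR mapping; it simply
calls through whatever object the lookup returns.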