author     Dave Airlie <airlied@redhat.com>   2010-04-20 14:15:09 +1000
committer  Dave Airlie <airlied@redhat.com>   2010-04-20 14:15:09 +1000
commit     7547a917fa5f3b2406f52c7dcf7ec9ad3c8532eb (patch)
tree       59b0d0e9b6c251c4df5799b93395454592004d57  /drivers/gpu/drm/nouveau/nouveau_bo.c
parent     a8089e849a32c5b6bfd6c88dbd09c0ea4a779b71 (diff)
parent     6b8b1786a8c29ce6e32298b93ac8d4a18a2b11c4 (diff)
Merge branch 'drm-ttm-unmappable' into drm-core-next
* drm-ttm-unmappable:
drm/radeon/kms: enable use of unmappable VRAM V2
drm/ttm: remove io_ field from TTM V6
drm/vmwgfx: add support for new TTM fault callback V5
drm/nouveau/kms: add support for new TTM fault callback V5
drm/radeon/kms: add support for new fault callback V7
drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
drm/ttm: split no_wait argument in 2 GPU or reserve wait
Conflicts:
drivers/gpu/drm/nouveau/nouveau_bo.c
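
The "split no_wait argument in 2" entry above drives most of the churn in the hunks below: TTM entry points that previously took a single no_wait flag now take no_wait_reserve and no_wait_gpu, so callers can independently decline to block on buffer reservation and on GPU idle. A minimal sketch of the driver-side impact follows; the helper name example_pin_to_vram() is hypothetical and for illustration only, mirroring the nouveau_bo_pin() call site changed in the diff and assuming the headers that nouveau_bo.c already includes.

    /*
     * Hypothetical helper, for illustration only -- not part of the patch.
     * After this series, ttm_bo_validate() takes two trailing flags instead
     * of one; per the other call sites in the diff, they are no_wait_reserve
     * followed by no_wait_gpu.
     */
    static int example_pin_to_vram(struct nouveau_bo *nvbo)
    {
            struct ttm_buffer_object *bo = &nvbo->bo;
            int ret;

            /* Request VRAM placement, as nouveau_bo_pin() does for memtype. */
            nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);

            /* interruptible = false, no_wait_reserve = false, no_wait_gpu = false */
            ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
            return ret;
    }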
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  114
1 file changed, 79 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d1762984..fb164efada3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
     nouveau_bo_placement_set(nvbo, memtype, 0);
 
-    ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+    ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
     if (ret == 0) {
         switch (bo->mem.mem_type) {
         case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
     nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-    ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+    ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
     if (ret == 0) {
         switch (bo->mem.mem_type) {
         case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
         break;
     case TTM_PL_VRAM:
         man->flags = TTM_MEMTYPE_FLAG_FIXED |
-                     TTM_MEMTYPE_FLAG_MAPPABLE |
-                     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+                     TTM_MEMTYPE_FLAG_MAPPABLE;
         man->available_caching = TTM_PL_FLAG_UNCACHED |
                                  TTM_PL_FLAG_WC;
         man->default_caching = TTM_PL_FLAG_WC;
-
-        man->io_addr = NULL;
-        man->io_offset = drm_get_resource_start(dev, 1);
-        man->io_size = drm_get_resource_len(dev, 1);
-        if (man->io_size > dev_priv->vram_size)
-            man->io_size = dev_priv->vram_size;
-
         man->gpu_offset = dev_priv->vm_vram_base;
         break;
     case TTM_PL_TT:
         switch (dev_priv->gart_info.type) {
         case NOUVEAU_GART_AGP:
-            man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-                         TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+            man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
             man->available_caching = TTM_PL_FLAG_UNCACHED;
             man->default_caching = TTM_PL_FLAG_UNCACHED;
             break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                  dev_priv->gart_info.type);
             return -EINVAL;
         }
-
-        man->io_offset = dev_priv->gart_info.aper_base;
-        man->io_size = dev_priv->gart_info.aper_size;
-        man->io_addr = NULL;
         man->gpu_offset = dev_priv->vm_gart_base;
         break;
     default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-                              struct nouveau_bo *nvbo, bool evict, bool no_wait,
+                              struct nouveau_bo *nvbo, bool evict,
+                              bool no_wait_reserve, bool no_wait_gpu,
                               struct ttm_mem_reg *new_mem)
 {
     struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
         return ret;
 
     ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-                                    evict, no_wait, new_mem);
+                                    evict, no_wait_reserve, no_wait_gpu, new_mem);
     if (nvbo->channel && nvbo->channel != chan)
         ret = nouveau_fence_wait(fence, NULL, false, false);
     nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-                     int no_wait, struct ttm_mem_reg *new_mem)
+                     bool no_wait_reserve, bool no_wait_gpu,
+                     struct ttm_mem_reg *new_mem)
 {
     struct nouveau_bo *nvbo = nouveau_bo(bo);
     struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
         dst_offset += (PAGE_SIZE * line_count);
     }
 
-    return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+    return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-                      bool no_wait, struct ttm_mem_reg *new_mem)
+                      bool no_wait_reserve, bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
 {
     u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
     struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
     tmp_mem = *new_mem;
     tmp_mem.mm_node = NULL;
 
-    ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+    ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
     if (ret)
         return ret;
 
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
     if (ret)
         goto out;
 
-    ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+    ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
     if (ret)
         goto out;
 
-    ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+    ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
     if (tmp_mem.mm_node) {
         spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-                      bool no_wait, struct ttm_mem_reg *new_mem)
+                      bool no_wait_reserve, bool no_wait_gpu,
+                      struct ttm_mem_reg *new_mem)
 {
     u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
     struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
     tmp_mem = *new_mem;
     tmp_mem.mm_node = NULL;
 
-    ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+    ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
     if (ret)
         return ret;
 
-    ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+    ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
     if (ret)
         goto out;
 
-    ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+    ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
     if (ret)
         goto out;
 
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-                bool no_wait, struct ttm_mem_reg *new_mem)
+                bool no_wait_reserve, bool no_wait_gpu,
+                struct ttm_mem_reg *new_mem)
 {
     struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
     struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
     /* Software copy if the card isn't up and running yet. */
     if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
         !dev_priv->channel) {
-        ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
         goto out;
     }
 
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
     /* Hardware assisted copy. */
     if (new_mem->mem_type == TTM_PL_SYSTEM)
-        ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+        ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
     else if (old_mem->mem_type == TTM_PL_SYSTEM)
-        ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+        ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
     else
-        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
     if (!ret)
         goto out;
 
     /* Fallback to software copy. */
-    ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+    ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
     if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
     return 0;
 }
 
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+    struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+    struct drm_device *dev = dev_priv->dev;
+
+    mem->bus.addr = NULL;
+    mem->bus.offset = 0;
+    mem->bus.size = mem->num_pages << PAGE_SHIFT;
+    mem->bus.base = 0;
+    mem->bus.is_iomem = false;
+    if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+        return -EINVAL;
+    switch (mem->mem_type) {
+    case TTM_PL_SYSTEM:
+        /* System memory */
+        return 0;
+    case TTM_PL_TT:
+#if __OS_HAS_AGP
+        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+            mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+            mem->bus.base = dev_priv->gart_info.aper_base;
+            mem->bus.is_iomem = true;
+        }
+#endif
+        break;
+    case TTM_PL_VRAM:
+        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+        mem->bus.base = drm_get_resource_start(dev, 1);
+        mem->bus.is_iomem = true;
+        break;
+    default:
+        return -EINVAL;
+    }
+    return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+    return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
     .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
     .invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
     .sync_obj_flush = nouveau_fence_flush,
     .sync_obj_unref = nouveau_fence_unref,
     .sync_obj_ref = nouveau_fence_ref,
+    .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+    .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+    .io_mem_free = &nouveau_ttm_io_mem_free,
 };
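
The second theme of the series is visible at the bottom of the diff: the static io_offset/io_size/io_addr fields per memory type are gone, and the driver instead implements the new io_mem_reserve/io_mem_free hooks (plus fault_reserve_notify) to describe a buffer's bus window on demand through mem->bus.*. The sketch below shows, in simplified form, how a consumer of these hooks might use them on a CPU fault; the helper name ttm_fault_example() and the control flow are assumptions for illustration only (the real consumer lives in the TTM core, not in this patch), while the three callbacks and the mem->bus fields are the ones added in the diff.

    /*
     * Illustrative sketch only: a simplified consumer of the new hooks.
     * The function name ttm_fault_example() and the flow are assumptions;
     * only the driver callbacks and mem->bus fields come from the diff.
     */
    static int ttm_fault_example(struct ttm_buffer_object *bo)
    {
            struct ttm_bo_device *bdev = bo->bdev;
            struct ttm_mem_reg *mem = &bo->mem;
            int ret;

            /* Give the driver a chance to move the BO somewhere CPU-visible. */
            ret = bdev->driver->fault_reserve_notify(bo);
            if (ret)
                    return ret;

            /* Ask the driver to describe the BO's bus window on demand. */
            ret = bdev->driver->io_mem_reserve(bdev, mem);
            if (ret)
                    return ret;

            if (mem->bus.is_iomem) {
                    /*
                     * mem->bus.base + mem->bus.offset is the bus address a
                     * fault handler would map into the faulting VMA, and
                     * mem->bus.size is the length of the window.
                     */
            } else {
                    /* System memory: pages are mapped directly, no aperture. */
            }

            bdev->driver->io_mem_free(bdev, mem);
            return 0;
    }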