From f72a113a71ab08c4df8a5f80ab2f8a140feb81f6 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 7 Aug 2014 09:36:00 +0200
Subject: drm/radeon: add userptr support v8
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch adds an IOCTL for turning a pointer supplied by userspace
into a buffer object.

It imposes several restrictions upon the memory being mapped:

1. It must be page aligned (both start/end addresses, i.e. ptr and size).

2. It must be normal system memory, not a pointer into another map of IO
space (e.g. it must not be a GTT mmapping of another object).

3. The BO is mapped into GTT, so the maximum amount of memory mapped at
all times is still the GTT limit.

4. The BO is only mapped readonly for now, so no write support.

5. List of backing pages is only acquired once, so they represent a
snapshot of the first use.

Exporting and sharing as well as mapping of buffer objects created by
this function is forbidden and results in an -EPERM.

v2: squash all previous changes into first public version
v3: fix tabs, map readonly, don't use MM callback any more
v4: set TTM_PAGE_FLAG_SG so that TTM never messes with the pages,
    pin/unpin pages on bind/unbind instead of populate/unpopulate
v5: rebased on 3.17-wip, IOCTL renamed to userptr, reject any unknown
    flags, better handle READONLY flag, improve permission check
v6: fix ptr cast warning, use set_page_dirty/mark_page_accessed on unpin
v7: add warning about its availability in the API definition
v8: drop access_ok check, fix VM mapping bits

Signed-off-by: Christian König
Reviewed-by: Alex Deucher (v4)
Reviewed-by: Jérôme Glisse (v4)
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/radeon/radeon_ttm.c | 145 ++++++++++++++++++++++++++++++++++++
 1 file changed, 145 insertions(+)

diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 72afe82a95c..b20933fa35c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -39,6 +39,8 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/swiotlb.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include "radeon_reg.h"
 #include "radeon.h"
@@ -515,8 +517,92 @@ struct radeon_ttm_tt {
 	struct ttm_dma_tt		ttm;
 	struct radeon_device		*rdev;
 	u64				offset;
+
+	uint64_t			userptr;
+	struct mm_struct		*usermm;
+	uint32_t			userflags;
 };
 
+/* prepare the sg table with the user pages */
+static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	unsigned pinned = 0, nents;
+	int r;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (current->mm != gtt->usermm)
+		return -EPERM;
+
+	do {
+		unsigned num_pages = ttm->num_pages - pinned;
+		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
+		struct page **pages = ttm->pages + pinned;
+
+		r = get_user_pages(current, current->mm, userptr, num_pages,
+				   write, 0, pages, NULL);
+		if (r < 0)
+			goto release_pages;
+
+		pinned += r;
+
+	} while (pinned < ttm->num_pages);
+
+	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
+				      ttm->num_pages << PAGE_SHIFT,
+				      GFP_KERNEL);
+	if (r)
+		goto release_sg;
+
+	r = -ENOMEM;
+	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+	if (nents != ttm->sg->nents)
+		goto release_sg;
+
+	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
+					 gtt->ttm.dma_address, ttm->num_pages);
+
+	return 0;
+
+release_sg:
+	kfree(ttm->sg);
+
+release_pages:
+	release_pages(ttm->pages, pinned, 0);
+	return r;
+}
+
+static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+	struct scatterlist *sg;
+	int i;
+
+	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+	enum dma_data_direction direction = write ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	/* free the sg table and pages again */
+	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
+
+	for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
+		struct page *page = sg_page(sg);
+
+		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+	}
+
+	sg_free_table(ttm->sg);
+}
+
 static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 				   struct ttm_mem_reg *bo_mem)
 {
@@ -525,6 +611,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
 		RADEON_GART_PAGE_WRITE;
 	int r;
 
+	if (gtt->userptr) {
+		radeon_ttm_tt_pin_userptr(ttm);
+		flags &= ~RADEON_GART_PAGE_WRITE;
+	}
+
 	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
 	if (!ttm->num_pages) {
 		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
@@ -547,6 +638,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
 	struct radeon_ttm_tt *gtt = (void *)ttm;
 
 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+
+	if (gtt->userptr)
+		radeon_ttm_tt_unpin_userptr(ttm);
+
 	return 0;
 }
 
@@ -603,6 +698,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
+	if (gtt->userptr) {
+		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+		if (!ttm->sg)
+			return -ENOMEM;
+
+		ttm->page_flags |= TTM_PAGE_FLAG_SG;
+		ttm->state = tt_unbound;
+		return 0;
+	}
+
 	if (slave && ttm->sg) {
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 gtt->ttm.dma_address, ttm->num_pages);
@@ -652,6 +757,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
+	if (gtt->userptr) {
+		kfree(ttm->sg);
+		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+		return;
+	}
+
 	if (slave)
 		return;
 
@@ -680,6 +791,40 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+			      uint32_t flags)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return -EINVAL;
+
+	gtt->userptr = addr;
+	gtt->usermm = current->mm;
+	gtt->userflags = flags;
+	return 0;
+}
+
+bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!gtt->userptr;
+}
+
+bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
+{
+	struct radeon_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL)
+		return false;
+
+	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
+}
+
 static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
 	.ttm_tt_populate = &radeon_ttm_tt_populate,
-- 
cgit v1.2.3-70-g09d2

From ddd00e33e17a62c5f44377ab42e7562ccfae7bd1 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Thu, 7 Aug 2014 09:36:01 +0200
Subject: drm/radeon: add userptr flag to limit it to anonymous memory v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Avoid problems with writeback by limiting userptr to anonymous memory.

v2: add commit and code comments

Signed-off-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/radeon/radeon_gem.c |  3 ++-
 drivers/gpu/drm/radeon/radeon_ttm.c | 10 ++++++++++
 include/uapi/drm/radeon_drm.h       |  1 +
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 993ab223b50..032736b429b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -290,7 +290,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		return -EACCES;
 
 	/* reject unknown flag values */
-	if (args->flags & ~RADEON_GEM_USERPTR_READONLY)
+	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
+	    RADEON_GEM_USERPTR_ANONONLY))
 		return -EINVAL;
 
 	/* readonly pages not tested on older hardware */
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b20933fa35c..12e37b1ddc4 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -538,6 +538,16 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 	if (current->mm != gtt->usermm)
 		return -EPERM;
 
+	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
+		/* check that we only pin down anonymous memory
+		   to prevent problems with writeback */
+		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+		struct vm_area_struct *vma;
+		vma = find_vma(gtt->usermm, gtt->userptr);
+		if (!vma || vma->vm_file || vma->vm_end < end)
+			return -EPERM;
+	}
+
 	do {
 		unsigned num_pages = ttm->num_pages - pinned;
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 3a9f2093037..9720e1a3684 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -816,6 +816,7 @@ struct drm_radeon_gem_create {
  * perform any operation.
  */
 #define RADEON_GEM_USERPTR_READONLY	(1 << 0)
+#define RADEON_GEM_USERPTR_ANONONLY	(1 << 1)
 
 struct drm_radeon_gem_userptr {
 	uint64_t		addr;
-- 
cgit v1.2.3-70-g09d2

From f1217ed09f827e42a49ffa6a5aab672aa6f57a65 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Wed, 27 Aug 2014 13:16:04 +0200
Subject: drm/ttm: move fpfn and lpfn into each placement v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This allows us to specify where to place a buffer object in a more
fine-grained way.
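A minimal sketch of what this buys a driver, using the ttm_place and
ttm_placement definitions introduced by this patch (the values and the
sketch_* names are made up for illustration; an lpfn of 0 means no upper
bound, as handled in ttm_bo_manager.c below):

	/* Illustrative only: prefer the first 256 MiB of VRAM, fall back
	 * to anywhere in GTT.  Each entry now carries its own fpfn/lpfn
	 * range instead of a single range on the whole ttm_placement.
	 */
	static const struct ttm_place sketch_places[] = {
		{
			.fpfn = 0,
			.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT,
			.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				 TTM_PL_FLAG_VRAM,
		},
		{
			.fpfn = 0,
			.lpfn = 0,	/* 0 == no restriction */
			.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT,
		},
	};

	static const struct ttm_placement sketch_placement = {
		.num_placement = 2,
		.placement = sketch_places,
		.num_busy_placement = 2,
		.busy_placement = sketch_places,
	};

Previously such a VRAM window would have clamped every placement in the
list, because the single fpfn/lpfn pair lived in struct ttm_placement
itself.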
v2: rebased on drm-next, add bochs changes as well Signed-off-by: Christian König Reviewed-by: Alex Deucher --- drivers/gpu/drm/ast/ast_drv.h | 2 +- drivers/gpu/drm/ast/ast_ttm.c | 20 ++-- drivers/gpu/drm/bochs/bochs.h | 2 +- drivers/gpu/drm/bochs/bochs_mm.c | 20 ++-- drivers/gpu/drm/cirrus/cirrus_drv.h | 2 +- drivers/gpu/drm/cirrus/cirrus_ttm.c | 17 ++-- drivers/gpu/drm/mgag200/mgag200_drv.h | 2 +- drivers/gpu/drm/mgag200/mgag200_ttm.c | 20 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 52 +++++++--- drivers/gpu/drm/nouveau/nouveau_bo.h | 4 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 9 +- drivers/gpu/drm/qxl/qxl_drv.h | 2 +- drivers/gpu/drm/qxl/qxl_object.c | 17 ++-- drivers/gpu/drm/qxl/qxl_ttm.c | 8 +- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 71 +++++++++----- drivers/gpu/drm/radeon/radeon_ttm.c | 25 ++--- drivers/gpu/drm/radeon/radeon_uvd.c | 8 +- drivers/gpu/drm/ttm/ttm_bo.c | 93 ++++++++---------- drivers/gpu/drm/ttm/ttm_bo_manager.c | 9 +- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 136 ++++++++++++++++---------- drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22 +++-- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 10 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 3 +- include/drm/ttm/ttm_bo_api.h | 40 ++++---- include/drm/ttm/ttm_bo_driver.h | 3 +- 26 files changed, 346 insertions(+), 253 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c') diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 957d4fabf1e..cb91c2acc3c 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -316,7 +316,7 @@ struct ast_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; #define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem) diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index b8246227bab..8008ea0bc76 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -293,18 +293,22 @@ void ast_mm_fini(struct ast_private *ast) void ast_ttm_placement(struct ast_bo *bo, int domain) { u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 0; + unsigned i; + bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & TTM_PL_FLAG_SYSTEM) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!c) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; bo->placement.num_placement = c; bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } } int ast_bo_create(struct drm_device *dev, int size, int align, @@ -360,7 +364,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) ast_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -383,7 +387,7 @@ int ast_bo_unpin(struct ast_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] &= 
~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -407,7 +411,7 @@ int ast_bo_push_sysram(struct ast_bo *bo) ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) { diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 7eb52dd44b0..4f6e7b3a363 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -99,7 +99,7 @@ struct bochs_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 1728a1b0b81..2af30e7607d 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -257,20 +257,26 @@ void bochs_mm_fini(struct bochs_device *bochs) static void bochs_ttm_placement(struct bochs_bo *bo, int domain) { + unsigned i; u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 0; bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) { - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED + bo->placements[c++].flags = TTM_PL_FLAG_WC + | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; } if (domain & TTM_PL_FLAG_SYSTEM) { - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING + | TTM_PL_FLAG_SYSTEM; } if (!c) { - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING + | TTM_PL_FLAG_SYSTEM; + } + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; } bo->placement.num_placement = c; bo->placement.num_busy_placement = c; @@ -294,7 +300,7 @@ int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr) bochs_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -319,7 +325,7 @@ int bochs_bo_unpin(struct bochs_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 401c890b6c6..dd2cfc9024a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -163,7 +163,7 @@ struct cirrus_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; #define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 92e6b778609..3e7d758330a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -298,18 +298,21 @@ void cirrus_mm_fini(struct cirrus_device *cirrus) void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) { u32 c = 0; - bo->placement.fpfn = 
0; - bo->placement.lpfn = 0; + unsigned i; bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & TTM_PL_FLAG_SYSTEM) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!c) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; bo->placement.num_placement = c; bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } } int cirrus_bo_create(struct drm_device *dev, int size, int align, @@ -365,7 +368,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr) cirrus_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -392,7 +395,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo) cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) { diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 80de23d9b9c..2e2b76aa4e1 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -224,7 +224,7 @@ struct mgag200_bo { struct ttm_placement placement; struct ttm_bo_kmap_obj kmap; struct drm_gem_object gem; - u32 placements[3]; + struct ttm_place placements[3]; int pin_count; }; #define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem) diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 5a00e90696d..be883ef5a1d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -293,18 +293,22 @@ void mgag200_mm_fini(struct mga_device *mdev) void mgag200_ttm_placement(struct mgag200_bo *bo, int domain) { u32 c = 0; - bo->placement.fpfn = 0; - bo->placement.lpfn = 0; + unsigned i; + bo->placement.placement = bo->placements; bo->placement.busy_placement = bo->placements; if (domain & TTM_PL_FLAG_VRAM) - bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; if (domain & TTM_PL_FLAG_SYSTEM) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!c) - bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; bo->placement.num_placement = c; bo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } } int mgag200_bo_create(struct drm_device *dev, int size, int align, @@ -361,7 +365,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) mgag200_ttm_placement(bo, pl_flag); for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + 
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -384,7 +388,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo) return 0; for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) return ret; @@ -408,7 +412,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo) mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); for (i = 0; i < bo->placement.num_placement ; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 01da508625f..0591ca0734e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -241,16 +241,16 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, } static void -set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) +set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) { *n = 0; if (type & TTM_PL_FLAG_VRAM) - pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; + pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; if (type & TTM_PL_FLAG_TT) - pl[(*n)++] = TTM_PL_FLAG_TT | flags; + pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; if (type & TTM_PL_FLAG_SYSTEM) - pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; + pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; } static void @@ -258,6 +258,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; + unsigned i, fpfn, lpfn; if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && @@ -269,11 +270,19 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) * at the same time. 
*/ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { - nvbo->placement.fpfn = vram_pages / 2; - nvbo->placement.lpfn = ~0; + fpfn = vram_pages / 2; + lpfn = ~0; } else { - nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = vram_pages / 2; + fpfn = 0; + lpfn = vram_pages / 2; + } + for (i = 0; i < nvbo->placement.num_placement; ++i) { + nvbo->placements[i].fpfn = fpfn; + nvbo->placements[i].lpfn = lpfn; + } + for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { + nvbo->busy_placements[i].fpfn = fpfn; + nvbo->busy_placements[i].lpfn = lpfn; } } } @@ -1041,12 +1050,15 @@ static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { - u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_place placement_memtype = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + }; struct ttm_placement placement; struct ttm_mem_reg tmp_mem; int ret; - placement.fpfn = placement.lpfn = 0; placement.num_placement = placement.num_busy_placement = 1; placement.placement = placement.busy_placement = &placement_memtype; @@ -1074,12 +1086,15 @@ static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { - u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; + struct ttm_place placement_memtype = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING + }; struct ttm_placement placement; struct ttm_mem_reg tmp_mem; int ret; - placement.fpfn = placement.lpfn = 0; placement.num_placement = placement.num_busy_placement = 1; placement.placement = placement.busy_placement = &placement_memtype; @@ -1294,7 +1309,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) struct nouveau_bo *nvbo = nouveau_bo(bo); struct nvif_device *device = &drm->device; u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT; - int ret; + int i, ret; /* as long as the bo isn't in vram, and isn't tiled, we've got * nothing to do here. 
@@ -1319,9 +1334,16 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) bo->mem.start + bo->mem.num_pages < mappable) return 0; + for (i = 0; i < nvbo->placement.num_placement; ++i) { + nvbo->placements[i].fpfn = 0; + nvbo->placements[i].lpfn = mappable; + } + + for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { + nvbo->busy_placements[i].fpfn = 0; + nvbo->busy_placements[i].lpfn = mappable; + } - nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = mappable; nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); return nouveau_bo_validate(nvbo, false, false); } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index ff17c1f432f..4ef88e84a69 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -9,8 +9,8 @@ struct nouveau_bo { struct ttm_buffer_object bo; struct ttm_placement placement; u32 valid_domains; - u32 placements[3]; - u32 busy_placements[3]; + struct ttm_place placements[3]; + struct ttm_place busy_placements[3]; struct ttm_bo_kmap_obj kmap; struct list_head head; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 53874b76b03..e81d086577c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -71,8 +71,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, static int nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(man->bdev); @@ -158,8 +157,7 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man, static int nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); @@ -239,8 +237,7 @@ nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) static int nv04_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct nouveau_mem *node; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 36ed40ba773..f6022b70364 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -106,7 +106,7 @@ struct qxl_bo { /* Protected by gem.mutex */ struct list_head list; /* Protected by tbo.reserved */ - u32 placements[3]; + struct ttm_place placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index b95f144f0b4..adad12d3037 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -55,21 +55,24 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) { u32 c = 0; u32 pflag = pinned ? 
TTM_PL_FLAG_NO_EVICT : 0; + unsigned i; - qbo->placement.fpfn = 0; - qbo->placement.lpfn = 0; qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; if (domain == QXL_GEM_DOMAIN_VRAM) - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; if (domain == QXL_GEM_DOMAIN_SURFACE) - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; + qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; if (domain == QXL_GEM_DOMAIN_CPU) - qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; + qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; if (!c) - qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; qbo->placement.num_placement = c; qbo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + qbo->placements[i].fpfn = 0; + qbo->placements[i].lpfn = 0; + } } @@ -259,7 +262,7 @@ int qxl_bo_unpin(struct qxl_bo *bo) if (bo->pin_count) return 0; for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (unlikely(r != 0)) dev_err(qdev->dev, "%p validate failed for unpin\n", bo); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 71a1baeac14..f66c59b222f 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -188,11 +188,13 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { struct qxl_bo *qbo; - static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + static struct ttm_place placements = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + }; if (!qxl_ttm_bo_is_qxl_bo(bo)) { - placement->fpfn = 0; - placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b321ad4dcaf..bb01dab513d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -469,7 +469,7 @@ struct radeon_bo { struct list_head list; /* Protected by tbo.reserved */ u32 initial_domain; - u32 placements[3]; + struct ttm_place placements[3]; struct ttm_placement placement; struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 28752380798..0129c7efae3 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -97,40 +97,56 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) { u32 c = 0, i; - rbo->placement.fpfn = 0; - rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_VRAM; + if (domain & RADEON_GEM_DOMAIN_GTT) { if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_TT; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || 
(rbo->rdev->flags & RADEON_IS_AGP)) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT; } else { - rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_TT; } } + if (domain & RADEON_GEM_DOMAIN_CPU) { if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_SYSTEM; + } else if ((rbo->flags & RADEON_GEM_GTT_WC) || rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | + rbo->placements[c++].flags = TTM_PL_FLAG_WC | + TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM; } else { - rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_SYSTEM; } } if (!c) - rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + rbo->placements[c++].flags = TTM_PL_MASK_CACHING | + TTM_PL_FLAG_SYSTEM; + rbo->placement.num_placement = c; rbo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + rbo->placements[i].fpfn = 0; + rbo->placements[i].lpfn = 0; + } + /* * Use two-ended allocation depending on the buffer size to * improve fragmentation quality. @@ -138,7 +154,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) */ if (rbo->tbo.mem.size > 512 * 1024) { for (i = 0; i < c; i++) { - rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN; + rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; } } } @@ -287,21 +303,22 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, return 0; } radeon_ttm_placement_from_domain(bo, domain); - if (domain == RADEON_GEM_DOMAIN_VRAM) { + for (i = 0; i < bo->placement.num_placement; i++) { + unsigned lpfn = 0; + /* force to pin into visible video ram */ - bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; - } - if (max_offset) { - u64 lpfn = max_offset >> PAGE_SHIFT; + if (bo->placements[i].flags & TTM_PL_FLAG_VRAM) + lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; + else + lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */ - if (!bo->placement.lpfn) - bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; + if (max_offset) + lpfn = min (lpfn, (unsigned)(max_offset >> PAGE_SHIFT)); - if (lpfn < bo->placement.lpfn) - bo->placement.lpfn = lpfn; + bo->placements[i].lpfn = lpfn; + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { bo->pin_count = 1; @@ -333,8 +350,10 @@ int radeon_bo_unpin(struct radeon_bo *bo) bo->pin_count--; if (bo->pin_count) return 0; - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; + for (i = 0; i < bo->placement.num_placement; i++) { + bo->placements[i].lpfn = 0; + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + } r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { if (bo->tbo.mem.mem_type == TTM_PL_VRAM) @@ -735,7 +754,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; r = ttm_bo_validate(bo, &rbo->placement, false, false); if (unlikely(r == -ENOMEM)) { radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 12e37b1ddc4..822eb363004 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -178,12 +178,15 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, static void radeon_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { + static struct ttm_place placements = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM + }; + struct radeon_bo *rbo; - static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; if (!radeon_ttm_bo_is_radeon_bo(bo)) { - placement->fpfn = 0; - placement->lpfn = 0; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; @@ -286,20 +289,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; - u32 placements; + struct ttm_place placements; struct ttm_placement placement; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.fpfn = 0; + placements.lpfn = 0; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { @@ -334,19 +337,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; - u32 placements; + struct ttm_place placements; int r; rdev = radeon_get_rdev(bo->bdev); tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 1; placement.placement = &placements; placement.num_busy_placement = 1; placement.busy_placement = &placements; - placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; + placements.fpfn = 0; + placements.lpfn = 0; + placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_gpu); if (unlikely(r)) { diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 341848a1437..25c8a1fd152 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -233,8 +233,12 @@ int radeon_uvd_resume(struct radeon_device *rdev) void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo) { - rbo->placement.fpfn = 0 >> PAGE_SHIFT; - rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + int i; + + for (i = 0; i < rbo->placement.num_placement; ++i) { + rbo->placements[i].fpfn = 0 >> PAGE_SHIFT; + rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT; + } } void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3da89d5dab6..b992ec3c318 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ 
b/drivers/gpu/drm/ttm/ttm_bo.c @@ -53,12 +53,13 @@ static struct attribute ttm_bo_count = { .mode = S_IRUGO }; -static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type) +static inline int ttm_mem_type_from_place(const struct ttm_place *place, + uint32_t *mem_type) { int i; for (i = 0; i <= TTM_PL_PRIV5; i++) - if (flags & (1 << i)) { + if (place->flags & (1 << i)) { *mem_type = i; return 0; } @@ -89,12 +90,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, bo, bo->mem.num_pages, bo->mem.size >> 10, bo->mem.size >> 20); for (i = 0; i < placement->num_placement; i++) { - ret = ttm_mem_type_from_flags(placement->placement[i], + ret = ttm_mem_type_from_place(&placement->placement[i], &mem_type); if (ret) return; pr_err(" placement[%d]=0x%08X (%d)\n", - i, placement->placement[i], mem_type); + i, placement->placement[i].flags, mem_type); ttm_mem_type_debug(bo->bdev, mem_type); } } @@ -685,8 +686,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, evict_mem.bus.io_reserved_vm = false; evict_mem.bus.io_reserved_count = 0; - placement.fpfn = 0; - placement.lpfn = 0; placement.num_placement = 0; placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); @@ -774,7 +773,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, uint32_t mem_type, - struct ttm_placement *placement, + const struct ttm_place *place, struct ttm_mem_reg *mem, bool interruptible, bool no_wait_gpu) @@ -784,7 +783,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, int ret; do { - ret = (*man->func->get_node)(man, bo, placement, 0, mem); + ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret != 0)) return ret; if (mem->mm_node) @@ -827,18 +826,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, uint32_t mem_type, - uint32_t proposed_placement, + const struct ttm_place *place, uint32_t *masked_placement) { uint32_t cur_flags = ttm_bo_type_flags(mem_type); - if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0) + if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0) return false; - if ((proposed_placement & man->available_caching) == 0) + if ((place->flags & man->available_caching) == 0) return false; - cur_flags |= (proposed_placement & man->available_caching); + cur_flags |= (place->flags & man->available_caching); *masked_placement = cur_flags; return true; @@ -869,15 +868,14 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, mem->mm_node = NULL; for (i = 0; i < placement->num_placement; ++i) { - ret = ttm_mem_type_from_flags(placement->placement[i], - &mem_type); + const struct ttm_place *place = &placement->placement[i]; + + ret = ttm_mem_type_from_place(place, &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; - type_ok = ttm_bo_mt_compatible(man, - mem_type, - placement->placement[i], + type_ok = ttm_bo_mt_compatible(man, mem_type, place, &cur_flags); if (!type_ok) @@ -889,7 +887,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ - ttm_flag_masked(&cur_flags, placement->placement[i], + ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) @@ -897,8 +895,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (man->has_type && man->use_type) { type_found = true; - ret = 
(*man->func->get_node)(man, bo, placement, - cur_flags, mem); + ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret)) return ret; } @@ -916,17 +913,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, return -EINVAL; for (i = 0; i < placement->num_busy_placement; ++i) { - ret = ttm_mem_type_from_flags(placement->busy_placement[i], - &mem_type); + const struct ttm_place *place = &placement->busy_placement[i]; + + ret = ttm_mem_type_from_place(place, &mem_type); if (ret) return ret; man = &bdev->man[mem_type]; if (!man->has_type) continue; - if (!ttm_bo_mt_compatible(man, - mem_type, - placement->busy_placement[i], - &cur_flags)) + if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags)) continue; cur_flags = ttm_bo_select_caching(man, bo->mem.placement, @@ -935,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, * Use the access and other non-mapping-related flag bits from * the memory placement flags to the current flags */ - ttm_flag_masked(&cur_flags, placement->busy_placement[i], + ttm_flag_masked(&cur_flags, place->flags, ~TTM_PL_MASK_MEMTYPE); if (mem_type == TTM_PL_SYSTEM) { @@ -945,7 +940,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, return 0; } - ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, + ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, interruptible, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; @@ -1006,20 +1001,27 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement, { int i; - if (mem->mm_node && placement->lpfn != 0 && - (mem->start < placement->fpfn || - mem->start + mem->num_pages > placement->lpfn)) - return false; - for (i = 0; i < placement->num_placement; i++) { - *new_flags = placement->placement[i]; + const struct ttm_place *heap = &placement->placement[i]; + if (mem->mm_node && heap->lpfn != 0 && + (mem->start < heap->fpfn || + mem->start + mem->num_pages > heap->lpfn)) + continue; + + *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && (*new_flags & mem->placement & TTM_PL_MASK_MEM)) return true; } for (i = 0; i < placement->num_busy_placement; i++) { - *new_flags = placement->busy_placement[i]; + const struct ttm_place *heap = &placement->busy_placement[i]; + if (mem->mm_node && heap->lpfn != 0 && + (mem->start < heap->fpfn || + mem->start + mem->num_pages > heap->lpfn)) + continue; + + *new_flags = heap->flags; if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && (*new_flags & mem->placement & TTM_PL_MASK_MEM)) return true; @@ -1037,11 +1039,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, uint32_t new_flags; lockdep_assert_held(&bo->resv->lock.base); - /* Check that range is valid */ - if (placement->lpfn || placement->fpfn) - if (placement->fpfn > placement->lpfn || - (placement->lpfn - placement->fpfn) < bo->num_pages) - return -EINVAL; /* * Check whether we need to move buffer. 
*/ @@ -1070,15 +1067,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_validate); -int ttm_bo_check_placement(struct ttm_buffer_object *bo, - struct ttm_placement *placement) -{ - BUG_ON((placement->fpfn || placement->lpfn) && - (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); - - return 0; -} - int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, @@ -1147,15 +1135,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev, atomic_inc(&bo->glob->bo_count); drm_vma_node_reset(&bo->vma_node); - ret = ttm_bo_check_placement(bo, placement); - /* * For ttm_bo_type_device buffers, allocate * address space from the device. */ - if (likely(!ret) && - (bo->type == ttm_bo_type_device || - bo->type == ttm_bo_type_sg)) + if (bo->type == ttm_bo_type_device || + bo->type == ttm_bo_type_sg) ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, bo->mem.num_pages); diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 9e103a4875c..964387fc5c8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -49,8 +49,7 @@ struct ttm_range_manager { static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; @@ -60,7 +59,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, unsigned long lpfn; int ret; - lpfn = placement->lpfn; + lpfn = place->lpfn; if (!lpfn) lpfn = man->size; @@ -68,13 +67,13 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, if (!node) return -ENOMEM; - if (flags & TTM_PL_FLAG_TOPDOWN) + if (place->flags & TTM_PL_FLAG_TOPDOWN) aflags = DRM_MM_CREATE_TOP; spin_lock(&rman->lock); ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages, mem->page_alignment, 0, - placement->fpfn, lpfn, + place->fpfn, lpfn, DRM_MM_SEARCH_BEST, aflags); spin_unlock(&rman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 6327cfc3680..37c093c0c7b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -30,66 +30,101 @@ #include #include -static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM | - TTM_PL_FLAG_CACHED; - -static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM | - TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT; +static struct ttm_place vram_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED +}; -static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | - TTM_PL_FLAG_CACHED; +static struct ttm_place vram_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; -static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | - TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT; +static struct ttm_place sys_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED +}; -static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | - TTM_PL_FLAG_CACHED; +static struct ttm_place sys_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; -static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR | - TTM_PL_FLAG_CACHED | - TTM_PL_FLAG_NO_EVICT; +static struct ttm_place gmr_placement_flags = { + 
.fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED +}; -static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | - TTM_PL_FLAG_CACHED; +static struct ttm_place gmr_ne_placement_flags = { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +}; -struct ttm_placement vmw_vram_placement = { +static struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, + .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED +}; + +struct ttm_placement vmw_vram_placement = { .num_placement = 1, .placement = &vram_placement_flags, .num_busy_placement = 1, .busy_placement = &vram_placement_flags }; -static uint32_t vram_gmr_placement_flags[] = { - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED +static struct ttm_place vram_gmr_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + } }; -static uint32_t gmr_vram_placement_flags[] = { - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED +static struct ttm_place gmr_vram_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + } }; struct ttm_placement vmw_vram_gmr_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 2, .placement = vram_gmr_placement_flags, .num_busy_placement = 1, .busy_placement = &gmr_placement_flags }; -static uint32_t vram_gmr_ne_placement_flags[] = { - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT +static struct ttm_place vram_gmr_ne_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_NO_EVICT + }, { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | + TTM_PL_FLAG_NO_EVICT + } }; struct ttm_placement vmw_vram_gmr_ne_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 2, .placement = vram_gmr_ne_placement_flags, .num_busy_placement = 1, @@ -97,8 +132,6 @@ struct ttm_placement vmw_vram_gmr_ne_placement = { }; struct ttm_placement vmw_vram_sys_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &vram_placement_flags, .num_busy_placement = 1, @@ -106,8 +139,6 @@ struct ttm_placement vmw_vram_sys_placement = { }; struct ttm_placement vmw_vram_ne_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &vram_ne_placement_flags, .num_busy_placement = 1, @@ -115,8 +146,6 @@ struct ttm_placement vmw_vram_ne_placement = { }; struct ttm_placement vmw_sys_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &sys_placement_flags, .num_busy_placement = 1, @@ -124,24 +153,33 @@ struct ttm_placement vmw_sys_placement = { }; struct ttm_placement vmw_sys_ne_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .placement = &sys_ne_placement_flags, .num_busy_placement = 1, .busy_placement = &sys_ne_placement_flags }; -static uint32_t evictable_placement_flags[] = { - TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, - TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, - VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED +static struct ttm_place evictable_placement_flags[] = { + { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED + }, { + 
.fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED + }, { + .fpfn = 0, + .lpfn = 0, + .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED + } }; struct ttm_placement vmw_evictable_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 4, .placement = evictable_placement_flags, .num_busy_placement = 1, @@ -149,8 +187,6 @@ struct ttm_placement vmw_evictable_placement = { }; struct ttm_placement vmw_srf_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .num_busy_placement = 2, .placement = &gmr_placement_flags, @@ -158,8 +194,6 @@ struct ttm_placement vmw_srf_placement = { }; struct ttm_placement vmw_mob_placement = { - .fpfn = 0, - .lpfn = 0, .num_placement = 1, .num_busy_placement = 1, .placement = &mob_placement_flags, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c index ed1d51006ab..914b375763d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c @@ -198,13 +198,19 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv, { struct ttm_buffer_object *bo = &buf->base; struct ttm_placement placement; + struct ttm_place place; int ret = 0; if (pin) - placement = vmw_vram_ne_placement; + place = vmw_vram_ne_placement.placement[0]; else - placement = vmw_vram_placement; - placement.lpfn = bo->num_pages; + place = vmw_vram_placement.placement[0]; + place.lpfn = bo->num_pages; + + placement.num_placement = 1; + placement.placement = &place; + placement.num_busy_placement = 1; + placement.busy_placement = &place; ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible); if (unlikely(ret != 0)) @@ -293,21 +299,23 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, */ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) { - uint32_t pl_flags; + struct ttm_place pl; struct ttm_placement placement; uint32_t old_mem_type = bo->mem.mem_type; int ret; lockdep_assert_held(&bo->resv->lock.base); - pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB + pl.fpfn = 0; + pl.lpfn = 0; + pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED; if (pin) - pl_flags |= TTM_PL_FLAG_NO_EVICT; + pl.flags |= TTM_PL_FLAG_NO_EVICT; memset(&placement, 0, sizeof(placement)); placement.num_placement = 1; - placement.placement = &pl_flags; + placement.placement = &pl; ret = ttm_bo_validate(bo, &placement, false, true); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b031b48dbb3..0a474f391fa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -374,10 +374,16 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv, size_t size, struct vmw_dma_buffer **out) { struct vmw_dma_buffer *vmw_bo; - struct ttm_placement ne_placement = vmw_vram_ne_placement; + struct ttm_place ne_place = vmw_vram_ne_placement.placement[0]; + struct ttm_placement ne_placement; int ret; - ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + ne_placement.num_placement = 1; + ne_placement.placement = &ne_place; + ne_placement.num_busy_placement = 1; + ne_placement.busy_placement = &ne_place; + + ne_place.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; (void) ttm_write_lock(&vmw_priv->reservation_sem, false); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 26f8bdde352..170b61be1e4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -46,8 
+46,7 @@ struct vmwgfx_gmrid_man { static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem) { struct vmwgfx_gmrid_man *gman = diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 7526c5bf561..e3d39c80a09 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -45,12 +45,24 @@ struct ttm_bo_device; struct drm_mm_node; +/** + * struct ttm_place + * + * @fpfn: first valid page frame number to put the object + * @lpfn: last valid page frame number to put the object + * @flags: memory domain and caching flags for the object + * + * Structure indicating a possible place to put an object. + */ +struct ttm_place { + unsigned fpfn; + unsigned lpfn; + uint32_t flags; +}; /** * struct ttm_placement * - * @fpfn: first valid page frame number to put the object - * @lpfn: last valid page frame number to put the object * @num_placement: number of preferred placements * @placement: preferred placements * @num_busy_placement: number of preferred placements when need to evict buffer @@ -59,12 +71,10 @@ struct drm_mm_node; * Structure indicating the placement you request for an object. */ struct ttm_placement { - unsigned fpfn; - unsigned lpfn; - unsigned num_placement; - const uint32_t *placement; - unsigned num_busy_placement; - const uint32_t *busy_placement; + unsigned num_placement; + const struct ttm_place *placement; + unsigned num_busy_placement; + const struct ttm_place *busy_placement; }; /** @@ -518,20 +528,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev, struct file *persistent_swap_storage, struct ttm_buffer_object **p_bo); -/** - * ttm_bo_check_placement - * - * @bo: the buffer object. - * @placement: placements - * - * Performs minimal validity checking on an intended change of - * placement flags. - * Returns - * -EINVAL: Intended change is invalid or not allowed. 
- */ -extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, - struct ttm_placement *placement); - /** * ttm_bo_init_mm * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 1d9f0f1ff52..5c8bb5699a6 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -208,8 +208,7 @@ struct ttm_mem_type_manager_func { */ int (*get_node)(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, - struct ttm_placement *placement, - uint32_t flags, + const struct ttm_place *place, struct ttm_mem_reg *mem); /** -- cgit v1.2.3-70-g09d2 From f2c24b83ae90292d315aa7ac029c6ce7929e01aa Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 2 Apr 2014 17:14:48 +0200 Subject: drm/ttm: flip the switch, and convert to dma_fence Signed-off-by: Maarten Lankhorst --- drivers/gpu/drm/nouveau/nouveau_bo.c | 48 ++------- drivers/gpu/drm/nouveau/nouveau_fence.c | 24 ++--- drivers/gpu/drm/nouveau/nouveau_fence.h | 2 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 16 +-- drivers/gpu/drm/qxl/qxl_debugfs.c | 6 +- drivers/gpu/drm/qxl/qxl_drv.h | 2 - drivers/gpu/drm/qxl/qxl_kms.c | 1 - drivers/gpu/drm/qxl/qxl_object.h | 4 +- drivers/gpu/drm/qxl/qxl_release.c | 3 - drivers/gpu/drm/qxl/qxl_ttm.c | 104 ------------------- drivers/gpu/drm/radeon/radeon_cs.c | 10 +- drivers/gpu/drm/radeon/radeon_display.c | 2 +- drivers/gpu/drm/radeon/radeon_mn.c | 6 +- drivers/gpu/drm/radeon/radeon_object.c | 4 +- drivers/gpu/drm/radeon/radeon_ttm.c | 34 +----- drivers/gpu/drm/radeon/radeon_uvd.c | 6 +- drivers/gpu/drm/radeon/radeon_vm.c | 15 ++- drivers/gpu/drm/ttm/ttm_bo.c | 171 +++++++++++++++++++++---------- drivers/gpu/drm/ttm/ttm_bo_util.c | 23 +---- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10 +- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 40 -------- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 14 +-- include/drm/ttm/ttm_bo_api.h | 2 - include/drm/ttm/ttm_bo_driver.h | 26 +---- include/drm/ttm/ttm_execbuf_util.h | 10 +- 25 files changed, 197 insertions(+), 386 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c') diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2d026c81ca1..6cf7db070fa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -88,13 +88,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i) static void nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, - struct nouveau_fence *fence) + struct fence *fence) { struct nouveau_drm *drm = nouveau_drm(dev); if (tile) { spin_lock(&drm->tile.lock); - tile->fence = nouveau_fence_ref(fence); + tile->fence = nouveau_fence_ref((struct nouveau_fence *)fence); tile->used = false; spin_unlock(&drm->tile.lock); } @@ -976,7 +976,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, if (ret == 0) { ret = nouveau_fence_new(chan, false, &fence); if (ret == 0) { - ret = ttm_bo_move_accel_cleanup(bo, fence, + ret = ttm_bo_move_accel_cleanup(bo, + &fence->base, evict, no_wait_gpu, new_mem); @@ -1167,8 +1168,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; + struct fence *fence = reservation_object_get_excl(bo->resv); - nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj); + nv10_bo_put_tile_region(dev, *old_tile, fence); *old_tile = new_tile; } @@ -1455,47 +1457,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } -static void -nouveau_bo_fence_unref(void 
**sync_obj) -{ - nouveau_fence_unref((struct nouveau_fence **)sync_obj); -} - void nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) { struct reservation_object *resv = nvbo->bo.resv; - nouveau_bo_fence_unref(&nvbo->bo.sync_obj); - nvbo->bo.sync_obj = nouveau_fence_ref(fence); - reservation_object_add_excl_fence(resv, &fence->base); } -static void * -nouveau_bo_fence_ref(void *sync_obj) -{ - return nouveau_fence_ref(sync_obj); -} - -static bool -nouveau_bo_fence_signalled(void *sync_obj) -{ - return nouveau_fence_done(sync_obj); -} - -static int -nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr) -{ - return nouveau_fence_wait(sync_obj, lazy, intr); -} - -static int -nouveau_bo_fence_flush(void *sync_obj) -{ - return 0; -} - struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, @@ -1506,11 +1475,6 @@ struct ttm_bo_driver nouveau_bo_driver = { .move_notify = nouveau_bo_move_ntfy, .move = nouveau_bo_move, .verify_access = nouveau_bo_verify_access, - .sync_obj_signaled = nouveau_bo_fence_signalled, - .sync_obj_wait = nouveau_bo_fence_wait, - .sync_obj_flush = nouveau_bo_fence_flush, - .sync_obj_unref = nouveau_bo_fence_unref, - .sync_obj_ref = nouveau_bo_fence_ref, .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 3beb3bf130e..5e7fa68bc43 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -185,17 +185,18 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb) } void -nouveau_fence_work(struct nouveau_fence *fence, +nouveau_fence_work(struct fence *fence, void (*func)(void *), void *data) { struct nouveau_fence_work *work; - if (fence_is_signaled(&fence->base)) + if (fence_is_signaled(fence)) goto err; work = kmalloc(sizeof(*work), GFP_KERNEL); if (!work) { - WARN_ON(nouveau_fence_wait(fence, false, false)); + WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence, + false, false)); goto err; } @@ -203,7 +204,7 @@ nouveau_fence_work(struct nouveau_fence *fence, work->func = func; work->data = data; - if (fence_add_callback(&fence->base, &work->cb, nouveau_fence_work_cb) < 0) + if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) goto err_free; return; @@ -349,14 +350,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan) struct reservation_object_list *fobj; int ret = 0, i; - fence = nvbo->bo.sync_obj; - if (fence && fence_is_signaled(fence)) { - nouveau_fence_unref((struct nouveau_fence **) - &nvbo->bo.sync_obj); - fence = NULL; - } + fence = reservation_object_get_excl(resv); - if (fence) { + if (fence && !fence_is_signaled(fence)) { struct nouveau_fence *f = from_fence(fence); struct nouveau_channel *prev = f->channel; @@ -370,12 +366,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan) if (ret) return ret; - fence = reservation_object_get_excl(resv); - if (fence && !nouveau_local_fence(fence, chan->drm)) - ret = fence_wait(fence, true); - fobj = reservation_object_get_list(resv); - if (!fobj || ret) + if (!fobj) return ret; for (i = 0; i < fobj->shared_count && !ret; ++i) { diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 44efd8c7426..0282e88274f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ 
b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -26,7 +26,7 @@ void nouveau_fence_unref(struct nouveau_fence **); int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *); bool nouveau_fence_done(struct nouveau_fence *); -void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *); +void nouveau_fence_work(struct fence *, void (*)(void *), void *); int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index d68c9656e40..a28b5102c4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -98,13 +98,12 @@ static void nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) { const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; - struct nouveau_fence *fence = NULL; + struct fence *fence = NULL; list_del(&vma->head); - if (mapped) { - fence = nouveau_fence_ref(nvbo->bo.sync_obj); - } + if (mapped) + fence = reservation_object_get_excl(nvbo->bo.resv); if (fence) { nouveau_fence_work(fence, nouveau_gem_object_delete, vma); @@ -114,7 +113,6 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma) nouveau_vm_put(vma); kfree(vma); } - nouveau_fence_unref(&fence); } void @@ -874,8 +872,12 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL); if (!ret) { ret = ttm_bo_wait(&nvbo->bo, true, true, true); - if (!no_wait && ret) - fence = nouveau_fence_ref(nvbo->bo.sync_obj); + if (!no_wait && ret) { + struct fence *excl; + + excl = reservation_object_get_excl(nvbo->bo.resv); + fence = nouveau_fence_ref((struct nouveau_fence *)excl); + } ttm_bo_unreserve(&nvbo->bo); } diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c index 0d144e0646d..a4a63fd8480 100644 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -67,9 +67,9 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data) rel = fobj ? 
fobj->shared_count : 0; rcu_read_unlock(); - seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n", - (unsigned long)bo->gem_base.size, bo->pin_count, - bo->tbo.sync_obj, rel); + seq_printf(m, "size %ld, pc %d, num releases %d\n", + (unsigned long)bo->gem_base.size, + bo->pin_count, rel); } spin_unlock(&qdev->release_lock); return 0; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 116eeae843b..a8be87632ca 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -280,9 +280,7 @@ struct qxl_device { uint8_t slot_gen_bits; uint64_t va_slot_mask; - /* XXX: when rcu becomes available, release_lock can be killed */ spinlock_t release_lock; - spinlock_t fence_lock; struct idr release_idr; uint32_t release_seqno; spinlock_t release_idr_lock; diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index a9e7c30e92c..7234561e09d 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -224,7 +224,6 @@ static int qxl_device_init(struct qxl_device *qdev, idr_init(&qdev->release_idr); spin_lock_init(&qdev->release_idr_lock); spin_lock_init(&qdev->release_lock); - spin_lock_init(&qdev->fence_lock); idr_init(&qdev->surf_id_idr); spin_lock_init(&qdev->surf_id_idr_lock); diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index 1edaf576808..37af1bc0dd0 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -78,8 +78,8 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, } if (mem_type) *mem_type = bo->tbo.mem.mem_type; - if (bo->tbo.sync_obj) - r = ttm_bo_wait(&bo->tbo, true, true, no_wait); + + r = ttm_bo_wait(&bo->tbo, true, true, no_wait); ttm_bo_unreserve(&bo->tbo); return r; } diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 9731d2540a4..15158c5a5b3 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -464,9 +464,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) bo = entry->bo; qbo = to_qxl_bo(bo); - if (!entry->bo->sync_obj) - entry->bo->sync_obj = qbo; - reservation_object_add_shared_fence(bo->resv, &release->base); ttm_bo_add_to_lru(bo); __ttm_bo_unreserve(bo); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 29e0a758ee6..abe945a04fd 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -357,105 +357,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); } -static bool qxl_sync_obj_signaled(void *sync_obj); - -static int qxl_sync_obj_wait(void *sync_obj, - bool lazy, bool interruptible) -{ - struct qxl_bo *bo = (struct qxl_bo *)sync_obj; - struct qxl_device *qdev = bo->gem_base.dev->dev_private; - struct reservation_object_list *fobj; - int count = 0, sc = 0, num_release = 0; - bool have_drawable_releases; - -retry: - if (sc == 0) { - if (bo->type == QXL_GEM_DOMAIN_SURFACE) - qxl_update_surface(qdev, bo); - } else if (sc >= 1) { - qxl_io_notify_oom(qdev); - } - - sc++; - - for (count = 0; count < 10; count++) { - if (qxl_sync_obj_signaled(sync_obj)) - return 0; - - if (!qxl_queue_garbage_collect(qdev, true)) - break; - } - - have_drawable_releases = false; - num_release = 0; - - spin_lock(&qdev->release_lock); - fobj = bo->tbo.resv->fence; - for (count = 0; fobj && count < fobj->shared_count; count++) { - struct qxl_release *release; - - release = container_of(fobj->shared[count], - struct 
qxl_release, base); - - if (fence_is_signaled(&release->base)) - continue; - - num_release++; - - if (release->type == QXL_RELEASE_DRAWABLE) - have_drawable_releases = true; - } - spin_unlock(&qdev->release_lock); - - qxl_queue_garbage_collect(qdev, true); - - if (have_drawable_releases || sc < 4) { - if (sc > 2) - /* back off */ - usleep_range(500, 1000); - if (have_drawable_releases && sc > 300) { - WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, num_release); - return -EBUSY; - } - goto retry; - } - return 0; -} - -static int qxl_sync_obj_flush(void *sync_obj) -{ - return 0; -} - -static void qxl_sync_obj_unref(void **sync_obj) -{ - *sync_obj = NULL; -} - -static void *qxl_sync_obj_ref(void *sync_obj) -{ - return sync_obj; -} - -static bool qxl_sync_obj_signaled(void *sync_obj) -{ - struct qxl_bo *qbo = (struct qxl_bo *)sync_obj; - struct qxl_device *qdev = qbo->gem_base.dev->dev_private; - struct reservation_object_list *fobj; - bool ret = true; - unsigned i; - - spin_lock(&qdev->release_lock); - fobj = qbo->tbo.resv->fence; - for (i = 0; fobj && i < fobj->shared_count; ++i) { - ret = fence_is_signaled(fobj->shared[i]); - if (!ret) - break; - } - spin_unlock(&qdev->release_lock); - return ret; -} - static void qxl_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { @@ -482,11 +383,6 @@ static struct ttm_bo_driver qxl_bo_driver = { .verify_access = &qxl_verify_access, .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, - .sync_obj_signaled = &qxl_sync_obj_signaled, - .sync_obj_wait = &qxl_sync_obj_wait, - .sync_obj_flush = &qxl_sync_obj_flush, - .sync_obj_unref = &qxl_sync_obj_unref, - .sync_obj_ref = &qxl_sync_obj_ref, .move_notify = &qxl_bo_move_notify, }; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index bd328cb6fa6..6e3d1c8f348 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -253,11 +253,17 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p) int i; for (i = 0; i < p->nrelocs; i++) { + struct reservation_object *resv; + struct fence *fence; + if (!p->relocs[i].robj) continue; + resv = p->relocs[i].robj->tbo.resv; + fence = reservation_object_get_excl(resv); + radeon_semaphore_sync_to(p->ib.semaphore, - p->relocs[i].robj->tbo.sync_obj); + (struct radeon_fence *)fence); } } @@ -427,7 +433,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo ttm_eu_fence_buffer_objects(&parser->ticket, &parser->validated, - parser->ib.fence); + &parser->ib.fence->base); } else if (backoff) { ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7d0a7abdab2..bc894c17b2f 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -494,7 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, DRM_ERROR("failed to pin new rbo buffer before flip\n"); goto cleanup; } - work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj); + work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL); radeon_bo_unreserve(new_rbo); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 0157bc2f11f..a69bd441dd2 100644 --- 
a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -122,6 +122,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, it = interval_tree_iter_first(&rmn->objects, start, end); while (it) { struct radeon_bo *bo; + struct fence *fence; int r; bo = container_of(it, struct radeon_bo, mn_it); @@ -133,8 +134,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, continue; } - if (bo->tbo.sync_obj) { - r = radeon_fence_wait(bo->tbo.sync_obj, false); + fence = reservation_object_get_excl(bo->tbo.resv); + if (fence) { + r = radeon_fence_wait((struct radeon_fence *)fence, false); if (r) DRM_ERROR("(%d) failed to wait for user bo\n", r); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 378fe9ea4d4..aadbd36e64b 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -781,8 +781,8 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) return r; if (mem_type) *mem_type = bo->tbo.mem.mem_type; - if (bo->tbo.sync_obj) - r = ttm_bo_wait(&bo->tbo, true, true, no_wait); + + r = ttm_bo_wait(&bo->tbo, true, true, no_wait); ttm_bo_unreserve(&bo->tbo); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 822eb363004..62d1f4d730a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -270,12 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); /* sync other rings */ - fence = bo->sync_obj; + fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv); r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ &fence); /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, (void *)fence, + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, no_wait_gpu, new_mem); radeon_fence_unref(&fence); return r; @@ -488,31 +488,6 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } -static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) -{ - return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); -} - -static int radeon_sync_obj_flush(void *sync_obj) -{ - return 0; -} - -static void radeon_sync_obj_unref(void **sync_obj) -{ - radeon_fence_unref((struct radeon_fence **)sync_obj); -} - -static void *radeon_sync_obj_ref(void *sync_obj) -{ - return radeon_fence_ref((struct radeon_fence *)sync_obj); -} - -static bool radeon_sync_obj_signaled(void *sync_obj) -{ - return radeon_fence_signaled((struct radeon_fence *)sync_obj); -} - /* * TTM backend functions. 
*/ @@ -847,11 +822,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &radeon_verify_access, - .sync_obj_signaled = &radeon_sync_obj_signaled, - .sync_obj_wait = &radeon_sync_obj_wait, - .sync_obj_flush = &radeon_sync_obj_flush, - .sync_obj_unref = &radeon_sync_obj_unref, - .sync_obj_ref = &radeon_sync_obj_ref, .move_notify = &radeon_bo_move_notify, .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 5729e9bebd9..ba4f3891602 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -400,6 +400,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, { int32_t *msg, msg_type, handle; unsigned img_size = 0; + struct fence *f; void *ptr; int i, r; @@ -409,8 +410,9 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, return -EINVAL; } - if (bo->tbo.sync_obj) { - r = radeon_fence_wait(bo->tbo.sync_obj, false); + f = reservation_object_get_excl(bo->tbo.resv); + if (f) { + r = radeon_fence_wait((struct radeon_fence *)f, false); if (r) { DRM_ERROR("Failed waiting for UVD message (%d)!\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 3d9a6a036f8..671ee566aa5 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -424,7 +424,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev, if (r) goto error; - ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); + ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); radeon_ib_free(rdev, &ib); return 0; @@ -693,8 +693,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, incr, R600_PTE_VALID); if (ib.length_dw != 0) { + struct fence *fence; + radeon_asic_vm_pad_ib(rdev, &ib); - radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); + + fence = reservation_object_get_excl(pd->tbo.resv); + radeon_semaphore_sync_to(ib.semaphore, + (struct radeon_fence *)fence); + radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); WARN_ON(ib.length_dw > ndw); r = radeon_ib_schedule(rdev, &ib, NULL, false); @@ -820,8 +826,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, struct radeon_bo *pt = vm->page_tables[pt_idx].bo; unsigned nptes; uint64_t pte; + struct fence *fence; - radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj); + fence = reservation_object_get_excl(pt->tbo.resv); + radeon_semaphore_sync_to(ib->semaphore, + (struct radeon_fence *)fence); if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 195386f16ca..66707be386f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -40,6 +40,7 @@ #include #include #include +#include #define TTM_ASSERT_LOCKED(param) #define TTM_DEBUG(fmt, arg...) 
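The shape of the conversion, as a rough sketch rather than code from this patch (my_fence stands for any driver fence embedding a struct fence as ->base, like the nouveau and radeon fences above): the writer is published on the reservation object instead of bo->sync_obj, and waiters query the same object:

	struct fence *excl;
	int ret = 0;

	/* writer side, after emitting the GPU operation */
	reservation_object_add_excl_fence(bo->resv, &my_fence->base);

	/* reader side, with the reservation held */
	excl = reservation_object_get_excl(bo->resv);
	if (excl && !fence_is_signaled(excl))
		ret = fence_wait(excl, true);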
@@ -142,7 +143,6 @@ static void ttm_bo_release_list(struct kref *list_kref) BUG_ON(atomic_read(&bo->list_kref.refcount)); BUG_ON(atomic_read(&bo->kref.refcount)); BUG_ON(atomic_read(&bo->cpu_writers)); - BUG_ON(bo->sync_obj != NULL); BUG_ON(bo->mem.mm_node != NULL); BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->ddestroy)); @@ -403,12 +403,30 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ww_mutex_unlock (&bo->resv->lock); } +static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) +{ + struct reservation_object_list *fobj; + struct fence *fence; + int i; + + fobj = reservation_object_get_list(bo->resv); + fence = reservation_object_get_excl(bo->resv); + if (fence && !fence->ops->signaled) + fence_enable_sw_signaling(fence); + + for (i = 0; fobj && i < fobj->shared_count; ++i) { + fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(bo->resv)); + + if (!fence->ops->signaled) + fence_enable_sw_signaling(fence); + } +} + static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; - struct ttm_bo_driver *driver = bdev->driver; - void *sync_obj = NULL; int put_count; int ret; @@ -416,9 +434,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ret = __ttm_bo_reserve(bo, false, true, false, NULL); if (!ret) { - (void) ttm_bo_wait(bo, false, false, true); - - if (!bo->sync_obj) { + if (!ttm_bo_wait(bo, false, false, true)) { put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); @@ -427,8 +443,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_list_ref_sub(bo, put_count, true); return; - } - sync_obj = driver->sync_obj_ref(bo->sync_obj); + } else + ttm_bo_flush_all_fences(bo); /* * Make NO_EVICT bos immediately available to @@ -447,14 +463,70 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); - if (sync_obj) { - driver->sync_obj_flush(sync_obj); - driver->sync_obj_unref(&sync_obj); - } schedule_delayed_work(&bdev->wq, ((HZ / 100) < 1) ? 
1 : HZ / 100); } +static int ttm_bo_unreserve_and_wait(struct ttm_buffer_object *bo, + bool interruptible) +{ + struct ttm_bo_global *glob = bo->glob; + struct reservation_object_list *fobj; + struct fence *excl = NULL; + struct fence **shared = NULL; + u32 shared_count = 0, i; + int ret = 0; + + fobj = reservation_object_get_list(bo->resv); + if (fobj && fobj->shared_count) { + shared = kmalloc(sizeof(*shared) * fobj->shared_count, + GFP_KERNEL); + + if (!shared) { + ret = -ENOMEM; + __ttm_bo_unreserve(bo); + spin_unlock(&glob->lru_lock); + return ret; + } + + for (i = 0; i < fobj->shared_count; ++i) { + if (!fence_is_signaled(fobj->shared[i])) { + fence_get(fobj->shared[i]); + shared[shared_count++] = fobj->shared[i]; + } + } + if (!shared_count) { + kfree(shared); + shared = NULL; + } + } + + excl = reservation_object_get_excl(bo->resv); + if (excl && !fence_is_signaled(excl)) + fence_get(excl); + else + excl = NULL; + + __ttm_bo_unreserve(bo); + spin_unlock(&glob->lru_lock); + + if (excl) { + ret = fence_wait(excl, interruptible); + fence_put(excl); + } + + if (shared_count > 0) { + for (i = 0; i < shared_count; ++i) { + if (!ret) + ret = fence_wait(shared[i], interruptible); + fence_put(shared[i]); + } + kfree(shared); + } + + return ret; +} + /** * function ttm_bo_cleanup_refs_and_unlock * If bo idle, remove from delayed- and lru lists, and unref. @@ -471,8 +543,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; struct ttm_bo_global *glob = bo->glob; int put_count; int ret; @@ -480,20 +550,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, ret = ttm_bo_wait(bo, false, false, true); if (ret && !no_wait_gpu) { - void *sync_obj; - - /* - * Take a reference to the fence and unreserve, - * at this point the buffer should be dead, so - * no new sync objects can be attached. 
- */ - sync_obj = driver->sync_obj_ref(bo->sync_obj); - - __ttm_bo_unreserve(bo); - spin_unlock(&glob->lru_lock); - - ret = driver->sync_obj_wait(sync_obj, false, interruptible); - driver->sync_obj_unref(&sync_obj); + ret = ttm_bo_unreserve_and_wait(bo, interruptible); if (ret) return ret; @@ -1498,41 +1555,51 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) EXPORT_SYMBOL(ttm_bo_unmap_virtual); - int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { - struct ttm_bo_driver *driver = bo->bdev->driver; - void *sync_obj; - int ret = 0; - - lockdep_assert_held(&bo->resv->lock.base); + struct reservation_object_list *fobj; + struct reservation_object *resv; + struct fence *excl; + long timeout = 15 * HZ; + int i; - if (likely(bo->sync_obj == NULL)) - return 0; + resv = bo->resv; + fobj = reservation_object_get_list(resv); + excl = reservation_object_get_excl(resv); + if (excl) { + if (!fence_is_signaled(excl)) { + if (no_wait) + return -EBUSY; - if (bo->sync_obj) { - if (driver->sync_obj_signaled(bo->sync_obj)) { - driver->sync_obj_unref(&bo->sync_obj); - clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - return 0; + timeout = fence_wait_timeout(excl, + interruptible, timeout); } + } - if (no_wait) - return -EBUSY; + for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) { + struct fence *fence; + fence = rcu_dereference_protected(fobj->shared[i], + reservation_object_held(resv)); - sync_obj = driver->sync_obj_ref(bo->sync_obj); - ret = driver->sync_obj_wait(sync_obj, - lazy, interruptible); + if (!fence_is_signaled(fence)) { + if (no_wait) + return -EBUSY; - if (likely(ret == 0)) { - clear_bit(TTM_BO_PRIV_FLAG_MOVING, - &bo->priv_flags); - driver->sync_obj_unref(&bo->sync_obj); + timeout = fence_wait_timeout(fence, + interruptible, timeout); } - driver->sync_obj_unref(&sync_obj); } - return ret; + + if (timeout < 0) + return timeout; + + if (timeout == 0) + return -EBUSY; + + reservation_object_add_excl_fence(resv, NULL); + clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); + return 0; } EXPORT_SYMBOL(ttm_bo_wait); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 495aebf0f9c..824af90cbe3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -37,6 +37,7 @@ #include #include #include +#include void ttm_bo_free_old_node(struct ttm_buffer_object *bo) { @@ -444,8 +445,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, struct ttm_buffer_object **new_obj) { struct ttm_buffer_object *fbo; - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; int ret; fbo = kmalloc(sizeof(*fbo), GFP_KERNEL); @@ -466,10 +465,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); - if (bo->sync_obj) - fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); - else - fbo->sync_obj = NULL; kref_init(&fbo->list_kref); kref_init(&fbo->kref); fbo->destroy = &ttm_transfered_destroy; @@ -642,28 +637,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - void *sync_obj, + struct fence *fence, bool evict, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_mem_reg *old_mem = &bo->mem; int ret; struct 
ttm_buffer_object *ghost_obj; - void *tmp_obj = NULL; - if (bo->sync_obj) { - tmp_obj = bo->sync_obj; - bo->sync_obj = NULL; - } - bo->sync_obj = driver->sync_obj_ref(sync_obj); + reservation_object_add_excl_fence(bo->resv, fence); if (evict) { ret = ttm_bo_wait(bo, false, false, false); - if (tmp_obj) - driver->sync_obj_unref(&tmp_obj); if (ret) return ret; @@ -684,13 +671,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - if (tmp_obj) - driver->sync_obj_unref(&tmp_obj); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) return ret; + reservation_object_add_excl_fence(ghost_obj->resv, fence); + /** * If we're not moving to fixed memory, the TTM object * needs to stay alive. Otherwhise hang it on the ghost diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 108730e9147..adafc0f8ec0 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -163,7 +163,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, EXPORT_SYMBOL(ttm_eu_reserve_buffers); void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, - struct list_head *list, void *sync_obj) + struct list_head *list, struct fence *fence) { struct ttm_validate_buffer *entry; struct ttm_buffer_object *bo; @@ -183,18 +183,12 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { bo = entry->bo; - entry->old_sync_obj = bo->sync_obj; - bo->sync_obj = driver->sync_obj_ref(sync_obj); + reservation_object_add_excl_fence(bo->resv, fence); ttm_bo_add_to_lru(bo); __ttm_bo_unreserve(bo); } spin_unlock(&glob->lru_lock); if (ticket) ww_acquire_fini(ticket); - - list_for_each_entry(entry, list, head) { - if (entry->old_sync_obj) - driver->sync_obj_unref(&entry->old_sync_obj); - } } EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c index 4a449b2528a..cff2bf9db9d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c @@ -801,41 +801,6 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) return 0; } -/** - * FIXME: We're using the old vmware polling method to sync. - * Do this with fences instead. 
- */ - -static void *vmw_sync_obj_ref(void *sync_obj) -{ - - return (void *) - vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj); -} - -static void vmw_sync_obj_unref(void **sync_obj) -{ - vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); -} - -static int vmw_sync_obj_flush(void *sync_obj) -{ - vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); - return 0; -} - -static bool vmw_sync_obj_signaled(void *sync_obj) -{ - return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj); -} - -static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible) -{ - return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, - lazy, interruptible, - VMW_FENCE_WAIT_TIMEOUT); -} - /** * vmw_move_notify - TTM move_notify_callback * @@ -873,11 +838,6 @@ struct ttm_bo_driver vmw_bo_driver = { .evict_flags = vmw_evict_flags, .move = NULL, .verify_access = vmw_verify_access, - .sync_obj_signaled = vmw_sync_obj_signaled, - .sync_obj_wait = vmw_sync_obj_wait, - .sync_obj_flush = vmw_sync_obj_flush, - .sync_obj_unref = vmw_sync_obj_unref, - .sync_obj_ref = vmw_sync_obj_ref, .move_notify = vmw_move_notify, .swap_notify = vmw_swap_notify, .fault_reserve_notify = &vmw_ttm_fault_reserve_notify, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 67aebdb13b8..98d5afd9a9d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1420,22 +1420,16 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence) { struct ttm_bo_device *bdev = bo->bdev; - struct vmw_fence_obj *old_fence_obj; + struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); if (fence == NULL) { vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); + reservation_object_add_excl_fence(bo->resv, &fence->base); + fence_put(&fence->base); } else - vmw_fence_obj_reference(fence); - - reservation_object_add_excl_fence(bo->resv, &fence->base); - - old_fence_obj = bo->sync_obj; - bo->sync_obj = fence; - - if (old_fence_obj) - vmw_fence_obj_unreference(&old_fence_obj); + reservation_object_add_excl_fence(bo->resv, &fence->base); } /** diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 5805f4a4947..70b44917c36 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -173,7 +173,6 @@ struct ttm_tt; * @lru: List head for the lru list. * @ddestroy: List head for the delayed destroy list. * @swap: List head for swap LRU list. - * @sync_obj: Pointer to a synchronization object. * @priv_flags: Flags describing buffer object internal state. * @vma_node: Address space manager node. * @offset: The current GPU offset, which can have different meanings @@ -240,7 +239,6 @@ struct ttm_buffer_object { * Members protected by a bo reservation. */ - void *sync_obj; unsigned long priv_flags; struct drm_vma_offset_node vma_node; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e1ee141e26c..142d752fc45 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -312,11 +312,6 @@ struct ttm_mem_type_manager { * @move: Callback for a driver to hook in accelerated functions to * move a buffer. * If set to NULL, a potentially slow memcpy() move is used. 
- * @sync_obj_signaled: See ttm_fence_api.h - * @sync_obj_wait: See ttm_fence_api.h - * @sync_obj_flush: See ttm_fence_api.h - * @sync_obj_unref: See ttm_fence_api.h - * @sync_obj_ref: See ttm_fence_api.h */ struct ttm_bo_driver { @@ -418,23 +413,6 @@ struct ttm_bo_driver { int (*verify_access) (struct ttm_buffer_object *bo, struct file *filp); - /** - * In case a driver writer dislikes the TTM fence objects, - * the driver writer can replace those with sync objects of - * his / her own. If it turns out that no driver writer is - * using these. I suggest we remove these hooks and plug in - * fences directly. The bo driver needs the following functionality: - * See the corresponding functions in the fence object API - * documentation. - */ - - bool (*sync_obj_signaled) (void *sync_obj); - int (*sync_obj_wait) (void *sync_obj, - bool lazy, bool interruptible); - int (*sync_obj_flush) (void *sync_obj); - void (*sync_obj_unref) (void **sync_obj); - void *(*sync_obj_ref) (void *sync_obj); - /* hook to notify driver about a driver move so it * can do tiling things */ void (*move_notify)(struct ttm_buffer_object *bo, @@ -1022,7 +1000,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); * ttm_bo_move_accel_cleanup. * * @bo: A pointer to a struct ttm_buffer_object. - * @sync_obj: A sync object that signals when moving is complete. + * @fence: A fence object that signals when moving is complete. * @evict: This is an evict move. Don't return until the buffer is idle. * @no_wait_gpu: Return immediately if the GPU is busy. * @new_mem: struct ttm_mem_reg indicating where to move. @@ -1036,7 +1014,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo); */ extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, - void *sync_obj, + struct fence *fence, bool evict, bool no_wait_gpu, struct ttm_mem_reg *new_mem); /** diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 8490cb8ee0d..ff11a424f75 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -39,16 +39,11 @@ * * @head: list head for thread-private list. * @bo: refcounted buffer object pointer. - * @reserved: Indicates whether @bo has been reserved for validation. - * @removed: Indicates whether @bo has been removed from lru lists. - * @put_count: Number of outstanding references on bo::list_kref. - * @old_sync_obj: Pointer to a sync object about to be unreferenced */ struct ttm_validate_buffer { struct list_head head; struct ttm_buffer_object *bo; - void *old_sync_obj; }; /** @@ -100,7 +95,7 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, * * @ticket: ww_acquire_ctx from reserve call * @list: thread private list of ttm_validate_buffer structs. - * @sync_obj: The new sync object for the buffers. + * @fence: The new exclusive fence for the buffers. * * This function should be called when command submission is complete, and * it will add a new sync object to bos pointed to by entries on @list. 
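The call-site change is mechanical; a sketch of a driver's submission tail (ticket, validated and ib mirror the radeon usage elsewhere in this patch and are otherwise illustrative):

	/* command stream emitted; fence all validated BOs with it */
	ttm_eu_fence_buffer_objects(&ticket, &validated, &ib.fence->base);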
@@ -109,6 +104,7 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, */ extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, - struct list_head *list, void *sync_obj); + struct list_head *list, + struct fence *fence); #endif -- cgit v1.2.3-70-g09d2 From 57d20a43c9b30663bdbacde8294a902edef35a84 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Sep 2014 20:01:53 +0200 Subject: drm/radeon: add the infrastructure for concurrent buffer access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows us to specify if we want to sync to the shared fences of a reservation object or not. Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 25 ++++++----- drivers/gpu/drm/radeon/cik_sdma.c | 25 ++++++----- drivers/gpu/drm/radeon/evergreen_dma.c | 24 +++++----- drivers/gpu/drm/radeon/r100.c | 21 +++++---- drivers/gpu/drm/radeon/r200.c | 21 +++++---- drivers/gpu/drm/radeon/r600.c | 23 +++++----- drivers/gpu/drm/radeon/r600_dma.c | 25 ++++++----- drivers/gpu/drm/radeon/radeon.h | 43 +++++++++--------- drivers/gpu/drm/radeon/radeon_asic.h | 74 ++++++++++++++++--------------- drivers/gpu/drm/radeon/radeon_benchmark.c | 30 ++++++------- drivers/gpu/drm/radeon/radeon_cs.c | 8 +--- drivers/gpu/drm/radeon/radeon_ib.c | 2 +- drivers/gpu/drm/radeon/radeon_semaphore.c | 38 ++++++++++++++-- drivers/gpu/drm/radeon/radeon_test.c | 24 +++++++--- drivers/gpu/drm/radeon/radeon_ttm.c | 12 ++--- drivers/gpu/drm/radeon/radeon_vm.c | 16 ++----- drivers/gpu/drm/radeon/rv770_dma.c | 25 ++++++----- drivers/gpu/drm/radeon/si_dma.c | 25 ++++++----- 18 files changed, 253 insertions(+), 208 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c') diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 1f598ab3b9a..0b5a230d8b9 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3959,18 +3959,19 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev, * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the CP DMA engine (CIK+). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
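Since the copy callbacks now return the fence (or an ERR_PTR) instead of filling a struct radeon_fence **, callers follow this rough pattern (a sketch; src, dst and npages are placeholders, and radeon_copy_dma is the asic dispatch macro updated in radeon.h below):

	struct radeon_fence *fence;

	fence = radeon_copy_dma(rdev, src, dst, npages, bo->tbo.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* the copy becomes the buffer's new exclusive fence */
	reservation_object_add_excl_fence(bo->tbo.resv, &fence->base);
	radeon_fence_unref(&fence);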
*/ -int cik_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.blit_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes, control; @@ -3980,7 +3981,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -3989,10 +3990,10 @@ int cik_copy_cpdma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -4014,17 +4015,17 @@ int cik_copy_cpdma(struct radeon_device *rdev, dst_offset += cur_size_in_bytes; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } /* diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 192278bc993..c01a6100c31 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -537,18 +537,19 @@ void cik_sdma_fini(struct radeon_device *rdev) * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (CIK). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int cik_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes; @@ -558,7 +559,7 @@ int cik_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -567,10 +568,10 @@ int cik_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -589,17 +590,17 @@ int cik_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_bytes; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } /** diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index afaba388c36..946f37d0b46 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -104,12 +104,14 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
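The boolean passed to radeon_semaphore_sync_resv() is the knob this patch adds; a condensed view of the two modes (resv being the reservation object handed to the copy function):

	/* sync to the exclusive (writer) fence only, as the copies above do */
	radeon_semaphore_sync_resv(sem, resv, false);

	/* or additionally to all shared (reader) fences */
	radeon_semaphore_sync_resv(sem, resv, true);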
*/ -int evergreen_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_dw, cur_size_in_dw; @@ -119,7 +121,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; @@ -128,10 +130,10 @@ int evergreen_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -148,17 +150,17 @@ int evergreen_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_dw * 4; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } /** diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 4c5ec44ff32..c6b486f888d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -855,13 +855,14 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev, return false; } -int r100_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_fence *fence; uint32_t cur_pages; uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; uint32_t pitch; @@ -882,7 +883,7 @@ int r100_copy_blit(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ring, ndw); if (r) { DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); - return -EINVAL; + return ERR_PTR(-EINVAL); } while (num_gpu_pages > 0) { cur_pages = num_gpu_pages; @@ -922,11 +923,13 @@ int r100_copy_blit(struct radeon_device *rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_HOST_IDLECLEAN | RADEON_WAIT_DMA_GUI_IDLE); - if (fence) { - r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); + r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - return r; + return fence; } static int r100_cp_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 67780374a65..732d4938aab 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -80,13 +80,14 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) return vtx_size; } -int r200_copy_dma(struct 
radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; + struct radeon_fence *fence; uint32_t size; uint32_t cur_size; int i, num_loops; @@ -98,7 +99,7 @@ int r200_copy_dma(struct radeon_device *rdev, r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } /* Must wait for 2D idle & clean before DMA or hangs might happen */ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); @@ -118,11 +119,13 @@ int r200_copy_dma(struct radeon_device *rdev, } radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); - if (fence) { - r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); + r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX); + if (r) { + radeon_ring_unlock_undo(rdev, ring); + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a95ced569d8..5e9146b968b 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2894,12 +2894,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. */ -int r600_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.blit_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes, tmp; @@ -2909,7 +2910,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -2918,10 +2919,10 @@ int r600_copy_cpdma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); @@ -2948,17 +2949,17 @@ int r600_copy_cpdma(struct radeon_device *rdev, radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit); - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } int r600_set_surface_reg(struct radeon_device *rdev, int reg, diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 
51fd98553ea..fc54224ce87 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -436,18 +436,19 @@ void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (r6xx). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. */ -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_dw, cur_size_in_dw; @@ -457,7 +458,7 @@ int r600_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; @@ -466,10 +467,10 @@ int r600_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -486,15 +487,15 @@ int r600_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_dw * 4; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 79c988db79a..2e41dc12cd0 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -585,8 +585,11 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, struct radeon_semaphore *semaphore); -void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, - struct radeon_fence *fence); +void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore, + struct radeon_fence *fence); +void radeon_semaphore_sync_resv(struct radeon_semaphore *semaphore, + struct reservation_object *resv, + bool shared); int radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int waiting_ring); @@ -1855,24 +1858,24 @@ struct radeon_asic { } display; /* copy functions for bo handling */ struct { - int (*blit)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); + struct radeon_fence *(*blit)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); u32 blit_ring_index; - int (*dma)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t 
dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); + struct radeon_fence *(*dma)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); u32 dma_ring_index; /* method used for bo copy */ - int (*copy)(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); + struct radeon_fence *(*copy)(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); /* ring used for bo copies */ u32 copy_ring_index; } copy; @@ -2833,9 +2836,9 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) #define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m)) #define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence)) #define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) -#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) -#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) -#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) +#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv)) +#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv)) +#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv)) #define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index #define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index #define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 987a3b713e0..ca01bb8ea21 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -81,11 +81,11 @@ bool r100_semaphore_ring_emit(struct radeon_device *rdev, int r100_cs_parse(struct radeon_cs_parser *p); void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg); -int r100_copy_blit(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *r100_copy_blit(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); int r100_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t tiling_flags, uint32_t pitch, uint32_t offset, uint32_t obj_size); @@ -153,11 +153,11 @@ void r100_ring_hdp_flush(struct radeon_device *rdev, /* * r200,rv250,rs300,rv280 */ -extern int r200_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void r200_set_safe_registers(struct radeon_device *rdev); /* @@ -341,12 +341,14 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); int r600_ring_test(struct 
radeon_device *rdev, struct radeon_ring *cp); int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); -int r600_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, struct radeon_fence **fence); -int r600_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, struct radeon_fence **fence); +struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); +struct radeon_fence *r600_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void r600_hpd_init(struct radeon_device *rdev); void r600_hpd_fini(struct radeon_device *rdev); bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); @@ -462,10 +464,10 @@ bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc); void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); void r700_cp_stop(struct radeon_device *rdev); void r700_cp_fini(struct radeon_device *rdev); -int rv770_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); u32 rv770_get_xclk(struct radeon_device *rdev); int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); int rv770_get_temp(struct radeon_device *rdev); @@ -536,10 +538,10 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); void evergreen_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); -int evergreen_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); int evergreen_get_temp(struct radeon_device *rdev); @@ -701,10 +703,10 @@ int si_vm_init(struct radeon_device *rdev); void si_vm_fini(struct radeon_device *rdev); void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); +struct radeon_fence *si_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); void si_dma_vm_copy_pages(struct radeon_device *rdev, struct radeon_ib *ib, @@ -760,14 +762,14 @@ bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_semaphore *semaphore, bool emit_wait); void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); -int cik_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence); -int cik_copy_cpdma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, 
- struct radeon_fence **fence); +struct radeon_fence *cik_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); +struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv); int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring); int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 69f5695bdab..1e8855060fc 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -45,33 +45,29 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size, for (i = 0; i < n; i++) { switch (flag) { case RADEON_BENCHMARK_COPY_DMA: - r = radeon_copy_dma(rdev, saddr, daddr, - size / RADEON_GPU_PAGE_SIZE, - &fence); + fence = radeon_copy_dma(rdev, saddr, daddr, + size / RADEON_GPU_PAGE_SIZE, + NULL); break; case RADEON_BENCHMARK_COPY_BLIT: - r = radeon_copy_blit(rdev, saddr, daddr, - size / RADEON_GPU_PAGE_SIZE, - &fence); + fence = radeon_copy_blit(rdev, saddr, daddr, + size / RADEON_GPU_PAGE_SIZE, + NULL); break; default: DRM_ERROR("Unknown copy method\n"); - r = -EINVAL; + return -EINVAL; } - if (r) - goto exit_do_move; + if (IS_ERR(fence)) + return PTR_ERR(fence); + r = radeon_fence_wait(fence, false); - if (r) - goto exit_do_move; radeon_fence_unref(&fence); + if (r) + return r; } end_jiffies = jiffies; - r = jiffies_to_msecs(end_jiffies - start_jiffies); - -exit_do_move: - if (fence) - radeon_fence_unref(&fence); - return r; + return jiffies_to_msecs(end_jiffies - start_jiffies); } diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index cd517ab9360..ec4840cb8a0 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -255,16 +255,12 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p) for (i = 0; i < p->nrelocs; i++) { struct reservation_object *resv; - struct fence *fence; if (!p->relocs[i].robj) continue; resv = p->relocs[i].robj->tbo.resv; - fence = reservation_object_get_excl(resv); - - radeon_semaphore_sync_to(p->ib.semaphore, - (struct radeon_fence *)fence); + radeon_semaphore_sync_resv(p->ib.semaphore, resv, false); } } @@ -569,7 +565,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, goto out; } radeon_cs_sync_rings(parser); - radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence); + radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence); if ((rdev->family >= CHIP_TAHITI) && (parser->chunk_const_ib_idx != -1)) { diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 6fc7461d70c..3f39fcca4d0 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -145,7 +145,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, if (ib->vm) { struct radeon_fence *vm_id_fence; vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); - radeon_semaphore_sync_to(ib->semaphore, vm_id_fence); + radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence); } /* sync with other rings */ diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 56d9fd66d8a..5bfbd8372a3 100644 --- 
a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -96,15 +96,15 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx, } /** - * radeon_semaphore_sync_to - use the semaphore to sync to a fence + * radeon_semaphore_sync_fence - use the semaphore to sync to a fence * * @semaphore: semaphore object to add fence to * @fence: fence to sync to * * Sync to the fence using this semaphore object */ -void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, - struct radeon_fence *fence) +void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore, + struct radeon_fence *fence) { struct radeon_fence *other; @@ -115,6 +115,38 @@ void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore, semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other); } +/** + * radeon_semaphore_sync_resv - use the semaphore to sync to a reservation object + * + * @sema: semaphore object to add fence from reservation object to + * @resv: reservation object with embedded fence + * @shared: true if we should only sync to the exclusive fence + * + * Sync to the fence using this semaphore object + */ +void radeon_semaphore_sync_resv(struct radeon_semaphore *sema, + struct reservation_object *resv, + bool shared) +{ + struct reservation_object_list *flist; + struct fence *f; + unsigned i; + + /* always sync to the exclusive fence */ + f = reservation_object_get_excl(resv); + radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f); + + flist = reservation_object_get_list(resv); + if (shared || !flist) + return; + + for (i = 0; i < flist->shared_count; ++i) { + f = rcu_dereference_protected(flist->shared[i], + reservation_object_held(resv)); + radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f); + } +} + /** * radeon_semaphore_sync_rings - sync ring to all registered fences * diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 17bc3dced9f..ce943e1a5e5 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -116,11 +116,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) radeon_bo_kunmap(gtt_obj[i]); if (ring == R600_RING_TYPE_DMA_INDEX) - r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); + fence = radeon_copy_dma(rdev, gtt_addr, vram_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); else - r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); - if (r) { + fence = radeon_copy_blit(rdev, gtt_addr, vram_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); + if (IS_ERR(fence)) { DRM_ERROR("Failed GTT->VRAM copy %d\n", i); + r = PTR_ERR(fence); goto out_lclean_unpin; } @@ -162,11 +167,16 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) radeon_bo_kunmap(vram_obj); if (ring == R600_RING_TYPE_DMA_INDEX) - r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); + fence = radeon_copy_dma(rdev, vram_addr, gtt_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); else - r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); - if (r) { + fence = radeon_copy_blit(rdev, vram_addr, gtt_addr, + size / RADEON_GPU_PAGE_SIZE, + NULL); + if (IS_ERR(fence)) { DRM_ERROR("Failed VRAM->GTT copy %d\n", i); + r = PTR_ERR(fence); goto out_lclean_unpin; } @@ -222,7 +232,7 @@ out_lclean: radeon_bo_unreserve(gtt_obj[i]); radeon_bo_unref(&gtt_obj[i]); } - if (fence) + if (fence && !IS_ERR(fence)) radeon_fence_unref(&fence); 
break; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 62d1f4d730a..eca2ce60d44 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -233,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, struct radeon_device *rdev; uint64_t old_start, new_start; struct radeon_fence *fence; + unsigned num_pages; int r, ridx; rdev = radeon_get_rdev(bo->bdev); @@ -269,12 +270,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); - /* sync other rings */ - fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv); - r = radeon_copy(rdev, old_start, new_start, - new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ - &fence); - /* FIXME: handle copy error */ + num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); + fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); + if (IS_ERR(fence)) + return PTR_ERR(fence); + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, no_wait_gpu, new_mem); radeon_fence_unref(&fence); diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 1cce4468cd7..ce870959dff 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -696,15 +696,10 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, incr, R600_PTE_VALID); if (ib.length_dw != 0) { - struct fence *fence; - radeon_asic_vm_pad_ib(rdev, &ib); - fence = reservation_object_get_excl(pd->tbo.resv); - radeon_semaphore_sync_to(ib.semaphore, - (struct radeon_fence *)fence); - - radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); + radeon_semaphore_sync_resv(ib.semaphore, pd->tbo.resv, false); + radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use); WARN_ON(ib.length_dw > ndw); r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { @@ -829,11 +824,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev, struct radeon_bo *pt = vm->page_tables[pt_idx].bo; unsigned nptes; uint64_t pte; - struct fence *fence; - fence = reservation_object_get_excl(pt->tbo.resv); - radeon_semaphore_sync_to(ib->semaphore, - (struct radeon_fence *)fence); + radeon_semaphore_sync_resv(ib->semaphore, pt->tbo.resv, false); if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; @@ -975,7 +967,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev, radeon_asic_vm_pad_ib(rdev, &ib); WARN_ON(ib.length_dw > ndw); - radeon_semaphore_sync_to(ib.semaphore, vm->fence); + radeon_semaphore_sync_fence(ib.semaphore, vm->fence); r = radeon_ib_schedule(rdev, &ib, NULL, false); if (r) { radeon_ib_free(rdev, &ib); diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index 74426ac2bb5..c112764adfd 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -33,18 +33,19 @@ * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (r7xx). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int rv770_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_dw, cur_size_in_dw; @@ -54,7 +55,7 @@ int rv770_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; @@ -63,10 +64,10 @@ int rv770_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -83,15 +84,15 @@ int rv770_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_dw * 4; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 7c22baaf94d..9b0dfbc913f 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -218,18 +218,19 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) * @src_offset: src GPU address * @dst_offset: dst GPU address * @num_gpu_pages: number of GPU pages to xfer - * @fence: radeon fence object + * @resv: reservation object to sync to * * Copy GPU paging using the DMA engine (SI). * Used by the radeon ttm implementation to move pages if * registered as the asic copy callback. 
*/ -int si_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, uint64_t dst_offset, - unsigned num_gpu_pages, - struct radeon_fence **fence) +struct radeon_fence *si_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, uint64_t dst_offset, + unsigned num_gpu_pages, + struct reservation_object *resv) { struct radeon_semaphore *sem = NULL; + struct radeon_fence *fence; int ring_index = rdev->asic->copy.dma_ring_index; struct radeon_ring *ring = &rdev->ring[ring_index]; u32 size_in_bytes, cur_size_in_bytes; @@ -239,7 +240,7 @@ int si_copy_dma(struct radeon_device *rdev, r = radeon_semaphore_create(rdev, &sem); if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; + return ERR_PTR(r); } size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); @@ -248,10 +249,10 @@ int si_copy_dma(struct radeon_device *rdev, if (r) { DRM_ERROR("radeon: moving bo (%d).\n", r); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } - radeon_semaphore_sync_to(sem, *fence); + radeon_semaphore_sync_resv(sem, resv, false); radeon_semaphore_sync_rings(rdev, sem, ring->idx); for (i = 0; i < num_loops; i++) { @@ -268,16 +269,16 @@ int si_copy_dma(struct radeon_device *rdev, dst_offset += cur_size_in_bytes; } - r = radeon_fence_emit(rdev, fence, ring->idx); + r = radeon_fence_emit(rdev, &fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); radeon_semaphore_free(rdev, &sem, NULL); - return r; + return ERR_PTR(r); } radeon_ring_unlock_commit(rdev, ring, false); - radeon_semaphore_free(rdev, &sem, *fence); + radeon_semaphore_free(rdev, &sem, fence); - return r; + return fence; } -- cgit v1.2.3-70-g09d2 From 3840a656f61fdc504f1b0c6617f6af800d551efe Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 17 Sep 2014 04:00:05 -0600 Subject: drm/radeon: fix AGP userptr handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AGP mappings are not cache coherent, so userptr support won't work. In addition to that, the AGP implementation uses a different ttm_tt container structure, so we run into problems if we cast the pointer without checking if it's the right type. 
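Concretely, the fix is a checked downcast: instead of blindly casting every ttm_tt to radeon_ttm_tt, the new radeon_ttm_tt_to_gtt() helper compares the ttm_tt's backend function table against radeon's before converting, and returns NULL for foreign (e.g. AGP) containers. A minimal, self-contained userspace sketch of that pattern follows; the struct layouts here are toy stand-ins, not the real TTM definitions, and only the helper's shape matches the patch below.

/*
 * Toy analogue of the checked downcast: the embedded ->func pointer
 * acts as a type tag, so casting a foreign container is caught.
 */
#include <stdio.h>

struct ttm_backend_func { int dummy; };            /* stand-in type */

struct ttm_tt {
	const struct ttm_backend_func *func;       /* identifies backend */
};

struct radeon_ttm_tt {
	struct ttm_tt ttm;                         /* must be first member */
	unsigned long userptr;
};

static const struct ttm_backend_func radeon_backend_func = { 0 };
static const struct ttm_backend_func agp_backend_func = { 0 };

/* Returns NULL unless @ttm really is a radeon_ttm_tt. */
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

int main(void)
{
	struct radeon_ttm_tt gtt = { { &radeon_backend_func }, 0x1000 };
	struct ttm_tt agp = { &agp_backend_func };

	printf("radeon tt -> %p\n", (void *)radeon_ttm_tt_to_gtt(&gtt.ttm));
	printf("agp tt    -> %p (NULL: rejected)\n",
	       (void *)radeon_ttm_tt_to_gtt(&agp));
	return 0;
}

Every userptr query in the driver then degrades gracefully to "not a userptr BO" when handed an AGP ttm_tt, as the hunks below show.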
Signed-off-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_ttm.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c') diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index eca2ce60d44..d73ea9c0422 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -675,10 +675,17 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, return &gtt->ttm.ttm; } +static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm) +{ + if (!ttm || ttm->func != &radeon_backend_func) + return NULL; + return (struct radeon_ttm_tt *)ttm; +} + static int radeon_ttm_tt_populate(struct ttm_tt *ttm) { + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); struct radeon_device *rdev; - struct radeon_ttm_tt *gtt = (void *)ttm; unsigned i; int r; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -686,7 +693,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) if (ttm->state != tt_unpopulated) return 0; - if (gtt->userptr) { + if (gtt && gtt->userptr) { ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; @@ -741,11 +748,11 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct radeon_device *rdev; - struct radeon_ttm_tt *gtt = (void *)ttm; + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); - if (gtt->userptr) { + if (gtt && gtt->userptr) { kfree(ttm->sg); ttm->page_flags &= ~TTM_PAGE_FLAG_SG; return; @@ -782,7 +789,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags) { - struct radeon_ttm_tt *gtt = (void *)ttm; + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); if (gtt == NULL) return -EINVAL; @@ -795,7 +802,7 @@ int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) { - struct radeon_ttm_tt *gtt = (void *)ttm; + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); if (gtt == NULL) return false; @@ -805,7 +812,7 @@ bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) { - struct radeon_ttm_tt *gtt = (void *)ttm; + struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); if (gtt == NULL) return false; -- cgit v1.2.3-70-g09d2 From 884c6dabb0eafe7227f099c9e78e514191efaf13 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 23 Sep 2014 15:46:47 +0200 Subject: drm/: Don't call drm_mmap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Really, the legacy buffer api should be dead, especially for all these newfangled drivers. I suspect this is copypasta from the transitioning days, which probably originated in radeon. 
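Each driver's fix is the same one-liner: a vm_pgoff below DRM_FILE_PAGE_OFFSET used to fall through to the legacy drm_mmap() path and is now rejected outright, so only TTM-managed offsets are mappable. As a sketch, the radeon entry point ends up reading roughly as follows; the DRM_FILE_PAGE_OFFSET definition is quoted from radeon_ttm.c of this era as an assumption, and only the guard is the point.

/* Offsets below this belong to the dead legacy buffer API. The value
 * is an assumption from radeon_ttm.c of this period. */
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;

	/* Before this patch: return drm_mmap(filp, vma); */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL)
		return -EINVAL;

	/* Everything above the offset is a TTM buffer object. */
	return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
}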
Cc: "Christian König" Cc: David Herrmann Cc: Rashika Cc: Josh Triplett Cc: Daniel Vetter Cc: Fabian Frederick Cc: Gerd Hoffmann Cc: Alexandre Courbot Cc: Maarten Lankhorst Cc: Christian Engelmayer Signed-off-by: Daniel Vetter Reviewed-by: David Herrmann Acked-by: Ben Skeggs Acked-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/ast/ast_ttm.c | 2 +- drivers/gpu/drm/bochs/bochs_mm.c | 2 +- drivers/gpu/drm/cirrus/cirrus_ttm.c | 2 +- drivers/gpu/drm/mgag200/mgag200_ttm.c | 2 +- drivers/gpu/drm/nouveau/nouveau_ttm.c | 2 +- drivers/gpu/drm/qxl/qxl_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c') diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 5098c7dd435..c65d432f42c 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -427,7 +427,7 @@ int ast_mmap(struct file *filp, struct vm_area_struct *vma) struct ast_private *ast; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return drm_mmap(filp, vma); + return -EINVAL; file_priv = filp->private_data; ast = file_priv->minor->dev->dev_private; diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 2af30e7607d..324f5a09a0a 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -339,7 +339,7 @@ int bochs_mmap(struct file *filp, struct vm_area_struct *vma) struct bochs_device *bochs; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return drm_mmap(filp, vma); + return -EINVAL; file_priv = filp->private_data; bochs = file_priv->minor->dev->dev_private; diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 3e7d758330a..d3c615f9b18 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -411,7 +411,7 @@ int cirrus_mmap(struct file *filp, struct vm_area_struct *vma) struct cirrus_device *cirrus; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return drm_mmap(filp, vma); + return -EINVAL; file_priv = filp->private_data; cirrus = file_priv->minor->dev->dev_private; diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index be883ef5a1d..8ac70626df6 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -428,7 +428,7 @@ int mgag200_mmap(struct file *filp, struct vm_area_struct *vma) struct mga_device *mdev; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return drm_mmap(filp, vma); + return -EINVAL; file_priv = filp->private_data; mdev = file_priv->minor->dev->dev_private; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index e81d086577c..753a6def61e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -281,7 +281,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) - return drm_mmap(filp, vma); + return -EINVAL; return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index abe945a04fd..0cbc4c98716 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -127,7 +127,7 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma) if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { pr_info("%s: vma->vm_pgoff (%ld) < 
DRM_FILE_PAGE_OFFSET\n", __func__, vma->vm_pgoff); - return drm_mmap(filp, vma); + return -EINVAL; } file_priv = filp->private_data; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d73ea9c0422..738a2f248b3 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -963,7 +963,7 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma) int r; if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { - return drm_mmap(filp, vma); + return -EINVAL; } file_priv = filp->private_data; -- cgit v1.2.3-70-g09d2 From 831b6966a60fe72d85ae3576056b4e4e0775b112 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 18 Sep 2014 14:11:56 +0200 Subject: drm/radeon: export reservation_object from dmabuf to ttm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds an extra argument to radeon_bo_create, which is only used in radeon_prime.c. Reviewed-by: Christian König Signed-off-by: Maarten Lankhorst Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 4 ++-- drivers/gpu/drm/radeon/evergreen.c | 6 +++--- drivers/gpu/drm/radeon/r600.c | 4 ++-- drivers/gpu/drm/radeon/radeon_benchmark.c | 4 ++-- drivers/gpu/drm/radeon/radeon_device.c | 2 +- drivers/gpu/drm/radeon/radeon_gart.c | 2 +- drivers/gpu/drm/radeon/radeon_gem.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 8 +++++--- drivers/gpu/drm/radeon/radeon_object.h | 1 + drivers/gpu/drm/radeon/radeon_prime.c | 5 ++++- drivers/gpu/drm/radeon/radeon_ring.c | 2 +- drivers/gpu/drm/radeon/radeon_sa.c | 2 +- drivers/gpu/drm/radeon/radeon_test.c | 5 +++-- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_uvd.c | 3 ++- drivers/gpu/drm/radeon/radeon_vce.c | 3 ++- drivers/gpu/drm/radeon/radeon_vm.c | 5 +++-- 17 files changed, 35 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c') diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index fc49a8d0fe0..d48a539b038 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -4687,7 +4687,7 @@ static int cik_mec_init(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, NULL, + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &rdev->mec.hpd_eop_obj); if (r) { dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); @@ -4858,7 +4858,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) sizeof(struct bonaire_mqd), PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, 0, NULL, - &rdev->ring[idx].mqd_obj); + NULL, &rdev->ring[idx].mqd_obj); if (r) { dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); return r; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c09e40a0d3d..8fe9f870fb5 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4022,7 +4022,7 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->rlc.save_restore_obj == NULL) { r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &rdev->rlc.save_restore_obj); + NULL, &rdev->rlc.save_restore_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); return r; @@ -4101,7 +4101,7 @@ int sumo_rlc_init(struct radeon_device *rdev) if (rdev->rlc.clear_state_obj == NULL) { r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &rdev->rlc.clear_state_obj); + NULL, &rdev->rlc.clear_state_obj); 
if (r) { dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); sumo_rlc_fini(rdev); @@ -4178,7 +4178,7 @@ int sumo_rlc_init(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &rdev->rlc.cp_table_obj); + NULL, &rdev->rlc.cp_table_obj); if (r) { dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); sumo_rlc_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f8eb519c328..85414283fcc 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1430,7 +1430,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev) if (rdev->vram_scratch.robj == NULL) { r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - 0, NULL, &rdev->vram_scratch.robj); + 0, NULL, NULL, &rdev->vram_scratch.robj); if (r) { return r; } @@ -3368,7 +3368,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev) r = radeon_bo_create(rdev, rdev->ih.ring_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, 0, - NULL, &rdev->ih.ring_obj); + NULL, NULL, &rdev->ih.ring_obj); if (r) { DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 1e8855060fc..9e7f23dd14b 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -93,7 +93,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, int time; n = RADEON_BENCHMARK_ITERATIONS; - r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj); if (r) { goto out_cleanup; } @@ -105,7 +105,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, if (r) { goto out_cleanup; } - r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj); + r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e84a76e6656..6fbab158211 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -430,7 +430,7 @@ int radeon_wb_init(struct radeon_device *rdev) if (rdev->wb.wb_obj == NULL) { r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, NULL, + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &rdev->wb.wb_obj); if (r) { dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a053a0779aa..84146d5901a 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) if (rdev->gart.robj == NULL) { r = radeon_bo_create(rdev, rdev->gart.table_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - 0, NULL, &rdev->gart.robj); + 0, NULL, NULL, &rdev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 4b7c8ec36c2..c194497aa58 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size, retry: r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, - flags, NULL, &robj); + flags, 
NULL, NULL, &robj); if (r) { if (r != -ERESTARTSYS) { if (initial_domain == RADEON_GEM_DOMAIN_VRAM) { diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 0e82f0223fd..99a960a4f30 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -167,8 +167,10 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) } int radeon_bo_create(struct radeon_device *rdev, - unsigned long size, int byte_align, bool kernel, u32 domain, - u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr) + unsigned long size, int byte_align, bool kernel, + u32 domain, u32 flags, struct sg_table *sg, + struct reservation_object *resv, + struct radeon_bo **bo_ptr) { struct radeon_bo *bo; enum ttm_bo_type type; @@ -216,7 +218,7 @@ int radeon_bo_create(struct radeon_device *rdev, down_read(&rdev->pm.mclk_lock); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, - acc_size, sg, NULL, &radeon_ttm_bo_destroy); + acc_size, sg, resv, &radeon_ttm_bo_destroy); up_read(&rdev->pm.mclk_lock); if (unlikely(r != 0)) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 98a47fdf362..1b8ec791715 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -126,6 +126,7 @@ extern int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, + struct reservation_object *resv, struct radeon_bo **bo_ptr); extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index 171daf7fc48..f3609c97496 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -61,12 +61,15 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { + struct reservation_object *resv = attach->dmabuf->resv; struct radeon_device *rdev = dev->dev_private; struct radeon_bo *bo; int ret; + ww_mutex_lock(&resv->lock, NULL); ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false, - RADEON_GEM_DOMAIN_GTT, 0, sg, &bo); + RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); + ww_mutex_unlock(&resv->lock); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 6f2a9bd6bb5..3d17af34afa 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -383,7 +383,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig /* Allocate ring buffer */ if (ring->ring_obj == NULL) { r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, &ring->ring_obj); if (r) { dev_err(rdev->dev, "(%d) ring create failed\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index b84f97c8718..c507896aca4 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev, } r = radeon_bo_create(rdev, size, align, true, - domain, flags, NULL, &sa_manager->bo); + domain, flags, NULL, NULL, &sa_manager->bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", 
r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index ce943e1a5e5..07b506b4100 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -67,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) } r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, - 0, NULL, &vram_obj); + 0, NULL, NULL, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -87,7 +87,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag) struct radeon_fence *fence = NULL; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); + RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, + gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 738a2f248b3..8624979afb6 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -865,7 +865,7 @@ int radeon_ttm_init(struct radeon_device *rdev) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, + RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &rdev->stollen_vga_memory); if (r) { return r; diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index ba4f3891602..11b66246925 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -141,7 +141,8 @@ int radeon_uvd_init(struct radeon_device *rdev) RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE + RADEON_GPU_PAGE_SIZE; r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, + NULL, &rdev->uvd.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index c7190aadbd8..9e85757d559 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -126,7 +126,8 @@ int radeon_vce_init(struct radeon_device *rdev) size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) + RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE; r = radeon_bo_create(rdev, size, PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo); + RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, + &rdev->vce.vcpu_bo); if (r) { dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); return r; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 8af1a94e744..4532cc76a0a 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -548,7 +548,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, RADEON_GPU_PAGE_SIZE, true, - RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt); + RADEON_GEM_DOMAIN_VRAM, 0, + NULL, NULL, &pt); if (r) return r; @@ -1127,7 +1128,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_create(rdev, pd_size, align, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, - &vm->page_directory); + NULL, &vm->page_directory); if (r) return r; -- cgit v1.2.3-70-g09d2
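Taken together, the API changes in this series alter the caller-side contract in two ways: the copy callbacks now hand back the fence directly (or an ERR_PTR on failure) instead of filling a radeon_fence ** out-parameter, and radeon_bo_create() grows a reservation_object argument that only the dma-buf import path passes non-NULL. A caller-side sketch stitched from the hunks above; the wrapper function itself and its cleanup order are illustrative, not kernel code.

/*
 * Hypothetical caller exercising both new contracts: BO creation that
 * inherits a dma-buf's reservation object, and a copy that returns
 * the fence directly.
 */
static int example_import_and_copy(struct radeon_device *rdev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg,
				   uint64_t src, uint64_t dst,
				   unsigned num_gpu_pages)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct radeon_fence *fence;
	struct radeon_bo *bo;
	int r;

	/* radeon_bo_create() now takes the dma-buf's reservation object;
	 * the import path holds its ww_mutex across initialization. */
	ww_mutex_lock(&resv->lock, NULL);
	r = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
			     RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
	ww_mutex_unlock(&resv->lock);
	if (r)
		return r;

	/* Copy callbacks return the fence; errors arrive as ERR_PTR()
	 * rather than an int plus a radeon_fence ** out-parameter. */
	fence = radeon_copy_dma(rdev, src, dst, num_gpu_pages, bo->tbo.resv);
	if (IS_ERR(fence)) {
		radeon_bo_unref(&bo);
		return PTR_ERR(fence);
	}

	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	radeon_bo_unref(&bo);
	return r;
}

Passing the reservation object down to the copy lets the engine sync against all prior users of the buffer (via radeon_semaphore_sync_resv) instead of only the single exclusive fence, which is what made the fence out-parameter obsolete.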