author		Christian König <christian.koenig@amd.com>	2014-08-27 13:16:04 +0200
committer	Christian König <christian.koenig@amd.com>	2014-08-27 13:16:04 +0200
commit		f1217ed09f827e42a49ffa6a5aab672aa6f57a65 (patch)
tree		ab6a78bc2f7b0d42165eb647e13e87f92b97f149 /drivers/gpu/drm/radeon/radeon_object.c
parent		484048db6b4890bc433aac7f5e32fdcf1b2b4786 (diff)
drm/ttm: move fpfn and lpfn into each placement v2
This allows us to specify in a more fine-grained way where to place the buffer object.

v2: rebased on drm-next, add bochs changes as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
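In practical terms, the fpfn/lpfn page-frame window moves out of the single struct ttm_placement and into every entry of its placement array (struct ttm_place), so each candidate placement can carry its own address limits. The sketch below illustrates the idea; the struct layout is abbreviated from the TTM headers of this series, and example_fill_placement and visible_vram_pages are illustrative names only, not symbols from this patch.

/* Abbreviated sketch of the structures involved (see include/drm/ttm/
 * for the authoritative definitions in this series). */
struct ttm_place {
	unsigned	fpfn;	/* first acceptable page frame number, 0 = no lower bound */
	unsigned	lpfn;	/* last acceptable page frame number, 0 = no upper bound */
	uint32_t	flags;	/* TTM_PL_FLAG_* memory type and caching flags */
};

struct ttm_placement {
	unsigned		num_placement;
	const struct ttm_place	*placement;
	unsigned		num_busy_placement;
	const struct ttm_place	*busy_placement;
};

/* Hypothetical helper: clamp only the VRAM placement to the CPU-visible
 * aperture while leaving the GTT fallback unrestricted, which a single
 * per-ttm_placement fpfn/lpfn pair could not express. */
static void example_fill_placement(struct ttm_placement *placement,
				   struct ttm_place *places,
				   unsigned visible_vram_pages)
{
	unsigned c = 0;

	places[c].fpfn = 0;
	places[c].lpfn = visible_vram_pages;	/* VRAM clamped to visible window */
	places[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			    TTM_PL_FLAG_VRAM;

	places[c].fpfn = 0;
	places[c].lpfn = 0;			/* GTT fallback, no limit */
	places[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;

	placement->num_placement = c;
	placement->placement = places;
	placement->num_busy_placement = c;
	placement->busy_placement = places;
}

The radeon hunks below rely on exactly this freedom: radeon_bo_pin_restricted() now computes a separate lpfn per placement instead of clamping the whole placement list, and radeon_bo_unpin() clears the per-placement limits again.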
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	71
1 file changed, 45 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 28752380798..0129c7efae3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -97,40 +97,56 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 {
 	u32 c = 0, i;
 
-	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
-		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-					TTM_PL_FLAG_VRAM;
+		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+					     TTM_PL_FLAG_UNCACHED |
+					     TTM_PL_FLAG_VRAM;
+
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
 		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_TT;
+
 		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
 			   (rbo->rdev->flags & RADEON_IS_AGP)) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_TT;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+				TTM_PL_FLAG_TT;
 		}
 	}
+
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
 		if (rbo->flags & RADEON_GEM_GTT_UC) {
-			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_SYSTEM;
+
 		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
 		    rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
+				TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_SYSTEM;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
+			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
+				TTM_PL_FLAG_SYSTEM;
 		}
 	}
 	if (!c)
-		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
+					     TTM_PL_FLAG_SYSTEM;
+
 	rbo->placement.num_placement = c;
 	rbo->placement.num_busy_placement = c;
 
+	for (i = 0; i < c; ++i) {
+		rbo->placements[i].fpfn = 0;
+		rbo->placements[i].lpfn = 0;
+	}
+
 	/*
 	 * Use two-ended allocation depending on the buffer size to
 	 * improve fragmentation quality.
@@ -138,7 +154,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	 */
 	if (rbo->tbo.mem.size > 512 * 1024) {
 		for (i = 0; i < c; i++) {
-			rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
+			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
 		}
 	}
 }
@@ -287,21 +303,22 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 		return 0;
 	}
 	radeon_ttm_placement_from_domain(bo, domain);
-	if (domain == RADEON_GEM_DOMAIN_VRAM) {
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		unsigned lpfn = 0;
+
 		/* force to pin into visible video ram */
-		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
-	}
-	if (max_offset) {
-		u64 lpfn = max_offset >> PAGE_SHIFT;
+		if (bo->placements[i].flags & TTM_PL_FLAG_VRAM)
+			lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+		else
+			lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */
 
-		if (!bo->placement.lpfn)
-			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+		if (max_offset)
+			lpfn = min (lpfn, (unsigned)(max_offset >> PAGE_SHIFT));
 
-		if (lpfn < bo->placement.lpfn)
-			bo->placement.lpfn = lpfn;
+		bo->placements[i].lpfn = lpfn;
+		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
@@ -333,8 +350,10 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++)
-		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
 	if (likely(r == 0)) {
 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
@@ -735,7 +754,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 	/* hurrah the memory is not visible ! */
 	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
-	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
 	r = ttm_bo_validate(bo, &rbo->placement, false, false);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);