Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c      | 152
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c |   9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c   |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c    |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c  |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c      |  35
6 files changed, 125 insertions, 77 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fbb2eea5e8..c7320ce4567 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
         return -EINVAL;
 }
 
-static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
-                                       struct ttm_mem_type_manager *man)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 {
+        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
         printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
         printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
         printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
         printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
         printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
         printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
-        printk(KERN_ERR TTM_PFX "    size: %ld\n", (unsigned long)man->size);
+        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
         printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
                 man->available_caching);
         printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
                 man->default_caching);
-        spin_lock(&glob->lru_lock);
-        drm_mm_debug_table(&man->manager, TTM_PFX);
-        spin_unlock(&glob->lru_lock);
+        if (mem_type != TTM_PL_SYSTEM) {
+                spin_lock(&bdev->glob->lru_lock);
+                drm_mm_debug_table(&man->manager, TTM_PFX);
+                spin_unlock(&bdev->glob->lru_lock);
+        }
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                         struct ttm_placement *placement)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_bo_global *glob = bo->glob;
-        struct ttm_mem_type_manager *man;
         int i, ret, mem_type;
 
-        printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+        printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
                 bo, bo->mem.num_pages, bo->mem.size >> 10,
                 bo->mem.size >> 20);
         for (i = 0; i < placement->num_placement; i++) {
@@ -106,10 +106,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                 &mem_type);
                 if (ret)
                         return;
-                man = &bdev->man[mem_type];
                 printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
                         i, placement->placement[i], mem_type);
-                ttm_mem_type_manager_debug(glob, man);
+                ttm_mem_type_debug(bo->bdev, mem_type);
         }
 }
 
@@ -427,7 +426,8 @@ moved:
                         bdev->man[bo->mem.mem_type].gpu_offset;
                 bo->cur_placement = bo->mem.placement;
                 spin_unlock(&bo->lock);
-        }
+        } else
+                bo->offset = 0;
 
         return 0;
 
@@ -465,6 +465,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
         spin_unlock(&bo->lock);
 
         spin_lock(&glob->lru_lock);
+        put_count = ttm_bo_del_from_lru(bo);
+
         ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
         BUG_ON(ret);
         if (bo->ttm)
@@ -472,20 +474,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
         if (!list_empty(&bo->ddestroy)) {
                 list_del_init(&bo->ddestroy);
-                kref_put(&bo->list_kref, ttm_bo_ref_bug);
+                ++put_count;
         }
         if (bo->mem.mm_node) {
                 bo->mem.mm_node->private = NULL;
                 drm_mm_put_block(bo->mem.mm_node);
                 bo->mem.mm_node = NULL;
         }
-        put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
 
         atomic_set(&bo->reserved, 0);
 
         while (put_count--)
-                kref_put(&bo->list_kref, ttm_bo_release_list);
+                kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
         return 0;
 }
@@ -523,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
         struct ttm_bo_global *glob = bdev->glob;
-        struct ttm_buffer_object *entry, *nentry;
-        struct list_head *list, *next;
-        int ret;
+        struct ttm_buffer_object *entry = NULL;
+        int ret = 0;
 
         spin_lock(&glob->lru_lock);
-        list_for_each_safe(list, next, &bdev->ddestroy) {
-                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-                nentry = NULL;
+        if (list_empty(&bdev->ddestroy))
+                goto out_unlock;
 
-                /*
-                 * Protect the next list entry from destruction while we
-                 * unlock the lru_lock.
-                 */
+        entry = list_first_entry(&bdev->ddestroy,
+                struct ttm_buffer_object, ddestroy);
+        kref_get(&entry->list_kref);
+
+        for (;;) {
+                struct ttm_buffer_object *nentry = NULL;
 
-                if (next != &bdev->ddestroy) {
-                        nentry = list_entry(next, struct ttm_buffer_object,
-                                            ddestroy);
+                if (entry->ddestroy.next != &bdev->ddestroy) {
+                        nentry = list_first_entry(&entry->ddestroy,
+                                struct ttm_buffer_object, ddestroy);
                         kref_get(&nentry->list_kref);
                 }
-                kref_get(&entry->list_kref);
 
                 spin_unlock(&glob->lru_lock);
                 ret = ttm_bo_cleanup_refs(entry, remove_all);
                 kref_put(&entry->list_kref, ttm_bo_release_list);
+                entry = nentry;
+
+                if (ret || !entry)
+                        goto out;
 
                 spin_lock(&glob->lru_lock);
-                if (nentry) {
-                        bool next_onlist = !list_empty(next);
-                        spin_unlock(&glob->lru_lock);
-                        kref_put(&nentry->list_kref, ttm_bo_release_list);
-                        spin_lock(&glob->lru_lock);
-                        /*
-                         * Someone might have raced us and removed the
-                         * next entry from the list. We don't bother restarting
-                         * list traversal.
-                         */
-
-                        if (!next_onlist)
-                                break;
-                }
-                if (ret)
+                if (list_empty(&entry->ddestroy))
                         break;
         }
 
-        ret = !list_empty(&bdev->ddestroy);
-        spin_unlock(&glob->lru_lock);
+out_unlock:
+        spin_unlock(&glob->lru_lock);
+out:
+        if (entry)
+                kref_put(&entry->list_kref, ttm_bo_release_list);
         return ret;
 }
@@ -684,19 +677,45 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
         struct ttm_buffer_object *bo;
         int ret, put_count = 0;
 
+retry:
         spin_lock(&glob->lru_lock);
+        if (list_empty(&man->lru)) {
+                spin_unlock(&glob->lru_lock);
+                return -EBUSY;
+        }
+
         bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
         kref_get(&bo->list_kref);
-        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
-        if (likely(ret == 0))
-                put_count = ttm_bo_del_from_lru(bo);
+
+        ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+
+        if (unlikely(ret == -EBUSY)) {
+                spin_unlock(&glob->lru_lock);
+                if (likely(!no_wait))
+                        ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+                kref_put(&bo->list_kref, ttm_bo_release_list);
+
+                /**
+                 * We *need* to retry after releasing the lru lock.
+                 */
+
+                if (unlikely(ret != 0))
+                        return ret;
+                goto retry;
+        }
+
+        put_count = ttm_bo_del_from_lru(bo);
         spin_unlock(&glob->lru_lock);
-        if (unlikely(ret != 0))
-                return ret;
+
+        BUG_ON(ret != 0);
+
         while (put_count--)
                 kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
         ret = ttm_bo_evict(bo, interruptible, no_wait);
         ttm_bo_unreserve(bo);
+
         kref_put(&bo->list_kref, ttm_bo_release_list);
         return ret;
 }
@@ -849,7 +868,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
         int i, ret;
 
         mem->mm_node = NULL;
-        for (i = 0; i <= placement->num_placement; ++i) {
+        for (i = 0; i < placement->num_placement; ++i) {
                 ret = ttm_mem_type_from_flags(placement->placement[i],
                                                 &mem_type);
                 if (ret)
@@ -900,8 +919,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
         if (!type_found)
                 return -EINVAL;
 
-        for (i = 0; i <= placement->num_busy_placement; ++i) {
-                ret = ttm_mem_type_from_flags(placement->placement[i],
+        for (i = 0; i < placement->num_busy_placement; ++i) {
+                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                                 &mem_type);
                 if (ret)
                         return ret;
@@ -911,7 +930,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 if (!ttm_bo_mt_compatible(man,
                                                 bo->type == ttm_bo_type_user,
                                                 mem_type,
-                                                placement->placement[i],
+                                                placement->busy_placement[i],
                                                 &cur_flags))
                         continue;
@@ -921,9 +940,17 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                  * Use the access and other non-mapping-related flag bits from
                  * the memory placement flags to the current flags
                  */
-                ttm_flag_masked(&cur_flags, placement->placement[i],
+                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                 ~TTM_PL_MASK_MEMTYPE);
 
+
+                if (mem_type == TTM_PL_SYSTEM) {
+                        mem->mem_type = mem_type;
+                        mem->placement = cur_flags;
+                        mem->mm_node = NULL;
+                        return 0;
+                }
+
                 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                                 interruptible, no_wait);
                 if (ret == 0 && mem->mm_node) {
@@ -993,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
 {
         int i;
+        struct drm_mm_node *node = mem->mm_node;
+
+        if (node && placement->lpfn != 0 &&
+            (node->start < placement->fpfn ||
+             node->start + node->size > placement->lpfn))
+                return -1;
 
         for (i = 0; i < placement->num_placement; i++) {
                 if ((placement->placement[i] & mem->placement &
@@ -1115,6 +1148,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
         bo->glob = bdev->glob;
         bo->type = type;
         bo->num_pages = num_pages;
+        bo->mem.size = num_pages << PAGE_SHIFT;
         bo->mem.mem_type = TTM_PL_SYSTEM;
         bo->mem.num_pages = bo->num_pages;
         bo->mem.mm_node = NULL;
@@ -1817,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
          * anyone tries to access a ttm page.
          */
 
+        if (bo->bdev->driver->swap_notify)
+                bo->bdev->driver->swap_notify(bo);
+
         ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
@@ -1837,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
         while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                 ;
 }
+EXPORT_SYMBOL(ttm_bo_swapout_all);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f..5ca37a58a98 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 {
         struct ttm_tt *ttm = bo->ttm;
         struct ttm_mem_reg *old_mem = &bo->mem;
-        uint32_t save_flags = old_mem->placement;
         int ret;
 
         if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                 TTM_PL_MASK_MEM);
                 old_mem->mem_type = TTM_PL_SYSTEM;
-                save_flags = old_mem->placement;
         }
 
         ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 
         *old_mem = *new_mem;
         new_mem->mm_node = NULL;
-        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
         return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         void *old_iomap;
         void *new_iomap;
         int ret;
-        uint32_t save_flags = old_mem->placement;
         unsigned long i;
         unsigned long page;
         unsigned long add = 0;
@@ -270,7 +267,6 @@ out2:
 
         *old_mem = *new_mem;
         new_mem->mm_node = NULL;
-        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
 
         if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
                 ttm_tt_unbind(ttm);
@@ -537,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
         struct ttm_mem_reg *old_mem = &bo->mem;
         int ret;
-        uint32_t save_flags = old_mem->placement;
         struct ttm_buffer_object *ghost_obj;
         void *tmp_obj = NULL;
 
@@ -598,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
         *old_mem = *new_mem;
         new_mem->mm_node = NULL;
-        ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
         return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 609a85a4d85..668dbe8b8dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
                 return -EFAULT;
 
         driver = bo->bdev->driver;
-        if (unlikely(driver->verify_access)) {
+        if (unlikely(!driver->verify_access)) {
                 ret = -EPERM;
                 goto out_unref;
         }
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
index f619ebcaa4e..3d172ef04ee 100644
--- a/drivers/gpu/drm/ttm/ttm_lock.c
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -288,6 +288,7 @@ void ttm_suspend_unlock(struct ttm_lock *lock)
         wake_up_all(&lock->queue);
         spin_unlock(&lock->lock);
 }
+EXPORT_SYMBOL(ttm_suspend_unlock);
 
 static bool __ttm_suspend_lock(struct ttm_lock *lock)
 {
@@ -309,3 +310,4 @@ void ttm_suspend_lock(struct ttm_lock *lock)
 {
         wait_event(lock->queue, __ttm_suspend_lock(lock));
 }
+EXPORT_SYMBOL(ttm_suspend_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 1099abac824..75e9d6f86ba 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -109,8 +109,8 @@ struct ttm_ref_object {
         struct drm_hash_item hash;
         struct list_head head;
         struct kref kref;
-        struct ttm_base_object *obj;
         enum ttm_ref_type ref_type;
+        struct ttm_base_object *obj;
         struct ttm_object_file *tfile;
 };
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9c2b1cc5dba..3d47a2c1232 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,23 +196,34 @@ EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
-                                          enum ttm_caching_state c_state)
+                                          enum ttm_caching_state c_old,
+                                          enum ttm_caching_state c_new)
 {
+        int ret = 0;
+
         if (PageHighMem(p))
                 return 0;
 
-        switch (c_state) {
-        case tt_cached:
-                return set_pages_wb(p, 1);
-        case tt_wc:
-                return set_memory_wc((unsigned long) page_address(p), 1);
-        default:
-                return set_pages_uc(p, 1);
+        if (c_old != tt_cached) {
+                /* p isn't in the default caching state, set it to
+                 * writeback first to free its current memtype. */
+
+                ret = set_pages_wb(p, 1);
+                if (ret)
+                        return ret;
         }
+
+        if (c_new == tt_wc)
+                ret = set_memory_wc((unsigned long) page_address(p), 1);
+        else if (c_new == tt_uncached)
+                ret = set_pages_uc(p, 1);
+
+        return ret;
 }
 #else /* CONFIG_X86 */
 static inline int ttm_tt_set_page_caching(struct page *p,
-                                          enum ttm_caching_state c_state)
+                                          enum ttm_caching_state c_old,
+                                          enum ttm_caching_state c_new)
 {
         return 0;
 }
@@ -245,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
         for (i = 0; i < ttm->num_pages; ++i) {
                 cur_page = ttm->pages[i];
                 if (likely(cur_page != NULL)) {
-                        ret = ttm_tt_set_page_caching(cur_page, c_state);
+                        ret = ttm_tt_set_page_caching(cur_page,
+                                                      ttm->caching_state,
+                                                      c_state);
                         if (unlikely(ret != 0))
                                 goto out_err;
                 }
@@ -259,7 +272,7 @@ out_err:
         for (j = 0; j < i; ++j) {
                 cur_page = ttm->pages[j];
                 if (likely(cur_page != NULL)) {
-                        (void)ttm_tt_set_page_caching(cur_page,
+                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                       ttm->caching_state);
                 }
         }
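
For readers following the ttm_tt.c hunk above: the reworked ttm_tt_set_page_caching() now takes the page's old and new caching states and routes every transition through writeback first, because on x86 a page's existing PAT memtype must be released before a different one can be set. The stand-alone sketch below models only that ordering rule; the fake_set_*() helpers and the enum are invented for illustration and are not part of the kernel API.

/* Minimal user-space model of the two-step caching transition; all
 * fake_* names are hypothetical stand-ins for set_pages_wb(),
 * set_memory_wc() and set_pages_uc(). */
#include <stdio.h>

enum caching { CACHED, WRITE_COMBINED, UNCACHED };

static int fake_set_wb(int page) { printf("page %d -> writeback\n", page); return 0; }
static int fake_set_wc(int page) { printf("page %d -> write-combined\n", page); return 0; }
static int fake_set_uc(int page) { printf("page %d -> uncached\n", page); return 0; }

static int set_page_caching(int page, enum caching c_old, enum caching c_new)
{
        int ret = 0;

        /* A page not in the default (writeback) state is returned to
         * writeback first, freeing its current memtype. */
        if (c_old != CACHED) {
                ret = fake_set_wb(page);
                if (ret)
                        return ret;
        }

        if (c_new == WRITE_COMBINED)
                ret = fake_set_wc(page);
        else if (c_new == UNCACHED)
                ret = fake_set_uc(page);

        return ret;
}

int main(void)
{
        /* wc -> uncached now passes through writeback in between. */
        return set_page_caching(0, WRITE_COMBINED, UNCACHED);
}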