author     Ingo Molnar <mingo@elte.hu>  2009-10-25 17:30:53 +0100
committer  Ingo Molnar <mingo@elte.hu>  2009-10-25 17:30:53 +0100
commit     0b9e31e9264f1bad89856afb96da1688292f13b4 (patch)
tree       7a9e9b6456dce993efeed8734de0a15a1f16ae94 /drivers/gpu/drm/ttm
parent     cf82ff7ea7695b0e82ba07bc5e9f1bd03a74e1aa (diff)
parent     964fe080d94db82a3268443e9b9ece4c60246414 (diff)
Merge branch 'linus' into sched/core
Conflicts:
fs/proc/array.c
Merge reason: resolve conflict and queue up dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c      | 295
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c |   4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c   |   2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_global.c  |   6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c  | 508
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c  |  58
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c      | 104
7 files changed, 702 insertions, 275 deletions
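
The ttm_bo.c changes below move state that was previously per-device (the LRU spinlock, the swap LRU, the memory-accounting shrinker, and the dummy read page) out of struct ttm_bo_device into a refcounted struct ttm_bo_global shared by all TTM devices. For orientation, here is a sketch of what driver setup looks like against the new entry points; the mydrv_* names, MYDRV_FILE_PAGE_OFFSET, and the mem-global init/release wrappers are hypothetical, while the ttm_* types, fields, and call signatures are taken from this patch and the TTM headers of the same period:

	/*
	 * Hypothetical driver glue (mydrv_*).  Assumes a driver-private
	 * structure along the lines of:
	 *
	 *	struct mydrv_device {
	 *		struct ttm_global_reference mem_global_ref;
	 *		struct ttm_bo_global_ref bo_global_ref;
	 *		struct ttm_bo_device bdev;
	 *		bool need_dma32;
	 *	};
	 *
	 * and wrappers mydrv_ttm_mem_global_{init,release}() that call
	 * ttm_mem_global_{init,release}() on ref->object.
	 */
	static int mydrv_ttm_global_init(struct mydrv_device *mdev)
	{
		struct ttm_global_reference *ref;
		int ret;

		/* Take a reference on the shared memory-accounting global. */
		ref = &mdev->mem_global_ref;
		ref->global_type = TTM_GLOBAL_TTM_MEM;
		ref->size = sizeof(struct ttm_mem_global);
		ref->init = &mydrv_ttm_mem_global_init;
		ref->release = &mydrv_ttm_mem_global_release;
		ret = ttm_global_item_ref(ref);
		if (unlikely(ret != 0))
			return ret;

		/*
		 * Then on the buffer-object global, which needs the mem
		 * global; ttm_bo_global_init()/_release() are the callbacks
		 * exported by this patch.
		 */
		mdev->bo_global_ref.mem_glob = mdev->mem_global_ref.object;
		ref = &mdev->bo_global_ref.ref;
		ref->global_type = TTM_GLOBAL_TTM_BO;
		ref->size = sizeof(struct ttm_bo_global);
		ref->init = &ttm_bo_global_init;
		ref->release = &ttm_bo_global_release;
		ret = ttm_global_item_ref(ref);
		if (unlikely(ret != 0)) {
			ttm_global_item_unref(&mdev->mem_global_ref);
			return ret;
		}

		/* ttm_bo_device_init() now takes the bo global, not a mem global. */
		return ttm_bo_device_init(&mdev->bdev,
					  mdev->bo_global_ref.ref.object,
					  &mydrv_bo_driver,
					  MYDRV_FILE_PAGE_OFFSET,
					  mdev->need_dma32);
	}
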
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c2b0d710d10..87c06252d46 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -44,6 +44,39 @@
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 
+static void ttm_bo_global_kobj_release(struct kobject *kobj);
+
+static struct attribute ttm_bo_count = {
+	.name = "bo_count",
+	.mode = S_IRUGO
+};
+
+static ssize_t ttm_bo_global_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buffer)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	return snprintf(buffer, PAGE_SIZE, "%lu\n",
+			(unsigned long) atomic_read(&glob->bo_count));
+}
+
+static struct attribute *ttm_bo_global_attrs[] = {
+	&ttm_bo_count,
+	NULL
+};
+
+static struct sysfs_ops ttm_bo_global_ops = {
+	.show = &ttm_bo_global_show
+};
+
+static struct kobj_type ttm_bo_glob_kobj_type = {
+	.release = &ttm_bo_global_kobj_release,
+	.sysfs_ops = &ttm_bo_global_ops,
+	.default_attrs = ttm_bo_global_attrs
+};
+
 static inline uint32_t ttm_bo_type_flags(unsigned type)
 {
@@ -66,10 +99,11 @@ static void ttm_bo_release_list(struct kref *list_kref)
 
 	if (bo->ttm)
 		ttm_tt_destroy(bo->ttm);
+	atomic_dec(&bo->glob->bo_count);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
-		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
 		kfree(bo);
 	}
 }
@@ -106,7 +140,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		kref_get(&bo->list_kref);
 
 		if (bo->ttm != NULL) {
-			list_add_tail(&bo->swap, &bdev->swap_lru);
+			list_add_tail(&bo->swap, &bo->glob->swap_lru);
 			kref_get(&bo->list_kref);
 		}
 	}
@@ -141,7 +175,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
 	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
@@ -153,9 +187,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
@@ -181,16 +215,16 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
 				    sequence);
 	if (likely(ret == 0))
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -200,13 +234,13 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ttm_bo_add_to_lru(bo);
 	atomic_set(&bo->reserved, 0);
 	wake_up_all(&bo->event_queue);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
 
@@ -217,6 +251,7 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	uint32_t page_flags = 0;
 
@@ -232,14 +267,14 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 	case ttm_bo_type_kernel:
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
-					page_flags, bdev->dummy_read_page);
+					page_flags, glob->dummy_read_page);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
 	case ttm_bo_type_user:
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 					page_flags | TTM_PAGE_FLAG_USER,
-					bdev->dummy_read_page);
+					glob->dummy_read_page);
 		if (unlikely(bo->ttm == NULL))
 			ret = -ENOMEM;
 		break;
@@ -360,6 +395,7 @@ out_err:
 static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver = bdev->driver;
 	int ret;
 
@@ -371,7 +407,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 	spin_unlock(&bo->lock);
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
 	BUG_ON(ret);
 	if (bo->ttm)
@@ -386,7 +422,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 			bo->mem.mm_node = NULL;
 		}
 		put_count = ttm_bo_del_from_lru(bo);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 
 		atomic_set(&bo->reserved, 0);
 
@@ -396,14 +432,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		return 0;
 	}
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
 		void *sync_obj_arg = bo->sync_obj_arg;
 
 		kref_get(&bo->list_kref);
 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		spin_unlock(&bo->lock);
 
 		if (sync_obj)
@@ -413,7 +449,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 		ret = 0;
 
 	} else {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		spin_unlock(&bo->lock);
 		ret = -EBUSY;
 	}
@@ -428,11 +464,12 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_buffer_object *entry, *nentry;
 	struct list_head *list, *next;
 	int ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	list_for_each_safe(list, next, &bdev->ddestroy) {
 		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
 		nentry = NULL;
@@ -449,16 +486,16 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 		}
 
 		kref_get(&entry->list_kref);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		if (nentry) {
 			bool next_onlist = !list_empty(next);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 			/*
 			 * Someone might have raced us and removed the
 			 * next entry from the list. We don't bother restarting
@@ -472,7 +509,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			break;
 	}
 	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	return ret;
 }
@@ -522,6 +559,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 {
 	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
 	uint32_t proposed_placement;
 
@@ -570,12 +608,12 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 		goto out;
 	}
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (evict_mem.mm_node) {
 		drm_mm_put_block(evict_mem.mm_node);
 		evict_mem.mm_node = NULL;
 	}
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 	bo->evicted = true;
 out:
 	return ret;
@@ -590,6 +628,7 @@ static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
 				  uint32_t mem_type,
 				  bool interruptible, bool no_wait)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct drm_mm_node *node;
 	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -603,7 +642,7 @@ retry_pre_get:
 	if (unlikely(ret != 0))
 		return ret;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	do {
 		node = drm_mm_search_free(&man->manager, num_pages,
 					  mem->page_alignment, 1);
@@ -624,7 +663,7 @@ retry_pre_get:
 		if (likely(ret == 0))
 			put_count = ttm_bo_del_from_lru(entry);
 
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 
 		if (unlikely(ret != 0))
 			return ret;
@@ -640,21 +679,21 @@ retry_pre_get:
 		if (ret)
 			return ret;
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 	} while (1);
 
 	if (!node) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		return -ENOMEM;
 	}
 
 	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
 	if (unlikely(!node)) {
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		goto retry_pre_get;
 	}
 
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
@@ -723,6 +762,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		     bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
 
 	uint32_t num_prios = bdev->driver->num_mem_type_prio;
@@ -762,20 +802,20 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			if (unlikely(ret))
 				return ret;
 
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 			node = drm_mm_search_free(&man->manager,
 						  mem->num_pages,
 						  mem->page_alignment, 1);
 			if (unlikely(!node)) {
-				spin_unlock(&bdev->lru_lock);
+				spin_unlock(&glob->lru_lock);
 				break;
 			}
 			node = drm_mm_get_block_atomic(node,
 						       mem->num_pages,
 						       mem->page_alignment);
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 		} while (!node);
 	}
 	if (node)
@@ -848,7 +888,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 		       uint32_t proposed_placement,
 		       bool interruptible, bool no_wait)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -884,9 +924,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
 out_unlock:
 	if (ret && mem.mm_node) {
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
@@ -1022,6 +1062,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	bo->bdev = bdev;
+	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
@@ -1034,6 +1075,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	bo->seq_valid = false;
 	bo->persistant_swap_storage = persistant_swap_storage;
 	bo->acc_size = acc_size;
+	atomic_inc(&bo->glob->bo_count);
 
 	ret = ttm_bo_check_placement(bo, flags, 0ULL);
 	if (unlikely(ret != 0))
@@ -1072,13 +1114,13 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_buffer_object_init);
 
-static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 				 unsigned long num_pages)
 {
 	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
 	    PAGE_MASK;
 
-	return bdev->ttm_bo_size + 2 * page_array_size;
+	return glob->ttm_bo_size + 2 * page_array_size;
 }
 
 int ttm_buffer_object_create(struct ttm_bo_device *bdev,
@@ -1093,18 +1135,18 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 {
 	struct ttm_buffer_object *bo;
 	int ret;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 
 	size_t acc_size =
-	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
+	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
 	if (unlikely(ret != 0))
 		return ret;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 
 	if (unlikely(bo == NULL)) {
-		ttm_mem_global_free(mem_glob, acc_size, false);
+		ttm_mem_global_free(mem_glob, acc_size);
 		return -ENOMEM;
 	}
 
@@ -1150,6 +1192,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 				   struct list_head *head,
 				   unsigned mem_type, bool allow_errors)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_buffer_object *entry;
 	int ret;
 	int put_count;
@@ -1158,30 +1201,31 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	while (!list_empty(head)) {
 		entry = list_first_entry(head, struct ttm_buffer_object, lru);
 		kref_get(&entry->list_kref);
 		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
 		put_count = ttm_bo_del_from_lru(entry);
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 		while (put_count--)
 			kref_put(&entry->list_kref, ttm_bo_ref_bug);
 		BUG_ON(ret);
 		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
 		ttm_bo_unreserve(entry);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 	}
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	return 0;
 }
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
+	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
 
@@ -1204,13 +1248,13 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
 
-		spin_lock(&bdev->lru_lock);
+		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
 			drm_mm_takedown(&man->manager);
 		else
 			ret = -EBUSY;
-		spin_unlock(&bdev->lru_lock);
+		spin_unlock(&glob->lru_lock);
 	}
 
 	return ret;
@@ -1284,11 +1328,82 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 }
 EXPORT_SYMBOL(ttm_bo_init_mm);
 
+static void ttm_bo_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_bo_global *glob =
+		container_of(kobj, struct ttm_bo_global, kobj);
+
+	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
+	__free_page(glob->dummy_read_page);
+	kfree(glob);
+}
+
+void ttm_bo_global_release(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global *glob = ref->object;
+
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
+}
+EXPORT_SYMBOL(ttm_bo_global_release);
+
+int ttm_bo_global_init(struct ttm_global_reference *ref)
+{
+	struct ttm_bo_global_ref *bo_ref =
+		container_of(ref, struct ttm_bo_global_ref, ref);
+	struct ttm_bo_global *glob = ref->object;
+	int ret;
+
+	mutex_init(&glob->device_list_mutex);
+	spin_lock_init(&glob->lru_lock);
+	glob->mem_glob = bo_ref->mem_glob;
+	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+	if (unlikely(glob->dummy_read_page == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_drp;
+	}
+
+	INIT_LIST_HEAD(&glob->swap_lru);
+	INIT_LIST_HEAD(&glob->device_list);
+
+	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+	if (unlikely(ret != 0)) {
+		printk(KERN_ERR TTM_PFX
+		       "Could not register buffer object swapout.\n");
+		goto out_no_shrink;
+	}
+
+	glob->ttm_bo_extra_size =
+		ttm_round_pot(sizeof(struct ttm_tt)) +
+		ttm_round_pot(sizeof(struct ttm_backend));
+
+	glob->ttm_bo_size = glob->ttm_bo_extra_size +
+		ttm_round_pot(sizeof(struct ttm_buffer_object));
+
+	atomic_set(&glob->bo_count, 0);
+
+	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
+	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	if (unlikely(ret != 0))
+		kobject_put(&glob->kobj);
+	return ret;
+out_no_shrink:
+	__free_page(glob->dummy_read_page);
+out_no_drp:
+	kfree(glob);
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_global_init);
+
+
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
 	int ret = 0;
 	unsigned i = TTM_NUM_MEM_TYPES;
 	struct ttm_mem_type_manager *man;
+	struct ttm_bo_global *glob = bdev->glob;
 
 	while (i--) {
 		man = &bdev->man[i];
@@ -1304,100 +1419,74 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		}
 	}
 
+	mutex_lock(&glob->device_list_mutex);
+	list_del(&bdev->device_list);
+	mutex_unlock(&glob->device_list_mutex);
+
 	if (!cancel_delayed_work(&bdev->wq))
 		flush_scheduled_work();
 
 	while (ttm_bo_delayed_delete(bdev, true))
 		;
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	if (list_empty(&bdev->ddestroy))
 		TTM_DEBUG("Delayed destroy list was clean\n");
 
 	if (list_empty(&bdev->man[0].lru))
 		TTM_DEBUG("Swap list was clean\n");
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
-	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
 	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
 	write_lock(&bdev->vm_lock);
 	drm_mm_takedown(&bdev->addr_space_mm);
 	write_unlock(&bdev->vm_lock);
 
-	__free_page(bdev->dummy_read_page);
-
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_release);
 
-/*
- * This function is intended to be called on drm driver load.
- * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing ttm_bo_driver_finish in lastclose.
- * (This may happen on X server restart).
- */
-
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
-		       struct ttm_mem_global *mem_glob,
-		       struct ttm_bo_driver *driver, uint64_t file_page_offset,
+		       struct ttm_bo_global *glob,
+		       struct ttm_bo_driver *driver,
+		       uint64_t file_page_offset,
 		       bool need_dma32)
 {
 	int ret = -EINVAL;
 
-	bdev->dummy_read_page = NULL;
 	rwlock_init(&bdev->vm_lock);
-	spin_lock_init(&bdev->lru_lock);
-
 	bdev->driver = driver;
-	bdev->mem_glob = mem_glob;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
 
-	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-	if (unlikely(bdev->dummy_read_page == NULL)) {
-		ret = -ENOMEM;
-		goto out_err0;
-	}
-
 	/*
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
 	if (unlikely(ret != 0))
-		goto out_err1;
+		goto out_no_sys;
 
 	bdev->addr_space_rb = RB_ROOT;
 	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
 	if (unlikely(ret != 0))
-		goto out_err2;
+		goto out_no_addr_mm;
 
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	bdev->nice_mode = true;
 	INIT_LIST_HEAD(&bdev->ddestroy);
-	INIT_LIST_HEAD(&bdev->swap_lru);
 	bdev->dev_mapping = NULL;
+	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 
-	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
-	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
-	if (unlikely(ret != 0)) {
-		printk(KERN_ERR TTM_PFX
-		       "Could not register buffer object swapout.\n");
-		goto out_err2;
-	}
-
-	bdev->ttm_bo_extra_size =
-		ttm_round_pot(sizeof(struct ttm_tt)) +
-		ttm_round_pot(sizeof(struct ttm_backend));
-
-	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
-		ttm_round_pot(sizeof(struct ttm_buffer_object));
+	mutex_lock(&glob->device_list_mutex);
+	list_add_tail(&bdev->device_list, &glob->device_list);
+	mutex_unlock(&glob->device_list_mutex);
 
 	return 0;
-out_err2:
+out_no_addr_mm:
 	ttm_bo_clean_mm(bdev, 0);
-out_err1:
-	__free_page(bdev->dummy_read_page);
-out_err0:
+out_no_sys:
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_device_init);
@@ -1647,21 +1736,21 @@ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 {
-	struct ttm_bo_device *bdev =
-	    container_of(shrink, struct ttm_bo_device, shrink);
+	struct ttm_bo_global *glob =
+	    container_of(shrink, struct ttm_bo_global, shrink);
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY;
 	int put_count;
 	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
 
-	spin_lock(&bdev->lru_lock);
+	spin_lock(&glob->lru_lock);
 	while (ret == -EBUSY) {
-		if (unlikely(list_empty(&bdev->swap_lru))) {
-			spin_unlock(&bdev->lru_lock);
+		if (unlikely(list_empty(&glob->swap_lru))) {
+			spin_unlock(&glob->lru_lock);
 			return -EBUSY;
 		}
 
-		bo = list_first_entry(&bdev->swap_lru,
+		bo = list_first_entry(&glob->swap_lru,
 				      struct ttm_buffer_object, swap);
 		kref_get(&bo->list_kref);
 
@@ -1673,16 +1762,16 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 		if (unlikely(ret == -EBUSY)) {
-			spin_unlock(&bdev->lru_lock);
+			spin_unlock(&glob->lru_lock);
 			ttm_bo_wait_unreserved(bo, false);
 			kref_put(&bo->list_kref, ttm_bo_release_list);
-			spin_lock(&bdev->lru_lock);
+			spin_lock(&glob->lru_lock);
 		}
 	}
 
 	BUG_ON(ret != 0);
 	put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&bdev->lru_lock);
+	spin_unlock(&glob->lru_lock);
 
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
@@ -1736,6 +1825,6 @@ out:
 
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-	while (ttm_bo_swapout(&bdev->shrink) == 0)
+	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ad4ada07c6c..c70927ecda2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -41,9 +41,9 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 	struct ttm_mem_reg *old_mem = &bo->mem;
 
 	if (old_mem->mm_node) {
-		spin_lock(&bo->bdev->lru_lock);
+		spin_lock(&bo->glob->lru_lock);
 		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->bdev->lru_lock);
+		spin_unlock(&bo->glob->lru_lock);
 	}
 	old_mem->mm_node = NULL;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33de7637c0c..1c040d04033 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -228,7 +228,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
 	vma->vm_private_data = NULL;
 }
 
-static struct vm_operations_struct ttm_bo_vm_ops = {
+static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.fault = ttm_bo_vm_fault,
 	.open = ttm_bo_vm_open,
 	.close = ttm_bo_vm_close
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
index 0b14eb1972b..b17007178a3 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/ttm/ttm_global.c
@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
 	mutex_lock(&item->mutex);
 	if (item->refcount == 0) {
-		item->object = kmalloc(ref->size, GFP_KERNEL);
+		item->object = kzalloc(ref->size, GFP_KERNEL);
 		if (unlikely(item->object == NULL)) {
 			ret = -ENOMEM;
 			goto out_err;
@@ -82,14 +82,13 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
 		if (unlikely(ret != 0))
 			goto out_err;
 
-		++item->refcount;
 	}
+	++item->refcount;
 	ref->object = item->object;
 	object = item->object;
 	mutex_unlock(&item->mutex);
 	return 0;
 out_err:
-	kfree(item->object);
 	mutex_unlock(&item->mutex);
 	item->object = NULL;
 	return ret;
@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
 	BUG_ON(ref->object != item->object);
 	if (--item->refcount == 0) {
 		ref->release(ref);
-		kfree(item->object);
 		item->object = NULL;
 	}
 	mutex_unlock(&item->mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 87323d4ff68..072c281a6bb 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -26,15 +26,180 @@
 **************************************************************************/
 
 #include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 
-#define TTM_PFX "[TTM] "
 #define TTM_MEMORY_ALLOC_RETRIES 4
 
+struct ttm_mem_zone {
+	struct kobject kobj;
+	struct ttm_mem_global *glob;
+	const char *name;
+	uint64_t zone_mem;
+	uint64_t emer_mem;
+	uint64_t max_mem;
+	uint64_t swap_limit;
+	uint64_t used_mem;
+};
+
+static struct attribute ttm_mem_sys = {
+	.name = "zone_memory",
+	.mode = S_IRUGO
+};
+static struct attribute ttm_mem_emer = {
+	.name = "emergency_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_max = {
+	.name = "available_memory",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_swap = {
+	.name = "swap_limit",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_mem_used = {
+	.name = "used_memory",
+	.mode = S_IRUGO
+};
+
+static void ttm_mem_zone_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+
+	printk(KERN_INFO TTM_PFX
+	       "Zone %7s: Used memory at exit: %llu kiB.\n",
+	       zone->name, (unsigned long long) zone->used_mem >> 10);
+	kfree(zone);
+}
+
+static ssize_t ttm_mem_zone_show(struct kobject *kobj,
+				 struct attribute *attr,
+				 char *buffer)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	uint64_t val = 0;
+
+	spin_lock(&zone->glob->lock);
+	if (attr == &ttm_mem_sys)
+		val = zone->zone_mem;
+	else if (attr == &ttm_mem_emer)
+		val = zone->emer_mem;
+	else if (attr == &ttm_mem_max)
+		val = zone->max_mem;
+	else if (attr == &ttm_mem_swap)
+		val = zone->swap_limit;
+	else if (attr == &ttm_mem_used)
+		val = zone->used_mem;
+	spin_unlock(&zone->glob->lock);
+
+	return snprintf(buffer, PAGE_SIZE, "%llu\n",
+			(unsigned long long) val >> 10);
+}
+
+static void ttm_check_swapping(struct ttm_mem_global *glob);
+
+static ssize_t ttm_mem_zone_store(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buffer,
+				  size_t size)
+{
+	struct ttm_mem_zone *zone =
+		container_of(kobj, struct ttm_mem_zone, kobj);
+	int chars;
+	unsigned long val;
+	uint64_t val64;
+
+	chars = sscanf(buffer, "%lu", &val);
+	if (chars == 0)
+		return size;
+
+	val64 = val;
+	val64 <<= 10;
+
+	spin_lock(&zone->glob->lock);
+	if (val64 > zone->zone_mem)
+		val64 = zone->zone_mem;
+	if (attr == &ttm_mem_emer) {
+		zone->emer_mem = val64;
+		if (zone->max_mem > val64)
+			zone->max_mem = val64;
+	} else if (attr == &ttm_mem_max) {
+		zone->max_mem = val64;
+		if (zone->emer_mem < val64)
+			zone->emer_mem = val64;
+	} else if (attr == &ttm_mem_swap)
+		zone->swap_limit = val64;
+	spin_unlock(&zone->glob->lock);
+
+	ttm_check_swapping(zone->glob);
+
+	return size;
+}
+
+static struct attribute *ttm_mem_zone_attrs[] = {
+	&ttm_mem_sys,
+	&ttm_mem_emer,
+	&ttm_mem_max,
+	&ttm_mem_swap,
+	&ttm_mem_used,
+	NULL
+};
+
+static struct sysfs_ops ttm_mem_zone_ops = {
+	.show = &ttm_mem_zone_show,
+	.store = &ttm_mem_zone_store
+};
+
+static struct kobj_type ttm_mem_zone_kobj_type = {
+	.release = &ttm_mem_zone_kobj_release,
+	.sysfs_ops = &ttm_mem_zone_ops,
+	.default_attrs = ttm_mem_zone_attrs,
+};
+
+static void ttm_mem_global_kobj_release(struct kobject *kobj)
+{
+	struct ttm_mem_global *glob =
+		container_of(kobj, struct ttm_mem_global, kobj);
+
+	kfree(glob);
+}
+
+static struct kobj_type ttm_mem_glob_kobj_type = {
+	.release = &ttm_mem_global_kobj_release,
+};
+
+static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
+					bool from_wq, uint64_t extra)
+{
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+	uint64_t target;
+
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+
+		if (from_wq)
+			target = zone->swap_limit;
+		else if (capable(CAP_SYS_ADMIN))
+			target = zone->emer_mem;
+		else
+			target = zone->max_mem;
+
+		target = (extra > target) ? 0ULL : target;
+
+		if (zone->used_mem > target)
+			return true;
+	}
+	return false;
+}
+
 /**
  * At this point we only support a single shrink callback.
  * Extend this if needed, perhaps using a linked list of callbacks.
@@ -42,34 +207,17 @@
  * many threads may try to swap out at any given time.
  */
 
-static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
+static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 		       uint64_t extra)
 {
 	int ret;
 	struct ttm_mem_shrink *shrink;
-	uint64_t target;
-	uint64_t total_target;
 
 	spin_lock(&glob->lock);
 	if (glob->shrink == NULL)
 		goto out;
 
-	if (from_workqueue) {
-		target = glob->swap_limit;
-		total_target = glob->total_memory_swap_limit;
-	} else if (capable(CAP_SYS_ADMIN)) {
-		total_target = glob->emer_total_memory;
-		target = glob->emer_memory;
-	} else {
-		total_target = glob->max_total_memory;
-		target = glob->max_memory;
-	}
-
-	total_target = (extra >= total_target) ? 0 : total_target - extra;
-	target = (extra >= target) ? 0 : target - extra;
-
-	while (glob->used_memory > target ||
-	       glob->used_total_memory > total_target) {
+	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
 		shrink = glob->shrink;
 		spin_unlock(&glob->lock);
 		ret = shrink->do_shrink(shrink);
@@ -81,6 +229,8 @@ out:
 	spin_unlock(&glob->lock);
 }
 
+
+
 static void ttm_shrink_work(struct work_struct *work)
 {
 	struct ttm_mem_global *glob =
@@ -89,63 +239,198 @@ static void ttm_shrink_work(struct work_struct *work)
 	ttm_shrink(glob, true, 0ULL);
 }
 
+static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+				    const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+	int ret;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram - si->totalhigh;
+	mem *= si->mem_unit;
+
+	zone->name = "kernel";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_kernel = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	if (unlikely(ret != 0)) {
+		kobject_put(&zone->kobj);
+		return ret;
+	}
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+
+#ifdef CONFIG_HIGHMEM
+static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
+				     const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+	int ret;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	if (si->totalhigh == 0)
+		return 0;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	zone->name = "highmem";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_highmem = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	if (unlikely(ret != 0)) {
+		kobject_put(&zone->kobj);
+		return ret;
+	}
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+#else
+static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+				   const struct sysinfo *si)
+{
+	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	uint64_t mem;
+	int ret;
+
+	if (unlikely(!zone))
+		return -ENOMEM;
+
+	mem = si->totalram;
+	mem *= si->mem_unit;
+
+	/**
+	 * No special dma32 zone needed.
+	 */
+
+	if (mem <= ((uint64_t) 1ULL << 32))
+		return 0;
+
+	/*
+	 * Limit max dma32 memory to 4GB for now
+	 * until we can figure out how big this
+	 * zone really is.
+	 */
+
+	mem = ((uint64_t) 1ULL << 32);
+	zone->name = "dma32";
+	zone->zone_mem = mem;
+	zone->max_mem = mem >> 1;
+	zone->emer_mem = (mem >> 1) + (mem >> 2);
+	zone->swap_limit = zone->max_mem - (mem >> 3);
+	zone->used_mem = 0;
+	zone->glob = glob;
+	glob->zone_dma32 = zone;
+	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
+	ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+	if (unlikely(ret != 0)) {
+		kobject_put(&zone->kobj);
+		return ret;
+	}
+	glob->zones[glob->num_zones++] = zone;
+	return 0;
+}
+#endif
+
 int ttm_mem_global_init(struct ttm_mem_global *glob)
 {
 	struct sysinfo si;
-	uint64_t mem;
+	int ret;
+	int i;
+	struct ttm_mem_zone *zone;
 
 	spin_lock_init(&glob->lock);
 	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
 	INIT_WORK(&glob->work, ttm_shrink_work);
 	init_waitqueue_head(&glob->queue);
+	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
+	ret = kobject_add(&glob->kobj,
+			  ttm_get_kobj(),
+			  "memory_accounting");
+	if (unlikely(ret != 0)) {
+		kobject_put(&glob->kobj);
+		return ret;
+	}
 
 	si_meminfo(&si);
 
-	mem = si.totalram - si.totalhigh;
-	mem *= si.mem_unit;
-
-	glob->max_memory = mem >> 1;
-	glob->emer_memory = (mem >> 1) + (mem >> 2);
-	glob->swap_limit = glob->max_memory - (mem >> 3);
-	glob->used_memory = 0;
-	glob->used_total_memory = 0;
-	glob->shrink = NULL;
-
-	mem = si.totalram;
-	mem *= si.mem_unit;
-
-	glob->max_total_memory = mem >> 1;
-	glob->emer_total_memory = (mem >> 1) + (mem >> 2);
-
-	glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
-
-	printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
-	       glob->max_total_memory >> 20);
-	printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
-	       glob->max_memory >> 20);
-
+	ret = ttm_mem_init_kernel_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#ifdef CONFIG_HIGHMEM
+	ret = ttm_mem_init_highmem_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#else
+	ret = ttm_mem_init_dma32_zone(glob, &si);
+	if (unlikely(ret != 0))
+		goto out_no_zone;
+#endif
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		printk(KERN_INFO TTM_PFX
+		       "Zone %7s: Available graphics memory: %llu kiB.\n",
+		       zone->name, (unsigned long long) zone->max_mem >> 10);
+	}
 	return 0;
+out_no_zone:
+	ttm_mem_global_release(glob);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_mem_global_init);
 
 void ttm_mem_global_release(struct ttm_mem_global *glob)
 {
-	printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
-	       (unsigned long long)glob->used_total_memory);
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		kobject_del(&zone->kobj);
+		kobject_put(&zone->kobj);
+	}
+	kobject_del(&glob->kobj);
+	kobject_put(&glob->kobj);
 }
 EXPORT_SYMBOL(ttm_mem_global_release);
 
-static inline void ttm_check_swapping(struct ttm_mem_global *glob)
+static void ttm_check_swapping(struct ttm_mem_global *glob)
 {
-	bool needs_swapping;
+	bool needs_swapping = false;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
 
 	spin_lock(&glob->lock);
-	needs_swapping = (glob->used_memory > glob->swap_limit ||
-			  glob->used_total_memory >
-			  glob->total_memory_swap_limit);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (zone->used_mem > zone->swap_limit) {
+			needs_swapping = true;
+			break;
+		}
+	}
+
 	spin_unlock(&glob->lock);
 
 	if (unlikely(needs_swapping))
@@ -153,44 +438,60 @@ static inline void ttm_check_swapping(struct ttm_mem_global *glob)
 }
 
-void ttm_mem_global_free(struct ttm_mem_global *glob,
-			 uint64_t amount, bool himem)
+static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t amount)
 {
+	unsigned int i;
+	struct ttm_mem_zone *zone;
+
 	spin_lock(&glob->lock);
-	glob->used_total_memory -= amount;
-	if (!himem)
-		glob->used_memory -= amount;
-	wake_up_all(&glob->queue);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
+		zone->used_mem -= amount;
+	}
 	spin_unlock(&glob->lock);
 }
 
+void ttm_mem_global_free(struct ttm_mem_global *glob,
+			 uint64_t amount)
+{
+	return ttm_mem_global_free_zone(glob, NULL, amount);
+}
+
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
-				  uint64_t amount, bool himem, bool reserve)
+				  struct ttm_mem_zone *single_zone,
+				  uint64_t amount, bool reserve)
 {
 	uint64_t limit;
-	uint64_t lomem_limit;
 	int ret = -ENOMEM;
+	unsigned int i;
+	struct ttm_mem_zone *zone;
 
 	spin_lock(&glob->lock);
+	for (i = 0; i < glob->num_zones; ++i) {
+		zone = glob->zones[i];
+		if (single_zone && zone != single_zone)
+			continue;
 
-	if (capable(CAP_SYS_ADMIN)) {
-		limit = glob->emer_total_memory;
-		lomem_limit = glob->emer_memory;
-	} else {
-		limit = glob->max_total_memory;
-		lomem_limit = glob->max_memory;
-	}
+		limit = (capable(CAP_SYS_ADMIN)) ?
+			zone->emer_mem : zone->max_mem;
 
-	if (unlikely(glob->used_total_memory + amount > limit))
-		goto out_unlock;
-	if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
-		goto out_unlock;
+		if (zone->used_mem > limit)
+			goto out_unlock;
+	}
 
 	if (reserve) {
-		glob->used_total_memory += amount;
-		if (!himem)
-			glob->used_memory += amount;
+		for (i = 0; i < glob->num_zones; ++i) {
+			zone = glob->zones[i];
+			if (single_zone && zone != single_zone)
+				continue;
+			zone->used_mem += amount;
+		}
 	}
+
 	ret = 0;
 out_unlock:
 	spin_unlock(&glob->lock);
@@ -199,12 +500,17 @@ out_unlock:
 	return ret;
 }
 
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
-			 bool no_wait, bool interruptible, bool himem)
+
+static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
+				     struct ttm_mem_zone *single_zone,
+				     uint64_t memory,
+				     bool no_wait, bool interruptible)
 {
 	int count = TTM_MEMORY_ALLOC_RETRIES;
 
-	while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
+	while (unlikely(ttm_mem_global_reserve(glob,
+					       single_zone,
+					       memory, true)
 			!= 0)) {
 		if (no_wait)
 			return -ENOMEM;
@@ -216,6 +522,56 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 	return 0;
 }
 
+int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+			 bool no_wait, bool interruptible)
+{
+	/**
+	 * Normal allocations of kernel memory are registered in
+	 * all zones.
+	 */
+
+	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+					 interruptible);
+}
+
+int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+			      struct page *page,
+			      bool no_wait, bool interruptible)
+{
+
+	struct ttm_mem_zone *zone = NULL;
+
+	/**
+	 * Page allocations may be registed in a single zone
+	 * only if highmem or !dma32.
+	 */
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
+					 interruptible);
+}
+
+void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
+{
+	struct ttm_mem_zone *zone = NULL;
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page) && glob->zone_highmem != NULL)
+		zone = glob->zone_highmem;
+#else
+	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
+		zone = glob->zone_kernel;
+#endif
+	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
+}
+
+
 size_t ttm_round_pot(size_t size)
 {
 	if ((size & (size - 1)) == 0)
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 59ce8191d58..9a6edbfeaa9 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -29,16 +29,72 @@
  *          Jerome Glisse
  */
 #include <linux/module.h>
-#include <ttm/ttm_module.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include "ttm/ttm_module.h"
+#include "drm_sysfs.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(exit_q);
+atomic_t device_released;
+
+static struct device_type ttm_drm_class_type = {
+	.name = "ttm",
+	/**
+	 * Add pm ops here.
+	 */
+};
+
+static void ttm_drm_class_device_release(struct device *dev)
+{
+	atomic_set(&device_released, 1);
+	wake_up_all(&exit_q);
+}
+
+static struct device ttm_drm_class_device = {
+	.type = &ttm_drm_class_type,
+	.release = &ttm_drm_class_device_release
+};
+
+struct kobject *ttm_get_kobj(void)
+{
+	struct kobject *kobj = &ttm_drm_class_device.kobj;
+	BUG_ON(kobj == NULL);
+	return kobj;
+}
 
 static int __init ttm_init(void)
 {
+	int ret;
+
+	ret = dev_set_name(&ttm_drm_class_device, "ttm");
+	if (unlikely(ret != 0))
+		return ret;
+
 	ttm_global_init();
+
+	atomic_set(&device_released, 0);
+	ret = drm_class_device_register(&ttm_drm_class_device);
+	if (unlikely(ret != 0))
+		goto out_no_dev_reg;
 	return 0;
+out_no_dev_reg:
+	atomic_set(&device_released, 1);
+	wake_up_all(&exit_q);
+	ttm_global_release();
+	return ret;
 }
 
 static void __exit ttm_exit(void)
 {
+	drm_class_device_unregister(&ttm_drm_class_device);
+
+	/**
+	 * Refuse to unload until the TTM device is released.
+	 * Not sure this is 100% needed.
+	 */
+
+	wait_event(exit_q, atomic_read(&device_released) == 1);
 	ttm_global_release();
 }
 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index b8b6c4a5f98..a55ee1a56c1 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -34,76 +34,13 @@
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/swap.h>
+#include "drm_cache.h"
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
-#if defined(CONFIG_X86)
-static void ttm_tt_clflush_page(struct page *page)
-{
-	uint8_t *page_virtual;
-	unsigned int i;
-
-	if (unlikely(page == NULL))
-		return;
-
-	page_virtual = kmap_atomic(page, KM_USER0);
-
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-		clflush(page_virtual + i);
-
-	kunmap_atomic(page_virtual, KM_USER0);
-}
-
-static void ttm_tt_cache_flush_clflush(struct page *pages[],
-				       unsigned long num_pages)
-{
-	unsigned long i;
-
-	mb();
-	for (i = 0; i < num_pages; ++i)
-		ttm_tt_clflush_page(*pages++);
-	mb();
-}
-#elif !defined(__powerpc__)
-static void ttm_tt_ipi_handler(void *null)
-{
-	;
-}
-#endif
-
-void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
-{
-
-#if defined(CONFIG_X86)
-	if (cpu_has_clflush) {
-		ttm_tt_cache_flush_clflush(pages, num_pages);
-		return;
-	}
-#elif defined(__powerpc__)
-	unsigned long i;
-
-	for (i = 0; i < num_pages; ++i) {
-		struct page *page = pages[i];
-		void *page_virtual;
-
-		if (unlikely(page == NULL))
-			continue;
-
-		page_virtual = kmap_atomic(page, KM_USER0);
-		flush_dcache_range((unsigned long) page_virtual,
-				   (unsigned long) page_virtual + PAGE_SIZE);
-		kunmap_atomic(page_virtual, KM_USER0);
-	}
-#else
-	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
-		printk(KERN_ERR TTM_PFX
-		       "Timed out waiting for drm cache flush.\n");
-#endif
-}
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  *
@@ -179,7 +116,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 			set_page_dirty_lock(page);
 
 		ttm->pages[i] = NULL;
-		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
+		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
 		put_page(page);
 	}
 	ttm->state = tt_unpopulated;
@@ -190,8 +127,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
-	struct ttm_bo_device *bdev = ttm->bdev;
-	struct ttm_mem_global *mem_glob = bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
@@ -200,21 +136,14 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 		if (!p)
 			return NULL;
 
-		if (PageHighMem(p)) {
-			ret =
-			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-						 false, false, true);
-			if (unlikely(ret != 0))
-				goto out_err;
+		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+		if (unlikely(ret != 0))
+			goto out_err;
+
+		if (PageHighMem(p))
 			ttm->pages[--ttm->first_himem_page] = p;
-		} else {
-			ret =
-			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
-						 false, false, false);
-			if (unlikely(ret != 0))
-				goto out_err;
+		else
 			ttm->pages[++ttm->last_lomem_page] = p;
-		}
 	}
 	return p;
 out_err:
@@ -310,7 +239,7 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	}
 
 	if (ttm->caching_state == tt_cached)
-		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
+		drm_clflush_pages(ttm->pages, ttm->num_pages);
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		cur_page = ttm->pages[i];
@@ -368,8 +297,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 				printk(KERN_ERR TTM_PFX
 				       "Erroneous page count. "
 				       "Leaking pages.\n");
-			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
-					    PageHighMem(cur_page));
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 cur_page);
 			__free_page(cur_page);
 		}
 	}
@@ -414,7 +343,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
 	struct mm_struct *mm = tsk->mm;
 	int ret;
 	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
-	struct ttm_mem_global *mem_glob = ttm->bdev->mem_glob;
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 
 	BUG_ON(num_pages != ttm->num_pages);
 	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
@@ -424,7 +353,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
 	 */
 
 	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
-				   false, false, false);
+				   false, false);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -435,7 +364,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
 
 	if (ret != num_pages && write) {
 		ttm_tt_free_user_pages(ttm);
-		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
+		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
 		return -ENOMEM;
 	}
 
@@ -459,8 +388,7 @@ struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 	if (!ttm)
 		return NULL;
 
-	ttm->bdev = bdev;
-
+	ttm->glob = bdev->glob;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	ttm->first_himem_page = ttm->num_pages;
 	ttm->last_lomem_page = -1;
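
The ttm_memory.c rewrite above replaces the pair of flat counters (lowmem plus total memory) with an array of accounting zones (kernel, plus highmem or dma32 depending on the configuration), each carrying its own admission, emergency, and swap thresholds and exposed as a kobject under the new memory_accounting sysfs directory. The resulting policy is simple: a reservation must fit in every zone it is registered against, and the swapout worker is kicked as soon as any single zone crosses its swap limit. Here is a standalone userspace model of that policy, with made-up zone sizes and a slightly stricter admission test than the kernel code (which compares used_mem against the limit before adding the new amount):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct zone {
		const char *name;
		uint64_t max_mem;	/* admission limit */
		uint64_t swap_limit;	/* start swapping above this */
		uint64_t used_mem;
	};

	/* Simplified ttm_mem_global_reserve(): all-or-nothing across zones. */
	static bool reserve(struct zone *zones, int n, uint64_t amount)
	{
		for (int i = 0; i < n; ++i)
			if (zones[i].used_mem + amount > zones[i].max_mem)
				return false;
		for (int i = 0; i < n; ++i)
			zones[i].used_mem += amount;
		return true;
	}

	/* Mirrors ttm_check_swapping(): any one zone over its limit triggers it. */
	static bool needs_swapping(const struct zone *zones, int n)
	{
		for (int i = 0; i < n; ++i)
			if (zones[i].used_mem > zones[i].swap_limit)
				return true;
		return false;
	}

	int main(void)
	{
		struct zone zones[] = {
			{ "kernel", 512ULL << 20, 448ULL << 20, 0 },
			{ "dma32", 2048ULL << 20, 1792ULL << 20, 0 },
		};

		/* The smaller kernel zone is what ends up throttling here. */
		while (reserve(zones, 2, 64ULL << 20))
			printf("reserved 64 MiB, swapout needed: %s\n",
			       needs_swapping(zones, 2) ? "yes" : "no");
		return 0;
	}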