author     Ingo Molnar <mingo@kernel.org>  2012-04-14 13:18:27 +0200
committer  Ingo Molnar <mingo@kernel.org>  2012-04-14 13:19:04 +0200
commit     6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree       021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /drivers/gpu/drm/ttm/ttm_page_alloc.c
parent     682968e0c425c60f0dde37977e5beb2b12ddc4cc (diff)
parent     a385ec4f11bdcf81af094c03e2444ee9b7fad2e5 (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree),
to prepare for tooling changes, and also to pick up v3.4 MM
changes that the uprobes code needs to take care of.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--   drivers/gpu/drm/ttm/ttm_page_alloc.c   55
1 file changed, 19 insertions, 36 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 499debda791..ebc6fac96e3 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -30,6 +30,9 @@
  * - Use page->lru to keep a free list
  * - doesn't track currently in use pages
  */
+
+#define pr_fmt(fmt) "[TTM] " fmt
+
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/highmem.h>
@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR TTM_PFX
-			       "Setting allocation size to %lu "
-			       "is not allowed. Recommended size is "
-			       "%lu\n",
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING TTM_PFX
-			       "Setting allocation size to "
-			       "larger than %lu is not recommended.\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
 	unsigned i;
 	if (set_pages_array_wb(pages, npages))
-		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
-				npages);
+		pr_err("Failed to set %d pages to wb!\n", npages);
 	for (i = 0; i < npages; ++i)
 		__free_page(pages[i]);
 }
@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 	if (!pages_to_free) {
-		printk(KERN_ERR TTM_PFX
-		       "Failed to allocate memory for pool free operation.\n");
+		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
 	}
@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
 	case tt_uncached:
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			printk(KERN_ERR TTM_PFX
-			       "Failed to set %d pages to uc!\n",
-			       cpages);
+			pr_err("Failed to set %d pages to uc!\n", cpages);
 		break;
 	case tt_wc:
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			printk(KERN_ERR TTM_PFX
-			       "Failed to set %d pages to wc!\n",
-			       cpages);
+			pr_err("Failed to set %d pages to wc!\n", cpages);
 		break;
 	default:
 		break;
@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 	if (!caching_array) {
-		printk(KERN_ERR TTM_PFX
-		       "Unable to allocate table for new pages.");
+		pr_err("Unable to allocate table for new pages\n");
 		return -ENOMEM;
 	}
@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 		p = alloc_page(gfp_flags);
 		if (!p) {
-			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
+			pr_err("Unable to get page %u\n", i);
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 			++pool->nrefills;
 			pool->npages += alloc_size;
 		} else {
-			printk(KERN_ERR TTM_PFX
-			       "Failed to fill pool (%p).", pool);
+			pr_err("Failed to fill pool (%p)\n", pool);
 			/* If we have any pages left put them to the pool. */
 			list_for_each_entry(p, &pool->list, lru) {
 				++cpages;
@@ -675,9 +665,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		for (i = 0; i < npages; i++) {
 			if (pages[i]) {
 				if (page_count(pages[i]) != 1)
-					printk(KERN_ERR TTM_PFX
-					       "Erroneous page count. "
-					       "Leaking pages.\n");
+					pr_err("Erroneous page count. Leaking pages.\n");
 				__free_page(pages[i]);
 				pages[i] = NULL;
 			}
@@ -689,9 +677,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		for (i = 0; i < npages; i++) {
 			if (pages[i]) {
 				if (page_count(pages[i]) != 1)
-					printk(KERN_ERR TTM_PFX
-					       "Erroneous page count. "
-					       "Leaking pages.\n");
+					pr_err("Erroneous page count. Leaking pages.\n");
 				list_add_tail(&pages[i]->lru, &pool->list);
 				pages[i] = NULL;
 				pool->npages++;
@@ -740,8 +726,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 			p = alloc_page(gfp_flags);
 			if (!p) {
-				printk(KERN_ERR TTM_PFX
-				       "Unable to allocate page.");
+				pr_err("Unable to allocate page\n");
 				return -ENOMEM;
 			}
@@ -781,9 +766,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		if (r) {
 			/* If there is any pages in the list put them back to
			 * the pool. */
-			printk(KERN_ERR TTM_PFX
-			       "Failed to allocate extra pages "
-			       "for large request.");
+			pr_err("Failed to allocate extra pages for large request\n");
 			ttm_put_pages(pages, count, flags, cstate);
 			return r;
 		}
@@ -809,7 +792,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	WARN_ON(_manager);
-	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
+	pr_info("Initializing pool allocator\n");
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
@@ -844,7 +827,7 @@ void ttm_page_alloc_fini(void)
 {
 	int i;
-	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
+	pr_info("Finalizing pool allocator\n");
 	ttm_pool_mm_shrink_fini(_manager);
 	for (i = 0; i < NUM_POOLS; ++i)
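
The pattern this diff picks up is the kernel's pr_fmt()/pr_<level>() convention: defining pr_fmt() at the top of the file, before any header that pulls in <linux/printk.h>, makes every pr_err()/pr_warn()/pr_info() call prepend the "[TTM] " prefix at compile time, so the old TTM_PFX string no longer has to be repeated at each call site. As a rough illustration only, here is a minimal userspace sketch of how that composition works; the fprintf-based pr_err()/pr_warn() stand-ins are assumptions made for the sake of a self-contained example and are not the kernel's definitions.

/*
 * Userspace sketch of the pr_fmt()/pr_<level>() pattern the diff switches to.
 * The fprintf-based pr_err()/pr_warn() below are stand-ins for the kernel
 * macros, used here only to make the example compile and run on its own.
 */
#include <stdio.h>

/* Define pr_fmt() before the pr_* helpers, mirroring the kernel convention. */
#define pr_fmt(fmt) "[TTM] " fmt

#define pr_err(fmt, ...)  fprintf(stderr, "err: "  pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, "warn: " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	int npages = 16;

	/* Prints: err: [TTM] Failed to set 16 pages to wb! */
	pr_err("Failed to set %d pages to wb!\n", npages);

	/* Prints: warn: [TTM] Setting allocation size to larger than 4096 is not recommended */
	pr_warn("Setting allocation size to larger than %lu is not recommended\n",
		4096UL);
	return 0;
}

In the kernel proper, pr_err(fmt, ...) of this era expands to printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__); since <linux/printk.h> only supplies a default pr_fmt() when none is defined, the #define pr_fmt(fmt) "[TTM] " fmt added above the #include block in ttm_page_alloc.c takes effect for every message in the file.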