Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	72
1 file changed, 55 insertions, 17 deletions
diff --git a/mm/slob.c b/mm/slob.c
index 06e5e725fab..b99b0ef2347 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -3,6 +3,8 @@
  *
  * Matt Mackall <mpm@selenic.com> 12/30/03
  *
+ * NUMA support by Paul Mundt, 2007.
+ *
  * How SLOB works:
  *
  * The core of SLOB is a traditional K&R style heap allocator, with
@@ -10,7 +12,7 @@
  * allocator is as little as 2 bytes, however typically most architectures
  * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
  *
- * The slob heap is a linked list of pages from __get_free_page, and
+ * The slob heap is a linked list of pages from alloc_pages(), and
  * within each page, there is a singly-linked list of free blocks (slob_t).
  * The heap is grown on demand and allocation from the heap is currently
  * first-fit.
@@ -18,7 +20,7 @@
  * Above this is an implementation of kmalloc/kfree. Blocks returned
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
- * __get_free_pages directly, allocating compound pages so the page order
+ * alloc_pages() directly, allocating compound pages so the page order
  * does not have to be separately tracked, and also stores the exact
  * allocation size in page->private so that it can be used to accurately
  * provide ksize(). These objects are detected in kfree() because slob_page()
@@ -29,10 +31,23 @@
  * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
  * case the low-level allocator will fragment blocks to create the proper
  * alignment. Again, objects of page-size or greater are allocated by
- * calling __get_free_pages. As SLAB objects know their size, no separate
+ * calling alloc_pages(). As SLAB objects know their size, no separate
  * size bookkeeping is necessary and there is essentially no allocation
  * space overhead, and compound pages aren't needed for multi-page
  * allocations.
+ *
+ * NUMA support in SLOB is fairly simplistic, pushing most of the real
+ * logic down to the page allocator, and simply doing the node accounting
+ * on the upper levels. In the event that a node id is explicitly
+ * provided, alloc_pages_node() with the specified node id is used
+ * instead. The common case (or when the node id isn't explicitly provided)
+ * will default to the current node, as per numa_node_id().
+ *
+ * Node aware pages are still inserted in to the global freelist, and
+ * these are scanned for by matching against the node id encoded in the
+ * page flags. As a result, block allocations that can be satisfied from
+ * the freelist will only be done so on pages residing on the same node,
+ * in order to prevent random node placement.
  */
 
 #include <linux/kernel.h>
@@ -204,6 +219,23 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
+static void *slob_new_page(gfp_t gfp, int order, int node)
+{
+	void *page;
+
+#ifdef CONFIG_NUMA
+	if (node != -1)
+		page = alloc_pages_node(node, gfp, order);
+	else
+#endif
+		page = alloc_pages(gfp, order);
+
+	if (!page)
+		return NULL;
+
+	return page_address(page);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
@@ -258,7 +290,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 /*
  * slob_alloc: entry point into the slob allocator.
  */
-static void *slob_alloc(size_t size, gfp_t gfp, int align)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
 	slob_t *b = NULL;
@@ -267,6 +299,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align)
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
 	list_for_each_entry(sp, &free_slob_pages, list) {
+#ifdef CONFIG_NUMA
+		/*
+		 * If there's a node specification, search for a partial
+		 * page with a matching node id in the freelist.
+		 */
+		if (node != -1 && page_to_nid(&sp->page) != node)
+			continue;
+#endif
+
 		if (sp->units >= SLOB_UNITS(size)) {
 			b = slob_page_alloc(sp, size, align);
 			if (b)
@@ -277,7 +318,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = (slob_t *)__get_free_page(gfp);
+		b = slob_new_page(gfp, 0, node);
 		if (!b)
 			return 0;
 		sp = (struct slob_page *)virt_to_page(b);
@@ -381,22 +422,20 @@ out:
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
 #endif
 
-
-void *__kmalloc(size_t size, gfp_t gfp)
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
 		unsigned int *m;
-		m = slob_alloc(size + align, gfp, align);
+		m = slob_alloc(size + align, gfp, align, node);
 		if (m)
 			*m = size;
 		return (void *)m + align;
 	} else {
 		void *ret;
 
-		ret = (void *) __get_free_pages(gfp | __GFP_COMP,
-						get_order(size));
+		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
@@ -405,7 +444,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
 		return ret;
 	}
 }
-EXPORT_SYMBOL(__kmalloc);
+EXPORT_SYMBOL(__kmalloc_node);
 
 /**
  * krealloc - reallocate memory. The contents will remain unchanged.
@@ -455,7 +494,6 @@ void kfree(const void *block)
 	} else
 		put_page(&sp->page);
 }
-
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
@@ -487,7 +525,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
 	struct kmem_cache *c;
 
-	c = slob_alloc(sizeof(struct kmem_cache), flags, 0);
+	c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
 
 	if (c) {
 		c->name = name;
@@ -517,21 +555,21 @@ void kmem_cache_destroy(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
-void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
+void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
 	if (c->size < PAGE_SIZE)
-		b = slob_alloc(c->size, flags, c->align);
+		b = slob_alloc(c->size, flags, c->align, node);
 	else
-		b = (void *)__get_free_pages(flags, get_order(c->size));
+		b = slob_new_page(flags, get_order(c->size), node);
 
 	if (c->ctor)
 		c->ctor(b, c, 0);
 
 	return b;
 }
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
 {
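
For context, here is a minimal sketch (not part of this patch) of how the pre-existing entry points could be kept working on top of the new node-aware allocators. The wrapper form, names, and placement are assumptions for illustration; the only detail taken from the diff above is the convention of passing -1 to mean "no node preference", as seen in the kmem_cache_create() hunk.

/*
 * Illustrative sketch only -- not taken from this patch.  The node-aware
 * allocators above treat node == -1 as "no preference" (the page allocator
 * then falls back to the current node), so the classic entry points can be
 * expressed as thin wrappers.  These would live alongside the existing slab
 * declarations; exact names and location here are assumptions.
 */
static inline void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
{
	return kmem_cache_alloc_node(c, flags, -1);
}

static inline void *__kmalloc(size_t size, gfp_t gfp)
{
	return __kmalloc_node(size, gfp, -1);
}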