Diffstat (limited to 'mm')
-rw-r--r--   mm/memory_hotplug.c |  2
-rw-r--r--   mm/page_alloc.c     | 47
-rw-r--r--   mm/shmem.c          |  2
-rw-r--r--   mm/slab.c           |  2
-rw-r--r--   mm/slub.c           | 52
-rw-r--r--   mm/sparse.c         | 53
6 files changed, 103 insertions, 55 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 84279127fcd..df9d554bea3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -65,7 +65,7 @@ static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	int zone_type;
 
 	zone_type = zone - pgdat->node_zones;
-	if (!populated_zone(zone)) {
+	if (!zone->wait_table) {
 		int ret = 0;
 		ret = init_currently_empty_zone(zone, phys_start_pfn,
 						nr_pages, MEMMAP_HOTPLUG);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8b000d6803c..bd8e33582d2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -136,6 +136,11 @@ static unsigned long __meminitdata dma_reserve;
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
+#if MAX_NUMNODES > 1
+int nr_node_ids __read_mostly = MAX_NUMNODES;
+EXPORT_SYMBOL(nr_node_ids);
+#endif
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -669,26 +674,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 	return i;
 }
 
-#if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
-EXPORT_SYMBOL(nr_node_ids);
-
-/*
- * Figure out the number of possible node ids.
- */
-static void __init setup_nr_node_ids(void)
-{
-	unsigned int node;
-	unsigned int highest = 0;
-
-	for_each_node_mask(node, node_possible_map)
-		highest = node;
-	nr_node_ids = highest + 1;
-}
-#else
-static void __init setup_nr_node_ids(void) {}
-#endif
-
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -2704,7 +2689,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		map = alloc_bootmem_node(pgdat, size);
 		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
 	}
-#ifdef CONFIG_FLATMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 	/*
 	 * With no DISCONTIG, the global mem_map is just set as node 0's
 	 */
@@ -2733,6 +2718,26 @@ void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
 }
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+
+#if MAX_NUMNODES > 1
+/*
+ * Figure out the number of possible node ids.
+ */
+static void __init setup_nr_node_ids(void)
+{
+	unsigned int node;
+	unsigned int highest = 0;
+
+	for_each_node_mask(node, node_possible_map)
+		highest = node;
+	nr_node_ids = highest + 1;
+}
+#else
+static inline void setup_nr_node_ids(void)
+{
+}
+#endif
+
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
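
Note: the page_alloc.c hunks above move the nr_node_ids definition ahead of its first users and export it, so other subsystems can size per-node data by the number of possible node ids instead of MAX_NUMNODES. The snippet below is only an illustrative sketch of such a caller; foo_alloc_node_counters() is a made-up name and not part of this patch:

#include <linux/slab.h>
#include <linux/nodemask.h>

/* Hypothetical caller: one counter per possible node id (nr_node_ids == highest possible id + 1). */
static unsigned long *foo_alloc_node_counters(void)
{
	return kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
}
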
diff --git a/mm/shmem.c b/mm/shmem.c
index e537317bec4..b6aae2b3339 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -967,6 +967,8 @@ static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_
 		*nodelist++ = '\0';
 		if (nodelist_parse(nodelist, *policy_nodes))
 			goto out;
+		if (!nodes_subset(*policy_nodes, node_online_map))
+			goto out;
 	}
 	if (!strcmp(value, "default")) {
 		*policy = MPOL_DEFAULT;
diff --git a/mm/slab.c b/mm/slab.c
index 2e71a328aa0..6d65cf4e4b2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3539,7 +3539,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (use_alien_caches && cache_free_alien(cachep, objp))
+	if (cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
diff --git a/mm/slub.c b/mm/slub.c
index 98801d404d6..c9ab68881b4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -939,7 +939,7 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
 	 * Debugging or ctor may create a need to move the free
 	 * pointer. Fail if this happens.
 	 */
-	if (s->size >= 65535 * sizeof(void *)) {
+	if (s->objsize >= 65535 * sizeof(void *)) {
 		BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
 				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
 		BUG_ON(s->ctor);
@@ -1917,7 +1917,6 @@ static int calculate_sizes(struct kmem_cache *s)
 	 */
 	s->inuse = size;
 
-#ifdef CONFIG_SLUB_DEBUG
 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
 		s->ctor)) {
 		/*
@@ -1932,6 +1931,7 @@ static int calculate_sizes(struct kmem_cache *s)
 		size += sizeof(void *);
 	}
 
+#ifdef CONFIG_SLUB_DEBUG
 	if (flags & SLAB_STORE_USER)
 		/*
 		 * Need to store information about allocs and frees after
@@ -2241,7 +2241,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	if (s)
 		return slab_alloc(s, flags, -1, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2252,16 +2252,20 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 
 	if (s)
 		return slab_alloc(s, flags, node, __builtin_return_address(0));
-	return NULL;
+	return ZERO_SIZE_PTR;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
 
 size_t ksize(const void *object)
 {
-	struct page *page = get_object_page(object);
+	struct page *page;
 	struct kmem_cache *s;
 
+	if (object == ZERO_SIZE_PTR)
+		return 0;
+
+	page = get_object_page(object);
 	BUG_ON(!page);
 	s = page->slab;
 	BUG_ON(!s);
@@ -2293,7 +2297,13 @@ void kfree(const void *x)
 	struct kmem_cache *s;
 	struct page *page;
 
-	if (!x)
+	/*
+	 * This has to be an unsigned comparison. According to Linus
+	 * some gcc version treat a pointer as a signed entity. Then
+	 * this comparison would be true for all "negative" pointers
+	 * (which would cover the whole upper half of the address space).
+	 */
+	if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
 		return;
 
 	page = virt_to_head_page(x);
@@ -2398,12 +2408,12 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 	void *ret;
 	size_t ks;
 
-	if (unlikely(!p))
+	if (unlikely(!p || p == ZERO_SIZE_PTR))
 		return kmalloc(new_size, flags);
 
 	if (unlikely(!new_size)) {
 		kfree(p);
-		return NULL;
+		return ZERO_SIZE_PTR;
 	}
 
 	ks = ksize(p);
@@ -2435,6 +2445,7 @@ void __init kmem_cache_init(void)
 	 */
 	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
 		sizeof(struct kmem_cache_node), GFP_KERNEL);
+	kmalloc_caches[0].refcount = -1;
 #endif
 
 	/* Able to allocate the per node structures */
@@ -2482,6 +2493,12 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (s->ctor)
 		return 1;
 
+	/*
+	 * We may have set a slab to be unmergeable during bootstrap.
+	 */
+	if (s->refcount < 0)
+		return 1;
+
 	return 0;
 }
 
@@ -2601,6 +2618,19 @@ static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
 }
 
 /*
+ * Version of __flush_cpu_slab for the case that interrupts
+ * are enabled.
+ */
+static void cpu_slab_flush(struct kmem_cache *s, int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__flush_cpu_slab(s, cpu);
+	local_irq_restore(flags);
+}
+
+/*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
  */
@@ -2614,7 +2644,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		for_all_slabs(__flush_cpu_slab, cpu);
+		for_all_slabs(cpu_slab_flush, cpu);
 		break;
 	default:
 		break;
@@ -2632,7 +2662,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	return slab_alloc(s, gfpflags, -1, caller);
 }
@@ -2643,7 +2673,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s = get_slab(size, gfpflags);
 
 	if (!s)
-		return NULL;
+		return ZERO_SIZE_PTR;
 
 	return slab_alloc(s, gfpflags, node, caller);
 }
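
Note: the slub.c hunks above convert zero-length kmalloc() requests from returning NULL to returning the special ZERO_SIZE_PTR token, and teach ksize(), kfree() and krealloc() to recognize it. The snippet below is only a rough sketch of the resulting semantics, assuming the matching include/linux/slub_def.h change from the same series (not visible here since the diff is limited to mm/):

#include <linux/slab.h>

static void zero_size_ptr_demo(void)
{
	void *p = kmalloc(0, GFP_KERNEL);	/* ZERO_SIZE_PTR, not NULL */

	WARN_ON(ksize(p) != 0);			/* ksize() special-cases ZERO_SIZE_PTR */
	p = krealloc(p, 0, GFP_KERNEL);		/* still ZERO_SIZE_PTR */
	kfree(p);				/* kfree() ignores pointers <= ZERO_SIZE_PTR */
}
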
diff --git a/mm/sparse.c b/mm/sparse.c
index 1302f8348d5..e03b39f3540 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -209,6 +209,12 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
 	return 1;
 }
 
+__attribute__((weak))
+void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
+{
+	return NULL;
+}
+
 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
@@ -219,6 +225,11 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	if (map)
 		return map;
 
+	map = alloc_bootmem_high_node(NODE_DATA(nid),
+		       sizeof(struct page) * PAGES_PER_SECTION);
+	if (map)
+		return map;
+
 	map = alloc_bootmem_node(NODE_DATA(nid),
 			sizeof(struct page) * PAGES_PER_SECTION);
 	if (map)
@@ -229,6 +240,27 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	return NULL;
 }
 
+/*
+ * Allocate the accumulated non-linear sections, allocate a mem_map
+ * for each and record the physical to section mapping.
+ */
+void __init sparse_init(void)
+{
+	unsigned long pnum;
+	struct page *map;
+
+	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+		if (!valid_section_nr(pnum))
+			continue;
+
+		map = sparse_early_mem_map_alloc(pnum);
+		if (!map)
+			continue;
+		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
+	}
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -269,27 +301,6 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 }
 
 /*
- * Allocate the accumulated non-linear sections, allocate a mem_map
- * for each and record the physical to section mapping.
- */
-void __init sparse_init(void)
-{
-	unsigned long pnum;
-	struct page *map;
-
-	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
-		if (!valid_section_nr(pnum))
-			continue;
-
-		map = sparse_early_mem_map_alloc(pnum);
-		if (!map)
-			continue;
-		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
-	}
-}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-/*
  * returns the number of sections whose mem_maps were properly
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
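
Note: in the sparse.c change above, alloc_bootmem_high_node() is added as a weak stub that returns NULL, so sparse_early_mem_map_alloc() falls through to the normal alloc_bootmem_node() path unless an architecture provides a strong definition. The snippet below is only a sketch of what such an override might look like on a hypothetical 64-bit architecture; the 4GB goal is an illustrative placeholder, not taken from any real arch code:

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>

/* A strong definition in arch code overrides the weak stub in mm/sparse.c. */
void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
{
	/* Prefer bootmem above an arch-chosen boundary (placeholder: 4GB). */
	return __alloc_bootmem_node(pgdat, size, SMP_CACHE_BYTES, 4UL << 30);
}
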