Diffstat (limited to 'mm')
 mm/hugetlb.c    |  2
 mm/internal.h   | 13
 mm/memcontrol.c |  5
 mm/page_alloc.c |  4
 mm/slub.c       | 87
 5 files changed, 19 insertions(+), 92 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cb1b3a7ecdf..89e6286a7f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -120,6 +120,7 @@ static void free_huge_page(struct page *page)
 	struct address_space *mapping;
 
 	mapping = (struct address_space *) page_private(page);
+	set_page_private(page, 0);
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
@@ -134,7 +135,6 @@ static void free_huge_page(struct page *page)
 	spin_unlock(&hugetlb_lock);
 	if (mapping)
 		hugetlb_put_quota(mapping, 1);
-	set_page_private(page, 0);
 }
 
 /*
diff --git a/mm/internal.h b/mm/internal.h
index 5a9a6200e03..789727309f4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -47,4 +47,17 @@ static inline unsigned long page_order(struct page *page)
 	VM_BUG_ON(!PageBuddy(page));
 	return page_private(page);
 }
+
+/*
+ * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
+ * so all functions starting at paging_init should be marked __init
+ * in those cases. SPARSEMEM, however, allows for memory hotplug,
+ * and alloc_bootmem_node is not used.
+ */
+#ifdef CONFIG_SPARSEMEM
+#define __paginginit __meminit
+#else
+#define __paginginit __init
+#endif
+
 #endif
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6bded84c20c..631002d085d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -534,7 +534,6 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 		if (scan >= nr_to_scan)
 			break;
 		page = pc->page;
-		VM_BUG_ON(!pc);
 
 		if (unlikely(!PageLRU(page)))
 			continue;
@@ -1101,7 +1100,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 
 	mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
 	if (mem == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	res_counter_init(&mem->res);
@@ -1117,7 +1116,7 @@ free_out:
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
 		kfree(mem);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75b97931334..8896e874a67 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3314,7 +3314,7 @@ static inline int pageblock_default_order(unsigned int order)
  *   - mark all memory queues empty
  *   - clear the memory bitmaps
  */
-static void __meminit free_area_init_core(struct pglist_data *pgdat,
+static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	enum zone_type j;
@@ -3438,7 +3438,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
+void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long node_start_pfn,
 		unsigned long *zholes_size)
 {
diff --git a/mm/slub.c b/mm/slub.c
index 4b3895cb90e..74c65af0a54 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,13 +149,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-/*
- * Currently fastpath is not supported if preemption is enabled.
- */
-#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
-#define SLUB_FASTPATH
-#endif
-
 #if PAGE_SHIFT <= 12
 
 /*
@@ -1514,11 +1507,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct page *new;
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
 
-	local_irq_save(flags);
-#endif
 	if (!c->page)
 		goto new_slab;
 
@@ -1541,9 +1530,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return object;
 
 another_slab:
@@ -1575,9 +1561,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
+
 	/*
 	 * No memory available.
 	 *
@@ -1619,34 +1603,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-
-/*
- * The SLUB_FASTPATH path is provisional and is currently disabled if the
- * kernel is compiled with preemption or if the arch does not support
- * fast cmpxchg operations. There are a couple of coming changes that will
- * simplify matters and allow preemption. Ultimately we may end up making
- * SLUB_FASTPATH the default.
- *
- * 1. The introduction of the per cpu allocator will avoid array lookups
- *    through get_cpu_slab(). A special register can be used instead.
- *
- * 2. The introduction of per cpu atomic operations (cpu_ops) means that
- *    we can realize the logic here entirely with per cpu atomics. The
- *    per cpu atomic ops will take care of the preemption issues.
- */
-
-#ifdef SLUB_FASTPATH
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	do {
-		object = c->freelist;
-		if (unlikely(is_end(object) || !node_match(c, node))) {
-			object = __slab_alloc(s, gfpflags, node, addr, c);
-			break;
-		}
-		stat(c, ALLOC_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
-								!= object);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1661,7 +1617,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
-#endif
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, c->objsize);
@@ -1698,11 +1653,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
 
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
-
-	local_irq_save(flags);
-#endif
 	c = get_cpu_slab(s, raw_smp_processor_id());
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
@@ -1734,9 +1684,6 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -1749,9 +1696,6 @@ slab_empty:
 	}
 	slab_unlock(page);
 	stat(c, FREE_SLAB);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	discard_slab(s, page);
 	return;
 
@@ -1777,34 +1721,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-
-#ifdef SLUB_FASTPATH
-	void **freelist;
-
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	debug_check_no_locks_freed(object, s->objsize);
-	do {
-		freelist = c->freelist;
-		barrier();
-		/*
-		 * If the compiler would reorder the retrieval of c->page to
-		 * come before c->freelist then an interrupt could
-		 * change the cpu slab before we retrieve c->freelist. We
-		 * could be matching on a page no longer active and put the
-		 * object onto the freelist of the wrong slab.
-		 *
-		 * On the other hand: If we already have the freelist pointer
-		 * then any change of cpu_slab will cause the cmpxchg to fail
-		 * since the freelist pointers are unique per slab.
- */
-		if (unlikely(page != c->page || c->node < 0)) {
-			__slab_free(s, page, x, addr, c->offset);
-			break;
-		}
-		object[c->offset] = freelist;
-		stat(c, FREE_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1818,7 +1734,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 		__slab_free(s, page, x, addr, c->offset);
 
 	local_irq_restore(flags);
-#endif
}
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
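
The mm/slub.c hunks above revert the provisional SLUB_FASTPATH, which popped and pushed
objects on the per-CPU freelist with cmpxchg_local() retry loops instead of disabling
interrupts. As the removed comment notes, that path was unsafe with preemption enabled:
a task can be preempted and migrated between sampling the per-CPU cache and issuing the
CPU-local cmpxchg. The following is a minimal userspace sketch of the same lockless
freelist pattern using C11 atomics; all names here (struct cpu_cache, fast_alloc,
fast_free) are illustrative, not the kernel's, and the sketch deliberately ignores the
per-CPU, migration, and ABA hazards that the real fastpath had to worry about.

	#include <stdatomic.h>
	#include <stdio.h>

	struct object {
		struct object *next;	/* freelist link stored inside the free object */
	};

	struct cpu_cache {
		/* stand-in for the per-CPU kmem_cache_cpu->freelist head */
		_Atomic(struct object *) freelist;
	};

	/*
	 * Pop the freelist head. A failed compare-and-swap means the list
	 * changed under us; the CAS refreshes 'object' and we retry, the same
	 * retry structure as the reverted cmpxchg_local() loop.
	 */
	static struct object *fast_alloc(struct cpu_cache *c)
	{
		struct object *object = atomic_load(&c->freelist);

		while (object &&
		       !atomic_compare_exchange_weak(&c->freelist, &object,
						     object->next))
			;		/* retry with the refreshed head */
		return object;		/* NULL would mean: take the slow path */
	}

	/* Push an object back by making it the new freelist head. */
	static void fast_free(struct cpu_cache *c, struct object *object)
	{
		struct object *head = atomic_load(&c->freelist);

		do {
			object->next = head;
		} while (!atomic_compare_exchange_weak(&c->freelist, &head,
						       object));
	}

	int main(void)
	{
		static struct object pool[2];
		struct cpu_cache cache = { NULL };

		fast_free(&cache, &pool[0]);
		fast_free(&cache, &pool[1]);
		printf("got %p, then %p\n",
		       (void *)fast_alloc(&cache), (void *)fast_alloc(&cache));
		return 0;
	}

The removed kernel comment makes the subtle part explicit: the compare-and-swap only
protects you if the expected value (the freelist pointer) uniquely identifies the state
you sampled, which is why the reverted code also had to check c->page and rely on
barrier() against compiler reordering.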
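
The mm/memcontrol.c change switches mem_cgroup_create() from returning NULL on
allocation failure to ERR_PTR(-ENOMEM), so the cgroup core sees a specific errno rather
than a bare NULL. A rough userspace sketch of that convention follows; these are
simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers, and create_group()
is a made-up example, not the memcontrol code.

	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO	4095

	/* Encode a small negative errno in the (invalid) top page of addresses. */
	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *create_group(void)
	{
		void *mem = NULL;	/* pretend the allocation failed */

		if (mem == NULL)
			return ERR_PTR(-ENOMEM);	/* reason, not just NULL */
		return mem;
	}

	int main(void)
	{
		void *group = create_group();

		if (IS_ERR(group))
			fprintf(stderr, "create failed: %ld\n", PTR_ERR(group));
		return 0;
	}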