Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig            |  15
-rw-r--r--  mm/backing-dev.c      |   4
-rw-r--r--  mm/filemap.c          |  12
-rw-r--r--  mm/filemap_xip.c      |  11
-rw-r--r--  mm/hugetlb.c          |  70
-rw-r--r--  mm/mmap.c             |  15
-rw-r--r--  mm/nommu.c            |   3
-rw-r--r--  mm/page_alloc.c       |  37
-rw-r--r--  mm/rmap.c             |   9
-rw-r--r--  mm/shmem.c            |   5
-rw-r--r--  mm/slab.c             |   7
-rw-r--r--  mm/slob.c             |   3
-rw-r--r--  mm/slub.c             | 122
-rw-r--r--  mm/sparse-vmemmap.c   |  12
-rw-r--r--  mm/sparse.c           |  20
15 files changed, 281 insertions(+), 64 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index c070ec0c15b..9ef97417a0b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -112,18 +112,17 @@ config SPARSEMEM_EXTREME
def_bool y
depends on SPARSEMEM && !SPARSEMEM_STATIC
-#
-# SPARSEMEM_VMEMMAP uses a virtually mapped mem_map to optimise pfn_to_page
-# and page_to_pfn. The most efficient option where kernel virtual space is
-# not under pressure.
-#
config SPARSEMEM_VMEMMAP_ENABLE
def_bool n
config SPARSEMEM_VMEMMAP
- bool
- depends on SPARSEMEM
- default y if (SPARSEMEM_VMEMMAP_ENABLE)
+ bool "Sparse Memory virtual memmap"
+ depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
+ default y
+ help
+ SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
+ pfn_to_page and page_to_pfn operations. This is the most
+ efficient option when sufficient kernel resources are available.
# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
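
For illustration, a minimal sketch (userspace-style C, not the kernel's actual macros) of why a virtually mapped memmap is cheaper: pfn_to_page() and page_to_pfn() reduce to pointer arithmetic against a single base, instead of the per-section lookup that classic SPARSEMEM has to do. The vmemmap base below is an assumption for the sketch.

struct page { unsigned long flags; };

/* Hypothetical virtual base of the memmap, e.g. VMEMMAP_START on x86-64. */
static struct page *vmemmap;

/* With a virtually mapped memmap, both conversions are a single add/subtract. */
static inline struct page *vmemmap_pfn_to_page(unsigned long pfn)
{
	return vmemmap + pfn;
}

static inline unsigned long vmemmap_page_to_pfn(const struct page *page)
{
	return (unsigned long)(page - vmemmap);
}
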
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b0ceb29da4c..e8644b1e552 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -7,7 +7,7 @@
int bdi_init(struct backing_dev_info *bdi)
{
- int i, j;
+ int i;
int err;
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
@@ -21,7 +21,7 @@ int bdi_init(struct backing_dev_info *bdi)
if (err) {
err:
- for (j = 0; j < i; j++)
+ while (i--)
percpu_counter_destroy(&bdi->bdi_stat[i]);
}
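
The backing-dev.c change replaces the `for (j = 0; j < i; j++)` unwind with `while (i--)`, which drops the second index and tears down the partially initialised counters in reverse order. A minimal, hypothetical sketch of the same error-unwind pattern (plain C, illustrative names):

#include <stdio.h>

#define NITEMS 4

static int init_item(int i)     { return (i == 2) ? -1 : 0; }	/* pretend item 2 fails */
static void destroy_item(int i) { printf("destroy %d\n", i); }

static int init_all(void)
{
	int i, err = 0;

	for (i = 0; i < NITEMS; i++) {
		err = init_item(i);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (i--)			/* tear down only what was set up, newest first */
		destroy_item(i);
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
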
diff --git a/mm/filemap.c b/mm/filemap.c
index 188cf5fd3e8..f4d0cded0e1 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -124,6 +124,18 @@ void __remove_from_page_cache(struct page *page)
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
BUG_ON(page_mapped(page));
+
+ /*
+ * Some filesystems seem to re-dirty the page even after
+ * the VM has canceled the dirty bit (eg ext3 journaling).
+ *
+ * Fix it up by doing a final dirty accounting check after
+ * having removed the page entirely.
+ */
+ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+ dec_zone_page_state(page, NR_FILE_DIRTY);
+ dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ }
}
void remove_from_page_cache(struct page *page)
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 32132f3cd64..f874ae818ad 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -25,14 +25,15 @@ static struct page *__xip_sparse_page;
static struct page *xip_sparse_page(void)
{
if (!__xip_sparse_page) {
- unsigned long zeroes = get_zeroed_page(GFP_HIGHUSER);
- if (zeroes) {
+ struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+
+ if (page) {
static DEFINE_SPINLOCK(xip_alloc_lock);
spin_lock(&xip_alloc_lock);
if (!__xip_sparse_page)
- __xip_sparse_page = virt_to_page(zeroes);
+ __xip_sparse_page = page;
else
- free_page(zeroes);
+ __free_page(page);
spin_unlock(&xip_alloc_lock);
}
}
@@ -314,7 +315,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
fault_in_pages_readable(buf, bytes);
kaddr = kmap_atomic(page, KM_USER0);
copied = bytes -
- __copy_from_user_inatomic_nocache(kaddr, buf, bytes);
+ __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(page);
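
The first filemap_xip.c hunk keeps the usual lazy-initialisation shape: allocate outside the lock, then take the lock and either publish the page or free it if another task won the race. The fix is that a GFP_HIGHUSER allocation may come from highmem, so it has to be tracked as a struct page pointer rather than through get_zeroed_page()'s kernel virtual address. A hedged userspace sketch of the publish-or-free pattern (pthread names are illustrative, not the kernel API):

#include <pthread.h>
#include <stdlib.h>

static void *shared_buf;	/* lazily created, shared by every caller */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_shared_buf(void)
{
	/* Unlocked fast path; the kernel code relies on the same benign-looking check. */
	if (!shared_buf) {
		void *buf = calloc(1, 4096);	/* allocate before taking the lock */

		if (buf) {
			pthread_mutex_lock(&alloc_lock);
			if (!shared_buf)
				shared_buf = buf;	/* we won the race: publish it */
			else
				free(buf);		/* somebody else already did */
			pthread_mutex_unlock(&alloc_lock);
		}
	}
	return shared_buf;
}
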
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6121b57bbe9..7224a4f0710 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -31,7 +31,7 @@ static unsigned int free_huge_pages_node[MAX_NUMNODES];
static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
-int hugetlb_dynamic_pool;
+unsigned long nr_overcommit_huge_pages;
static int hugetlb_next_nid;
/*
@@ -227,22 +227,58 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
unsigned long address)
{
struct page *page;
+ unsigned int nid;
- /* Check if the dynamic pool is enabled */
- if (!hugetlb_dynamic_pool)
+ /*
+ * Assume we will successfully allocate the surplus page to
+ * prevent racing processes from causing the surplus to exceed
+ * overcommit
+ *
+ * This however introduces a different race, where a process B
+ * tries to grow the static hugepage pool while alloc_pages() is
+ * called by process A. B will only examine the per-node
+ * counters in determining if surplus huge pages can be
+ * converted to normal huge pages in adjust_pool_surplus(). A
+ * won't be able to increment the per-node counter, until the
+ * lock is dropped by B, but B doesn't drop hugetlb_lock until
+ * no more huge pages can be converted from surplus to normal
+ * state (and doesn't try to convert again). Thus, we have a
+ * case where a surplus huge page exists, the pool is grown, and
+ * the surplus huge page still exists after, even though it
+ * should just have been converted to a normal huge page. This
+ * does not leak memory, though, as the hugepage will be freed
+ * once it is out of use. It also does not allow the counters to
+ * go out of whack in adjust_pool_surplus() as we don't modify
+ * the node values until we've gotten the hugepage and only the
+ * per-node value is checked there.
+ */
+ spin_lock(&hugetlb_lock);
+ if (surplus_huge_pages >= nr_overcommit_huge_pages) {
+ spin_unlock(&hugetlb_lock);
return NULL;
+ } else {
+ nr_huge_pages++;
+ surplus_huge_pages++;
+ }
+ spin_unlock(&hugetlb_lock);
page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
HUGETLB_PAGE_ORDER);
+
+ spin_lock(&hugetlb_lock);
if (page) {
+ nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
- spin_lock(&hugetlb_lock);
- nr_huge_pages++;
- nr_huge_pages_node[page_to_nid(page)]++;
- surplus_huge_pages++;
- surplus_huge_pages_node[page_to_nid(page)]++;
- spin_unlock(&hugetlb_lock);
+ /*
+ * We incremented the global counters already
+ */
+ nr_huge_pages_node[nid]++;
+ surplus_huge_pages_node[nid]++;
+ } else {
+ nr_huge_pages--;
+ surplus_huge_pages--;
}
+ spin_unlock(&hugetlb_lock);
return page;
}
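
The comment in alloc_buddy_huge_page() describes an optimistic reservation: the counters are bumped under hugetlb_lock before the allocation so racing callers cannot collectively exceed nr_overcommit_huge_pages, and they are rolled back if alloc_pages() fails. A minimal userspace sketch of that reserve-then-roll-back pattern (names and limits are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long surplus;
static const unsigned long overcommit_limit = 8;	/* stands in for nr_overcommit_huge_pages */

static void *alloc_surplus_object(void)
{
	void *obj;

	/* Reserve a slot first so racing callers cannot exceed the limit. */
	pthread_mutex_lock(&pool_lock);
	if (surplus >= overcommit_limit) {
		pthread_mutex_unlock(&pool_lock);
		return NULL;
	}
	surplus++;
	pthread_mutex_unlock(&pool_lock);

	obj = malloc(2 * 1024 * 1024);		/* the slow allocation, done without the lock */

	if (!obj) {
		/* Allocation failed: give the reserved slot back. */
		pthread_mutex_lock(&pool_lock);
		surplus--;
		pthread_mutex_unlock(&pool_lock);
	}
	return obj;
}
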
@@ -481,6 +517,12 @@ static unsigned long set_max_huge_pages(unsigned long count)
* Increase the pool size
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
+ *
+ * We might race with alloc_buddy_huge_page() here and be unable
+ * to convert a surplus huge page to a normal huge page. That is
+ * not critical, though, it just means the overall size of the
+ * pool might be one hugepage larger than it needs to be, but
+ * within all the constraints specified by the sysctls.
*/
spin_lock(&hugetlb_lock);
while (surplus_huge_pages && count > persistent_huge_pages) {
@@ -509,6 +551,14 @@ static unsigned long set_max_huge_pages(unsigned long count)
* to keep enough around to satisfy reservations). Then place
* pages into surplus state as needed so the pool will shrink
* to the desired size as pages become free.
+ *
+ * By placing pages into the surplus state independent of the
+ * overcommit value, we are allowing the surplus pool size to
+ * exceed overcommit. There are few sane options here. Since
+ * alloc_buddy_huge_page() is checking the global counter,
+ * though, we'll note that we're not allowed to exceed surplus
+ * and won't grow the pool anywhere else. Not until one of the
+ * sysctls is changed, or the surplus pages go out of use.
*/
min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
min_count = max(count, min_count);
@@ -907,7 +957,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
- if (!pte || pte_none(*pte)) {
+ if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
int ret;
spin_unlock(&mm->page_table_lock);
diff --git a/mm/mmap.c b/mm/mmap.c
index facc1a75bd4..15678aa6ec7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -912,6 +912,9 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
if (!len)
return -EINVAL;
+ if (!(flags & MAP_FIXED))
+ addr = round_hint_to_min(addr);
+
error = arch_mmap_check(addr, len, flags);
if (error)
return error;
@@ -1615,6 +1618,12 @@ static inline int expand_downwards(struct vm_area_struct *vma,
*/
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
+
+ address &= PAGE_MASK;
+ error = security_file_mmap(0, 0, 0, 0, address, 1);
+ if (error)
+ return error;
+
anon_vma_lock(vma);
/*
@@ -1622,8 +1631,6 @@ static inline int expand_downwards(struct vm_area_struct *vma,
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
- address &= PAGE_MASK;
- error = 0;
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
@@ -1934,6 +1941,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
if (is_hugepage_only_range(mm, addr, len))
return -EINVAL;
+ error = security_file_mmap(0, 0, 0, 0, addr, 1);
+ if (error)
+ return error;
+
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
error = arch_mmap_check(addr, len, flags);
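
Both do_mmap_pgoff() and do_brk() now run the request through round_hint_to_min() and security_file_mmap() so that mappings cannot land below the minimum allowed address. As a hedged sketch only (the constants and the mmap_min_addr name below are assumptions, not quoted from this diff), such a helper plausibly rounds a low, non-MAP_FIXED hint up instead of failing it:

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long mmap_min_addr = 65536;	/* e.g. the vm.mmap_min_addr sysctl */

static unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (hint && hint < mmap_min_addr)
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}
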
diff --git a/mm/nommu.c b/mm/nommu.c
index 35622c59092..b989cb928a7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -829,6 +829,9 @@ unsigned long do_mmap_pgoff(struct file *file,
void *result;
int ret;
+ if (!(flags & MAP_FIXED))
+ addr = round_hint_to_min(addr);
+
/* decide whether we should attempt the mapping, and if so what sort of
* mapping */
ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 12376ae3f73..e1028fae3eb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -305,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
int i;
- VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
/*
* clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
* and __GFP_HIGHMEM from hard or soft interrupt context.
@@ -848,8 +847,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
struct page *page = __rmqueue(zone, order, migratetype);
if (unlikely(page == NULL))
break;
+
+ /*
+ * Split buddy pages returned by expand() are received here
+ * in physical page order. The page is added to the caller's
+ * list and the list head then moves forward. From the caller's
+ * perspective, the linked list is ordered by page number under
+ * some conditions. This is useful for IO devices that can
+ * merge IO requests if the physical pages are ordered
+ * properly.
+ */
list_add(&page->lru, list);
set_page_private(page, migratetype);
+ list = &page->lru;
}
spin_unlock(&zone->lock);
return i;
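
The new `list = &page->lru;` line is what keeps the batch ordered: each page is linked in after the previously added one, whereas always inserting at the passed-in head would leave the list in reverse physical order. A tiny userspace illustration (simplified singly linked list, not the kernel's list_head API):

#include <stdio.h>

struct node { int pfn; struct node *next; };

/* Insert 'n' immediately after 'pos', like list_add(&n->lru, pos). */
static void insert_after(struct node *n, struct node *pos)
{
	n->next = pos->next;
	pos->next = n;
}

int main(void)
{
	struct node head = { -1, NULL }, pages[4] = { {0}, {1}, {2}, {3} };
	struct node *cursor = &head, *p;
	int i;

	for (i = 0; i < 4; i++) {
		insert_after(&pages[i], cursor);
		cursor = &pages[i];	/* advance, as "list = &page->lru" does */
	}
	for (p = head.next; p; p = p->next)
		printf("%d ", p->pfn);	/* prints 0 1 2 3; without advancing: 3 2 1 0 */
	printf("\n");
	return 0;
}
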
@@ -3266,6 +3276,16 @@ static void inline setup_usemap(struct pglist_data *pgdat,
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Return a sensible default order for the pageblock size. */
+static inline int pageblock_default_order(void)
+{
+ if (HPAGE_SHIFT > PAGE_SHIFT)
+ return HUGETLB_PAGE_ORDER;
+
+ return MAX_ORDER-1;
+}
+
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
static inline void __init set_pageblock_order(unsigned int order)
{
@@ -3281,7 +3301,16 @@ static inline void __init set_pageblock_order(unsigned int order)
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-/* Defined this way to avoid accidently referencing HUGETLB_PAGE_ORDER */
+/*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+ * and pageblock_default_order() are unused as pageblock_order is set
+ * at compile-time. See include/linux/pageblock-flags.h for the values of
+ * pageblock_order based on the kernel config
+ */
+static inline int pageblock_default_order(unsigned int order)
+{
+ return MAX_ORDER-1;
+}
#define set_pageblock_order(x) do {} while (0)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -3366,7 +3395,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
if (!size)
continue;
- set_pageblock_order(HUGETLB_PAGE_ORDER);
+ set_pageblock_order(pageblock_default_order());
setup_usemap(pgdat, zone, size);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
@@ -3409,7 +3438,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
- mem_map -= pgdat->node_start_pfn;
+ mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
}
#endif
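
The alloc_node_mem_map() fix can be checked with a small worked example, assuming the FLATMEM-style conversion pfn_to_page(pfn) = mem_map + (pfn - ARCH_PFN_OFFSET) and hypothetical values ARCH_PFN_OFFSET = 0x80000, node_start_pfn = 0x80200: node 0's node_mem_map[0] describes pfn 0x80200, so the global mem_map must satisfy mem_map + (0x80200 - 0x80000) == node_mem_map, i.e. mem_map = node_mem_map - (node_start_pfn - ARCH_PFN_OFFSET) = node_mem_map - 0x200. Subtracting the full node_start_pfn, as the old code did, is only correct when ARCH_PFN_OFFSET is zero.
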
diff --git a/mm/rmap.c b/mm/rmap.c
index dc3be5f5b0d..dbc2ca2057a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -471,11 +471,12 @@ int page_mkclean(struct page *page)
if (page_mapped(page)) {
struct address_space *mapping = page_mapping(page);
- if (mapping)
+ if (mapping) {
ret = page_mkclean_file(mapping, page);
- if (page_test_dirty(page)) {
- page_clear_dirty(page);
- ret = 1;
+ if (page_test_dirty(page)) {
+ page_clear_dirty(page);
+ ret = 1;
+ }
}
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 253d205914b..51b3d6ccdda 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1072,7 +1072,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
pvma.vm_pgoff = idx;
pvma.vm_end = PAGE_SIZE;
- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
+ page = alloc_page_vma(gfp, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
@@ -1093,7 +1093,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
static inline struct page *
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
- return alloc_page(gfp | __GFP_ZERO);
+ return alloc_page(gfp);
}
#endif
@@ -1306,6 +1306,7 @@ repeat:
info->alloced++;
spin_unlock(&info->lock);
+ clear_highpage(filepage);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
}
diff --git a/mm/slab.c b/mm/slab.c
index c31cd3682a0..aebb9f68557 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2881,6 +2881,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
unsigned int objnr;
struct slab *slabp;
+ BUG_ON(virt_to_cache(objp) != cachep);
+
objp -= obj_offset(cachep);
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
@@ -3759,8 +3761,6 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
- BUG_ON(virt_to_cache(objp) != cachep);
-
local_irq_save(flags);
debug_check_no_locks_freed(objp, obj_size(cachep));
__cache_free(cachep, objp);
@@ -4105,7 +4105,7 @@ out:
schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}
-#ifdef CONFIG_PROC_FS
+#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
@@ -4475,3 +4475,4 @@ size_t ksize(const void *objp)
return obj_size(virt_to_cache(objp));
}
+EXPORT_SYMBOL(ksize);
diff --git a/mm/slob.c b/mm/slob.c
index 08a9bd91a1a..773a7aa80ab 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -330,7 +330,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp, 0, node);
+ b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
if (!b)
return 0;
sp = (struct slob_page *)virt_to_page(b);
@@ -495,6 +495,7 @@ size_t ksize(const void *block)
else
return sp->page.private;
}
+EXPORT_SYMBOL(ksize);
struct kmem_cache {
unsigned int size, align;
diff --git a/mm/slub.c b/mm/slub.c
index 9acb413858a..474945ecd89 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -172,7 +172,7 @@ static inline void ClearSlabDebug(struct page *page)
* Minimum number of partial slabs. These will be left on the partial
* lists even if they are empty. kmem_cache_shrink may reclaim them.
*/
-#define MIN_PARTIAL 2
+#define MIN_PARTIAL 5
/*
* Maximum number of desirable partial slabs.
@@ -1613,7 +1613,7 @@ checks_ok:
* then add it.
*/
if (unlikely(!prior))
- add_partial(get_node(s, page_to_nid(page)), page);
+ add_partial_tail(get_node(s, page_to_nid(page)), page);
out_unlock:
slab_unlock(page);
@@ -2558,8 +2558,12 @@ size_t ksize(const void *object)
if (unlikely(object == ZERO_SIZE_PTR))
return 0;
- page = get_object_page(object);
+ page = virt_to_head_page(object);
BUG_ON(!page);
+
+ if (unlikely(!PageSlab(page)))
+ return PAGE_SIZE << compound_order(page);
+
s = page->slab;
BUG_ON(!s);
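
For context on the ksize() change: under SLUB, kmalloc() requests too large for the kmalloc caches are handed straight to the page allocator as compound pages, so the object's page is not PageSlab and the old get_object_page() lookup had nothing to return. With the new branch, and assuming 4 KiB pages, a hypothetical 10 KiB allocation that lands in an order-2 compound page now reports ksize() = PAGE_SIZE << 2 = 16384 bytes.
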
@@ -3072,6 +3076,19 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
return slab_alloc(s, gfpflags, node, caller);
}
+static unsigned long count_partial(struct kmem_cache_node *n)
+{
+ unsigned long flags;
+ unsigned long x = 0;
+ struct page *page;
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, lru)
+ x += page->inuse;
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+}
+
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
@@ -3454,19 +3471,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
return n;
}
-static unsigned long count_partial(struct kmem_cache_node *n)
-{
- unsigned long flags;
- unsigned long x = 0;
- struct page *page;
-
- spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->partial, lru)
- x += page->inuse;
- spin_unlock_irqrestore(&n->list_lock, flags);
- return x;
-}
-
enum slab_stat_type {
SL_FULL,
SL_PARTIAL,
@@ -4119,3 +4123,89 @@ static int __init slab_sysfs_init(void)
__initcall(slab_sysfs_init);
#endif
+
+/*
+ * The /proc/slabinfo ABI
+ */
+#ifdef CONFIG_SLABINFO
+
+ssize_t slabinfo_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+static void print_slabinfo_header(struct seq_file *m)
+{
+ seq_puts(m, "slabinfo - version: 2.1\n");
+ seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ "<objperslab> <pagesperslab>");
+ seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
+ seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
+ seq_putc(m, '\n');
+}
+
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+
+ down_read(&slub_lock);
+ if (!n)
+ print_slabinfo_header(m);
+
+ return seq_list_start(&slab_caches, *pos);
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ return seq_list_next(p, &slab_caches, pos);
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+ up_read(&slub_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+ unsigned long nr_partials = 0;
+ unsigned long nr_slabs = 0;
+ unsigned long nr_inuse = 0;
+ unsigned long nr_objs;
+ struct kmem_cache *s;
+ int node;
+
+ s = list_entry(p, struct kmem_cache, list);
+
+ for_each_online_node(node) {
+ struct kmem_cache_node *n = get_node(s, node);
+
+ if (!n)
+ continue;
+
+ nr_partials += n->nr_partial;
+ nr_slabs += atomic_long_read(&n->nr_slabs);
+ nr_inuse += count_partial(n);
+ }
+
+ nr_objs = nr_slabs * s->objects;
+ nr_inuse += (nr_slabs - nr_partials) * s->objects;
+
+ seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
+ nr_objs, s->size, s->objects, (1 << s->order));
+ seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
+ seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
+ 0UL);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+const struct seq_operations slabinfo_op = {
+ .start = s_start,
+ .next = s_next,
+ .stop = s_stop,
+ .show = s_show,
+};
+
+#endif /* CONFIG_SLABINFO */
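
A worked example of the s_show() arithmetic above, with illustrative numbers: for a cache with s->objects = 16 objects per slab and 10 slabs in total, 3 of them partial holding 5, 7 and 2 live objects, count_partial() contributes 14, nr_objs = 10 * 16 = 160, and nr_inuse = 14 + (10 - 3) * 16 = 126 (full slabs are assumed fully in use), so the /proc/slabinfo line would begin "<name> 126 160 ...".
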
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 22620f6a976..cd75b21dd4c 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -34,6 +34,16 @@
* or to back the page tables that are used to create the mapping.
* Uses the main allocators if they are available, else bootmem.
*/
+
+static void * __init_refok __earlyonly_bootmem_alloc(int node,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal)
+{
+ return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+}
+
+
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
/* If the main allocator is up use that, fallback to bootmem. */
@@ -44,7 +54,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
return page_address(page);
return NULL;
} else
- return __alloc_bootmem_node(NODE_DATA(node), size, size,
+ return __earlyonly_bootmem_alloc(node, size, size,
__pa(MAX_DMA_ADDRESS));
}
diff --git a/mm/sparse.c b/mm/sparse.c
index e06f514fe04..a2183cb5d52 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -83,6 +83,8 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
return -EEXIST;
section = sparse_index_alloc(nid);
+ if (!section)
+ return -ENOMEM;
/*
* This lock keeps two different sections from
* reallocating for the same index
@@ -389,9 +391,17 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
* no locking for this, because it does its own
* plus, it does a kmalloc
*/
- sparse_index_init(section_nr, pgdat->node_id);
+ ret = sparse_index_init(section_nr, pgdat->node_id);
+ if (ret < 0 && ret != -EEXIST)
+ return ret;
memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+ if (!memmap)
+ return -ENOMEM;
usemap = __kmalloc_section_usemap();
+ if (!usemap) {
+ __kfree_section_memmap(memmap, nr_pages);
+ return -ENOMEM;
+ }
pgdat_resize_lock(pgdat, &flags);
@@ -401,18 +411,16 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
goto out;
}
- if (!usemap) {
- ret = -ENOMEM;
- goto out;
- }
ms->section_mem_map |= SECTION_MARKED_PRESENT;
ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
out:
pgdat_resize_unlock(pgdat, &flags);
- if (ret <= 0)
+ if (ret <= 0) {
+ kfree(usemap);
__kfree_section_memmap(memmap, nr_pages);
+ }
return ret;
}
#endif
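
sparse_add_one_section() now checks each allocation as it is made and releases them in reverse order on failure: a failed usemap allocation frees the memmap immediately, and a later failure frees both. A compact, hypothetical sketch of that acquire-in-order / release-in-reverse pattern (plain C, illustrative names):

#include <stdlib.h>

struct section { void *memmap; void *usemap; };

static int add_section(struct section *sec, size_t map_bytes, size_t use_bytes)
{
	void *memmap = malloc(map_bytes);
	void *usemap;

	if (!memmap)
		return -1;			/* nothing to undo yet */

	usemap = malloc(use_bytes);
	if (!usemap) {
		free(memmap);			/* undo the earlier allocation */
		return -1;
	}

	sec->memmap = memmap;			/* commit only once everything has succeeded */
	sec->usemap = usemap;
	return 0;
}
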