Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    |  1 -
-rw-r--r--  mm/memcontrol.c |  8 +++++++-
-rw-r--r--  mm/memory.c     | 28 +++++++++++-----------------
-rw-r--r--  mm/mmap.c       | 18 ++++++++++++------
-rw-r--r--  mm/nobootmem.c  |  3 +--
-rw-r--r--  mm/page_alloc.c |  2 +-
-rw-r--r--  mm/percpu.c     | 22 +++++++++++++++++-----
-rw-r--r--  mm/slub.c       |  2 +-
8 files changed, 50 insertions, 34 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5a16423a512..ae8f708e3d7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2498,7 +2498,6 @@ retry_avoidcopy:
         if (outside_reserve) {
                 BUG_ON(huge_pte_none(pte));
                 if (unmap_ref_private(mm, vma, old_page, address)) {
-                        BUG_ON(page_count(old_page) != 1);
                         BUG_ON(huge_pte_none(pte));
                         spin_lock(&mm->page_table_lock);
                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 31ab9c3f017..7685d4a0b3c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4507,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 swap_buffers:
         /* Swap primary and spare array */
         thresholds->spare = thresholds->primary;
+        /* If all events are unregistered, free the spare array */
+        if (!new) {
+                kfree(thresholds->spare);
+                thresholds->spare = NULL;
+        }
+
         rcu_assign_pointer(thresholds->primary, new);
 
         /* To be sure that nobody uses thresholds */
@@ -5475,7 +5481,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
          * part of thp split is not executed yet.
          */
         if (pmd_trans_huge_lock(pmd, vma) == 1) {
-                if (!mc.precharge) {
+                if (mc.precharge < HPAGE_PMD_NR) {
                         spin_unlock(&vma->vm_mm->page_table_lock);
                         return 0;
                 }
diff --git a/mm/memory.c b/mm/memory.c
index 6105f475fa8..1e77da6d82c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1295,7 +1295,7 @@ static void unmap_page_range(struct mmu_gather *tlb,
 
 static void unmap_single_vma(struct mmu_gather *tlb,
                 struct vm_area_struct *vma, unsigned long start_addr,
-                unsigned long end_addr, unsigned long *nr_accounted,
+                unsigned long end_addr,
                 struct zap_details *details)
 {
         unsigned long start = max(vma->vm_start, start_addr);
@@ -1307,9 +1307,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
         if (end <= vma->vm_start)
                 return;
 
-        if (vma->vm_flags & VM_ACCOUNT)
-                *nr_accounted += (end - start) >> PAGE_SHIFT;
-
         if (unlikely(is_pfn_mapping(vma)))
                 untrack_pfn_vma(vma, 0, 0);
 
@@ -1339,8 +1336,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
- * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
- * @details: details of nonlinear truncation or shared cache invalidation
  *
  * Unmap all pages in the vma list.
  *
@@ -1355,15 +1350,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb,
                 struct vm_area_struct *vma, unsigned long start_addr,
-                unsigned long end_addr, unsigned long *nr_accounted,
-                struct zap_details *details)
+                unsigned long end_addr)
 {
         struct mm_struct *mm = vma->vm_mm;
 
         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
-                unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted,
-                                 details);
+                unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
 }
 
@@ -1376,19 +1369,21 @@ void unmap_vmas(struct mmu_gather *tlb,
  *
  * Caller must protect the VMA list
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+void zap_page_range(struct vm_area_struct *vma, unsigned long start,
                 unsigned long size, struct zap_details *details)
 {
         struct mm_struct *mm = vma->vm_mm;
         struct mmu_gather tlb;
-        unsigned long end = address + size;
-        unsigned long nr_accounted = 0;
+        unsigned long end = start + size;
 
         lru_add_drain();
         tlb_gather_mmu(&tlb, mm, 0);
         update_hiwater_rss(mm);
-        unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
-        tlb_finish_mmu(&tlb, address, end);
+        mmu_notifier_invalidate_range_start(mm, start, end);
+        for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
+                unmap_single_vma(&tlb, vma, start, end, details);
+        mmu_notifier_invalidate_range_end(mm, start, end);
+        tlb_finish_mmu(&tlb, start, end);
 }
 
 /**
@@ -1406,13 +1401,12 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
         struct mm_struct *mm = vma->vm_mm;
         struct mmu_gather tlb;
         unsigned long end = address + size;
-        unsigned long nr_accounted = 0;
 
         lru_add_drain();
         tlb_gather_mmu(&tlb, mm, 0);
         update_hiwater_rss(mm);
         mmu_notifier_invalidate_range_start(mm, address, end);
-        unmap_single_vma(&tlb, vma, address, end, &nr_accounted, details);
+        unmap_single_vma(&tlb, vma, address, end, details);
         mmu_notifier_invalidate_range_end(mm, address, end);
         tlb_finish_mmu(&tlb, address, end);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 848ef52d960..69a1889f379 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1889,15 +1889,20 @@ find_extend_vma(struct mm_struct * mm, unsigned long addr)
  */
 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
 {
+        unsigned long nr_accounted = 0;
+
         /* Update high watermark before we lower total_vm */
         update_hiwater_vm(mm);
         do {
                 long nrpages = vma_pages(vma);
 
+                if (vma->vm_flags & VM_ACCOUNT)
+                        nr_accounted += nrpages;
                 mm->total_vm -= nrpages;
                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                 vma = remove_vma(vma);
         } while (vma);
+        vm_unacct_memory(nr_accounted);
         validate_mm(mm);
 }
 
@@ -1912,13 +1917,11 @@ static void unmap_region(struct mm_struct *mm,
 {
         struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
         struct mmu_gather tlb;
-        unsigned long nr_accounted = 0;
 
         lru_add_drain();
         tlb_gather_mmu(&tlb, mm, 0);
         update_hiwater_rss(mm);
-        unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
-        vm_unacct_memory(nr_accounted);
+        unmap_vmas(&tlb, vma, start, end);
         free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
                                  next ? next->vm_start : 0);
         tlb_finish_mmu(&tlb, start, end);
@@ -2305,8 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
         tlb_gather_mmu(&tlb, mm, 1);
         /* update_hiwater_rss(mm) here? but nobody should be looking */
         /* Use -1 here to ensure all VMAs in the mm are unmapped */
-        unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
-        vm_unacct_memory(nr_accounted);
+        unmap_vmas(&tlb, vma, 0, -1);
         free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
         tlb_finish_mmu(&tlb, 0, -1);
 
@@ -2315,8 +2317,12 @@ void exit_mmap(struct mm_struct *mm)
          * Walk the list again, actually closing and freeing it,
          * with preemption enabled, without holding any MM locks.
          */
-        while (vma)
+        while (vma) {
+                if (vma->vm_flags & VM_ACCOUNT)
+                        nr_accounted += vma_pages(vma);
                 vma = remove_vma(vma);
+        }
+        vm_unacct_memory(nr_accounted);
 
         BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e53bb8a256b..1983fb1c702 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 
 static void __init __free_pages_memory(unsigned long start, unsigned long end)
 {
-        int i;
-        unsigned long start_aligned, end_aligned;
+        unsigned long i, start_aligned, end_aligned;
         int order = ilog2(BITS_PER_LONG);
 
         start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89b9af2ca26..9f389e50ed1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
         int ret;
 
         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-        if (!write || (ret == -EINVAL))
+        if (!write || (ret < 0))
                 return ret;
         for_each_populated_zone(zone) {
                 for_each_possible_cpu(cpu) {
diff --git a/mm/percpu.c b/mm/percpu.c
index f47af9123af..bb4be7435ce 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1132,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
                 for (alloc_end += gi->nr_units / upa;
                      alloc < alloc_end; alloc++) {
                         if (!(alloc % apl)) {
-                                printk("\n");
+                                printk(KERN_CONT "\n");
                                 printk("%spcpu-alloc: ", lvl);
                         }
-                        printk("[%0*d] ", group_width, group);
+                        printk(KERN_CONT "[%0*d] ", group_width, group);
 
                         for (unit_end += upa; unit < unit_end; unit++)
                                 if (gi->cpu_map[unit] != NR_CPUS)
-                                        printk("%0*d ", cpu_width,
+                                        printk(KERN_CONT "%0*d ", cpu_width,
                                                gi->cpu_map[unit]);
                                 else
-                                        printk("%s ", empty_str);
+                                        printk(KERN_CONT "%s ", empty_str);
                 }
         }
-        printk("\n");
+        printk(KERN_CONT "\n");
 }
 
 /**
@@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                 areas[group] = ptr;
 
                 base = min(ptr, base);
+        }
+
+        /*
+         * Copy data and free unused parts. This should happen after all
+         * allocations are complete; otherwise, we may end up with
+         * overlapping groups.
+         */
+        for (group = 0; group < ai->nr_groups; group++) {
+                struct pcpu_group_info *gi = &ai->groups[group];
+                void *ptr = areas[group];
 
                 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
                         if (gi->cpu_map[i] == NR_CPUS) {
@@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void)
         fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
         if (!ai || !fc)
                 panic("Failed to allocate memory for percpu areas.");
+        /* kmemleak tracks the percpu allocations separately */
+        kmemleak_free(fc);
 
         ai->dyn_size = unit_size;
         ai->unit_size = unit_size;
diff --git a/mm/slub.c b/mm/slub.c
index ffe13fdf814..80848cd3901 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2040,7 +2040,7 @@ static bool has_cpu_slab(int cpu, void *info)
         struct kmem_cache *s = info;
         struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-        return !!(c->page);
+        return c->page || c->partial;
 }
 
 static void flush_all(struct kmem_cache *s)
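A note on the thread running through the mm/memory.c and mm/mmap.c hunks: unmap_vmas() no longer counts VM_ACCOUNT pages while unmapping; each path that actually removes VMAs accumulates the count itself and settles it with a single vm_unacct_memory() call. Below is a standalone sketch of that caller-side pattern, using simplified userspace stand-ins for the kernel's types and helpers (the PAGE_SHIFT and VM_ACCOUNT values and all functions here are illustrative, not the kernel's):

        #include <stdio.h>
        #include <stdlib.h>

        #define PAGE_SHIFT 12
        #define VM_ACCOUNT 0x1UL                /* simplified stand-in flag */

        struct vm_area_struct {
                unsigned long vm_start, vm_end, vm_flags;
                struct vm_area_struct *vm_next;
        };

        static unsigned long vma_pages(const struct vm_area_struct *vma)
        {
                return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        }

        /* Stand-in for the kernel's vm_unacct_memory(). */
        static void vm_unacct_memory(unsigned long pages)
        {
                printf("unaccounting %lu pages\n", pages);
        }

        /* Free one VMA and return its successor, like the kernel's remove_vma(). */
        static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        {
                struct vm_area_struct *next = vma->vm_next;

                free(vma);
                return next;
        }

        /* Caller-side pattern after the change (cf. exit_mmap() above):
         * accumulate while walking the list, unaccount once at the end. */
        static void remove_vma_list(struct vm_area_struct *vma)
        {
                unsigned long nr_accounted = 0;

                while (vma) {
                        if (vma->vm_flags & VM_ACCOUNT)
                                nr_accounted += vma_pages(vma);
                        vma = remove_vma(vma);
                }
                vm_unacct_memory(nr_accounted);
        }

        int main(void)
        {
                struct vm_area_struct *vma = calloc(1, sizeof(*vma));

                if (!vma)
                        return 1;
                vma->vm_start = 0x1000;
                vma->vm_end = 0x5000;           /* 4 pages */
                vma->vm_flags = VM_ACCOUNT;
                remove_vma_list(vma);           /* prints "unaccounting 4 pages" */
                return 0;
        }

Doing the accounting at removal time rather than unmap time ties both bookkeeping steps (dropping the mapping and dropping the commitment) to the same list walk that frees the VMA.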
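Similarly, the mm/percpu.c printk changes hinge on KERN_CONT, which marks a printk() as a continuation to be appended to the previous log record instead of starting a new one. A minimal sketch of that pattern, assuming a kernel build context (printk(), KERN_INFO, and KERN_CONT are real kernel API; the function and its parameters are illustrative):

        #include <linux/kernel.h>

        /* Build one log line from several printk() calls: the first call
         * starts the record and sets its level, and each later fragment is
         * marked KERN_CONT so it is appended to that record rather than
         * starting a new one, as pcpu_dump_alloc_info() needs above. */
        static void print_row(const int *vals, int n)
        {
                int i;

                printk(KERN_INFO "values:");
                for (i = 0; i < n; i++)
                        printk(KERN_CONT " %d", vals[i]);
                printk(KERN_CONT "\n");
        }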