author		Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-05-12 13:06:08 +0100
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-05-12 13:06:08 +0100
commit		25061d285747f20aafa4b50df1b0b5665fef29cd (patch)
tree		c302ac93e3476788a4671ee556a902fce2592f3a /mm
parent		7a6476143270d947924f5bbbc124accb0e558bf4 (diff)
parent		6560ffd1ccd688152393dc7c35dbdcc33140633b (diff)
Merge tag 'regmap-3.4' into regmap-stride
regmap: Last minute bug fix for 3.4
This is a last-minute bug fix that was only just noticed, since the code
path being exercised here is one that is fairly rarely used. The
changelog for the change itself is extremely clear, and the code is
obvious on inspection, so it should be pretty safe.
Conflicts:
drivers/base/regmap/regmap.c (overlap between the fix and stride code)
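For context, the stride work this fix is being merged into teaches regmap that valid register addresses may be spaced more than one apart. A minimal sketch of the configuration it enables, for a hypothetical device — the reg_stride field is the one the stride branch adds:

#include <linux/regmap.h>

/* Hypothetical codec whose 16-bit registers live only at even
 * addresses; once reg_stride is set, regmap rejects accesses to
 * registers that are not a multiple of the stride.
 */
static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.reg_stride	= 2,
};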
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	4
-rw-r--r--	mm/memblock.c	7
-rw-r--r--	mm/memcontrol.c	22
-rw-r--r--	mm/mempolicy.c	11
-rw-r--r--	mm/migrate.c	16
-rw-r--r--	mm/mmap.c	59
-rw-r--r--	mm/nobootmem.c	10
-rw-r--r--	mm/nommu.c	41
-rw-r--r--	mm/swap_state.c	2
-rw-r--r--	mm/vmscan.c	18
-rw-r--r--	mm/vmstat.c	4
11 files changed, 138 insertions, 56 deletions
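Before the raw diff, one hunk deserves a gloss: the mm/memcontrol.c change fixes a conditional that was always true, so the CPU hotplug callback returned NOTIFY_OK early even for CPU_DEAD and the per-cpu memcg charge caches were never drained. A standalone userspace illustration of the logic error (the constant values here are illustrative, not taken from the kernel headers):

#include <stdio.h>

#define CPU_DEAD	0x07	/* illustrative values only */
#define CPU_DEAD_FROZEN	0x17

int main(void)
{
	unsigned long action;

	for (action = 0; action < 0x20; action++) {
		int old = (action != CPU_DEAD) || action != CPU_DEAD_FROZEN;
		int new = action != CPU_DEAD && action != CPU_DEAD_FROZEN;

		/* The two tests disagree only for the two values the
		 * callback actually cares about. */
		if (old != new)
			printf("action %#lx: old=%d new=%d\n", action, old, new);
	}
	return 0;
}

The old expression can never be false — no single action value equals both constants at once — so the drain loop below it was dead code.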
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b8ce6f45095..5a16423a512 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -532,7 +532,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve)
 {
-	struct page *page;
+	struct page *page = NULL;
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
 	struct zonelist *zonelist;
@@ -2791,6 +2791,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * so no worry about deadlock.
 	 */
 	page = pte_page(entry);
+	get_page(page);
 	if (page != pagecache_page)
 		lock_page(page);
@@ -2822,6 +2823,7 @@ out_page_table_lock:
 	}
 	if (page != pagecache_page)
 		unlock_page(page);
+	put_page(page);
 
 out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/memblock.c b/mm/memblock.c
index 99f28559950..a44eab3157f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -330,6 +330,9 @@ static int __init_memblock memblock_add_region(struct memblock_type *type,
 	phys_addr_t end = base + memblock_cap_size(base, &size);
 	int i, nr_new;
 
+	if (!size)
+		return 0;
+
 	/* special case for empty array */
 	if (type->regions[0].size == 0) {
 		WARN_ON(type->cnt != 1 || type->total_size);
@@ -430,6 +433,9 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
 
 	*start_rgn = *end_rgn = 0;
 
+	if (!size)
+		return 0;
+
 	/* we'll create at most two more regions */
 	while (type->cnt + 2 > type->max)
 		if (memblock_double_array(type) < 0)
@@ -514,7 +520,6 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 		     (unsigned long long)base,
 		     (unsigned long long)base + size,
 		     (void *)_RET_IP_);
-	BUG_ON(0 == size);
 
 	return memblock_add_region(_rgn, base, size, MAX_NUMNODES);
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7d698df4a06..31ab9c3f017 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2165,7 +2165,7 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
 	if (action == CPU_ONLINE)
 		return NOTIFY_OK;
 
-	if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN)
+	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
 		return NOTIFY_OK;
 
 	for_each_mem_cgroup(iter)
@@ -2476,10 +2476,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
-				       struct page_cgroup *pc,
 				       enum charge_type ctype,
 				       bool lrucare)
 {
+	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
 	bool anon;
@@ -2716,7 +2716,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 {
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
-	struct page_cgroup *pc;
 	bool oom = true;
 	int ret;
 
@@ -2730,11 +2729,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		oom = false;
 	}
 
-	pc = lookup_page_cgroup(page);
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 	return 0;
 }
 
@@ -2831,16 +2829,13 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
-	struct page_cgroup *pc;
-
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	pc = lookup_page_cgroup(page);
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
+	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3298,14 +3293,13 @@ int mem_cgroup_prepare_migration(struct page *page,
 	 * page. In the case new page is migrated but not remapped, new page's
 	 * mapcount will be finally 0 and we call uncharge in end_migration().
 	 */
-	pc = lookup_page_cgroup(newpage);
 	if (PageAnon(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	else if (page_is_file_cache(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
 	return ret;
 }
 
@@ -3392,7 +3386,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -3763,7 +3757,7 @@ move_account:
 		goto try_to_free;
 	cond_resched();
 	/* "ret" should also be checked to ensure all lists are empty. */
-	} while (memcg->res.usage > 0 || ret);
+	} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
 out:
 	css_put(&memcg->css);
 	return ret;
@@ -3778,7 +3772,7 @@ try_to_free:
 	lru_add_drain_all();
 	/* try to free all pages in this cgroup */
 	shrink = 1;
-	while (nr_retries && memcg->res.usage > 0) {
+	while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
 		int progress;
 
 		if (signal_pending(current)) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index cfb6c867875..b1956913752 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1361,11 +1361,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
 	mm = get_task_mm(task);
 	put_task_struct(task);
-	if (mm)
-		err = do_migrate_pages(mm, old, new,
-			capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
-	else
+
+	if (!mm) {
 		err = -EINVAL;
+		goto out;
+	}
+
+	err = do_migrate_pages(mm, old, new,
+		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 
 	mmput(mm);
 out:
diff --git a/mm/migrate.c b/mm/migrate.c
index 51c08a0c6f6..11072383ae1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1388,14 +1388,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
 	mm = get_task_mm(task);
 	put_task_struct(task);
 
-	if (mm) {
-		if (nodes)
-			err = do_pages_move(mm, task_nodes, nr_pages, pages,
-					    nodes, status, flags);
-		else
-			err = do_pages_stat(mm, nr_pages, pages, status);
-	} else
-		err = -EINVAL;
+	if (!mm)
+		return -EINVAL;
+
+	if (nodes)
+		err = do_pages_move(mm, task_nodes, nr_pages, pages,
+				    nodes, status, flags);
+	else
+		err = do_pages_stat(mm, nr_pages, pages, status);
 
 	mmput(mm);
 	return err;
diff --git a/mm/mmap.c b/mm/mmap.c
index a7bf6a31c9f..848ef52d960 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -240,6 +240,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
 	return next;
 }
 
+static unsigned long do_brk(unsigned long addr, unsigned long len);
+
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
 	unsigned long rlim, retval;
@@ -951,7 +953,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
 * The caller must hold down_write(&current->mm->mmap_sem).
 */
-unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, unsigned long pgoff)
 {
@@ -1087,7 +1089,32 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flag, offset);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_mmap);
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
@@ -2105,21 +2132,25 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 
 	return 0;
 }
-
 EXPORT_SYMBOL(do_munmap);
 
-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long start, size_t len)
 {
 	int ret;
 	struct mm_struct *mm = current->mm;
 
-	profile_munmap(addr);
-
 	down_write(&mm->mmap_sem);
-	ret = do_munmap(mm, addr, len);
+	ret = do_munmap(mm, start, len);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+	profile_munmap(addr);
+	return vm_munmap(addr, len);
+}
 
 static inline void verify_mm_writelocked(struct mm_struct *mm)
 {
@@ -2136,7 +2167,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
 *  anonymous maps.  eventually we may be able to do some
 *  brk-specific accounting here.
 */
-unsigned long do_brk(unsigned long addr, unsigned long len)
+static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
 	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma, * prev;
@@ -2232,7 +2263,17 @@ out:
 	return addr;
 }
 
-EXPORT_SYMBOL(do_brk);
+unsigned long vm_brk(unsigned long addr, unsigned long len)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long ret;
+
+	down_write(&mm->mmap_sem);
+	ret = do_brk(addr, len);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_brk);
 
 /* Release all mmaps. */
 void exit_mmap(struct mm_struct *mm)
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 24f0fc1a56d..e53bb8a256b 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -298,13 +298,19 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 	if (WARN_ON_ONCE(slab_is_available()))
 		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
 
+again:
 	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
 					goal, -1ULL);
 	if (ptr)
 		return ptr;
 
-	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
-					 goal, -1ULL);
+	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
+					goal, -1ULL);
+	if (!ptr && goal) {
+		goal = 0;
+		goto again;
+	}
+
+	return ptr;
 }
 
 void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
diff --git a/mm/nommu.c b/mm/nommu.c
index f59e170fceb..bb8f4f004a8 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1233,7 +1233,7 @@ enomem:
 /*
  * handle mapping creation for uClinux
  */
-unsigned long do_mmap_pgoff(struct file *file,
+static unsigned long do_mmap_pgoff(struct file *file,
 			    unsigned long addr,
 			    unsigned long len,
 			    unsigned long prot,
@@ -1470,7 +1470,32 @@ error_getting_region:
 	show_free_areas(0);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(do_mmap_pgoff);
+
+unsigned long do_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	if (unlikely(offset + PAGE_ALIGN(len) < offset))
+		return -EINVAL;
+	if (unlikely(offset & ~PAGE_MASK))
+		return -EINVAL;
+	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(do_mmap);
+
+unsigned long vm_mmap(struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flag, unsigned long offset)
+{
+	unsigned long ret;
+	struct mm_struct *mm = current->mm;
+
+	down_write(&mm->mmap_sem);
+	ret = do_mmap(file, addr, len, prot, flag, offset);
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL(vm_mmap);
 
 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
@@ -1709,16 +1734,22 @@ erase_whole_vma:
 }
 EXPORT_SYMBOL(do_munmap);
 
-SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+int vm_munmap(unsigned long addr, size_t len)
 {
-	int ret;
 	struct mm_struct *mm = current->mm;
+	int ret;
 
 	down_write(&mm->mmap_sem);
 	ret = do_munmap(mm, addr, len);
 	up_write(&mm->mmap_sem);
 	return ret;
 }
+EXPORT_SYMBOL(vm_munmap);
+
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+{
+	return vm_munmap(addr, len);
+}
 
 /*
  * release all the mappings made in a process's VM space
@@ -1744,7 +1775,7 @@ void exit_mmap(struct mm_struct *mm)
 	kleave("");
 }
 
-unsigned long do_brk(unsigned long addr, unsigned long len)
+unsigned long vm_brk(unsigned long addr, unsigned long len)
 {
 	return -ENOMEM;
 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 9d3dd3763cf..4c5ff7f284d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -26,7 +26,7 @@
 */
 static const struct address_space_operations swap_aops = {
 	.writepage	= swap_writepage,
-	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.set_page_dirty	= __set_page_dirty_no_writeback,
 	.migratepage	= migrate_page,
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33c332bbab7..33dc256033b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1568,9 +1568,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	reclaim_stat->recent_scanned[0] += nr_anon;
 	reclaim_stat->recent_scanned[1] += nr_file;
 
-	if (current_is_kswapd())
-		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
-	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+	if (global_reclaim(sc)) {
+		if (current_is_kswapd())
+			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
+					       nr_reclaimed);
+		else
+			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
+					       nr_reclaimed);
+	}
 
 	putback_inactive_pages(mz, &page_list);
 
@@ -2107,12 +2112,7 @@ restart:
 		 * with multiple processes reclaiming pages, the total
 		 * freeing target can get unreasonably large.
 		 */
-		if (nr_reclaimed >= nr_to_reclaim)
-			nr_to_reclaim = 0;
-		else
-			nr_to_reclaim -= nr_reclaimed;
-
-		if (!nr_to_reclaim && priority < DEF_PRIORITY)
+		if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
 			break;
 	}
 	blk_finish_plug(&plug);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index f600557a765..7db1b9bab49 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -738,7 +738,8 @@ const char * const vmstat_text[] = {
 	"pgmajfault",
 
 	TEXTS_FOR_ZONES("pgrefill")
-	TEXTS_FOR_ZONES("pgsteal")
+	TEXTS_FOR_ZONES("pgsteal_kswapd")
+	TEXTS_FOR_ZONES("pgsteal_direct")
 	TEXTS_FOR_ZONES("pgscan_kswapd")
 	TEXTS_FOR_ZONES("pgscan_direct")
 
@@ -747,7 +748,6 @@ const char * const vmstat_text[] = {
 #endif
 	"pginodesteal",
 	"slabs_scanned",
-	"kswapd_steal",
 	"kswapd_inodesteal",
 	"kswapd_low_wmark_hit_quickly",
 	"kswapd_high_wmark_hit_quickly",
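The mm/mmap.c and mm/nommu.c hunks above stop exporting do_mmap_pgoff() and do_brk() and instead export vm_mmap(), vm_munmap() and vm_brk(), which take mmap_sem internally. A sketch of how an in-tree caller migrates — the two wrapper functions here are hypothetical, while the vm_* signatures are the ones added by the diff:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>

/* Hypothetical driver helper: map @len bytes of @file at @off into
 * the current task.  Previously this needed an explicit
 * down_write(&current->mm->mmap_sem)/up_write() pair around
 * do_mmap(); vm_mmap() now does that locking itself.
 */
static unsigned long example_map(struct file *file, unsigned long len,
				 unsigned long off)
{
	return vm_mmap(file, 0, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, off);
}

/* Likewise vm_munmap() replaces the open-coded
 * down_write()/do_munmap()/up_write() sequence.
 */
static int example_unmap(unsigned long addr, size_t len)
{
	return vm_munmap(addr, len);
}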