Diffstat (limited to 'mm')
45 files changed, 1745 insertions, 408 deletions
diff --git a/mm/Makefile b/mm/Makefile index 7a68d2ab556..6c2a73a54a4 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -33,7 +33,11 @@ obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o -obj-$(CONFIG_SMP) += percpu.o +ifdef CONFIG_SMP +obj-y += percpu.o +else +obj-y += percpu_up.o +endif obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 0e8ca034770..707d0dc6da0 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -11,6 +11,8 @@ #include <linux/writeback.h> #include <linux/device.h> +static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); + void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) { } @@ -25,6 +27,11 @@ struct backing_dev_info default_backing_dev_info = { }; EXPORT_SYMBOL_GPL(default_backing_dev_info); +struct backing_dev_info noop_backing_dev_info = { + .name = "noop", +}; +EXPORT_SYMBOL_GPL(noop_backing_dev_info); + static struct class *bdi_class; /* @@ -227,6 +234,9 @@ static struct device_attribute bdi_dev_attrs[] = { static __init int bdi_class_init(void) { bdi_class = class_create(THIS_MODULE, "bdi"); + if (IS_ERR(bdi_class)) + return PTR_ERR(bdi_class); + bdi_class->dev_attrs = bdi_dev_attrs; bdi_debug_init(); return 0; @@ -712,6 +722,33 @@ void bdi_destroy(struct backing_dev_info *bdi) } EXPORT_SYMBOL(bdi_destroy); +/* + * For use from filesystems to quickly init and register a bdi associated + * with dirty writeback + */ +int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, + unsigned int cap) +{ + char tmp[32]; + int err; + + bdi->name = name; + bdi->capabilities = cap; + err = bdi_init(bdi); + if (err) + return err; + + sprintf(tmp, "%.28s%s", name, "-%d"); + err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq)); + if (err) { + bdi_destroy(bdi); + return err; + } + + return 0; +} +EXPORT_SYMBOL(bdi_setup_and_register); + static wait_queue_head_t congestion_wqh[2] = { __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) diff --git a/mm/bootmem.c b/mm/bootmem.c index d7c791ef003..58c66cc5056 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -10,6 +10,7 @@ */ #include <linux/init.h> #include <linux/pfn.h> +#include <linux/slab.h> #include <linux/bootmem.h> #include <linux/module.h> #include <linux/kmemleak.h> @@ -180,19 +181,12 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) end_aligned = end & ~(BITS_PER_LONG - 1); if (end_aligned <= start_aligned) { -#if 1 - printk(KERN_DEBUG " %lx - %lx\n", start, end); -#endif for (i = start; i < end; i++) __free_pages_bootmem(pfn_to_page(i), 0); return; } -#if 1 - printk(KERN_DEBUG " %lx %lx - %lx %lx\n", - start, start_aligned, end_aligned, end); -#endif for (i = start; i < start_aligned; i++) __free_pages_bootmem(pfn_to_page(i), 0); @@ -310,9 +304,22 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat) unsigned long __init free_all_bootmem(void) { #ifdef CONFIG_NO_BOOTMEM - return free_all_memory_core_early(NODE_DATA(0)->node_id); + /* + * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id + * because in some case like Node0 doesnt have RAM installed + * low ram will be on Node1 + * Use MAX_NUMNODES will make sure all ranges in early_node_map[] + * will be used instead of only Node0 related + */ + return 
free_all_memory_core_early(MAX_NUMNODES); #else - return free_all_bootmem_core(NODE_DATA(0)->bdata); + unsigned long total_pages = 0; + bootmem_data_t *bdata; + + list_for_each_entry(bdata, &bdata_list, list) + total_pages += free_all_bootmem_core(bdata); + + return total_pages; #endif } @@ -428,9 +435,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, { #ifdef CONFIG_NO_BOOTMEM free_early(physaddr, physaddr + size); -#if 0 - printk(KERN_DEBUG "free %lx %lx\n", physaddr, size); -#endif #else unsigned long start, end; @@ -456,9 +460,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size) { #ifdef CONFIG_NO_BOOTMEM free_early(addr, addr + size); -#if 0 - printk(KERN_DEBUG "free %lx %lx\n", addr, size); -#endif #else unsigned long start, end; diff --git a/mm/bounce.c b/mm/bounce.c index a2b76a588e3..13b6dad1eed 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -6,6 +6,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/swap.h> +#include <linux/gfp.h> #include <linux/bio.h> #include <linux/pagemap.h> #include <linux/mempool.h> diff --git a/mm/failslab.c b/mm/failslab.c index bb41f98dd8b..c5f88f240dd 100644 --- a/mm/failslab.c +++ b/mm/failslab.c @@ -1,5 +1,4 @@ #include <linux/fault-inject.h> -#include <linux/gfp.h> #include <linux/slab.h> static struct { diff --git a/mm/filemap.c b/mm/filemap.c index 045b31c3765..140ebda9640 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -10,13 +10,13 @@ * the NFS filesystem used to do this differently, for example) */ #include <linux/module.h> -#include <linux/slab.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/aio.h> #include <linux/capability.h> #include <linux/kernel_stat.h> +#include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/mman.h> diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 78b94f0b6d5..83364df74a3 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -17,6 +17,7 @@ #include <linux/sched.h> #include <linux/seqlock.h> #include <linux/mutex.h> +#include <linux/gfp.h> #include <asm/tlbflush.h> #include <asm/io.h> diff --git a/mm/highmem.c b/mm/highmem.c index 9c1e627f282..bed8a8bfd01 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -220,7 +220,7 @@ EXPORT_SYMBOL(kmap_high); * @page: &struct page to pin * * Returns the page's current virtual memory address, or NULL if no mapping - * exists. When and only when a non null address is returned then a + * exists. If and only if a non null address is returned then a * matching call to kunmap_high() is necessary. * * This can be called from any context. diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3a5aeb37c11..ffbdfc86aed 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2,7 +2,6 @@ * Generic hugetlb support. 
* (C) William Irwin, April 2004 */ -#include <linux/gfp.h> #include <linux/list.h> #include <linux/init.h> #include <linux/module.h> @@ -18,6 +17,7 @@ #include <linux/mutex.h> #include <linux/bootmem.h> #include <linux/sysfs.h> +#include <linux/slab.h> #include <asm/page.h> #include <asm/pgtable.h> @@ -546,6 +546,7 @@ static void free_huge_page(struct page *page) mapping = (struct address_space *) page_private(page); set_page_private(page, 0); + page->mapping = NULL; BUG_ON(page_count(page)); INIT_LIST_HEAD(&page->lru); @@ -2447,8 +2448,10 @@ retry: spin_lock(&inode->i_lock); inode->i_blocks += blocks_per_huge_page(h); spin_unlock(&inode->i_lock); - } else + } else { lock_page(page); + page->mapping = HUGETLB_POISON; + } } /* diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 5b069e4f5e4..2c0d032ac89 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -72,7 +72,6 @@ #include <linux/module.h> #include <linux/kthread.h> #include <linux/prio_tree.h> -#include <linux/gfp.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> @@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) do { cond_resched(); page = follow_page(vma, addr, FOLL_GET); - if (!page) + if (IS_ERR_OR_NULL(page)) break; if (PageKsm(page)) ret = handle_mm_fault(vma->vm_mm, vma, addr, @@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) goto out; page = follow_page(vma, addr, FOLL_GET); - if (!page) + if (IS_ERR_OR_NULL(page)) goto out; if (PageAnon(page)) { flush_anon_page(vma, page, addr); @@ -751,7 +751,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, * page */ if (page_mapcount(page) + 1 + swapped != page_count(page)) { - set_pte_at_notify(mm, addr, ptep, entry); + set_pte_at(mm, addr, ptep, entry); goto out_unlock; } entry = pte_wrprotect(entry); @@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, cond_resched(); tree_rmap_item = rb_entry(*new, struct rmap_item, node); tree_page = get_mergeable_page(tree_rmap_item); - if (!tree_page) + if (IS_ERR_OR_NULL(tree_page)) return NULL; /* @@ -1294,7 +1294,7 @@ next_mm: if (ksm_test_exit(mm)) break; *page = follow_page(vma, ksm_scan.address, FOLL_GET); - if (*page && PageAnon(*page)) { + if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) { flush_anon_page(vma, *page, ksm_scan.address); flush_dcache_page(*page); rmap_item = get_next_rmap_item(slot, @@ -1308,7 +1308,7 @@ next_mm: up_read(&mm->mmap_sem); return rmap_item; } - if (*page) + if (!IS_ERR_OR_NULL(*page)) put_page(*page); ksm_scan.address += PAGE_SIZE; cond_resched(); @@ -1367,7 +1367,7 @@ next_mm: static void ksm_do_scan(unsigned int scan_npages) { struct rmap_item *rmap_item; - struct page *page; + struct page *uninitialized_var(page); while (scan_npages--) { cond_resched(); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d813823ab08..6c755de385f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6,6 +6,10 @@ * Copyright 2007 OpenVZ SWsoft Inc * Author: Pavel Emelianov <xemul@openvz.org> * + * Memory thresholds + * Copyright (C) 2009 Nokia Corporation + * Author: Kirill A. 
Shutemov + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or @@ -21,6 +25,7 @@ #include <linux/memcontrol.h> #include <linux/cgroup.h> #include <linux/mm.h> +#include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/smp.h> #include <linux/page-flags.h> @@ -32,7 +37,10 @@ #include <linux/rbtree.h> #include <linux/slab.h> #include <linux/swap.h> +#include <linux/swapops.h> #include <linux/spinlock.h> +#include <linux/eventfd.h> +#include <linux/sort.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> @@ -55,7 +63,15 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/ #define do_swap_account (0) #endif -#define SOFTLIMIT_EVENTS_THRESH (1000) +/* + * Per memcg event counter is incremented at every pagein/pageout. This counter + * is used for trigger some periodic events. This is straightforward and better + * than using jiffies etc. to handle periodic memcg event. + * + * These values will be used as !((event) & ((1 <<(thresh)) - 1)) + */ +#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */ +#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */ /* * Statistics for memory cgroup. @@ -69,62 +85,16 @@ enum mem_cgroup_stat_index { MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ - MEM_CGROUP_STAT_EVENTS, /* sum of pagein + pageout for internal use */ MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ + MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */ MEM_CGROUP_STAT_NSTATS, }; struct mem_cgroup_stat_cpu { s64 count[MEM_CGROUP_STAT_NSTATS]; -} ____cacheline_aligned_in_smp; - -struct mem_cgroup_stat { - struct mem_cgroup_stat_cpu cpustat[0]; }; -static inline void -__mem_cgroup_stat_reset_safe(struct mem_cgroup_stat_cpu *stat, - enum mem_cgroup_stat_index idx) -{ - stat->count[idx] = 0; -} - -static inline s64 -__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat, - enum mem_cgroup_stat_index idx) -{ - return stat->count[idx]; -} - -/* - * For accounting under irq disable, no need for increment preempt count. - */ -static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat, - enum mem_cgroup_stat_index idx, int val) -{ - stat->count[idx] += val; -} - -static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat, - enum mem_cgroup_stat_index idx) -{ - int cpu; - s64 ret = 0; - for_each_possible_cpu(cpu) - ret += stat->cpustat[cpu].count[idx]; - return ret; -} - -static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat) -{ - s64 ret; - - ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE); - ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS); - return ret; -} - /* * per-zone information in memory controller. */ @@ -174,6 +144,22 @@ struct mem_cgroup_tree { static struct mem_cgroup_tree soft_limit_tree __read_mostly; +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + u64 threshold; +}; + +struct mem_cgroup_threshold_ary { + /* An array index points to threshold just below usage. */ + atomic_t current_threshold; + /* Size of entries[] */ + unsigned int size; + /* Array of thresholds */ + struct mem_cgroup_threshold entries[0]; +}; + +static void mem_cgroup_threshold(struct mem_cgroup *mem); + /* * The memory controller data structure. 
The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide @@ -217,7 +203,7 @@ struct mem_cgroup { * Should the accounting and control be hierarchical, per subtree? */ bool use_hierarchy; - unsigned long last_oom_jiffies; + atomic_t oom_lock; atomic_t refcnt; unsigned int swappiness; @@ -225,10 +211,48 @@ struct mem_cgroup { /* set when res.limit == memsw.limit */ bool memsw_is_minimum; + /* protect arrays of thresholds */ + struct mutex thresholds_lock; + + /* thresholds for memory usage. RCU-protected */ + struct mem_cgroup_threshold_ary *thresholds; + + /* thresholds for mem+swap usage. RCU-protected */ + struct mem_cgroup_threshold_ary *memsw_thresholds; + /* - * statistics. This must be placed at the end of memcg. + * Should we move charges of a task when a task is moved into this + * mem_cgroup ? And what type of charges should we move ? */ - struct mem_cgroup_stat stat; + unsigned long move_charge_at_immigrate; + + /* + * percpu counter. + */ + struct mem_cgroup_stat_cpu *stat; +}; + +/* Stuffs for move charges at task migration. */ +/* + * Types of charges to be moved. "move_charge_at_immitgrate" is treated as a + * left-shifted bitmap of these types. + */ +enum move_type { + MOVE_CHARGE_TYPE_ANON, /* private anonymous page and swap of it */ + NR_MOVE_TYPE, +}; + +/* "mc" and its members are protected by cgroup_mutex */ +static struct move_charge_struct { + struct mem_cgroup *from; + struct mem_cgroup *to; + unsigned long precharge; + unsigned long moved_charge; + unsigned long moved_swap; + struct task_struct *moving_task; /* a task moving charges */ + wait_queue_head_t waitq; /* a waitq for other context */ +} mc = { + .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq), }; /* @@ -371,23 +395,6 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem, spin_unlock(&mctz->lock); } -static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem) -{ - bool ret = false; - int cpu; - s64 val; - struct mem_cgroup_stat_cpu *cpustat; - - cpu = get_cpu(); - cpustat = &mem->stat.cpustat[cpu]; - val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_EVENTS); - if (unlikely(val > SOFTLIMIT_EVENTS_THRESH)) { - __mem_cgroup_stat_reset_safe(cpustat, MEM_CGROUP_STAT_EVENTS); - ret = true; - } - put_cpu(); - return ret; -} static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page) { @@ -481,17 +488,31 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) return mz; } +static s64 mem_cgroup_read_stat(struct mem_cgroup *mem, + enum mem_cgroup_stat_index idx) +{ + int cpu; + s64 val = 0; + + for_each_possible_cpu(cpu) + val += per_cpu(mem->stat->count[idx], cpu); + return val; +} + +static s64 mem_cgroup_local_usage(struct mem_cgroup *mem) +{ + s64 ret; + + ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); + ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); + return ret; +} + static void mem_cgroup_swap_statistics(struct mem_cgroup *mem, bool charge) { int val = (charge) ? 1 : -1; - struct mem_cgroup_stat *stat = &mem->stat; - struct mem_cgroup_stat_cpu *cpustat; - int cpu = get_cpu(); - - cpustat = &stat->cpustat[cpu]; - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val); - put_cpu(); + this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); } static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, @@ -499,24 +520,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, bool charge) { int val = (charge) ? 
1 : -1; - struct mem_cgroup_stat *stat = &mem->stat; - struct mem_cgroup_stat_cpu *cpustat; - int cpu = get_cpu(); - cpustat = &stat->cpustat[cpu]; + preempt_disable(); + if (PageCgroupCache(pc)) - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val); + __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val); else - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val); + __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val); if (charge) - __mem_cgroup_stat_add_safe(cpustat, - MEM_CGROUP_STAT_PGPGIN_COUNT, 1); + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]); else - __mem_cgroup_stat_add_safe(cpustat, - MEM_CGROUP_STAT_PGPGOUT_COUNT, 1); - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_EVENTS, 1); - put_cpu(); + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]); + __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]); + + preempt_enable(); } static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, @@ -534,6 +552,29 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, return total; } +static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift) +{ + s64 val; + + val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]); + + return !(val & ((1 << event_mask_shift) - 1)); +} + +/* + * Check events in order. + * + */ +static void memcg_check_events(struct mem_cgroup *mem, struct page *page) +{ + /* threshold event is triggered in finer grain than soft limit */ + if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) { + mem_cgroup_threshold(mem); + if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH))) + mem_cgroup_update_tree(mem, page); + } +} + static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) { return container_of(cgroup_subsys_state(cont, @@ -1000,7 +1041,7 @@ static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data) } /** - * mem_cgroup_print_mem_info: Called from OOM with tasklist_lock held in read mode. + * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. * @memcg: The memory cgroup that went over limit * @p: Task that is going to be killed * @@ -1174,7 +1215,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, } } } - if (!mem_cgroup_local_usage(&victim->stat)) { + if (!mem_cgroup_local_usage(victim)) { /* this cgroup's local usage == 0 */ css_put(&victim->css); continue; @@ -1205,32 +1246,102 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, return total; } -bool mem_cgroup_oom_called(struct task_struct *task) +static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data) { - bool ret = false; - struct mem_cgroup *mem; - struct mm_struct *mm; + int *val = (int *)data; + int x; + /* + * Logically, we can stop scanning immediately when we find + * a memcg is already locked. But condidering unlock ops and + * creation/removal of memcg, scan-all is simple operation. + */ + x = atomic_inc_return(&mem->oom_lock); + *val = max(x, *val); + return 0; +} +/* + * Check OOM-Killer is already running under our hierarchy. + * If someone is running, return false. 
+ */ +static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) +{ + int lock_count = 0; - rcu_read_lock(); - mm = task->mm; - if (!mm) - mm = &init_mm; - mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10)) - ret = true; - rcu_read_unlock(); - return ret; + mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb); + + if (lock_count == 1) + return true; + return false; } -static int record_last_oom_cb(struct mem_cgroup *mem, void *data) +static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data) { - mem->last_oom_jiffies = jiffies; + /* + * When a new child is created while the hierarchy is under oom, + * mem_cgroup_oom_lock() may not be called. We have to use + * atomic_add_unless() here. + */ + atomic_add_unless(&mem->oom_lock, -1, 0); return 0; } -static void record_last_oom(struct mem_cgroup *mem) +static void mem_cgroup_oom_unlock(struct mem_cgroup *mem) +{ + mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb); +} + +static DEFINE_MUTEX(memcg_oom_mutex); +static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); + +/* + * try to call OOM killer. returns false if we should exit memory-reclaim loop. + */ +bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) { - mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb); + DEFINE_WAIT(wait); + bool locked; + + /* At first, try to OOM lock hierarchy under mem.*/ + mutex_lock(&memcg_oom_mutex); + locked = mem_cgroup_oom_lock(mem); + /* + * Even if signal_pending(), we can't quit charge() loop without + * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL + * under OOM is always welcomed, use TASK_KILLABLE here. + */ + if (!locked) + prepare_to_wait(&memcg_oom_waitq, &wait, TASK_KILLABLE); + mutex_unlock(&memcg_oom_mutex); + + if (locked) + mem_cgroup_out_of_memory(mem, mask); + else { + schedule(); + finish_wait(&memcg_oom_waitq, &wait); + } + mutex_lock(&memcg_oom_mutex); + mem_cgroup_oom_unlock(mem); + /* + * Here, we use global waitq .....more fine grained waitq ? + * Assume following hierarchy. + * A/ + * 01 + * 02 + * assume OOM happens both in A and 01 at the same time. Tthey are + * mutually exclusive by lock. (kill in 01 helps A.) + * When we use per memcg waitq, we have to wake up waiters on A and 02 + * in addtion to waiters on 01. We use global waitq for avoiding mess. + * It will not be a big problem. + * (And a task may be moved to other groups while it's waiting for OOM.) + */ + wake_up_all(&memcg_oom_waitq); + mutex_unlock(&memcg_oom_mutex); + + if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current)) + return false; + /* Give chance to dying process */ + schedule_timeout(1); + return true; } /* @@ -1240,9 +1351,6 @@ static void record_last_oom(struct mem_cgroup *mem) void mem_cgroup_update_file_mapped(struct page *page, int val) { struct mem_cgroup *mem; - struct mem_cgroup_stat *stat; - struct mem_cgroup_stat_cpu *cpustat; - int cpu; struct page_cgroup *pc; pc = lookup_page_cgroup(page); @@ -1251,20 +1359,20 @@ void mem_cgroup_update_file_mapped(struct page *page, int val) lock_page_cgroup(pc); mem = pc->mem_cgroup; - if (!mem) - goto done; - - if (!PageCgroupUsed(pc)) + if (!mem || !PageCgroupUsed(pc)) goto done; /* - * Preemption is already disabled, we don't need get_cpu() + * Preemption is already disabled. 
We can use __this_cpu_xxx */ - cpu = smp_processor_id(); - stat = &mem->stat; - cpustat = &stat->cpustat[cpu]; + if (val > 0) { + __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + SetPageCgroupFileMapped(pc); + } else { + __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + ClearPageCgroupFileMapped(pc); + } - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val); done: unlock_page_cgroup(pc); } @@ -1401,19 +1509,21 @@ static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb, * oom-killer can be invoked. */ static int __mem_cgroup_try_charge(struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcg, - bool oom, struct page *page) + gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom) { struct mem_cgroup *mem, *mem_over_limit; int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; struct res_counter *fail_res; int csize = CHARGE_SIZE; - if (unlikely(test_thread_flag(TIF_MEMDIE))) { - /* Don't account this! */ - *memcg = NULL; - return 0; - } + /* + * Unlike gloval-vm's OOM-kill, we're not in memory shortage + * in system level. So, allow to go ahead dying process in addition to + * MEMDIE process. + */ + if (unlikely(test_thread_flag(TIF_MEMDIE) + || fatal_signal_pending(current))) + goto bypass; /* * We always charge the cgroup the mm_struct belongs to. @@ -1440,7 +1550,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, unsigned long flags = 0; if (consume_stock(mem)) - goto charged; + goto done; ret = res_counter_charge(&mem->res, csize, &fail_res); if (likely(!ret)) { @@ -1483,28 +1593,70 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, if (mem_cgroup_check_under_limit(mem_over_limit)) continue; + /* try to avoid oom while someone is moving charge */ + if (mc.moving_task && current != mc.moving_task) { + struct mem_cgroup *from, *to; + bool do_continue = false; + /* + * There is a small race that "from" or "to" can be + * freed by rmdir, so we use css_tryget(). + */ + rcu_read_lock(); + from = mc.from; + to = mc.to; + if (from && css_tryget(&from->css)) { + if (mem_over_limit->use_hierarchy) + do_continue = css_is_ancestor( + &from->css, + &mem_over_limit->css); + else + do_continue = (from == mem_over_limit); + css_put(&from->css); + } + if (!do_continue && to && css_tryget(&to->css)) { + if (mem_over_limit->use_hierarchy) + do_continue = css_is_ancestor( + &to->css, + &mem_over_limit->css); + else + do_continue = (to == mem_over_limit); + css_put(&to->css); + } + rcu_read_unlock(); + if (do_continue) { + DEFINE_WAIT(wait); + prepare_to_wait(&mc.waitq, &wait, + TASK_INTERRUPTIBLE); + /* moving charge context might have finished. */ + if (mc.moving_task) + schedule(); + finish_wait(&mc.waitq, &wait); + continue; + } + } + if (!nr_retries--) { - if (oom) { - mem_cgroup_out_of_memory(mem_over_limit, gfp_mask); - record_last_oom(mem_over_limit); + if (!oom) + goto nomem; + if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) { + nr_retries = MEM_CGROUP_RECLAIM_RETRIES; + continue; } - goto nomem; + /* When we reach here, current task is dying .*/ + css_put(&mem->css); + goto bypass; } } if (csize > PAGE_SIZE) refill_stock(mem, csize - PAGE_SIZE); -charged: - /* - * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. - * if they exceeds softlimit. 
- */ - if (mem_cgroup_soft_limit_check(mem)) - mem_cgroup_update_tree(mem, page); done: return 0; nomem: css_put(&mem->css); return -ENOMEM; +bypass: + *memcg = NULL; + return 0; } /* @@ -1512,14 +1664,23 @@ nomem: * This function is for that and do uncharge, put css's refcnt. * gotten by try_charge(). */ -static void mem_cgroup_cancel_charge(struct mem_cgroup *mem) +static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem, + unsigned long count) { if (!mem_cgroup_is_root(mem)) { - res_counter_uncharge(&mem->res, PAGE_SIZE); + res_counter_uncharge(&mem->res, PAGE_SIZE * count); if (do_swap_account) - res_counter_uncharge(&mem->memsw, PAGE_SIZE); + res_counter_uncharge(&mem->memsw, PAGE_SIZE * count); + VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags)); + WARN_ON_ONCE(count > INT_MAX); + __css_put(&mem->css, (int)count); } - css_put(&mem->css); + /* we don't need css_put for root */ +} + +static void mem_cgroup_cancel_charge(struct mem_cgroup *mem) +{ + __mem_cgroup_cancel_charge(mem, 1); } /* @@ -1615,6 +1776,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, mem_cgroup_charge_statistics(mem, pc, true); unlock_page_cgroup(pc); + /* + * "charge_statistics" updated event counter. Then, check it. + * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree. + * if they exceeds softlimit. + */ + memcg_check_events(mem, pc->page); } /** @@ -1622,61 +1789,48 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem, * @pc: page_cgroup of the page. * @from: mem_cgroup which the page is moved from. * @to: mem_cgroup which the page is moved to. @from != @to. + * @uncharge: whether we should call uncharge and css_put against @from. * * The caller must confirm following. * - page is not on LRU (isolate_page() is useful.) * - the pc is locked, used, and ->mem_cgroup points to @from. * - * This function does "uncharge" from old cgroup but doesn't do "charge" to - * new cgroup. It should be done by a caller. + * This function doesn't do "charge" nor css_get to new cgroup. It should be + * done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is + * true, this function does "uncharge" from old cgroup, but it doesn't if + * @uncharge is false, so a caller should do "uncharge". 
*/ static void __mem_cgroup_move_account(struct page_cgroup *pc, - struct mem_cgroup *from, struct mem_cgroup *to) + struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) { - struct page *page; - int cpu; - struct mem_cgroup_stat *stat; - struct mem_cgroup_stat_cpu *cpustat; - VM_BUG_ON(from == to); VM_BUG_ON(PageLRU(pc->page)); VM_BUG_ON(!PageCgroupLocked(pc)); VM_BUG_ON(!PageCgroupUsed(pc)); VM_BUG_ON(pc->mem_cgroup != from); - if (!mem_cgroup_is_root(from)) - res_counter_uncharge(&from->res, PAGE_SIZE); - mem_cgroup_charge_statistics(from, pc, false); - - page = pc->page; - if (page_mapped(page) && !PageAnon(page)) { - cpu = smp_processor_id(); - /* Update mapped_file data for mem_cgroup "from" */ - stat = &from->stat; - cpustat = &stat->cpustat[cpu]; - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, - -1); - - /* Update mapped_file data for mem_cgroup "to" */ - stat = &to->stat; - cpustat = &stat->cpustat[cpu]; - __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, - 1); + if (PageCgroupFileMapped(pc)) { + /* Update mapped_file data for mem_cgroup */ + preempt_disable(); + __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); + preempt_enable(); } + mem_cgroup_charge_statistics(from, pc, false); + if (uncharge) + /* This is not "cancel", but cancel_charge does all we need. */ + mem_cgroup_cancel_charge(from); - if (do_swap_account && !mem_cgroup_is_root(from)) - res_counter_uncharge(&from->memsw, PAGE_SIZE); - css_put(&from->css); - - css_get(&to->css); + /* caller should have done css_get */ pc->mem_cgroup = to; mem_cgroup_charge_statistics(to, pc, true); /* * We charges against "to" which may not have any tasks. Then, "to" * can be under rmdir(). But in current implementation, caller of - * this function is just force_empty() and it's garanteed that - * "to" is never removed. So, we don't check rmdir status here. + * this function is just force_empty() and move charge, so it's + * garanteed that "to" is never removed. So, we don't check rmdir + * status here. 
*/ } @@ -1685,15 +1839,20 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc, * __mem_cgroup_move_account() */ static int mem_cgroup_move_account(struct page_cgroup *pc, - struct mem_cgroup *from, struct mem_cgroup *to) + struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge) { int ret = -EINVAL; lock_page_cgroup(pc); if (PageCgroupUsed(pc) && pc->mem_cgroup == from) { - __mem_cgroup_move_account(pc, from, to); + __mem_cgroup_move_account(pc, from, to, uncharge); ret = 0; } unlock_page_cgroup(pc); + /* + * check events + */ + memcg_check_events(to, pc->page); + memcg_check_events(from, pc->page); return ret; } @@ -1722,15 +1881,13 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc, goto put; parent = mem_cgroup_from_cont(pcg); - ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page); + ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false); if (ret || !parent) goto put_back; - ret = mem_cgroup_move_account(pc, child, parent); - if (!ret) - css_put(&parent->css); /* drop extra refcnt by try_charge() */ - else - mem_cgroup_cancel_charge(parent); /* does css_put */ + ret = mem_cgroup_move_account(pc, child, parent, true); + if (ret) + mem_cgroup_cancel_charge(parent); put_back: putback_lru_page(page); put: @@ -1760,7 +1917,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, prefetchw(pc); mem = memcg; - ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page); + ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true); if (ret || !mem) return ret; @@ -1880,14 +2037,14 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm, if (!mem) goto charge_cur_mm; *ptr = mem; - ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page); + ret = __mem_cgroup_try_charge(NULL, mask, ptr, true); /* drop extra refcnt from tryget */ css_put(&mem->css); return ret; charge_cur_mm: if (unlikely(!mm)) mm = &init_mm; - return __mem_cgroup_try_charge(mm, mask, ptr, true, page); + return __mem_cgroup_try_charge(mm, mask, ptr, true); } static void @@ -2064,8 +2221,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype) mz = page_cgroup_zoneinfo(pc); unlock_page_cgroup(pc); - if (mem_cgroup_soft_limit_check(mem)) - mem_cgroup_update_tree(mem, page); + memcg_check_events(mem, page); /* at swapout, this memcg will be accessed to record to swap */ if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT) css_put(&mem->css); @@ -2192,6 +2348,64 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent) } rcu_read_unlock(); } + +/** + * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record. + * @entry: swap entry to be moved + * @from: mem_cgroup which the entry is moved from + * @to: mem_cgroup which the entry is moved to + * @need_fixup: whether we should fixup res_counters and refcounts. + * + * It succeeds only when the swap_cgroup's record for this entry is the same + * as the mem_cgroup's id of @from. + * + * Returns 0 on success, -EINVAL on failure. + * + * The caller must have charged to @to, IOW, called res_counter_charge() about + * both res and memsw, and called css_get(). 
+ */ +static int mem_cgroup_move_swap_account(swp_entry_t entry, + struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) +{ + unsigned short old_id, new_id; + + old_id = css_id(&from->css); + new_id = css_id(&to->css); + + if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { + mem_cgroup_swap_statistics(from, false); + mem_cgroup_swap_statistics(to, true); + /* + * This function is only called from task migration context now. + * It postpones res_counter and refcount handling till the end + * of task migration(mem_cgroup_clear_mc()) for performance + * improvement. But we cannot postpone mem_cgroup_get(to) + * because if the process that has been moved to @to does + * swap-in, the refcount of @to might be decreased to 0. + */ + mem_cgroup_get(to); + if (need_fixup) { + if (!mem_cgroup_is_root(from)) + res_counter_uncharge(&from->memsw, PAGE_SIZE); + mem_cgroup_put(from); + /* + * we charged both to->res and to->memsw, so we should + * uncharge to->res. + */ + if (!mem_cgroup_is_root(to)) + res_counter_uncharge(&to->res, PAGE_SIZE); + css_put(&to->css); + } + return 0; + } + return -EINVAL; +} +#else +static inline int mem_cgroup_move_swap_account(swp_entry_t entry, + struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) +{ + return -EINVAL; +} #endif /* @@ -2215,12 +2429,11 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) } unlock_page_cgroup(pc); + *ptr = mem; if (mem) { - ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false, - page); + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false); css_put(&mem->css); } - *ptr = mem; return ret; } @@ -2704,7 +2917,7 @@ static int mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data) { struct mem_cgroup_idx_data *d = data; - d->val += mem_cgroup_read_stat(&mem->stat, d->idx); + d->val += mem_cgroup_read_stat(mem, d->idx); return 0; } @@ -2719,40 +2932,50 @@ mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem, *val = d.val; } +static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) +{ + u64 idx_val, val; + + if (!mem_cgroup_is_root(mem)) { + if (!swap) + return res_counter_read_u64(&mem->res, RES_USAGE); + else + return res_counter_read_u64(&mem->memsw, RES_USAGE); + } + + mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val); + val = idx_val; + mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val); + val += idx_val; + + if (swap) { + mem_cgroup_get_recursive_idx_stat(mem, + MEM_CGROUP_STAT_SWAPOUT, &idx_val); + val += idx_val; + } + + return val << PAGE_SHIFT; +} + static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) { struct mem_cgroup *mem = mem_cgroup_from_cont(cont); - u64 idx_val, val; + u64 val; int type, name; type = MEMFILE_TYPE(cft->private); name = MEMFILE_ATTR(cft->private); switch (type) { case _MEM: - if (name == RES_USAGE && mem_cgroup_is_root(mem)) { - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_CACHE, &idx_val); - val = idx_val; - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_RSS, &idx_val); - val += idx_val; - val <<= PAGE_SHIFT; - } else + if (name == RES_USAGE) + val = mem_cgroup_usage(mem, false); + else val = res_counter_read_u64(&mem->res, name); break; case _MEMSWAP: - if (name == RES_USAGE && mem_cgroup_is_root(mem)) { - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_CACHE, &idx_val); - val = idx_val; - mem_cgroup_get_recursive_idx_stat(mem, - MEM_CGROUP_STAT_RSS, &idx_val); - val += idx_val; - mem_cgroup_get_recursive_idx_stat(mem, - 
MEM_CGROUP_STAT_SWAPOUT, &idx_val); - val += idx_val; - val <<= PAGE_SHIFT; - } else + if (name == RES_USAGE) + val = mem_cgroup_usage(mem, true); + else val = res_counter_read_u64(&mem->memsw, name); break; default: @@ -2865,6 +3088,39 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) return 0; } +static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp, + struct cftype *cft) +{ + return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate; +} + +#ifdef CONFIG_MMU +static int mem_cgroup_move_charge_write(struct cgroup *cgrp, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp); + + if (val >= (1 << NR_MOVE_TYPE)) + return -EINVAL; + /* + * We check this value several times in both in can_attach() and + * attach(), so we need cgroup lock to prevent this value from being + * inconsistent. + */ + cgroup_lock(); + mem->move_charge_at_immigrate = val; + cgroup_unlock(); + + return 0; +} +#else +static int mem_cgroup_move_charge_write(struct cgroup *cgrp, + struct cftype *cft, u64 val) +{ + return -ENOSYS; +} +#endif + /* For read statistics */ enum { @@ -2910,18 +3166,18 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data) s64 val; /* per cpu stat */ - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE); s->stat[MCS_CACHE] += val * PAGE_SIZE; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS); s->stat[MCS_RSS] += val * PAGE_SIZE; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED); s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT); s->stat[MCS_PGPGIN] += val; - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT); s->stat[MCS_PGPGOUT] += val; if (do_swap_account) { - val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT); + val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); s->stat[MCS_SWAP] += val * PAGE_SIZE; } @@ -3049,12 +3305,249 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft, return 0; } +static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) +{ + struct mem_cgroup_threshold_ary *t; + u64 usage; + int i; + + rcu_read_lock(); + if (!swap) + t = rcu_dereference(memcg->thresholds); + else + t = rcu_dereference(memcg->memsw_thresholds); + + if (!t) + goto unlock; + + usage = mem_cgroup_usage(memcg, swap); + + /* + * current_threshold points to threshold just below usage. + * If it's not true, a threshold was crossed after last + * call of __mem_cgroup_threshold(). + */ + i = atomic_read(&t->current_threshold); + + /* + * Iterate backward over array of thresholds starting from + * current_threshold and check if a threshold is crossed. + * If none of thresholds below usage is crossed, we read + * only one element of the array here. + */ + for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) + eventfd_signal(t->entries[i].eventfd, 1); + + /* i = current_threshold + 1 */ + i++; + + /* + * Iterate forward over array of thresholds starting from + * current_threshold+1 and check if a threshold is crossed. + * If none of thresholds above usage is crossed, we read + * only one element of the array here. 
+ */ + for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) + eventfd_signal(t->entries[i].eventfd, 1); + + /* Update current_threshold */ + atomic_set(&t->current_threshold, i - 1); +unlock: + rcu_read_unlock(); +} + +static void mem_cgroup_threshold(struct mem_cgroup *memcg) +{ + __mem_cgroup_threshold(memcg, false); + if (do_swap_account) + __mem_cgroup_threshold(memcg, true); +} + +static int compare_thresholds(const void *a, const void *b) +{ + const struct mem_cgroup_threshold *_a = a; + const struct mem_cgroup_threshold *_b = b; + + return _a->threshold - _b->threshold; +} + +static int mem_cgroup_register_event(struct cgroup *cgrp, struct cftype *cft, + struct eventfd_ctx *eventfd, const char *args) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; + int type = MEMFILE_TYPE(cft->private); + u64 threshold, usage; + int size; + int i, ret; + + ret = res_counter_memparse_write_strategy(args, &threshold); + if (ret) + return ret; + + mutex_lock(&memcg->thresholds_lock); + if (type == _MEM) + thresholds = memcg->thresholds; + else if (type == _MEMSWAP) + thresholds = memcg->memsw_thresholds; + else + BUG(); + + usage = mem_cgroup_usage(memcg, type == _MEMSWAP); + + /* Check if a threshold crossed before adding a new one */ + if (thresholds) + __mem_cgroup_threshold(memcg, type == _MEMSWAP); + + if (thresholds) + size = thresholds->size + 1; + else + size = 1; + + /* Allocate memory for new array of thresholds */ + thresholds_new = kmalloc(sizeof(*thresholds_new) + + size * sizeof(struct mem_cgroup_threshold), + GFP_KERNEL); + if (!thresholds_new) { + ret = -ENOMEM; + goto unlock; + } + thresholds_new->size = size; + + /* Copy thresholds (if any) to new array */ + if (thresholds) + memcpy(thresholds_new->entries, thresholds->entries, + thresholds->size * + sizeof(struct mem_cgroup_threshold)); + /* Add new threshold */ + thresholds_new->entries[size - 1].eventfd = eventfd; + thresholds_new->entries[size - 1].threshold = threshold; + + /* Sort thresholds. Registering of new threshold isn't time-critical */ + sort(thresholds_new->entries, size, + sizeof(struct mem_cgroup_threshold), + compare_thresholds, NULL); + + /* Find current threshold */ + atomic_set(&thresholds_new->current_threshold, -1); + for (i = 0; i < size; i++) { + if (thresholds_new->entries[i].threshold < usage) { + /* + * thresholds_new->current_threshold will not be used + * until rcu_assign_pointer(), so it's safe to increment + * it here. 
+ */ + atomic_inc(&thresholds_new->current_threshold); + } + } + + if (type == _MEM) + rcu_assign_pointer(memcg->thresholds, thresholds_new); + else + rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new); + + /* To be sure that nobody uses thresholds before freeing it */ + synchronize_rcu(); + + kfree(thresholds); +unlock: + mutex_unlock(&memcg->thresholds_lock); + + return ret; +} + +static int mem_cgroup_unregister_event(struct cgroup *cgrp, struct cftype *cft, + struct eventfd_ctx *eventfd) +{ + struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); + struct mem_cgroup_threshold_ary *thresholds, *thresholds_new; + int type = MEMFILE_TYPE(cft->private); + u64 usage; + int size = 0; + int i, j, ret; + + mutex_lock(&memcg->thresholds_lock); + if (type == _MEM) + thresholds = memcg->thresholds; + else if (type == _MEMSWAP) + thresholds = memcg->memsw_thresholds; + else + BUG(); + + /* + * Something went wrong if we trying to unregister a threshold + * if we don't have thresholds + */ + BUG_ON(!thresholds); + + usage = mem_cgroup_usage(memcg, type == _MEMSWAP); + + /* Check if a threshold crossed before removing */ + __mem_cgroup_threshold(memcg, type == _MEMSWAP); + + /* Calculate new number of threshold */ + for (i = 0; i < thresholds->size; i++) { + if (thresholds->entries[i].eventfd != eventfd) + size++; + } + + /* Set thresholds array to NULL if we don't have thresholds */ + if (!size) { + thresholds_new = NULL; + goto assign; + } + + /* Allocate memory for new array of thresholds */ + thresholds_new = kmalloc(sizeof(*thresholds_new) + + size * sizeof(struct mem_cgroup_threshold), + GFP_KERNEL); + if (!thresholds_new) { + ret = -ENOMEM; + goto unlock; + } + thresholds_new->size = size; + + /* Copy thresholds and find current threshold */ + atomic_set(&thresholds_new->current_threshold, -1); + for (i = 0, j = 0; i < thresholds->size; i++) { + if (thresholds->entries[i].eventfd == eventfd) + continue; + + thresholds_new->entries[j] = thresholds->entries[i]; + if (thresholds_new->entries[j].threshold < usage) { + /* + * thresholds_new->current_threshold will not be used + * until rcu_assign_pointer(), so it's safe to increment + * it here. 
+ */ + atomic_inc(&thresholds_new->current_threshold); + } + j++; + } + +assign: + if (type == _MEM) + rcu_assign_pointer(memcg->thresholds, thresholds_new); + else + rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new); + + /* To be sure that nobody uses thresholds before freeing it */ + synchronize_rcu(); + + kfree(thresholds); +unlock: + mutex_unlock(&memcg->thresholds_lock); + + return ret; +} static struct cftype mem_cgroup_files[] = { { .name = "usage_in_bytes", .private = MEMFILE_PRIVATE(_MEM, RES_USAGE), .read_u64 = mem_cgroup_read, + .register_event = mem_cgroup_register_event, + .unregister_event = mem_cgroup_unregister_event, }, { .name = "max_usage_in_bytes", @@ -3098,6 +3591,11 @@ static struct cftype mem_cgroup_files[] = { .read_u64 = mem_cgroup_swappiness_read, .write_u64 = mem_cgroup_swappiness_write, }, + { + .name = "move_charge_at_immigrate", + .read_u64 = mem_cgroup_move_charge_read, + .write_u64 = mem_cgroup_move_charge_write, + }, }; #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP @@ -3106,6 +3604,8 @@ static struct cftype memsw_cgroup_files[] = { .name = "memsw.usage_in_bytes", .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE), .read_u64 = mem_cgroup_read, + .register_event = mem_cgroup_register_event, + .unregister_event = mem_cgroup_unregister_event, }, { .name = "memsw.max_usage_in_bytes", @@ -3180,24 +3680,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) kfree(mem->info.nodeinfo[node]); } -static int mem_cgroup_size(void) -{ - int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu); - return sizeof(struct mem_cgroup) + cpustat_size; -} - static struct mem_cgroup *mem_cgroup_alloc(void) { struct mem_cgroup *mem; - int size = mem_cgroup_size(); + int size = sizeof(struct mem_cgroup); + /* Can be very big if MAX_NUMNODES is very big */ if (size < PAGE_SIZE) mem = kmalloc(size, GFP_KERNEL); else mem = vmalloc(size); - if (mem) - memset(mem, 0, size); + if (!mem) + return NULL; + + memset(mem, 0, size); + mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); + if (!mem->stat) { + if (size < PAGE_SIZE) + kfree(mem); + else + vfree(mem); + mem = NULL; + } return mem; } @@ -3222,7 +3727,8 @@ static void __mem_cgroup_free(struct mem_cgroup *mem) for_each_node_state(node, N_POSSIBLE) free_mem_cgroup_per_zone_info(mem, node); - if (mem_cgroup_size() < PAGE_SIZE) + free_percpu(mem->stat); + if (sizeof(struct mem_cgroup) < PAGE_SIZE) kfree(mem); else vfree(mem); @@ -3233,9 +3739,9 @@ static void mem_cgroup_get(struct mem_cgroup *mem) atomic_inc(&mem->refcnt); } -static void mem_cgroup_put(struct mem_cgroup *mem) +static void __mem_cgroup_put(struct mem_cgroup *mem, int count) { - if (atomic_dec_and_test(&mem->refcnt)) { + if (atomic_sub_and_test(count, &mem->refcnt)) { struct mem_cgroup *parent = parent_mem_cgroup(mem); __mem_cgroup_free(mem); if (parent) @@ -3243,6 +3749,11 @@ static void mem_cgroup_put(struct mem_cgroup *mem) } } +static void mem_cgroup_put(struct mem_cgroup *mem) +{ + __mem_cgroup_put(mem, 1); +} + /* * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled. 
*/ @@ -3319,7 +3830,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) INIT_WORK(&stock->work, drain_local_stock); } hotcpu_notifier(memcg_stock_cpu_callback, 0); - } else { parent = mem_cgroup_from_cont(cont->parent); mem->use_hierarchy = parent->use_hierarchy; @@ -3345,6 +3855,8 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) if (parent) mem->swappiness = get_swappiness(parent); atomic_set(&mem->refcnt, 1); + mem->move_charge_at_immigrate = 0; + mutex_init(&mem->thresholds_lock); return &mem->css; free_out: __mem_cgroup_free(mem); @@ -3381,17 +3893,445 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss, return ret; } +#ifdef CONFIG_MMU +/* Handlers for move charge at task migration. */ +#define PRECHARGE_COUNT_AT_ONCE 256 +static int mem_cgroup_do_precharge(unsigned long count) +{ + int ret = 0; + int batch_count = PRECHARGE_COUNT_AT_ONCE; + struct mem_cgroup *mem = mc.to; + + if (mem_cgroup_is_root(mem)) { + mc.precharge += count; + /* we don't need css_get for root */ + return ret; + } + /* try to charge at once */ + if (count > 1) { + struct res_counter *dummy; + /* + * "mem" cannot be under rmdir() because we've already checked + * by cgroup_lock_live_cgroup() that it is not removed and we + * are still under the same cgroup_mutex. So we can postpone + * css_get(). + */ + if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy)) + goto one_by_one; + if (do_swap_account && res_counter_charge(&mem->memsw, + PAGE_SIZE * count, &dummy)) { + res_counter_uncharge(&mem->res, PAGE_SIZE * count); + goto one_by_one; + } + mc.precharge += count; + VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags)); + WARN_ON_ONCE(count > INT_MAX); + __css_get(&mem->css, (int)count); + return ret; + } +one_by_one: + /* fall back to one by one charge */ + while (count--) { + if (signal_pending(current)) { + ret = -EINTR; + break; + } + if (!batch_count--) { + batch_count = PRECHARGE_COUNT_AT_ONCE; + cond_resched(); + } + ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false); + if (ret || !mem) + /* mem_cgroup_clear_mc() will do uncharge later */ + return -ENOMEM; + mc.precharge++; + } + return ret; +} + +/** + * is_target_pte_for_mc - check a pte whether it is valid for move charge + * @vma: the vma the pte to be checked belongs + * @addr: the address corresponding to the pte to be checked + * @ptent: the pte to be checked + * @target: the pointer the target page or swap ent will be stored(can be NULL) + * + * Returns + * 0(MC_TARGET_NONE): if the pte is not a target for move charge. + * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for + * move charge. if @target is not NULL, the page is stored in target->page + * with extra refcnt got(Callers should handle it). + * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a + * target for charge migration. if @target is not NULL, the entry is stored + * in target->ent. + * + * Called with pte lock held. 
+ */ +union mc_target { + struct page *page; + swp_entry_t ent; +}; + +enum mc_target_type { + MC_TARGET_NONE, /* not used */ + MC_TARGET_PAGE, + MC_TARGET_SWAP, +}; + +static int is_target_pte_for_mc(struct vm_area_struct *vma, + unsigned long addr, pte_t ptent, union mc_target *target) +{ + struct page *page = NULL; + struct page_cgroup *pc; + int ret = 0; + swp_entry_t ent = { .val = 0 }; + int usage_count = 0; + bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON, + &mc.to->move_charge_at_immigrate); + + if (!pte_present(ptent)) { + /* TODO: handle swap of shmes/tmpfs */ + if (pte_none(ptent) || pte_file(ptent)) + return 0; + else if (is_swap_pte(ptent)) { + ent = pte_to_swp_entry(ptent); + if (!move_anon || non_swap_entry(ent)) + return 0; + usage_count = mem_cgroup_count_swap_user(ent, &page); + } + } else { + page = vm_normal_page(vma, addr, ptent); + if (!page || !page_mapped(page)) + return 0; + /* + * TODO: We don't move charges of file(including shmem/tmpfs) + * pages for now. + */ + if (!move_anon || !PageAnon(page)) + return 0; + if (!get_page_unless_zero(page)) + return 0; + usage_count = page_mapcount(page); + } + if (usage_count > 1) { + /* + * TODO: We don't move charges of shared(used by multiple + * processes) pages for now. + */ + if (page) + put_page(page); + return 0; + } + if (page) { + pc = lookup_page_cgroup(page); + /* + * Do only loose check w/o page_cgroup lock. + * mem_cgroup_move_account() checks the pc is valid or not under + * the lock. + */ + if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { + ret = MC_TARGET_PAGE; + if (target) + target->page = page; + } + if (!ret || !target) + put_page(page); + } + /* throught */ + if (ent.val && do_swap_account && !ret && + css_id(&mc.from->css) == lookup_swap_cgroup(ent)) { + ret = MC_TARGET_SWAP; + if (target) + target->ent = ent; + } + return ret; +} + +static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + struct vm_area_struct *vma = walk->private; + pte_t *pte; + spinlock_t *ptl; + + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; pte++, addr += PAGE_SIZE) + if (is_target_pte_for_mc(vma, addr, *pte, NULL)) + mc.precharge++; /* increment precharge temporarily */ + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); + + return 0; +} + +static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) +{ + unsigned long precharge; + struct vm_area_struct *vma; + + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + struct mm_walk mem_cgroup_count_precharge_walk = { + .pmd_entry = mem_cgroup_count_precharge_pte_range, + .mm = mm, + .private = vma, + }; + if (is_vm_hugetlb_page(vma)) + continue; + /* TODO: We don't move charges of shmem/tmpfs pages for now. */ + if (vma->vm_flags & VM_SHARED) + continue; + walk_page_range(vma->vm_start, vma->vm_end, + &mem_cgroup_count_precharge_walk); + } + up_read(&mm->mmap_sem); + + precharge = mc.precharge; + mc.precharge = 0; + + return precharge; +} + +static int mem_cgroup_precharge_mc(struct mm_struct *mm) +{ + return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm)); +} + +static void mem_cgroup_clear_mc(void) +{ + /* we must uncharge all the leftover precharges from mc.to */ + if (mc.precharge) { + __mem_cgroup_cancel_charge(mc.to, mc.precharge); + mc.precharge = 0; + } + /* + * we didn't uncharge from mc.from at mem_cgroup_move_account(), so + * we must uncharge here. 
+ */ + if (mc.moved_charge) { + __mem_cgroup_cancel_charge(mc.from, mc.moved_charge); + mc.moved_charge = 0; + } + /* we must fixup refcnts and charges */ + if (mc.moved_swap) { + WARN_ON_ONCE(mc.moved_swap > INT_MAX); + /* uncharge swap account from the old cgroup */ + if (!mem_cgroup_is_root(mc.from)) + res_counter_uncharge(&mc.from->memsw, + PAGE_SIZE * mc.moved_swap); + __mem_cgroup_put(mc.from, mc.moved_swap); + + if (!mem_cgroup_is_root(mc.to)) { + /* + * we charged both to->res and to->memsw, so we should + * uncharge to->res. + */ + res_counter_uncharge(&mc.to->res, + PAGE_SIZE * mc.moved_swap); + VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags)); + __css_put(&mc.to->css, mc.moved_swap); + } + /* we've already done mem_cgroup_get(mc.to) */ + + mc.moved_swap = 0; + } + mc.from = NULL; + mc.to = NULL; + mc.moving_task = NULL; + wake_up_all(&mc.waitq); +} + +static int mem_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ + int ret = 0; + struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); + + if (mem->move_charge_at_immigrate) { + struct mm_struct *mm; + struct mem_cgroup *from = mem_cgroup_from_task(p); + + VM_BUG_ON(from == mem); + + mm = get_task_mm(p); + if (!mm) + return 0; + /* We move charges only when we move a owner of the mm */ + if (mm->owner == p) { + VM_BUG_ON(mc.from); + VM_BUG_ON(mc.to); + VM_BUG_ON(mc.precharge); + VM_BUG_ON(mc.moved_charge); + VM_BUG_ON(mc.moved_swap); + VM_BUG_ON(mc.moving_task); + mc.from = from; + mc.to = mem; + mc.precharge = 0; + mc.moved_charge = 0; + mc.moved_swap = 0; + mc.moving_task = current; + + ret = mem_cgroup_precharge_mc(mm); + if (ret) + mem_cgroup_clear_mc(); + } + mmput(mm); + } + return ret; +} + +static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ + mem_cgroup_clear_mc(); +} + +static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + int ret = 0; + struct vm_area_struct *vma = walk->private; + pte_t *pte; + spinlock_t *ptl; + +retry: + pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + for (; addr != end; addr += PAGE_SIZE) { + pte_t ptent = *(pte++); + union mc_target target; + int type; + struct page *page; + struct page_cgroup *pc; + swp_entry_t ent; + + if (!mc.precharge) + break; + + type = is_target_pte_for_mc(vma, addr, ptent, &target); + switch (type) { + case MC_TARGET_PAGE: + page = target.page; + if (isolate_lru_page(page)) + goto put; + pc = lookup_page_cgroup(page); + if (!mem_cgroup_move_account(pc, + mc.from, mc.to, false)) { + mc.precharge--; + /* we uncharge from mc.from later. */ + mc.moved_charge++; + } + putback_lru_page(page); +put: /* is_target_pte_for_mc() gets the page */ + put_page(page); + break; + case MC_TARGET_SWAP: + ent = target.ent; + if (!mem_cgroup_move_swap_account(ent, + mc.from, mc.to, false)) { + mc.precharge--; + /* we fixup refcnts and charges later. */ + mc.moved_swap++; + } + break; + default: + break; + } + } + pte_unmap_unlock(pte - 1, ptl); + cond_resched(); + + if (addr != end) { + /* + * We have consumed all precharges we got in can_attach(). + * We try charge one by one, but don't do any additional + * charges to mc.to if we have failed in charge once in attach() + * phase. 
+ */ + ret = mem_cgroup_do_precharge(1); + if (!ret) + goto retry; + } + + return ret; +} + +static void mem_cgroup_move_charge(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + lru_add_drain_all(); + down_read(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) { + int ret; + struct mm_walk mem_cgroup_move_charge_walk = { + .pmd_entry = mem_cgroup_move_charge_pte_range, + .mm = mm, + .private = vma, + }; + if (is_vm_hugetlb_page(vma)) + continue; + /* TODO: We don't move charges of shmem/tmpfs pages for now. */ + if (vma->vm_flags & VM_SHARED) + continue; + ret = walk_page_range(vma->vm_start, vma->vm_end, + &mem_cgroup_move_charge_walk); + if (ret) + /* + * means we have consumed all precharges and failed in + * doing additional charge. Just abandon here. + */ + break; + } + up_read(&mm->mmap_sem); +} + static void mem_cgroup_move_task(struct cgroup_subsys *ss, struct cgroup *cont, struct cgroup *old_cont, struct task_struct *p, bool threadgroup) { - /* - * FIXME: It's better to move charges of this process from old - * memcg to new memcg. But it's just on TODO-List now. - */ + struct mm_struct *mm; + + if (!mc.to) + /* no need to move charge */ + return; + + mm = get_task_mm(p); + if (mm) { + mem_cgroup_move_charge(mm); + mmput(mm); + } + mem_cgroup_clear_mc(); } +#else /* !CONFIG_MMU */ +static int mem_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ + return 0; +} +static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, + struct cgroup *cgroup, + struct task_struct *p, + bool threadgroup) +{ +} +static void mem_cgroup_move_task(struct cgroup_subsys *ss, + struct cgroup *cont, + struct cgroup *old_cont, + struct task_struct *p, + bool threadgroup) +{ +} +#endif struct cgroup_subsys mem_cgroup_subsys = { .name = "memory", @@ -3400,6 +4340,8 @@ struct cgroup_subsys mem_cgroup_subsys = { .pre_destroy = mem_cgroup_pre_destroy, .destroy = mem_cgroup_destroy, .populate = mem_cgroup_populate, + .can_attach = mem_cgroup_can_attach, + .cancel_attach = mem_cgroup_cancel_attach, .attach = mem_cgroup_move_task, .early_init = 0, .use_id = 1, diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d1f33516297..620b0b46159 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -44,6 +44,7 @@ #include <linux/migrate.h> #include <linux/page-isolation.h> #include <linux/suspend.h> +#include <linux/slab.h> #include "internal.h" int sysctl_memory_failure_early_kill __read_mostly = 0; diff --git a/mm/memory.c b/mm/memory.c index d1153e37e9b..833952d8b74 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -56,6 +56,7 @@ #include <linux/kallsyms.h> #include <linux/swapops.h> #include <linux/elf.h> +#include <linux/gfp.h> #include <asm/io.h> #include <asm/pgalloc.h> @@ -124,7 +125,7 @@ core_initcall(init_zero_pfn); #if defined(SPLIT_RSS_COUNTING) -void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) +static void __sync_task_rss_stat(struct task_struct *task, struct mm_struct *mm) { int i; @@ -190,9 +191,6 @@ static void check_sync_rss_stat(struct task_struct *task) { } -void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) -{ -} #endif /* @@ -512,12 +510,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd)); - if (page) { - printk(KERN_ALERT - "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", - page, (void 
*)page->flags, page_count(page), - page_mapcount(page), page->mapping, page->index); - } + if (page) + dump_page(page); printk(KERN_ALERT "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 78e34e63c7b..be211a58293 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -688,9 +688,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (page_count(page)) not_managed++; #ifdef CONFIG_DEBUG_VM - printk(KERN_INFO "removing from LRU failed" - " %lx/%d/%lx\n", - pfn, page_count(page), page->flags); + printk(KERN_ALERT "removing pfn %lx from LRU failed\n", + pfn); + dump_page(page); #endif } } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bda230e52ac..08f40a2f3fe 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -73,7 +73,6 @@ #include <linux/sched.h> #include <linux/nodemask.h> #include <linux/cpuset.h> -#include <linux/gfp.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/module.h> @@ -806,9 +805,13 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, err = 0; if (nmask) { - task_lock(current); - get_policy_nodemask(pol, nmask); - task_unlock(current); + if (mpol_store_user_nodemask(pol)) { + *nmask = pol->w.user_nodemask; + } else { + task_lock(current); + get_policy_nodemask(pol, nmask); + task_unlock(current); + } } out: @@ -1756,10 +1759,12 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) if (!new) return ERR_PTR(-ENOMEM); + rcu_read_lock(); if (current_cpuset_is_being_rebound()) { nodemask_t mems = cpuset_mems_allowed(current); mpol_rebind_policy(old, &mems); } + rcu_read_unlock(); *new = *old; atomic_set(&new->refcnt, 1); return new; @@ -2193,8 +2198,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) char *rest = nodelist; while (isdigit(*rest)) rest++; - if (!*rest) - err = 0; + if (*rest) + goto out; } break; case MPOL_INTERLEAVE: @@ -2203,7 +2208,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) */ if (!nodelist) nodes = node_states[N_HIGH_MEMORY]; - err = 0; break; case MPOL_LOCAL: /* @@ -2213,11 +2217,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) goto out; mode = MPOL_PREFERRED; break; - - /* - * case MPOL_BIND: mpol_new() enforces non-empty nodemask. - * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags. 
- */ + case MPOL_DEFAULT: + /* + * Insist on a empty nodelist + */ + if (!nodelist) + err = 0; + goto out; + case MPOL_BIND: + /* + * Insist on a nodelist + */ + if (!nodelist) + goto out; } mode_flags = 0; @@ -2231,13 +2243,14 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) else if (!strcmp(flags, "relative")) mode_flags |= MPOL_F_RELATIVE_NODES; else - err = 1; + goto out; } new = mpol_new(mode, mode_flags, &nodes); if (IS_ERR(new)) - err = 1; - else { + goto out; + + { int ret; NODEMASK_SCRATCH(scratch); if (scratch) { @@ -2248,13 +2261,15 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) ret = -ENOMEM; NODEMASK_SCRATCH_FREE(scratch); if (ret) { - err = 1; mpol_put(new); - } else if (no_context) { - /* save for contextualization */ - new->w.user_nodemask = nodes; + goto out; } } + err = 0; + if (no_context) { + /* save for contextualization */ + new->w.user_nodemask = nodes; + } out: /* Restore string for error message */ diff --git a/mm/migrate.c b/mm/migrate.c index 88000b89fc9..d3f3f7f8107 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -32,6 +32,7 @@ #include <linux/security.h> #include <linux/memcontrol.h> #include <linux/syscalls.h> +#include <linux/gfp.h> #include "internal.h" diff --git a/mm/mincore.c b/mm/mincore.c index 7a3436ef39e..f77433c2027 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -7,8 +7,8 @@ /* * The mincore() system call. */ -#include <linux/slab.h> #include <linux/pagemap.h> +#include <linux/gfp.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/syscalls.h> diff --git a/mm/mmap.c b/mm/mmap.c index f1b4448626b..456ec6f2788 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -507,11 +507,12 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, struct address_space *mapping = NULL; struct prio_tree_root *root = NULL; struct file *file = vma->vm_file; - struct anon_vma *anon_vma = NULL; long adjust_next = 0; int remove_next = 0; if (next && !insert) { + struct vm_area_struct *exporter = NULL; + if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and @@ -519,7 +520,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start, */ again: remove_next = 1 + (end > next->vm_end); end = next->vm_end; - anon_vma = next->anon_vma; + exporter = next; importer = vma; } else if (end > next->vm_start) { /* @@ -527,7 +528,7 @@ again: remove_next = 1 + (end > next->vm_end); * mprotect case 5 shifting the boundary up. */ adjust_next = (end - next->vm_start) >> PAGE_SHIFT; - anon_vma = next->anon_vma; + exporter = next; importer = vma; } else if (end < vma->vm_end) { /* @@ -536,28 +537,19 @@ again: remove_next = 1 + (end > next->vm_end); * mprotect case 4 shifting the boundary down. */ adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT); - anon_vma = next->anon_vma; + exporter = vma; importer = next; } - } - /* - * When changing only vma->vm_end, we don't really need anon_vma lock. - */ - if (vma->anon_vma && (insert || importer || start != vma->vm_start)) - anon_vma = vma->anon_vma; - if (anon_vma) { /* * Easily overlooked: when mprotect shifts the boundary, * make sure the expanding vma has anon_vma set if the * shrinking vma had, to cover any anon pages imported. */ - if (importer && !importer->anon_vma) { - /* Block reverse map lookups until things are set up. 
*/ - if (anon_vma_clone(importer, vma)) { + if (exporter && exporter->anon_vma && !importer->anon_vma) { + if (anon_vma_clone(importer, exporter)) return -ENOMEM; - } - importer->anon_vma = anon_vma; + importer->anon_vma = exporter->anon_vma; } } @@ -825,6 +817,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, } /* + * Rough compatbility check to quickly see if it's even worth looking + * at sharing an anon_vma. + * + * They need to have the same vm_file, and the flags can only differ + * in things that mprotect may change. + * + * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that + * we can merge the two vma's. For example, we refuse to merge a vma if + * there is a vm_ops->close() function, because that indicates that the + * driver is doing some kind of reference counting. But that doesn't + * really matter for the anon_vma sharing case. + */ +static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b) +{ + return a->vm_end == b->vm_start && + mpol_equal(vma_policy(a), vma_policy(b)) && + a->vm_file == b->vm_file && + !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) && + b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); +} + +/* + * Do some basic sanity checking to see if we can re-use the anon_vma + * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be + * the same as 'old', the other will be the new one that is trying + * to share the anon_vma. + * + * NOTE! This runs with mm_sem held for reading, so it is possible that + * the anon_vma of 'old' is concurrently in the process of being set up + * by another page fault trying to merge _that_. But that's ok: if it + * is being set up, that automatically means that it will be a singleton + * acceptable for merging, so we can do all of this optimistically. But + * we do that ACCESS_ONCE() to make sure that we never re-load the pointer. + * + * IOW: that the "list_is_singular()" test on the anon_vma_chain only + * matters for the 'stable anon_vma' case (ie the thing we want to avoid + * is to return an anon_vma that is "complex" due to having gone through + * a fork). + * + * We also make sure that the two vma's are compatible (adjacent, + * and with the same memory policies). That's all stable, even with just + * a read lock on the mm_sem. + */ +static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b) +{ + if (anon_vma_compatible(a, b)) { + struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma); + + if (anon_vma && list_is_singular(&old->anon_vma_chain)) + return anon_vma; + } + return NULL; +} + +/* * find_mergeable_anon_vma is used by anon_vma_prepare, to check * neighbouring vmas for a suitable anon_vma, before it goes off * to allocate a new anon_vma. It checks because a repetitive @@ -834,28 +881,16 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, */ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) { + struct anon_vma *anon_vma; struct vm_area_struct *near; - unsigned long vm_flags; near = vma->vm_next; if (!near) goto try_prev; - /* - * Since only mprotect tries to remerge vmas, match flags - * which might be mprotected into each other later on. - * Neither mlock nor madvise tries to remerge at present, - * so leave their flags as obstructing a merge. 
- */ - vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); - vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); - - if (near->anon_vma && vma->vm_end == near->vm_start && - mpol_equal(vma_policy(vma), vma_policy(near)) && - can_vma_merge_before(near, vm_flags, - NULL, vma->vm_file, vma->vm_pgoff + - ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT))) - return near->anon_vma; + anon_vma = reusable_anon_vma(near, vma, near); + if (anon_vma) + return anon_vma; try_prev: /* * It is potentially slow to have to call find_vma_prev here. @@ -868,14 +903,9 @@ try_prev: if (!near) goto none; - vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC); - vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC); - - if (near->anon_vma && near->vm_end == vma->vm_start && - mpol_equal(vma_policy(near), vma_policy(vma)) && - can_vma_merge_after(near, vm_flags, - NULL, vma->vm_file, vma->vm_pgoff)) - return near->anon_vma; + anon_vma = reusable_anon_vma(near, near, vma); + if (anon_vma) + return anon_vma; none: /* * There's no absolute need to look only at touching neighbours: @@ -1088,6 +1118,30 @@ out: return retval; } +#ifdef __ARCH_WANT_SYS_OLD_MMAP +struct mmap_arg_struct { + unsigned long addr; + unsigned long len; + unsigned long prot; + unsigned long flags; + unsigned long fd; + unsigned long offset; +}; + +SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) +{ + struct mmap_arg_struct a; + + if (copy_from_user(&a, arg, sizeof(a))) + return -EFAULT; + if (a.offset & ~PAGE_MASK) + return -EINVAL; + + return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); +} +#endif /* __ARCH_WANT_SYS_OLD_MMAP */ + /* * Some shared mappigns will want the pages marked read-only * to track write events. If so, we'll downgrade vm_page_prot @@ -1923,7 +1977,8 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, return 0; /* Clean everything up if vma_adjust failed. 
*/ - new->vm_ops->close(new); + if (new->vm_ops && new->vm_ops->close) + new->vm_ops->close(new); if (new->vm_file) { if (vma->vm_flags & VM_EXECUTABLE) removed_exe_file_vma(mm); diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 0777654147c..9e82e937000 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -53,6 +53,7 @@ void unuse_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); + sync_mm_rss(tsk, mm); tsk->mm = NULL; /* active_mm is still 'mm' */ enter_lazy_tlb(mm, tsk); diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 7e33f2cb3c7..438951d366f 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -16,6 +16,7 @@ #include <linux/err.h> #include <linux/rcupdate.h> #include <linux/sched.h> +#include <linux/slab.h> /* * This function can't run concurrently against mmu_notifier_register diff --git a/mm/mprotect.c b/mm/mprotect.c index 8bc969d8112..2d1bf7cf885 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -10,7 +10,6 @@ #include <linux/mm.h> #include <linux/hugetlb.h> -#include <linux/slab.h> #include <linux/shm.h> #include <linux/mman.h> #include <linux/fs.h> diff --git a/mm/mremap.c b/mm/mremap.c index e9c75efce60..cde56ee51ef 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -9,7 +9,6 @@ #include <linux/mm.h> #include <linux/hugetlb.h> -#include <linux/slab.h> #include <linux/shm.h> #include <linux/ksm.h> #include <linux/mman.h> diff --git a/mm/nommu.c b/mm/nommu.c index b9b5cceb1b6..63fa17d121f 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -146,7 +146,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); for (i = 0; i < nr_pages; i++) { - vma = find_extend_vma(mm, start); + vma = find_vma(mm, start); if (!vma) goto finish_or_fault; @@ -162,7 +162,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, } if (vmas) vmas[i] = vma; - start += PAGE_SIZE; + start = (start + PAGE_SIZE) & PAGE_MASK; } return i; @@ -764,7 +764,7 @@ EXPORT_SYMBOL(find_vma); */ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) { - return find_vma(mm, addr & PAGE_MASK); + return find_vma(mm, addr); } /* @@ -1040,10 +1040,9 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) if (ret != -ENOSYS) return ret; - /* getting an ENOSYS error indicates that direct mmap isn't - * possible (as opposed to tried but failed) so we'll fall - * through to making a private copy of the data and mapping - * that if we can */ + /* getting -ENOSYS indicates that direct mmap isn't possible (as + * opposed to tried but failed) so we can only give a suitable error as + * it's not possible to make a private copy if MAP_SHARED was given */ return -ENODEV; } @@ -1428,6 +1427,30 @@ out: return retval; } +#ifdef __ARCH_WANT_SYS_OLD_MMAP +struct mmap_arg_struct { + unsigned long addr; + unsigned long len; + unsigned long prot; + unsigned long flags; + unsigned long fd; + unsigned long offset; +}; + +SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) +{ + struct mmap_arg_struct a; + + if (copy_from_user(&a, arg, sizeof(a))) + return -EFAULT; + if (a.offset & ~PAGE_MASK) + return -EINVAL; + + return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, + a.offset >> PAGE_SHIFT); +} +#endif /* __ARCH_WANT_SYS_OLD_MMAP */ + /* * split a vma into two pieces at address 'addr', a new vma is allocated either * for the first part or the tail. 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 35755a4156d..b68e802a7a7 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -18,6 +18,7 @@ #include <linux/oom.h> #include <linux/mm.h> #include <linux/err.h> +#include <linux/gfp.h> #include <linux/sched.h> #include <linux/swap.h> #include <linux/timex.h> @@ -473,6 +474,8 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) unsigned long points = 0; struct task_struct *p; + if (sysctl_panic_on_oom == 2) + panic("out of memory(memcg). panic_on_oom is selected.\n"); read_lock(&tasklist_lock); retry: p = select_bad_process(&points, mem); @@ -601,13 +604,6 @@ void pagefault_out_of_memory(void) /* Got some memory back in the last second. */ return; - /* - * If this is from memcg, oom-killer is already invoked. - * and not worth to go system-wide-oom. - */ - if (mem_cgroup_oom_called(current)) - goto rest_and_return; - if (sysctl_panic_on_oom) panic("out of memory from page fault. panic_on_oom is selected.\n"); @@ -619,7 +615,6 @@ void pagefault_out_of_memory(void) * Give "p" a good chance of killing itself before we * retry to allocate memory. */ -rest_and_return: if (!test_thread_flag(TIF_MEMDIE)) schedule_timeout_uninterruptible(1); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a8182c89de5..d03c946d556 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -50,6 +50,7 @@ #include <linux/kmemleak.h> #include <linux/memory.h> #include <trace/events/kmem.h> +#include <linux/ftrace_event.h> #include <asm/tlbflush.h> #include <asm/div64.h> @@ -288,10 +289,7 @@ static void bad_page(struct page *page) printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); - printk(KERN_ALERT - "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n", - page, (void *)page->flags, page_count(page), - page_mapcount(page), page->mapping, page->index); + dump_page(page); dump_stack(); out: @@ -3224,7 +3222,7 @@ static int __zone_pcp_update(void *data) int cpu; unsigned long batch = zone_batchsize(zone), flags; - for (cpu = 0; cpu < NR_CPUS; cpu++) { + for_each_possible_cpu(cpu) { struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; @@ -5183,3 +5181,80 @@ bool is_free_buddy_page(struct page *page) return order < MAX_ORDER; } #endif + +static struct trace_print_flags pageflag_names[] = { + {1UL << PG_locked, "locked" }, + {1UL << PG_error, "error" }, + {1UL << PG_referenced, "referenced" }, + {1UL << PG_uptodate, "uptodate" }, + {1UL << PG_dirty, "dirty" }, + {1UL << PG_lru, "lru" }, + {1UL << PG_active, "active" }, + {1UL << PG_slab, "slab" }, + {1UL << PG_owner_priv_1, "owner_priv_1" }, + {1UL << PG_arch_1, "arch_1" }, + {1UL << PG_reserved, "reserved" }, + {1UL << PG_private, "private" }, + {1UL << PG_private_2, "private_2" }, + {1UL << PG_writeback, "writeback" }, +#ifdef CONFIG_PAGEFLAGS_EXTENDED + {1UL << PG_head, "head" }, + {1UL << PG_tail, "tail" }, +#else + {1UL << PG_compound, "compound" }, +#endif + {1UL << PG_swapcache, "swapcache" }, + {1UL << PG_mappedtodisk, "mappedtodisk" }, + {1UL << PG_reclaim, "reclaim" }, + {1UL << PG_buddy, "buddy" }, + {1UL << PG_swapbacked, "swapbacked" }, + {1UL << PG_unevictable, "unevictable" }, +#ifdef CONFIG_MMU + {1UL << PG_mlocked, "mlocked" }, +#endif +#ifdef CONFIG_ARCH_USES_PG_UNCACHED + {1UL << PG_uncached, "uncached" }, +#endif +#ifdef CONFIG_MEMORY_FAILURE + {1UL << PG_hwpoison, "hwpoison" }, +#endif + {-1UL, NULL }, +}; + +static void dump_page_flags(unsigned long flags) +{ + const char *delim = ""; + unsigned long mask; + int i; + + 
printk(KERN_ALERT "page flags: %#lx(", flags); + + /* remove zone id */ + flags &= (1UL << NR_PAGEFLAGS) - 1; + + for (i = 0; pageflag_names[i].name && flags; i++) { + + mask = pageflag_names[i].mask; + if ((flags & mask) != mask) + continue; + + flags &= ~mask; + printk("%s%s", delim, pageflag_names[i].name); + delim = "|"; + } + + /* check for left over flags */ + if (flags) + printk("%s%#lx", delim, flags); + + printk(")\n"); +} + +void dump_page(struct page *page) +{ + printk(KERN_ALERT + "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", + page, page_count(page), page_mapcount(page), + page->mapping, page->index); + dump_page_flags(page->flags); +} diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 3d535d59482..6c0081441a3 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -284,6 +284,7 @@ static DEFINE_MUTEX(swap_cgroup_mutex); struct swap_cgroup_ctrl { struct page **map; unsigned long length; + spinlock_t lock; }; struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; @@ -335,6 +336,43 @@ not_enough_page: } /** + * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. + * @end: swap entry to be cmpxchged + * @old: old id + * @new: new id + * + * Returns old id at success, 0 at failure. + * (There is no mem_cgroup useing 0 as its id) + */ +unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new) +{ + int type = swp_type(ent); + unsigned long offset = swp_offset(ent); + unsigned long idx = offset / SC_PER_PAGE; + unsigned long pos = offset & SC_POS_MASK; + struct swap_cgroup_ctrl *ctrl; + struct page *mappage; + struct swap_cgroup *sc; + unsigned long flags; + unsigned short retval; + + ctrl = &swap_cgroup_ctrl[type]; + + mappage = ctrl->map[idx]; + sc = page_address(mappage); + sc += pos; + spin_lock_irqsave(&ctrl->lock, flags); + retval = sc->id; + if (retval == old) + sc->id = new; + else + retval = 0; + spin_unlock_irqrestore(&ctrl->lock, flags); + return retval; +} + +/** * swap_cgroup_record - record mem_cgroup for this swp_entry. * @ent: swap entry to be recorded into * @mem: mem_cgroup to be recorded @@ -352,14 +390,17 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) struct page *mappage; struct swap_cgroup *sc; unsigned short old; + unsigned long flags; ctrl = &swap_cgroup_ctrl[type]; mappage = ctrl->map[idx]; sc = page_address(mappage); sc += pos; + spin_lock_irqsave(&ctrl->lock, flags); old = sc->id; sc->id = id; + spin_unlock_irqrestore(&ctrl->lock, flags); return old; } @@ -411,6 +452,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) mutex_lock(&swap_cgroup_mutex); ctrl->length = length; ctrl->map = array; + spin_lock_init(&ctrl->lock); if (swap_cgroup_prepare(type)) { /* memory shortage */ ctrl->map = NULL; diff --git a/mm/page_io.c b/mm/page_io.c index a19af956ee1..31a3b962230 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -12,6 +12,7 @@ #include <linux/mm.h> #include <linux/kernel_stat.h> +#include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/bio.h> diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 7b47a57b664..8b1a2ce21ee 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -80,6 +80,37 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, return err; } +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr, + unsigned long end) +{ + unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h); + return boundary < end ? 
boundary : end; +} + +static int walk_hugetlb_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long end, + struct mm_walk *walk) +{ + struct hstate *h = hstate_vma(vma); + unsigned long next; + unsigned long hmask = huge_page_mask(h); + pte_t *pte; + int err = 0; + + do { + next = hugetlb_entry_end(h, addr, end); + pte = huge_pte_offset(walk->mm, addr & hmask); + if (pte && walk->hugetlb_entry) + err = walk->hugetlb_entry(pte, hmask, addr, next, walk); + if (err) + return err; + } while (addr = next, addr != end); + + return 0; +} +#endif + /** * walk_page_range - walk a memory map's page tables with a callback * @mm: memory map to walk @@ -128,20 +159,16 @@ int walk_page_range(unsigned long addr, unsigned long end, vma = find_vma(walk->mm, addr); #ifdef CONFIG_HUGETLB_PAGE if (vma && is_vm_hugetlb_page(vma)) { - pte_t *pte; - struct hstate *hs; - if (vma->vm_end < next) next = vma->vm_end; - hs = hstate_vma(vma); - pte = huge_pte_offset(walk->mm, - addr & huge_page_mask(hs)); - if (pte && !huge_pte_none(huge_ptep_get(pte)) - && walk->hugetlb_entry) - err = walk->hugetlb_entry(pte, addr, - next, walk); + /* + * Hugepage is very tightly coupled with vma, so + * walk through hugetlb entries within a given vma. + */ + err = walk_hugetlb_range(vma, addr, next, walk); if (err) break; + pgd = pgd_offset(walk->mm, next); continue; } #endif diff --git a/mm/percpu.c b/mm/percpu.c index 768419d44ad..6e09741ddc6 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1304,6 +1304,32 @@ void free_percpu(void __percpu *ptr) EXPORT_SYMBOL_GPL(free_percpu); /** + * is_kernel_percpu_address - test whether address is from static percpu area + * @addr: address to test + * + * Test whether @addr belongs to in-kernel static percpu area. Module + * static percpu areas are not considered. For those, use + * is_module_percpu_address(). + * + * RETURNS: + * %true if @addr is from in-kernel static percpu area, %false otherwise. + */ +bool is_kernel_percpu_address(unsigned long addr) +{ + const size_t static_size = __per_cpu_end - __per_cpu_start; + void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); + unsigned int cpu; + + for_each_possible_cpu(cpu) { + void *start = per_cpu_ptr(base, cpu); + + if ((void *)addr >= start && (void *)addr < start + static_size) + return true; + } + return false; +} + +/** * per_cpu_ptr_to_phys - convert translated percpu address to physical address * @addr: the address to be converted to physical address * diff --git a/mm/percpu_up.c b/mm/percpu_up.c new file mode 100644 index 00000000000..c4351c7f57d --- /dev/null +++ b/mm/percpu_up.c @@ -0,0 +1,30 @@ +/* + * mm/percpu_up.c - dummy percpu memory allocator implementation for UP + */ + +#include <linux/module.h> +#include <linux/percpu.h> +#include <linux/slab.h> + +void __percpu *__alloc_percpu(size_t size, size_t align) +{ + /* + * Can't easily make larger alignment work with kmalloc. WARN + * on it. Larger alignment should only be used for module + * percpu sections on SMP for which this path isn't used. 
+ */ + WARN_ON_ONCE(align > SMP_CACHE_BYTES); + return kzalloc(size, GFP_KERNEL); +} +EXPORT_SYMBOL_GPL(__alloc_percpu); + +void free_percpu(void __percpu *p) +{ + kfree(p); +} +EXPORT_SYMBOL_GPL(free_percpu); + +phys_addr_t per_cpu_ptr_to_phys(void *addr) +{ + return __pa(addr); +} diff --git a/mm/quicklist.c b/mm/quicklist.c index 6633965bb27..2876349339a 100644 --- a/mm/quicklist.c +++ b/mm/quicklist.c @@ -14,6 +14,7 @@ */ #include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/mm.h> #include <linux/mmzone.h> #include <linux/module.h> diff --git a/mm/readahead.c b/mm/readahead.c index 337b20e946f..dfa9a1a03a1 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/fs.h> +#include <linux/gfp.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/blkdev.h> @@ -502,7 +503,7 @@ void page_cache_sync_readahead(struct address_space *mapping, return; /* be dumb */ - if (filp->f_mode & FMODE_RANDOM) { + if (filp && (filp->f_mode & FMODE_RANDOM)) { force_page_cache_readahead(mapping, filp, offset, req_size); return; } diff --git a/mm/rmap.c b/mm/rmap.c index fcd593c9c99..07fc9475879 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -133,8 +133,8 @@ int anon_vma_prepare(struct vm_area_struct *vma) goto out_enomem_free_avc; allocated = anon_vma; } - spin_lock(&anon_vma->lock); + spin_lock(&anon_vma->lock); /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { @@ -144,14 +144,15 @@ int anon_vma_prepare(struct vm_area_struct *vma) list_add(&avc->same_vma, &vma->anon_vma_chain); list_add(&avc->same_anon_vma, &anon_vma->head); allocated = NULL; + avc = NULL; } spin_unlock(&mm->page_table_lock); - spin_unlock(&anon_vma->lock); - if (unlikely(allocated)) { + + if (unlikely(allocated)) anon_vma_free(allocated); + if (unlikely(avc)) anon_vma_chain_free(avc); - } } return 0; @@ -182,7 +183,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; - list_for_each_entry(pavc, &src->anon_vma_chain, same_vma) { + list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { avc = anon_vma_chain_alloc(); if (!avc) goto enomem_failure; @@ -232,6 +233,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) out_error_free_anon_vma: anon_vma_free(anon_vma); out_error: + unlink_anon_vmas(vma); return -ENOMEM; } @@ -729,13 +731,29 @@ void page_move_anon_rmap(struct page *page, * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped + * @exclusive: the page is exclusively owned by the current process */ static void __page_set_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) + struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); + + /* + * If the page isn't exclusively mapped into this vma, + * we must use the _oldest_ possible anon_vma for the + * page mapping! + * + * So take the last AVC chain entry in the vma, which is + * the deepest ancestor, and use the anon_vma from that. 
+ */ + if (!exclusive) { + struct anon_vma_chain *avc; + avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); + anon_vma = avc->anon_vma; + } + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; page->index = linear_page_index(vma, address); @@ -790,7 +808,7 @@ void page_add_anon_rmap(struct page *page, VM_BUG_ON(!PageLocked(page)); VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); if (first) - __page_set_anon_rmap(page, vma, address); + __page_set_anon_rmap(page, vma, address, 0); else __page_check_anon_rmap(page, vma, address); } @@ -812,7 +830,7 @@ void page_add_new_anon_rmap(struct page *page, SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ __inc_zone_page_state(page, NR_ANON_PAGES); - __page_set_anon_rmap(page, vma, address); + __page_set_anon_rmap(page, vma, address, 1); if (page_evictable(page, vma)) lru_cache_add_lru(page, LRU_ACTIVE_ANON); else diff --git a/mm/slab.c b/mm/slab.c index a9f325b28be..bac0f4fcc21 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3602,21 +3602,10 @@ EXPORT_SYMBOL(kmem_cache_alloc_notrace); */ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) { - unsigned long addr = (unsigned long)ptr; - unsigned long min_addr = PAGE_OFFSET; - unsigned long align_mask = BYTES_PER_WORD - 1; unsigned long size = cachep->buffer_size; struct page *page; - if (unlikely(addr < min_addr)) - goto out; - if (unlikely(addr > (unsigned long)high_memory - size)) - goto out; - if (unlikely(addr & align_mask)) - goto out; - if (unlikely(!kern_addr_valid(addr))) - goto out; - if (unlikely(!kern_addr_valid(addr + size - 1))) + if (unlikely(!kern_ptr_validate(ptr, size))) goto out; page = virt_to_page(ptr); if (unlikely(!PageSlab(page))) diff --git a/mm/slub.c b/mm/slub.c index a2b8969ba6d..7d6c8b1ccf6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2386,6 +2386,9 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object) { struct page *page; + if (!kern_ptr_validate(object, s->size)) + return 0; + page = get_object_page(object); if (!page || s != page->slab) @@ -2960,7 +2963,7 @@ static void slab_mem_offline_callback(void *arg) /* * if n->nr_slabs > 0, slabs still exist on the node * that is going down. We were unable to free them, - * and offline_pages() function shoudn't call this + * and offline_pages() function shouldn't call this * callback. So, we must fail. */ BUG_ON(slabs_node(s, offline_node)); diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 392b9bb5bc0..aa33fd67fa4 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -22,6 +22,7 @@ #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/sched.h> diff --git a/mm/sparse.c b/mm/sparse.c index 22896d58913..dc0cc4d43ff 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -2,6 +2,7 @@ * sparse memory mappings. 
*/ #include <linux/mm.h> +#include <linux/slab.h> #include <linux/mmzone.h> #include <linux/bootmem.h> #include <linux/highmem.h> diff --git a/mm/swap.c b/mm/swap.c index 9036b89813a..7cd60bf0a97 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -30,6 +30,7 @@ #include <linux/notifier.h> #include <linux/backing-dev.h> #include <linux/memcontrol.h> +#include <linux/gfp.h> #include "internal.h" diff --git a/mm/swap_state.c b/mm/swap_state.c index 6d1daeb1cb4..e10f5833167 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -8,6 +8,7 @@ */ #include <linux/module.h> #include <linux/mm.h> +#include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/swapops.h> diff --git a/mm/swapfile.c b/mm/swapfile.c index 84374d8cf81..6cd0a8f90dc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -723,6 +723,37 @@ int free_swap_and_cache(swp_entry_t entry) return p != NULL; } +#ifdef CONFIG_CGROUP_MEM_RES_CTLR +/** + * mem_cgroup_count_swap_user - count the user of a swap entry + * @ent: the swap entry to be checked + * @pagep: the pointer for the swap cache page of the entry to be stored + * + * Returns the number of the user of the swap entry. The number is valid only + * for swaps of anonymous pages. + * If the entry is found on swap cache, the page is stored to pagep with + * refcount of it being incremented. + */ +int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) +{ + struct page *page; + struct swap_info_struct *p; + int count = 0; + + page = find_get_page(&swapper_space, ent.val); + if (page) + count += page_mapcount(page); + p = swap_info_get(ent); + if (p) { + count += swap_count(p->swap_map[swp_offset(ent)]); + spin_unlock(&swap_lock); + } + + *pagep = page; + return count; +} +#endif + #ifdef CONFIG_HIBERNATION /* * Find the swap type that corresponds to given device (if any). diff --git a/mm/truncate.c b/mm/truncate.c index e87e3724482..f42675a3615 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -9,6 +9,7 @@ #include <linux/kernel.h> #include <linux/backing-dev.h> +#include <linux/gfp.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/module.h> diff --git a/mm/util.c b/mm/util.c index 834db7be240..f5712e8964b 100644 --- a/mm/util.c +++ b/mm/util.c @@ -186,6 +186,27 @@ void kzfree(const void *p) } EXPORT_SYMBOL(kzfree); +int kern_ptr_validate(const void *ptr, unsigned long size) +{ + unsigned long addr = (unsigned long)ptr; + unsigned long min_addr = PAGE_OFFSET; + unsigned long align_mask = sizeof(void *) - 1; + + if (unlikely(addr < min_addr)) + goto out; + if (unlikely(addr > (unsigned long)high_memory - size)) + goto out; + if (unlikely(addr & align_mask)) + goto out; + if (unlikely(!kern_addr_valid(addr))) + goto out; + if (unlikely(!kern_addr_valid(addr + size - 1))) + goto out; + return 1; +out: + return 0; +} + /* * strndup_user - duplicate an existing string from user space * @s: The string to duplicate diff --git a/mm/vmscan.c b/mm/vmscan.c index 79c809895fb..3ff3311447f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -13,7 +13,7 @@ #include <linux/mm.h> #include <linux/module.h> -#include <linux/slab.h> +#include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/pagemap.h> @@ -1535,13 +1535,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc, unsigned long ap, fp; struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); - /* If we have no swap space, do not bother scanning anon pages. 
*/ - if (!sc->may_swap || (nr_swap_pages <= 0)) { - percent[0] = 0; - percent[1] = 100; - return; - } - anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + @@ -1639,20 +1632,22 @@ static void shrink_zone(int priority, struct zone *zone, unsigned long nr_reclaimed = sc->nr_reclaimed; unsigned long nr_to_reclaim = sc->nr_to_reclaim; struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); + int noswap = 0; - get_scan_ratio(zone, sc, percent); + /* If we have no swap space, do not bother scanning anon pages. */ + if (!sc->may_swap || (nr_swap_pages <= 0)) { + noswap = 1; + percent[0] = 0; + percent[1] = 100; + } else + get_scan_ratio(zone, sc, percent); for_each_evictable_lru(l) { int file = is_file_lru(l); unsigned long scan; - if (percent[file] == 0) { - nr[l] = 0; - continue; - } - scan = zone_nr_lru_pages(zone, sc, l); - if (priority) { + if (priority || noswap) { scan >>= priority; scan = (scan * percent[file]) / 100; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 7f760cbc73f..fa12ea3051f 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -12,6 +12,7 @@ #include <linux/mm.h> #include <linux/err.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/cpu.h> #include <linux/vmstat.h> #include <linux/sched.h> |