From 5a838c3b60e3a36ade764cf7751b8f17d7c9c2da Mon Sep 17 00:00:00 2001
From: Jianyu Zhan
Date: Mon, 14 Apr 2014 13:47:40 +0800
Subject: percpu: make pcpu_alloc_chunk() use pcpu_mem_free() instead of kfree()

pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
        BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long)

It could hardly ever be bigger than PAGE_SIZE even on a large-scale
machine, but for consistency with its counterpart pcpu_mem_zalloc(),
use pcpu_mem_free() instead.

Commit b4916cb17c26 ("percpu: make pcpu_free_chunk() use pcpu_mem_free()
instead of kfree()") addressed this problem, but missed this one.

tj: commit message updated

Signed-off-by: Jianyu Zhan
Signed-off-by: Tejun Heo
Fixes: 099a19d91ca4 ("percpu: allow limited allocation before slab is online")
Cc: stable@vger.kernel.org
---
 mm/percpu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/percpu.c b/mm/percpu.c
index 63e24fb4387..2ddf9a990db 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
         chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                 sizeof(chunk->map[0]));
         if (!chunk->map) {
-                kfree(chunk);
+                pcpu_mem_free(chunk, pcpu_chunk_struct_size);
                 return NULL;
         }
--
cgit v1.2.3-70-g09d2
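
A note on why the matching free helper matters here: pcpu_mem_zalloc()
picks its backing allocator by size (slab for small buffers, vmalloc as
a fallback for larger ones), so only the paired pcpu_mem_free() knows
which release routine to call; a bare kfree() is only correct for the
slab case. What follows is a minimal, standalone userspace sketch of
that size-dispatching pattern; the helper names, the 4 KB threshold and
the mmap-based large path are illustrative assumptions, not the kernel
implementation.

/*
 * Standalone userspace sketch (not kernel code) of the size-dispatching
 * allocator pattern behind pcpu_mem_zalloc()/pcpu_mem_free(): small
 * buffers come from one allocator, large ones from another, so every
 * release has to go through the matching helper rather than a bare
 * free of one particular kind.  Threshold and names are illustrative.
 */
#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

#define SMALL_LIMIT 4096        /* stand-in for PAGE_SIZE */

static void *mem_zalloc(size_t size)
{
        if (size <= SMALL_LIMIT)
                return calloc(1, size);         /* "kzalloc"-like path */

        /* "vzalloc"-like path: anonymous, zero-filled mapping */
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}

static void mem_free(void *ptr, size_t size)
{
        if (!ptr)
                return;
        if (size <= SMALL_LIMIT)
                free(ptr);                      /* mirrors the calloc() path */
        else
                munmap(ptr, size);              /* mirrors the mmap() path */
}

int main(void)
{
        void *small = mem_zalloc(128);
        void *large = mem_zalloc(1 << 20);

        /* Releasing "large" with free() here would be undefined behaviour;
         * the size-aware helper picks the correct release routine. */
        mem_free(small, 128);
        mem_free(large, 1 << 20);
        return 0;
}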
From 3551a9280bcb728980a13783ff295e9f0bdedd9a Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 9 May 2014 15:36:59 -0700
Subject: mm: postpone the disabling of kmemleak early logging

Commit 8910ae896c8c ("kmemleak: change some global variables to int"),
in addition to the atomic -> int conversion, moved the disabling of
kmemleak_early_log to the beginning of the kmemleak_init() function,
before the full kmemleak tracing is actually enabled.  In this small
window, kmem_cache_create() is called by kmemleak which triggers
additional memory allocations that are not traced.  This patch restores
the original logic, disabling kmemleak_early_log only once kmemleak is
fully functional.

Fixes: 8910ae896c8c ("kmemleak: change some global variables to int")
Signed-off-by: Catalin Marinas
Cc: Sasha Levin
Cc: Li Zefan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/kmemleak.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 91d67eaee05..8d2fcdfeff7 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
         int i;
         unsigned long flags;

-        kmemleak_early_log = 0;
-
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
         if (!kmemleak_skip_disable) {
+                kmemleak_early_log = 0;
                 kmemleak_disable();
                 return;
         }
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)

         /* the kernel is still in UP mode, so disabling the IRQs is enough */
         local_irq_save(flags);
+        kmemleak_early_log = 0;
         if (kmemleak_error) {
                 local_irq_restore(flags);
                 return;
--
cgit v1.2.3-70-g09d2

From dd18dbc2d42af75fffa60c77e0f02220bc329829 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Fri, 9 May 2014 15:37:00 -0700
Subject: mm, thp: close race between mremap() and split_huge_page()

It's critical for split_huge_page() (and migration) to catch and freeze
all PMDs on the rmap walk.  It gets tricky if there's concurrent fork()
or mremap() since we usually copy/move page table entries in dup_mm()
or move_page_tables() without the rmap lock taken.  To make this work
we rely on the rmap walk order not to miss any entry: we expect to see
the destination VMA after the source one.

But after switching the rmap implementation to an interval tree it's
not always possible to preserve the expected walk order.

It works fine for dup_mm() since the new VMA has the same
vma_start_pgoff()/vma_last_pgoff() and we explicitly insert the dst VMA
after the src one with vma_interval_tree_insert_after().

But on move_vma() the destination VMA can be merged into an adjacent
one and, as a result, shifted left in the interval tree.  Fortunately,
we can detect the situation and prevent the race with the rmap walk by
moving page table entries under the rmap lock.  See commit
38a76013ad80.

The problem is that we miss the lock when we move a transhuge PMD.
Most likely this bug caused the crash[1].

[1] http://thread.gmane.org/gmane.linux.kernel.mm/96473

Fixes: 108d6642ad81 ("mm anon rmap: remove anon_vma_moveto_tail")
Signed-off-by: Kirill A. Shutemov
Reviewed-by: Andrea Arcangeli
Cc: Rik van Riel
Acked-by: Michel Lespinasse
Cc: Dave Jones
Cc: David Miller
Acked-by: Johannes Weiner
Cc: [3.7+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/mremap.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/mremap.c b/mm/mremap.c
index 0843feb66f3..05f1180e9f2 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                         break;
                 if (pmd_trans_huge(*old_pmd)) {
                         int err = 0;
-                        if (extent == HPAGE_PMD_SIZE)
+                        if (extent == HPAGE_PMD_SIZE) {
+                                VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+                                /* See comment in move_ptes() */
+                                if (need_rmap_locks)
+                                        anon_vma_lock_write(vma->anon_vma);
                                 err = move_huge_pmd(vma, new_vma, old_addr,
                                                     new_addr, old_end,
                                                     old_pmd, new_pmd);
+                                if (need_rmap_locks)
+                                        anon_vma_unlock_write(vma->anon_vma);
+                        }
                         if (err > 0) {
                                 need_flush = true;
                                 continue;
--
cgit v1.2.3-70-g09d2

From 042d27acb64924a0e8a43e972485913a32407beb Mon Sep 17 00:00:00 2001
From: Helge Deller
Date: Wed, 30 Apr 2014 23:26:02 +0200
Subject: parisc,metag: Do not hardcode maximum userspace stack size

This patch only affects architectures where the stack grows upwards
(currently parisc and metag).  On those, do not hardcode the maximum
initial stack size to 1GB for 32-bit processes, but make it
configurable via a config option.

The main problem with the hardcoded stack size is that we have two
memory regions which grow upwards: stack and heap.  To keep most of the
memory available for the heap in a flexmap memory layout, it makes no
sense to hard-allocate up to 1GB of memory for the stack, which then
can't be used as heap.

This patch makes the stack size for 32-bit processes configurable and
uses 80MB as the default value, which has been in use on parisc for the
last few years and hasn't shown any problems yet.
Bottomley" Cc: linux-parisc@vger.kernel.org Cc: linux-metag@vger.kernel.org Cc: John David Anglin --- arch/metag/include/asm/processor.h | 2 +- arch/parisc/include/asm/processor.h | 5 ++++- arch/parisc/kernel/sys_parisc.c | 6 +++--- mm/Kconfig | 15 +++++++++++++++ 4 files changed, 23 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 3be8581af49..a8a37477c66 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -23,7 +23,7 @@ #define STACK_TOP (TASK_SIZE - PAGE_SIZE) #define STACK_TOP_MAX STACK_TOP /* Maximum virtual space for stack */ -#define STACK_SIZE_MAX (1 << 28) /* 256 MB */ +#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 86522ef09d5..d951c9681ab 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -55,7 +55,10 @@ #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX DEFAULT_TASK_SIZE -#define STACK_SIZE_MAX (1 << 30) /* 1 GB */ +/* Allow bigger stacks for 64-bit processes */ +#define STACK_SIZE_MAX (USER_WIDE_MODE \ + ? (1 << 30) /* 1 GB */ \ + : (CONFIG_MAX_STACK_SIZE_MB*1024*1024)) #endif diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 31ffa9b5532..e1ffea2f9a0 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void) { unsigned long stack_base; - /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */ + /* Limit stack size - see setup_arg_pages() in fs/exec.c */ stack_base = rlimit_max(RLIMIT_STACK); - if (stack_base > (1 << 30)) - stack_base = 1 << 30; + if (stack_base > STACK_SIZE_MAX) + stack_base = STACK_SIZE_MAX; return PAGE_ALIGN(STACK_TOP - stack_base); } diff --git a/mm/Kconfig b/mm/Kconfig index ebe5880c29d..1b5a95f0fa0 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -581,3 +581,18 @@ config PGTABLE_MAPPING config GENERIC_EARLY_IOREMAP bool + +config MAX_STACK_SIZE_MB + int "Maximum user stack size for 32-bit processes (MB)" + default 80 + range 8 256 if METAG + range 8 2048 + depends on STACK_GROWSUP && (!64BIT || COMPAT) + help + This is the maximum stack size in Megabytes in the VM layout of 32-bit + user processes when the stack grows upwards (currently only on parisc + and metag arch). The stack will be located at the highest memory + address minus the given value, unless the RLIMIT_STACK hard limit is + changed to a smaller value in which case that is used. + + A sane initial value is 80 MB. -- cgit v1.2.3-70-g09d2 From b985194c8c0a130ed155b71662e39f7eaea4876f Mon Sep 17 00:00:00 2001 From: Chen Yucong Date: Thu, 22 May 2014 11:54:15 -0700 Subject: hwpoison, hugetlb: lock_page/unlock_page does not match for handling a free hugepage For handling a free hugepage in memory failure, the race will happen if another thread hwpoisoned this hugepage concurrently. So we need to check PageHWPoison instead of !PageHWPoison. If hwpoison_filter(p) returns true or a race happens, then we need to unlock_page(hpage). 
Signed-off-by: Chen Yucong
Reviewed-by: Naoya Horiguchi
Tested-by: Naoya Horiguchi
Reviewed-by: Andi Kleen
Cc: [2.6.36+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory-failure.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

(limited to 'mm')

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 35ef28acf13..dbf8922216a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                         return 0;
                 } else if (PageHuge(hpage)) {
                         /*
-                         * Check "just unpoisoned", "filter hit", and
-                         * "race with other subpage."
+                         * Check "filter hit" and "race with other subpage."
                          */
                         lock_page(hpage);
-                        if (!PageHWPoison(hpage)
-                            || (hwpoison_filter(p) && TestClearPageHWPoison(p))
-                            || (p != hpage && TestSetPageHWPoison(hpage))) {
-                                atomic_long_sub(nr_pages, &num_poisoned_pages);
-                                return 0;
+                        if (PageHWPoison(hpage)) {
+                                if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+                                    || (p != hpage && TestSetPageHWPoison(hpage))) {
+                                        atomic_long_sub(nr_pages, &num_poisoned_pages);
+                                        unlock_page(hpage);
+                                        return 0;
+                                }
                         }
                         set_page_hwpoison_huge_page(hpage);
                         res = dequeue_hwpoisoned_huge_page(hpage);
--
cgit v1.2.3-70-g09d2

From 7fcbbaf18392f0b17c95e2f033c8ccf87eecde1d Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Thu, 22 May 2014 11:54:16 -0700
Subject: mm/filemap.c: avoid always dirtying mapping->flags on O_DIRECT

In some testing I ran today (some fio jobs that spread over two nodes),
we end up spending 40% of the time in filemap_check_errors().  That
smells fishy.  Looking further, this is basically what happens:

  blkdev_aio_read()
      generic_file_aio_read()
          filemap_write_and_wait_range()
              if (!mapping->nr_pages)
                  filemap_check_errors()

and filemap_check_errors() always attempts two test_and_clear_bit()
calls on the mapping flags, thus dirtying it for every single
invocation.  The patch below tests each of these bits before clearing
them, avoiding this issue.  In my test case (4-socket box), performance
went from 1.7M IOPS to 4.0M IOPS.

Signed-off-by: Jens Axboe
Acked-by: Jeff Moyer
Cc: Al Viro
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/filemap.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/filemap.c b/mm/filemap.c
index 000a220e2a4..088358c8006 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
 {
         int ret = 0;
         /* Check for outstanding write errors */
-        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+        if (test_bit(AS_ENOSPC, &mapping->flags) &&
+            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                 ret = -ENOSPC;
-        if (test_and_clear_bit(AS_EIO, &mapping->flags))
+        if (test_bit(AS_EIO, &mapping->flags) &&
+            test_and_clear_bit(AS_EIO, &mapping->flags))
                 ret = -EIO;
         return ret;
 }
--
cgit v1.2.3-70-g09d2
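
The change above is an instance of a general pattern for flag words
shared between CPUs: test_and_clear_bit() is an atomic read-modify-write
that takes the cacheline exclusive (and dirties it) even when the bit is
already clear, while a plain test_bit() read keeps the line shared in
the common no-error case.  Here is a small, self-contained userspace
sketch of the same check-before-RMW idea using C11 atomics rather than
the kernel's bitops; the flag names and layout are only illustrative.

/*
 * Userspace sketch of the check-before-clear pattern from
 * filemap_check_errors(): perform the atomic read-modify-write (which
 * dirties the cacheline) only when the flag is actually set.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define AS_EIO    (1u << 0)
#define AS_ENOSPC (1u << 1)

static atomic_uint mapping_flags;       /* stand-in for mapping->flags */

static int check_errors(void)
{
        int ret = 0;

        /* Cheap shared read first; do the RMW only if the bit is set. */
        if ((atomic_load_explicit(&mapping_flags, memory_order_relaxed) & AS_ENOSPC) &&
            (atomic_fetch_and(&mapping_flags, ~AS_ENOSPC) & AS_ENOSPC))
                ret = -ENOSPC;
        if ((atomic_load_explicit(&mapping_flags, memory_order_relaxed) & AS_EIO) &&
            (atomic_fetch_and(&mapping_flags, ~AS_EIO) & AS_EIO))
                ret = -EIO;
        return ret;
}

int main(void)
{
        atomic_fetch_or(&mapping_flags, AS_EIO);        /* simulate a write error */
        printf("first check:  %d\n", check_errors());   /* reports -EIO and clears it */
        printf("second check: %d\n", check_errors());   /* 0, and no RMW was issued */
        return 0;
}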
From 55231e5c898c5c03c14194001e349f40f59bd300 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Thu, 22 May 2014 11:54:17 -0700
Subject: mm: madvise: fix MADV_WILLNEED on shmem swapouts

MADV_WILLNEED currently does not read swapped-out shmem pages back in.

Commit 0cd6144aadd2 ("mm + fs: prepare for non-page entries in page
cache radix trees") made find_get_page() filter exceptional radix tree
entries but failed to convert all find_get_page() callers that WANT
exceptional entries over to find_get_entry().  One of them is shmem
swap readahead in madvise, which now skips over any swap-out records.

Convert it to find_get_entry().

Fixes: 0cd6144aadd2 ("mm + fs: prepare for non-page entries in page cache radix trees")
Signed-off-by: Johannes Weiner
Reported-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/madvise.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b32..a402f8fdc68 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
         for (; start < end; start += PAGE_SIZE) {
                 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

-                page = find_get_page(mapping, index);
+                page = find_get_entry(mapping, index);
                 if (!radix_tree_exceptional_entry(page)) {
                         if (page)
                                 page_cache_release(page);
--
cgit v1.2.3-70-g09d2

From 6f6acb00514c10be35529402f36ad7a288f08c2e Mon Sep 17 00:00:00 2001
From: Michal Hocko
Date: Thu, 22 May 2014 11:54:19 -0700
Subject: memcg: fix swapcache charge from kernel thread context

Commit 284f39afeaa4 ("mm: memcg: push !mm handling out to page cache
charge function") explicitly checks for page cache charges without any
mm context (from kernel thread context[1]).

This seemed to be the only possible case where memory could be charged
without an mm context, so commit 03583f1a631c ("memcg: remove
unnecessary !mm check from try_get_mem_cgroup_from_mm()") removed the
mm check from get_mem_cgroup_from_mm().  This however caused another
NULL ptr dereference during early boot when the loopback kernel thread
splices to tmpfs, as reported by Stephan Kulow:

  BUG: unable to handle kernel NULL pointer dereference at 0000000000000360
  IP: get_mem_cgroup_from_mm.isra.42+0x2b/0x60
  Oops: 0000 [#1] SMP
  Modules linked in: btrfs dm_multipath dm_mod scsi_dh multipath raid10 raid456 async_raid6_recov async_memcpy async_pq raid6_pq async_xor xor async_tx raid1 raid0 md_mod parport_pc parport nls_utf8 isofs usb_storage iscsi_ibft iscsi_boot_sysfs arc4 ecb fan thermal nfs lockd fscache nls_iso8859_1 nls_cp437 sg st hid_generic usbhid af_packet sunrpc sr_mod cdrom ata_generic uhci_hcd virtio_net virtio_blk ehci_hcd usbcore ata_piix floppy processor button usb_common virtio_pci virtio_ring virtio edd squashfs loop ppa]
  CPU: 0 PID: 97 Comm: loop1 Not tainted 3.15.0-rc5-5-default #1
  Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
  Call Trace:
    __mem_cgroup_try_charge_swapin+0x40/0xe0
    mem_cgroup_charge_file+0x8b/0xd0
    shmem_getpage_gfp+0x66b/0x7b0
    shmem_file_splice_read+0x18f/0x430
    splice_direct_to_actor+0xa2/0x1c0
    do_lo_receive+0x5a/0x60 [loop]
    loop_thread+0x298/0x720 [loop]
    kthread+0xc6/0xe0
    ret_from_fork+0x7c/0xb0

Also Branimir Maksimovic reported the following oops, which is
triggered for the swapcache charge path from the accounting code for
kernel threads:

  CPU: 1 PID: 160 Comm: kworker/u8:5 Tainted: P OE 3.15.0-rc5-core2-custom #159
  Hardware name: System manufacturer System Product Name/MAXIMUSV GENE, BIOS 1903 08/19/2013
  task: ffff880404e349b0 ti: ffff88040486a000 task.ti: ffff88040486a000
  RIP: get_mem_cgroup_from_mm.isra.42+0x2b/0x60
  Call Trace:
    __mem_cgroup_try_charge_swapin+0x45/0xf0
    mem_cgroup_charge_file+0x9c/0xe0
    shmem_getpage_gfp+0x62c/0x770
    shmem_write_begin+0x38/0x40
    generic_perform_write+0xc5/0x1c0
    __generic_file_aio_write+0x1d1/0x3f0
    generic_file_aio_write+0x4f/0xc0
    do_sync_write+0x5a/0x90
    do_acct_process+0x4b1/0x550
    acct_process+0x6d/0xa0
    do_exit+0x827/0xa70
    kthread+0xc3/0xf0

This patch fixes the issue by reintroducing the mm check into
get_mem_cgroup_from_mm().
We could do the same trick in __mem_cgroup_try_charge_swapin() as we do
for the regular page cache path, but it is not worth the trouble.  The
check is not that expensive and it is better to make
get_mem_cgroup_from_mm() more robust.

[1] - http://marc.info/?l=linux-mm&m=139463617808941&w=2

Fixes: 03583f1a631c ("memcg: remove unnecessary !mm check from try_get_mem_cgroup_from_mm()")
Reported-and-tested-by: Stephan Kulow
Reported-by: Branimir Maksimovic
Signed-off-by: Michal Hocko
Acked-by: Johannes Weiner
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)

(limited to 'mm')

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c47dffdcb24..5177c6d4a2d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
         rcu_read_lock();
         do {
-                memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
-                if (unlikely(!memcg))
+                /*
+                 * Page cache insertions can happen without an
+                 * actual mm context, e.g. during disk probing
+                 * on boot, loopback IO, acct() writes etc.
+                 */
+                if (unlikely(!mm))
                         memcg = root_mem_cgroup;
+                else {
+                        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+                        if (unlikely(!memcg))
+                                memcg = root_mem_cgroup;
+                }
         } while (!css_tryget(&memcg->css));
         rcu_read_unlock();
         return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
                 return 0;
         }

-        /*
-         * Page cache insertions can happen without an actual mm
-         * context, e.g. during disk probing on boot.
-         */
-        if (unlikely(!mm))
-                memcg = root_mem_cgroup;
-        else {
-                memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
-                if (!memcg)
-                        return -ENOMEM;
-        }
+        memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+        if (!memcg)
+                return -ENOMEM;
         __mem_cgroup_commit_charge(memcg, page, 1, type, false);
         return 0;
 }
--
cgit v1.2.3-70-g09d2

From 3e030ecc0fc7de10fd0da10c1c19939872a31717 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Thu, 22 May 2014 11:54:21 -0700
Subject: mm/memory-failure.c: fix memory leak by race between poison and
 unpoison

When a memory error happens on an in-use page or a (free or in-use)
hugepage, the victim page is isolated with its refcount set to one.
When you try to unpoison it later, unpoison_memory() calls put_page()
for it twice in order to bring the page back to the free page pool
(buddy or free hugepage list).  However, if another memory error occurs
on the page which we are unpoisoning, memory_failure() returns without
releasing the refcount it took at the start of the same call, which
results in a memory leak and inconsistent num_poisoned_pages statistics.
This patch fixes it.

Signed-off-by: Naoya Horiguchi
Cc: Andi Kleen
Cc: [2.6.32+]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memory-failure.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'mm')

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index dbf8922216a..9ccef39a9de 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1153,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
          */
         if (!PageHWPoison(p)) {
                 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+                atomic_long_sub(nr_pages, &num_poisoned_pages);
+                put_page(hpage);
                 res = 0;
                 goto out;
         }
--
cgit v1.2.3-70-g09d2