author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-05 15:50:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-05 15:50:25 -0800
commit     3e85fb9cd4f711d70c5d26baa86e438390731eab (patch)
tree       85f0abea7e932a7e7c75cef2773fb648b35db1f4 /mm
parent     055bf38d3d6069707e2d555cffdde629b8404ff2 (diff)
parent     b24823e61bfd93d0e72088e4f5245287582ed289 (diff)
Merge branch 'akpm' (Andrew's patch bomb)
Merge the emailed series of 19 patches from Andrew Morton

* akpm:
  rapidio/tsi721: fix queue wrapping bug in inbound doorbell handler
  memcg: fix mapcount check in move charge code for anonymous page
  mm: thp: fix BUG on mm->nr_ptes
  alpha: fix 32/64-bit bug in futex support
  memcg: fix GPF when cgroup removal races with last exit
  debugobjects: Fix selftest for static warnings
  floppy/scsi: fix setting of BIO flags
  memcg: fix deadlock by inverting lrucare nesting
  drivers/rtc/rtc-r9701.c: fix crash in r9701_remove()
  c2port: class_create() returns an ERR_PTR
  pps: class_create() returns an ERR_PTR, not NULL
  hung_task: fix the broken rcu_lock_break() logic
  vfork: kill PF_STARTING
  coredump_wait: don't call complete_vfork_done()
  vfork: make it killable
  vfork: introduce complete_vfork_done()
  aio: wake up waiters when freeing unused kiocbs
  kprobes: return proper error code from register_kprobe()
  kmsg_dump: don't run on non-error paths by default
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c |   6
-rw-r--r--  mm/ksm.c         |  11
-rw-r--r--  mm/memcontrol.c  | 104
-rw-r--r--  mm/migrate.c     |   2
-rw-r--r--  mm/swap.c        |   8
-rw-r--r--  mm/swap_state.c  |  10
6 files changed, 59 insertions(+), 82 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91d3efb25d1..8f7fc394f63 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
set_pmd_at(mm, haddr, pmd, entry);
prepare_pmd_huge_pte(pgtable, mm);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ mm->nr_ptes++;
spin_unlock(&mm->page_table_lock);
}
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd = pmd_mkold(pmd_wrprotect(pmd));
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
prepare_pmd_huge_pte(pgtable, dst_mm);
+ dst_mm->nr_ptes++;
ret = 0;
out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
}
kfree(pages);
- mm->nr_ptes++;
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON(page_mapcount(page) < 0);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
VM_BUG_ON(!PageHead(page));
+ tlb->mm->nr_ptes--;
spin_unlock(&tlb->mm->page_table_lock);
tlb_remove_page(tlb, page);
pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
pte_unmap(pte);
}
- mm->nr_ptes++;
smp_wmb(); /* make pte visible before pmd */
/*
* Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache(vma, address, _pmd);
prepare_pmd_huge_pte(pgtable, mm);
- mm->nr_ptes--;
spin_unlock(&mm->page_table_lock);
#ifndef CONFIG_NUMA
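
The huge_memory.c hunks above ("mm: thp: fix BUG on mm->nr_ptes") move the nr_ptes accounting so the page table deposited behind a huge pmd is counted for its whole lifetime, from creation or fork-copy until zap, rather than being adjusted around split and collapse; that keeps the counter balanced when the mm is torn down. A minimal userspace sketch of the invariant, with purely illustrative names (nothing here is kernel API):

/*
 * Self-contained model: the deposited table is counted from huge-pmd
 * creation to zap; a split merely reuses the same table, leaving the
 * count untouched.
 */
#include <assert.h>
#include <stdio.h>

struct mm_model { long nr_ptes; };

static void huge_pmd_create(struct mm_model *mm) { mm->nr_ptes++; } /* deposit pgtable */
static void huge_pmd_zap(struct mm_model *mm)    { mm->nr_ptes--; } /* free pgtable    */
static void huge_pmd_split(struct mm_model *mm)  { (void)mm; }      /* reuse pgtable   */

int main(void)
{
	struct mm_model mm = { 0 };

	huge_pmd_create(&mm);      /* THP faulted in                      */
	huge_pmd_split(&mm);       /* later split into regular ptes       */
	huge_pmd_zap(&mm);         /* table freed when finally unmapped   */

	assert(mm.nr_ptes == 0);   /* models the teardown sanity check    */
	printf("nr_ptes balanced: %ld\n", mm.nr_ptes);
	return 0;
}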
diff --git a/mm/ksm.c b/mm/ksm.c
index 1925ffbfb27..310544a379a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/memcontrol.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (new_page) {
- /*
- * The memcg-specific accounting when moving
- * pages around the LRU lists relies on the
- * page's owner (memcg) to be valid. Usually,
- * pages are assigned to a new owner before
- * being put on the LRU list, but since this
- * is not the case here, the stale owner from
- * a previous allocation cycle must be reset.
- */
- mem_cgroup_reset_owner(new_page);
copy_user_highpage(new_page, page, address, vma);
SetPageDirty(new_page);
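
This ksm.c hunk (and the matching ones in migrate.c and swap_state.c below) can drop mem_cgroup_reset_owner() because the stale-owner problem is now handled lazily at LRU-add time: as the memcontrol.c hunk below shows, mem_cgroup_lru_add_list() switches any uncharged page to the root group. A self-contained sketch of that policy, illustrative names only:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct memcg { const char *name; };
static struct memcg root_memcg  = { "root" };
static struct memcg stale_memcg = { "stale" };

struct page_model {
	struct memcg *owner;   /* models pc->mem_cgroup  */
	bool charged;          /* models PageCgroupUsed  */
};

/* The LRU-add path fixes up uncharged pages itself, so allocation
 * sites no longer need an explicit owner reset. */
static void lru_add(struct page_model *page)
{
	if (!page->charged && page->owner != &root_memcg)
		page->owner = &root_memcg;
}

int main(void)
{
	/* A fresh page can carry a stale owner from a past life. */
	struct page_model page = { .owner = &stale_memcg, .charged = false };

	lru_add(&page);
	assert(page.owner == &root_memcg);
	printf("owner settled on: %s\n", page.owner->name);
	return 0;
}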
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 228d6461c12..5585dc3d364 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
+
+ /*
+ * Surreptitiously switch any uncharged page to root:
+ * an uncharged page off lru does nothing to secure
+ * its former mem_cgroup from sudden removal.
+ *
+ * Our caller holds lru_lock, and PageCgroupUsed is updated
+ * under page_cgroup lock: between them, they make all uses
+ * of pc->mem_cgroup safe.
+ */
+ if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+ pc->mem_cgroup = memcg = root_mem_cgroup;
+
mz = page_cgroup_zoneinfo(memcg, page);
/* compound_order() is stabilized through lru_lock */
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
- enum charge_type ctype)
+ enum charge_type ctype,
+ bool lrucare)
{
+ struct zone *uninitialized_var(zone);
+ bool was_on_lru = false;
+
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
* we don't need page_cgroup_lock about tail pages, because they are not
* accessed by any other context at this point.
*/
+
+ /*
+ * In some cases (SwapCache, FUSE's splice_buf->radixtree), the page
+ * may already be on some other mem_cgroup's LRU. Take care of it.
+ */
+ if (lrucare) {
+ zone = page_zone(page);
+ spin_lock_irq(&zone->lru_lock);
+ if (PageLRU(page)) {
+ ClearPageLRU(page);
+ del_page_from_lru_list(zone, page, page_lru(page));
+ was_on_lru = true;
+ }
+ }
+
pc->mem_cgroup = memcg;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
break;
}
+ if (lrucare) {
+ if (was_on_lru) {
+ VM_BUG_ON(PageLRU(page));
+ SetPageLRU(page);
+ add_page_to_lru_list(zone, page, page_lru(page));
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ }
+
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
- WARN_ON_ONCE(PageLRU(page));
+
/*
* "charge_statistics" updated event counter. Then, check it.
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
if (ret == -ENOMEM)
return ret;
- __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+ __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
return 0;
}
@@ -2663,35 +2704,6 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
enum charge_type ctype);
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
- enum charge_type ctype)
-{
- struct page_cgroup *pc = lookup_page_cgroup(page);
- struct zone *zone = page_zone(page);
- unsigned long flags;
- bool removed = false;
-
- /*
- * In some cases (SwapCache, FUSE's splice_buf->radixtree), the page
- * is already on the LRU. It means the page may be on some other page_cgroup's
- * LRU. Take care of it.
- */
- spin_lock_irqsave(&zone->lru_lock, flags);
- if (PageLRU(page)) {
- del_page_from_lru_list(zone, page, page_lru(page));
- ClearPageLRU(page);
- removed = true;
- }
- __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
- if (removed) {
- add_page_to_lru_list(zone, page, page_lru(page));
- SetPageLRU(page);
- }
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- return;
-}
-
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
@@ -2769,13 +2781,16 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
{
+ struct page_cgroup *pc;
+
if (mem_cgroup_disabled())
return;
if (!memcg)
return;
cgroup_exclude_rmdir(&memcg->css);
- __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+ pc = lookup_page_cgroup(page);
+ __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
/*
* Now swap is on-memory. This means this page may be
* counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
batch->memcg = NULL;
}
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
- struct page_cgroup *pc;
-
- if (mem_cgroup_disabled())
- return;
-
- pc = lookup_page_cgroup(newpage);
- VM_BUG_ON(PageCgroupUsed(pc));
- pc->mem_cgroup = root_mem_cgroup;
-}
-
#ifdef CONFIG_SWAP
/*
* called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
- __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
return ret;
}
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
* the newpage may be on the LRU (or a pagevec for the LRU) already. We lock
* the LRU while we overwrite pc->mem_cgroup.
*/
- __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
}
#ifdef CONFIG_DEBUG_VM
@@ -5077,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
return NULL;
if (PageAnon(page)) {
/* we don't move shared anon */
- if (!move_anon() || page_mapcount(page) > 2)
+ if (!move_anon() || page_mapcount(page) > 1)
return NULL;
} else if (!move_file())
/* we ignore mapcount for file pages */
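
The memcontrol.c changes above fold the old __mem_cgroup_commit_charge_lrucare() wrapper into __mem_cgroup_commit_charge() itself ("memcg: fix deadlock by inverting lrucare nesting"): the wrapper took zone->lru_lock before lock_page_cgroup(), while other paths nest the two locks the other way around, an ABBA pattern. With the lrucare flag, every path now takes the page_cgroup lock first and the lru_lock inside it. A plain-pthreads sketch of the resulting order (illustrative names, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_cgroup_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lru_lock         = PTHREAD_MUTEX_INITIALIZER;

struct page_model { bool on_lru; };

static void commit_charge(struct page_model *page, bool lrucare)
{
	bool was_on_lru = false;

	pthread_mutex_lock(&page_cgroup_lock);      /* always taken first   */
	if (lrucare) {
		pthread_mutex_lock(&lru_lock);      /* always nested inside */
		if (page->on_lru) {
			page->on_lru = false;       /* isolate from the LRU */
			was_on_lru = true;
		}
	}

	/* ... switch the page's memcg while it sits on no LRU list ... */

	if (lrucare) {
		if (was_on_lru)
			page->on_lru = true;        /* put it back          */
		pthread_mutex_unlock(&lru_lock);
	}
	pthread_mutex_unlock(&page_cgroup_lock);
}

int main(void)
{
	struct page_model page = { .on_lru = true };

	commit_charge(&page, true);
	printf("page back on lru: %d\n", page.on_lru);
	return 0;
}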
diff --git a/mm/migrate.c b/mm/migrate.c
index df141f60289..1503b6b54ec 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (!newpage)
return -ENOMEM;
- mem_cgroup_reset_owner(newpage);
-
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
goto out;
diff --git a/mm/swap.c b/mm/swap.c
index fff1ff7fb9a..14380e9fbe3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct zone* zone,
struct page *page, struct page *page_tail)
{
- int active;
+ int uninitialized_var(active);
enum lru_list lru;
const int file = 0;
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
active = 0;
lru = LRU_INACTIVE_ANON;
}
- update_page_reclaim_stat(zone, page_tail, file, active);
} else {
SetPageUnevictable(page_tail);
lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
+
+ if (!PageUnevictable(page))
+ update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
SetPageLRU(page);
if (active)
SetPageActive(page);
- update_page_reclaim_stat(zone, page, file, active);
add_page_to_lru_list(zone, page, lru);
+ update_page_reclaim_stat(zone, page, file, active);
}
/*
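
Both swap.c hunks reorder things so update_page_reclaim_stat() runs only after the page has been added to the LRU list. The LRU add is where an uncharged page gets re-parented to the root group (see the mem_cgroup_lru_add_list() hunk above), so accounting first could still dereference a stale, possibly already-removed memcg. A compact, self-contained sketch of why the order matters (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

struct group { const char *name; long rotated; };
static struct group root  = { "root",  0 };
static struct group stale = { "stale", 0 };   /* may already be gone  */

struct page_model { struct group *owner; bool charged; };

/* Adding to the LRU settles the owner of uncharged pages. */
static void add_to_lru(struct page_model *page)
{
	if (!page->charged)
		page->owner = &root;
}

static void update_reclaim_stat(struct page_model *page)
{
	page->owner->rotated++;   /* before the patch, could hit 'stale' */
}

int main(void)
{
	struct page_model page = { .owner = &stale, .charged = false };

	add_to_lru(&page);            /* new order: settle owner first */
	update_reclaim_stat(&page);   /* ...then account               */

	printf("stat charged to '%s' group\n", page.owner->name);
	return 0;
}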
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a9187..ea6b32d6187 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
new_page = alloc_page_vma(gfp_mask, vma, addr);
if (!new_page)
break; /* Out of memory */
- /*
- * The memcg-specific accounting when moving
- * pages around the LRU lists relies on the
- * page's owner (memcg) to be valid. Usually,
- * pages are assigned to a new owner before
- * being put on the LRU list, but since this
- * is not the case here, the stale owner from
- * a previous allocation cycle must be reset.
- */
- mem_cgroup_reset_owner(new_page);
}
/*