author    Hugh Dickins <hugh.dickins@tiscali.co.uk>    2009-12-14 17:58:59 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-12-15 08:53:17 -0800
commit    af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba (patch)
tree      8dc0ece80878d00409d4662c5fd1e28cd7fbbdd8
parent    53f79acb6ecb648afd63e0f13deba167f1a934df (diff)
mm: CONFIG_MMU for PG_mlocked
Remove three degrees of obfuscation, left over from when we had
CONFIG_UNEVICTABLE_LRU.  MLOCK_PAGES is CONFIG_HAVE_MLOCKED_PAGE_BIT is
CONFIG_HAVE_MLOCK is CONFIG_MMU.  rmap.o (and memory-failure.o) are only
built when CONFIG_MMU, so don't need such conditions at all.

Somehow, I feel no compulsion to remove the CONFIG_HAVE_MLOCK* lines from
the 169 defconfigs: leave those to evolve in due course.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/page-flags.h8
-rw-r--r--mm/Kconfig8
-rw-r--r--mm/internal.h26
-rw-r--r--mm/memory-failure.c2
-rw-r--r--mm/page_alloc.c4
-rw-r--r--mm/rmap.c15
6 files changed, 19 insertions, 44 deletions
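The whole cleanup rests on one compile-time idiom: a macro defined to 1 or 0
by configuration lets ordinary if-tests vanish at compile time, which is what
the MLOCK_PAGES define being deleted below provided. A minimal standalone
sketch of that idiom (names here are illustrative, not from the patch):

	#include <stdio.h>

	/* Stand-in for the deleted define pair: 1 with an MMU, 0 without. */
	#ifdef CONFIG_MMU
	#define MLOCK_PAGES 1
	#else
	#define MLOCK_PAGES 0
	#endif

	static int needs_munlock(int flags)
	{
		/*
		 * When MLOCK_PAGES is 0 the compiler folds this to
		 * "return 0" and drops the branch entirely -- but the
		 * code must still parse, unlike an #ifdef'ed-out block.
		 */
		if (MLOCK_PAGES && flags)
			return 1;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", needs_munlock(1));
		return 0;
	}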
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b17395..49e907bd067 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,7 +99,7 @@ enum pageflags {
PG_buddy, /* Page is free, on buddy lists */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
TESTCLEARFLAG(Unevictable, unevictable)
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
-#define MLOCK_PAGES 1
+#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
#else
-#define MLOCK_PAGES 0
PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif
@@ -393,7 +391,7 @@ static inline void __ClearPageTail(struct page *page)
#endif /* !PAGEFLAGS_EXTENDED */
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
#define __PG_MLOCKED 0
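For orientation: PAGEFLAG(Mlocked, mlocked) and its companions generate the
usual page-flag accessors, while the PAGEFLAG_FALSE/_NOOP variants on the
!CONFIG_MMU side generate constant stubs. Roughly what the two branches
expand to (a hedged sketch, not the verbatim macros from page-flags.h):

	#ifdef CONFIG_MMU
	static inline int PageMlocked(struct page *page)
		{ return test_bit(PG_mlocked, &page->flags); }
	static inline void SetPageMlocked(struct page *page)
		{ set_bit(PG_mlocked, &page->flags); }
	static inline int TestSetPageMlocked(struct page *page)
		{ return test_and_set_bit(PG_mlocked, &page->flags); }
	#else	/* PAGEFLAG_FALSE / SETPAGEFLAG_NOOP stubs */
	static inline int PageMlocked(struct page *page) { return 0; }
	static inline void SetPageMlocked(struct page *page) { }
	static inline int TestSetPageMlocked(struct page *page) { return 0; }
	#endif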
diff --git a/mm/Kconfig b/mm/Kconfig
index 44cf6f0a3a6..77b4980d614 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -200,14 +200,6 @@ config VIRT_TO_BUS
def_bool y
depends on !ARCH_NO_VIRT_TO_BUS
-config HAVE_MLOCK
- bool
- default y if MMU=y
-
-config HAVE_MLOCKED_PAGE_BIT
- bool
- default y if HAVE_MLOCK=y
-
config MMU_NOTIFIER
bool
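Both deleted symbols were invisible bools with no prompt: HAVE_MLOCK was y
exactly when MMU=y, and HAVE_MLOCKED_PAGE_BIT was y exactly when HAVE_MLOCK=y,
so (as the commit message says) each was just CONFIG_MMU under another name.
Seen from C, the change is purely a rename of the guard (sketch):

	struct page;

	#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT	/* old: set iff CONFIG_MMU */
	extern void mlock_vma_page(struct page *page);
	#endif

	#ifdef CONFIG_MMU			/* new: the condition, undisguised */
	extern void mlock_vma_page(struct page *page);
	#endif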
diff --git a/mm/internal.h b/mm/internal.h
index 22ec8d2b0fb..cb7d92d0a46 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,17 +63,6 @@ static inline unsigned long page_order(struct page *page)
return page_private(page);
}
-#ifdef CONFIG_HAVE_MLOCK
-extern long mlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
-extern void munlock_vma_pages_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
-static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
-{
- munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
-}
-#endif
-
/*
* unevictable_migrate_page() called only from migrate_page_copy() to
* migrate unevictable flag to new page.
@@ -86,7 +75,16 @@ static inline void unevictable_migrate_page(struct page *new, struct page *old)
SetPageUnevictable(new);
}
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+#ifdef CONFIG_MMU
+extern long mlock_vma_pages_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void munlock_vma_pages_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+ munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
/*
* Called only in fault path via page_evictable() for a new page
* to determine if it's being mapped into a LOCKED vma.
@@ -144,7 +142,7 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
}
}
-#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
return 0;
@@ -153,7 +151,7 @@ static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
+#endif /* !CONFIG_MMU */
/*
* Return the mem_map entry representing the 'offset' subpage within
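Everything gathered under the new #ifdef CONFIG_MMU above is the real
implementation; for context, the is_mlocked_vma() helper that the fault path
calls looked roughly like this at the time (a hedged reconstruction from the
same-era mm/internal.h, not part of this diff):

	static inline int is_mlocked_vma(struct vm_area_struct *vma,
					 struct page *page)
	{
		VM_BUG_ON(PageLRU(page));

		if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
			return 0;

		if (!TestSetPageMlocked(page)) {
			inc_zone_page_state(page, NR_MLOCK);
			count_vm_event(UNEVICTABLE_PGMLOCKED);
		}
		return 1;
	}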
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 1ac49fef95a..50d4f8d7024 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -582,10 +582,8 @@ static struct page_state {
{ unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty},
{ unevict, unevict, "unevictable LRU", me_pagecache_clean},
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
{ mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty },
{ mlock, mlock, "mlocked LRU", me_pagecache_clean },
-#endif
{ lru|dirty, lru|dirty, "LRU", me_pagecache_dirty },
{ lru|dirty, lru, "clean LRU", me_pagecache_clean },
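The page_state table is scanned in order and the first entry whose masked
flags equal res wins, so dropping the #ifdef is safe: the two mlock rows can
only match when PG_mlocked can actually be set, i.e. under CONFIG_MMU, and
memory-failure.o is only built there anyway. A hedged sketch of the lookup
(find_state is a hypothetical name; the real scan lives inline in
memory_failure()):

	struct page_state {
		unsigned long mask;	/* which flag bits to compare   */
		unsigned long res;	/* required value of those bits */
		char *msg;
		int (*action)(struct page *p, unsigned long pfn);
	};

	/*
	 * First match wins; the table's final catch-all entry has
	 * mask == 0 and therefore matches any page.
	 */
	static struct page_state *find_state(struct page_state *table,
					     unsigned long page_flags)
	{
		struct page_state *ps;

		for (ps = table; ; ps++)
			if ((page_flags & ps->mask) == ps->res)
				return ps;
	}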
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bc2ac63f41..59d2e88fb47 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -486,7 +486,6 @@ static inline void __free_one_page(struct page *page,
zone->free_area[order].nr_free++;
}
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
/*
* free_page_mlock() -- clean up attempts to free an mlocked() page.
* Page should not be on lru, so no need to fix that up.
@@ -497,9 +496,6 @@ static inline void free_page_mlock(struct page *page)
__dec_zone_page_state(page, NR_MLOCK);
__count_vm_event(UNEVICTABLE_MLOCKFREED);
}
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
static inline int free_pages_check(struct page *page)
{
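Note that page_alloc.c, unlike rmap.c, is built on !MMU configs too, yet the
#else stub can still go: the caller tests __TestClearPageMlocked(), which is
the compile-time-false stub without CONFIG_MMU, so the call to
free_page_mlock() is dead code the compiler discards. A hedged reconstruction
of the call site (sketch_free_page is a hypothetical name; not part of this
diff):

	static void sketch_free_page(struct page *page)
	{
		int wasMlocked = __TestClearPageMlocked(page);

		/* ... free_pages_check(), zone lock, etc. ... */
		if (unlikely(wasMlocked))
			free_page_mlock(page);	/* elided on !CONFIG_MMU */
	}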
diff --git a/mm/rmap.c b/mm/rmap.c
index c3d6dc4223a..eb3dfc8355e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -788,7 +788,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
ret = SWAP_MLOCK;
goto out_unmap;
}
- if (MLOCK_PAGES && TTU_ACTION(flags) == TTU_MUNLOCK)
+ if (TTU_ACTION(flags) == TTU_MUNLOCK)
goto out_unmap;
}
if (!(flags & TTU_IGNORE_ACCESS)) {
@@ -861,7 +861,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
out_unmap:
pte_unmap_unlock(pte, ptl);
- if (MLOCK_PAGES && ret == SWAP_MLOCK) {
+ if (ret == SWAP_MLOCK) {
ret = SWAP_AGAIN;
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
if (vma->vm_flags & VM_LOCKED) {
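The hunk is cut off above; for context, the retry it guards continues roughly
as follows: having dropped the PTE lock, try_to_unmap_one() re-checks
VM_LOCKED under mmap_sem and re-mlocks the page if the vma is still locked
(a hedged sketch of the same-era function tail, not part of the diff):

	out_unmap:
		pte_unmap_unlock(pte, ptl);

		if (ret == SWAP_MLOCK) {
			ret = SWAP_AGAIN;
			if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
				if (vma->vm_flags & VM_LOCKED) {
					mlock_vma_page(page);
					ret = SWAP_MLOCK;
				}
				up_read(&vma->vm_mm->mmap_sem);
			}
		}
		return ret;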
@@ -938,11 +938,10 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
return ret;
/*
- * MLOCK_PAGES => feature is configured.
- * if we can acquire the mmap_sem for read, and vma is VM_LOCKED,
+ * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
- if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
+ if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
@@ -1075,9 +1074,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
@@ -1110,9 +1106,6 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
do {
list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
shared.vm_set.list) {
- if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
- (vma->vm_flags & VM_LOCKED))
- continue;
cursor = (unsigned long) vma->vm_private_data;
while ( cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
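The two continue-guards deleted from try_to_unmap_file() above were
unreachable for the same reason as the rest: rmap.o is only built with
CONFIG_MMU, where MLOCK_PAGES was 1, making !MLOCK_PAGES identically false.
A trivial standalone check of that reduction:

	#include <assert.h>

	#define MLOCK_PAGES 1	/* what rmap.c always saw: built only with MMU */

	int main(void)
	{
		int ignore_mlock = 0, vm_locked = 1;

		/* The deleted guard: false for every flag combination. */
		assert(!(!MLOCK_PAGES && !ignore_mlock && vm_locked));
		return 0;
	}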