-rw-r--r--  mm/internal.h    2
-rw-r--r--  mm/mlock.c      34
2 files changed, 24 insertions, 12 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 1c0c4cc0fcf..8562de0a519 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -195,7 +195,7 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
* must be called with vma's mmap_sem held for read or write, and page locked.
*/
extern void mlock_vma_page(struct page *page);
-extern void munlock_vma_page(struct page *page);
+extern unsigned int munlock_vma_page(struct page *page);
/*
* Clear the page's PageMlocked(). This can be useful in a situation where
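The prototype change above has munlock_vma_page() report the size of the
page it just operated on, encoded as a mask of low pfn bits rather than as
a page count. A minimal sketch of the contract (not part of the patch),
assuming 4K base pages and 2MB THPs (HPAGE_PMD_NR == 512) as on x86-64:

	unsigned int page_mask = munlock_vma_page(page);
	/* page_mask == 0:   4K page, or the page was not PageMlocked */
	/* page_mask == 511: mlocked THP head, hpage_nr_pages(page) - 1 */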
diff --git a/mm/mlock.c b/mm/mlock.c
index e6638f565d4..1c5e33fce63 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -102,13 +102,16 @@ void mlock_vma_page(struct page *page)
* can't isolate the page, we leave it for putback_lru_page() and vmscan
* [page_referenced()/try_to_unmap()] to deal with.
*/
-void munlock_vma_page(struct page *page)
+unsigned int munlock_vma_page(struct page *page)
{
+ unsigned int page_mask = 0;
+
BUG_ON(!PageLocked(page));
if (TestClearPageMlocked(page)) {
- mod_zone_page_state(page_zone(page), NR_MLOCK,
- -hpage_nr_pages(page));
+ unsigned int nr_pages = hpage_nr_pages(page);
+ mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+ page_mask = nr_pages - 1;
if (!isolate_lru_page(page)) {
int ret = SWAP_AGAIN;
@@ -141,6 +144,8 @@ void munlock_vma_page(struct page *page)
count_vm_event(UNEVICTABLE_PGMUNLOCKED);
}
}
+
+ return page_mask;
}
/**
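The mask encoding relies on compound pages always spanning a power-of-two
number of base pages (1 << compound_order), so nr_pages - 1 has exactly the
pfn bits set that index into the compound page. A hedged sketch with
hypothetical values:

	unsigned int order = 9;			/* 2MB THP on x86-64 */
	unsigned int nr_pages = 1U << order;	/* 512 */
	unsigned int page_mask = nr_pages - 1;	/* 0x1ff */

	/* pfn & page_mask  == offset of a base page within the THP */
	/* pfn & ~page_mask == pfn of the THP head page */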
@@ -159,7 +164,6 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, int *nonblocking)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long addr = start;
unsigned long nr_pages = (end - start) / PAGE_SIZE;
int gup_flags;
@@ -189,7 +193,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
* We made sure addr is within a VMA, so the following will
* not result in a stack expansion that recurses back here.
*/
- return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
+ return __get_user_pages(current, mm, start, nr_pages, gup_flags,
NULL, NULL, nonblocking);
}
@@ -226,13 +230,12 @@ static int __mlock_posix_error_return(long retval)
void munlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- unsigned long addr;
-
- lru_add_drain();
vma->vm_flags &= ~VM_LOCKED;
- for (addr = start; addr < end; addr += PAGE_SIZE) {
+ while (start < end) {
struct page *page;
+ unsigned int page_mask, page_increm;
+
/*
* Although FOLL_DUMP is intended for get_dump_page(),
* it just so happens that its special treatment of the
@@ -240,13 +243,22 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
* suits munlock very well (and if somehow an abnormal page
* has sneaked into the range, we won't oops here: great).
*/
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+ page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
+ &page_mask);
if (page && !IS_ERR(page)) {
lock_page(page);
- munlock_vma_page(page);
+ lru_add_drain();
+ /*
+ * Any THP page found by follow_page_mask() may have
+ * gotten split before reaching munlock_vma_page(),
+ * so we need to recompute the page_mask here.
+ */
+ page_mask = munlock_vma_page(page);
unlock_page(page);
put_page(page);
}
+ page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+ start += page_increm * PAGE_SIZE;
cond_resched();
}
}
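The page_increm arithmetic above advances the cursor to the first base page
past the compound page just munlocked, even when start is not aligned to the
THP boundary, so the loop no longer walks a 2MB THP 4K at a time. A
standalone userspace sketch of the same expression, with hypothetical pfn
values (PAGE_SHIFT 12 and 512-page THPs as on x86-64):

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define HPAGE_PMD_NR	512	/* 4K pages per 2MB THP */

	int main(void)
	{
		/* Cursor 5 pages into a THP whose head pfn is 512000. */
		unsigned long start = (512000UL + 5) << PAGE_SHIFT;
		unsigned int page_mask = HPAGE_PMD_NR - 1;	/* THP: 511 */
		unsigned int page_increm;

		/* Same expression as in munlock_vma_pages_range() above. */
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		assert(page_increm == 507);	/* 512 - 5: step to THP end */
		start += page_increm * PAGE_SIZE;

		/* Cursor now sits on the next THP-aligned boundary. */
		assert((start >> PAGE_SHIFT) % HPAGE_PMD_NR == 0);

		/* For a small page, page_mask == 0 and the step is 1. */
		page_increm = 1 + (~(start >> PAGE_SHIFT) & 0);
		assert(page_increm == 1);

		printf("stepped to pfn %lu\n", start >> PAGE_SHIFT);
		return 0;
	}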