-rw-r--r--   drivers/char/mem.c  |  2
-rw-r--r--   mm/memory.c         | 14
2 files changed, 13 insertions, 3 deletions
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 91dd669273e..29c3b631445 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 
 		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
 			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
 			break;
 		count = vma->vm_end - addr;
 		if (count > size)
diff --git a/mm/memory.c b/mm/memory.c
index 3666a4c6dd2..d1f46f4e4c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1812,7 +1812,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (write_access) {
+	/*
+	 * A VM_UNPAGED vma will normally be filled with present ptes
+	 * by remap_pfn_range, and never arrive here; but it might have
+	 * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
+	 * It's weird enough handling anon pages in unpaged vmas, we do
+	 * not want to worry about ZERO_PAGEs too (it may or may not
+	 * matter if their counts wrap): just give them anon pages.
+	 */
+
+	if (write_access || (vma->vm_flags & VM_UNPAGED)) {
 		/* Allocate our own private page. */
 		pte_unmap(page_table);
 
@@ -1887,6 +1896,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int anon = 0;
 
 	pte_unmap(page_table);
+	BUG_ON(vma->vm_flags & VM_UNPAGED);
 
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -1962,7 +1972,7 @@ retry:
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(new_page);
 		page_add_anon_rmap(new_page, vma, address);
-	} else if (!(vma->vm_flags & VM_UNPAGED)) {
+	} else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
 	}
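
For context, the do_anonymous_page() hunk above targets vmas populated by remap_pfn_range(), which in this series carry VM_UNPAGED: such a vma is normally filled with present ptes, but a hole, or a later mremap() expansion when !VM_DONTEXPAND, can still fault into do_anonymous_page(), and the patch resolves that fault with a private anonymous page rather than a ZERO_PAGE. Below is a minimal sketch of the kind of driver mmap handler that sets up such a mapping; the device name, mydev_buf_pfn and MYDEV_BUF_SIZE are hypothetical placeholders, not taken from this patch.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical placeholders for a device-owned buffer, illustration only. */
#define MYDEV_BUF_SIZE	(256 * 1024)
static unsigned long mydev_buf_pfn;

/*
 * Sketch of a character-device mmap handler: remap_pfn_range() fills
 * the vma with present ptes and, with this series, the vma is treated
 * as VM_UNPAGED.  If only part of the range is remapped, or the vma
 * later grows via mremap(), a fault in the hole reaches
 * do_anonymous_page(), which the hunk above now satisfies with a
 * private anonymous page instead of a ZERO_PAGE.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size > MYDEV_BUF_SIZE)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, mydev_buf_pfn,
			       size, vma->vm_page_prot);
}

The drivers/char/mem.c hunk is the companion change on the read side: read_zero_pagealigned() now treats a VM_UNPAGED vma like a shared or hugetlb one and breaks out of its zero-page mapping loop rather than trying to install ZERO_PAGEs there.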