 include/linux/hugetlb.h | 11 +++++++++++
 mm/hugetlb.c            | 28 ++++++++++++++++++++++++++++--
 mm/memory.c             |  2 +-
 3 files changed, 38 insertions(+), 3 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index f9db20bfa9f..225164842ab 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -48,6 +48,10 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 			unsigned long *, int *, int, unsigned int flags);
 void unmap_hugepage_range(struct vm_area_struct *,
 			unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end,
+			  struct page *ref_page);
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			  unsigned long start, unsigned long end,
 			  struct page *ref_page);
@@ -130,6 +134,13 @@ static inline void copy_huge_page(struct page *dst, struct page *src)
 
 #define hugetlb_change_protection(vma, address, end, newprot)
 
+static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			struct vm_area_struct *vma, unsigned long start,
+			unsigned long end, struct page *ref_page)
+{
+	BUG();
+}
+
 static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 			struct vm_area_struct *vma, unsigned long start,
 			unsigned long end, struct page *ref_page)
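
Note: the split above gives teardown its own entry point. A minimal sketch of how the two variants are intended to be used (the first caller shape appears verbatim in the mm/memory.c hunk below; the second is an assumed truncation-style caller, not code from this patch):

	/* Final teardown: the VMA is about to be destroyed, so the _final
	 * variant can also shut off future page-table sharing for it. */
	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
	__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);

	/* Range unmap while the VMA stays live: the plain variant leaves
	 * VM_MAYSHARE intact so sharing may continue afterwards. */
	__unmap_hugepage_range(tlb, vma, start, end, ref_page);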
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c39e4beeb63..bc727122dd4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2429,6 +2429,25 @@ again:
 	tlb_end_vma(tlb, vma);
 }
 
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+			  struct vm_area_struct *vma, unsigned long start,
+			  unsigned long end, struct page *ref_page)
+{
+	__unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+	/*
+	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
+	 * test will fail on a vma being torn down, and not grab a page table
+	 * on its way out.  We're lucky that the flag has such an appropriate
+	 * name, and can in fact be safely cleared here.  We could clear it
+	 * before the __unmap_hugepage_range above, but all that's necessary
+	 * is to clear it before releasing the i_mmap_mutex.  This works
+	 * because in the context this is called, the VMA is about to be
+	 * destroyed and the i_mmap_mutex is held.
+	 */
+	vma->vm_flags &= ~VM_MAYSHARE;
+}
+
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page)
 {
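
For context on the VM_MAYSHARE comment above, a condensed sketch of the x86 test it refers to (simplified from arch/x86/mm/hugetlbpage.c of this era; the address and alignment checks are elided):

	static int page_table_shareable(struct vm_area_struct *svma,
					struct vm_area_struct *vma,
					unsigned long addr, pgoff_t idx)
	{
		/* Allow segments to share if only one is marked locked */
		unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
		unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

		/*
		 * The flags must match exactly, so once
		 * __unmap_hugepage_range_final() clears VM_MAYSHARE on a
		 * dying VMA it can no longer pair with a live VM_MAYSHARE
		 * mapping, and its page table is not grabbed on the way out.
		 */
		if (vm_flags != svm_flags)
			return 0;
		/* ... virtual address and PUD alignment checks elided ... */
		return 1;
	}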
@@ -3012,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+	/*
+	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+	 * may have cleared our pud entry and done put_page on the page table:
+	 * once we release i_mmap_mutex, another task can do the final put_page
+	 * and that page table be reused and filled with junk.
+	 */
 	flush_tlb_range(vma, start, end);
+	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 int hugetlb_reserve_pages(struct inode *inode,
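
The reordering in this hunk reduces to a simple rule: flush stale translations while still holding the lock that keeps the shared page table alive. As a pattern (illustrative, not kernel code; `mapping` stands in for vma->vm_file->f_mapping):

	/* Racy: between the unlock and the flush another task can do the
	 * final put_page() on a shared page table, and the page may be
	 * reused while stale TLB entries still translate through it. */
	mutex_unlock(&mapping->i_mmap_mutex);
	flush_tlb_range(vma, start, end);	/* too late */

	/* Safe (this patch): the final put_page() cannot happen while we
	 * hold i_mmap_mutex, so no one can recycle the page table before
	 * the TLB has forgotten it. */
	flush_tlb_range(vma, start, end);
	mutex_unlock(&mapping->i_mmap_mutex);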
diff --git a/mm/memory.c b/mm/memory.c
index ec72a616ccd..482f089765f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1345,7 +1345,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 		 */
 		if (vma->vm_file) {
 			mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
-			__unmap_hugepage_range(tlb, vma, start, end, NULL);
+			__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
 			mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 		}
 	} else
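
Taken together with the hunks above, the teardown path now clears VM_MAYSHARE under i_mmap_mutex before the VMA is destroyed, so a concurrent huge_pmd_share() can no longer latch onto a page table belonging to a dying VMA. A rough sketch of how this branch is reached (assumed call path, simplified; intermediate arguments elided):

	/* exit_mmap(mm)
	 *   -> unmap_vmas(&tlb, vma, ...)
	 *     -> unmap_single_vma(&tlb, vma, start, end, ...)
	 *       -> __unmap_hugepage_range_final(...)  for hugetlb VMAs,
	 *          with i_mmap_mutex held, per this patch
	 */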