path: root/mm/hugetlb.c
author		Adam Litke <agl@us.ibm.com>	2006-01-06 00:10:43 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 08:33:22 -0800
commit		86e5216f8d8aa258ba836caffe2613d79cc9aead (patch)
tree		33669c0194490700b575fceb0e5e010a4468a5fe /mm/hugetlb.c
parent		85ef47f74afe96c8c23eaa605f28cc01443c905f (diff)
[PATCH] Hugetlb: Reorganize hugetlb_fault to prepare for COW
This patch splits the "no_page()" type activity into its own function,
hugetlb_no_page().  hugetlb_fault() becomes the entry point for hugetlb faults
and delegates to the appropriate handler depending on the type of fault.
Right now we still have only hugetlb_no_page() but a later patch introduces a
COW fault.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Adam Litke <agl@us.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: "Seth, Rohit" <rohit.seth@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
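
[Editor's note] For readers following the series, a minimal sketch of where a
write-fault handler could slot into the dispatch this patch sets up.  This is
not part of the commit: hugetlb_cow() is a hypothetical name, the write_access
check is illustrative, and the eventual COW patch may differ in detail.

/*
 * Sketch only: hugetlb_fault() as a dispatcher once a COW handler exists.
 * hugetlb_cow() is hypothetical here.
 */
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	entry = *ptep;
	if (pte_none(entry))
		/* No pte yet: delegate to the no-page handler. */
		return hugetlb_no_page(mm, vma, address, ptep);

	if (write_access && !pte_write(entry))
		/* Write to a read-only huge pte: hypothetical COW handler. */
		return hugetlb_cow(mm, vma, address, ptep, entry);

	/* Another thread instantiated the pte before the pte_none() test. */
	return VM_FAULT_MINOR;
}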
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	34
1 files changed, 25 insertions, 9 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fdbbbb90caa..cf8225108b2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -376,20 +376,15 @@ out:
 	return page;
 }
 
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access)
+int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pte_t *ptep)
 {
 	int ret = VM_FAULT_SIGBUS;
 	unsigned long idx;
 	unsigned long size;
-	pte_t *pte;
 	struct page *page;
 	struct address_space *mapping;
 
-	pte = huge_pte_alloc(mm, address);
-	if (!pte)
-		goto out;
-
 	mapping = vma->vm_file->f_mapping;
 	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
 		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
@@ -408,11 +403,11 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto backout;
 
 	ret = VM_FAULT_MINOR;
-	if (!pte_none(*pte))
+	if (!pte_none(*ptep))
 		goto backout;
 
 	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
-	set_huge_pte_at(mm, address, pte, make_huge_pte(vma, page));
+	set_huge_pte_at(mm, address, ptep, make_huge_pte(vma, page));
 	spin_unlock(&mm->page_table_lock);
 	unlock_page(page);
 out:
@@ -426,6 +421,27 @@ backout:
 	goto out;
 }
 
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	pte_t *ptep;
+	pte_t entry;
+
+	ptep = huge_pte_alloc(mm, address);
+	if (!ptep)
+		return VM_FAULT_OOM;
+
+	entry = *ptep;
+	if (pte_none(entry))
+		return hugetlb_no_page(mm, vma, address, ptep);
+
+	/*
+	 * We could get here if another thread instantiated the pte
+	 * before the test above.
+	 */
+	return VM_FAULT_MINOR;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
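
[Editor's note] For context, and not part of this diff: in kernels of this era
the generic fault path already routes hugetlb VMAs to this entry point.  A
hedged paraphrase (from memory) of the check in mm/memory.c's
__handle_mm_fault():

	/* Hugetlb VMAs never reach the normal pte fault handlers. */
	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, write_access);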