author     Steven Whitehouse <swhiteho@redhat.com>  2006-03-31 15:34:58 -0500
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-03-31 15:34:58 -0500
commit     86579dd06deecfa6ac88d5e84e4d63c397cd6f6d (patch)
tree       b4475d3ccde53015ad84a06e4e55e64591171b75 /arch/sparc64/mm/hugetlbpage.c
parent     7ea9ea832212c4a755650f7c7cc1ff0b63292a41 (diff)
parent     a0f067802576d4eb4c65d40b8ee7d6ea3c81dd61 (diff)
Merge branch 'master'
Diffstat (limited to 'arch/sparc64/mm/hugetlbpage.c')
-rw-r--r--  arch/sparc64/mm/hugetlbpage.c  | 219
1 file changed, 194 insertions(+), 25 deletions(-)
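
For context, and not part of the patch itself: the unmapped-area search added below is reached when a process maps a file that lives on hugetlbfs, which wires hugetlb_get_unmapped_area() into its file operations. A minimal user-space sketch; the mount point, file name and mapping length here are assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define LENGTH (8UL * 1024 * 1024)	/* two 4MB sparc64 huge pages */

	int main(void)
	{
		/* Assumes hugetlbfs is mounted at /mnt/huge. */
		int fd = open("/mnt/huge/demo", O_CREAT | O_RDWR, 0600);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* This mmap() ends up in hugetlb_get_unmapped_area(),
		 * which picks a HPAGE_SIZE-aligned address outside the
		 * sparc64 VA hole. */
		void *p = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return 1;
		}
		printf("mapped at %p\n", p);
		munmap(p, LENGTH);
		close(fd);
		return 0;
	}
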
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 625cbb336a2..074620d413d 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -1,7 +1,7 @@
/*
* SPARC64 Huge TLB page support.
*
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
*/
#include <linux/config.h>
@@ -22,6 +22,175 @@
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+/* Slightly simplified from the non-hugepage variant because by
+ * definition we don't have to worry about any page coloring stuff
+ */
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL))
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct * vma;
+ unsigned long task_size = TASK_SIZE;
+ unsigned long start_addr;
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+ if (unlikely(len >= VA_EXCLUDE_START))
+ return -ENOMEM;
+
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ }
+
+ task_size -= len;
+
+full_search:
+ addr = ALIGN(addr, HPAGE_SIZE);
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (addr < VA_EXCLUDE_START &&
+ (addr + len) >= VA_EXCLUDE_START) {
+ addr = VA_EXCLUDE_END;
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+ if (start_addr != TASK_UNMAPPED_BASE) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+ if (likely(!vma || addr + len <= vma->vm_start)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+ mm->free_area_cache = addr + len;
+ return addr;
+ }
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+ }
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len,
+ const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+
+ /* This should only ever run for 32-bit processes. */
+ BUG_ON(!test_thread_flag(TIF_32BIT));
+
+ /* check if free_area_cache is useful for us */
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = mm->mmap_base;
+ }
+
+ /* either no address requested or can't fit in requested address hole */
+ addr = mm->free_area_cache & HPAGE_MASK;
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+ vma = find_vma(mm, addr-len);
+ if (!vma || addr <= vma->vm_start) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr-len);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+ addr = (mm->mmap_base-len) & HPAGE_MASK;
+
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+ if (likely(!vma || addr+len <= vma->vm_start)) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+
+ /* remember the largest hole we saw so far */
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = (vma->vm_start-len) & HPAGE_MASK;
+ } while (likely(len < vma->vm_start));
+
+bottomup:
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->cached_hole_size = ~0UL;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+
+ return addr;
+}
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long task_size = TASK_SIZE;
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (len > task_size)
+ return -ENOMEM;
+
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+ if (task_size - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+ pgoff, flags);
+ else
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+ pgoff, flags);
+}
+
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
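
An aside, not part of the patch: the VA_EXCLUDE_* constants added above pad the sparc64 virtual-address hole with a 4GB guard band on either side. A standalone check of the arithmetic (assumes an LP64 target):

	#include <stdio.h>

	int main(void)
	{
		unsigned long start = 0x0000080000000000UL - (1UL << 32UL);
		unsigned long end   = 0xfffff80000000000UL + (1UL << 32UL);

		/* Prints 0x7ff00000000 and 0xfffff80100000000: the VA
		 * hole extended by 4GB below and above. */
		printf("VA_EXCLUDE_START %#lx\nVA_EXCLUDE_END   %#lx\n",
		       start, end);
		return 0;
	}
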
@@ -30,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- if (pgd) {
- pud = pud_offset(pgd, addr);
- if (pud) {
- pmd = pmd_alloc(mm, pud, addr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, addr);
- }
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, addr);
}
return pte;
}
@@ -48,25 +215,28 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pmd_t *pmd;
pte_t *pte = NULL;
+ addr &= HPAGE_MASK;
+
pgd = pgd_offset(mm, addr);
- if (pgd) {
+ if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
- if (pud) {
+ if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
- if (pmd)
+ if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
}
}
return pte;
}
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
int i;
+ if (!pte_present(*ptep) && pte_present(entry))
+ mm->context.huge_pte_count++;
+
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
set_pte_at(mm, addr, ptep, entry);
ptep++;
@@ -82,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
int i;
entry = *ptep;
+ if (pte_present(entry))
+ mm->context.huge_pte_count--;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
pte_clear(mm, addr, ptep);
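
A reading aid, not part of the patch: the two hunks above keep mm->context.huge_pte_count equal to the number of present huge mappings, incrementing on a not-present-to-present transition in set_huge_pte_at() and decrementing when huge_ptep_get_and_clear() removes a present entry. A toy model of that bookkeeping (all names hypothetical):

	#include <assert.h>
	#include <stdbool.h>

	struct toy_ctx { unsigned long huge_pte_count; };

	static void toy_set(struct toy_ctx *ctx, bool *present)
	{
		if (!*present)
			ctx->huge_pte_count++;	/* mirrors set_huge_pte_at() */
		*present = true;
	}

	static void toy_clear(struct toy_ctx *ctx, bool *present)
	{
		if (*present)
			ctx->huge_pte_count--;	/* mirrors huge_ptep_get_and_clear() */
		*present = false;
	}

	int main(void)
	{
		struct toy_ctx ctx = { 0 };
		bool pte = false;

		toy_set(&ctx, &pte);
		toy_set(&ctx, &pte);	/* re-setting a present entry is a no-op */
		assert(ctx.huge_pte_count == 1);
		toy_clear(&ctx, &pte);
		assert(ctx.huge_pte_count == 0);
		return 0;
	}
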
@@ -92,18 +264,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return entry;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
struct page *follow_huge_addr(struct mm_struct *mm,
unsigned long address, int write)
{
@@ -131,6 +291,15 @@ static void context_reload(void *__data)
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
+ struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+ if (likely(tp->tsb != NULL))
+ return;
+
+ tsb_grow(mm, MM_TSB_HUGE, 0);
+ tsb_context_switch(mm);
+ smp_tsb_sync(mm);
+
/* On UltraSPARC-III+ and later, configure the second half of
* the Data-TLB for huge pages.
*/