author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-05-08 16:27:27 +1000
committer	Paul Mackerras <paulus@samba.org>	2007-05-09 16:35:00 +1000
commit		d0f13e3c20b6fb73ccb467bdca97fa7cf5a574cd
tree		a2de01a21dbb28449893102742e6b516a519c03e /arch/powerpc/mm/hugetlbpage.c
parent		16f1c746755836aa823658000493cdab8ce7b098
[POWERPC] Introduce address space "slices"
The basic issue is to be able to do what hugetlbfs does but with
different page sizes for some other special filesystems; more
specifically, my need is:

 - Huge pages

 - SPE local store mappings using 64K pages on a 4K base page size
   kernel on Cell

 - Some special 4K segments in 64K-page kernels for mapping a dodgy
   type of powerpc-specific infiniband hardware that requires 4K MMU
   mappings for various reasons I won't explain here.

The main issues are:

 - To maintain/keep track of the page size per "segment" (as we can
   only have one page size per segment on powerpc, which are 256MB
   divisions of the address space).

 - To make sure special mappings stay within their allotted
   "segments" (including MAP_FIXED crap)

 - To make sure everybody else doesn't mmap/brk/grow_stack into a
   "segment" that is used for a special mapping

Some of the necessary mechanisms to handle that were present in the
hugetlbfs code, but mostly in ways not suitable for anything else.

The patch relies on some changes to the generic get_unmapped_area()
that just got merged. It still hijacks hugetlb callbacks here or
there as the generic code hasn't been entirely cleaned up yet, but
that shouldn't be a problem.

So what is a slice? Well, I reused the mechanism formerly used by our
hugetlbfs implementation, which divides the address space into
"meta-segments" which I called "slices". The division is done using
256MB slices below 4G, and 1T slices above. Thus the address space is
currently divided into 16 "low" slices and 16 "high" slices. (Special
case: high slice 0 is the area between 4G and 1T.)

Doing so significantly simplifies the tracking of segments and avoids
having to keep track of all the 256MB segments in the address space.

While I used the "concepts" of hugetlbfs, I mostly re-implemented
everything in a more generic way and "ported" hugetlbfs to it.

Slices can have an associated page size, which is encoded in the mmu
context and used by the SLB miss handler to set the segment sizes.
The hash code currently doesn't care; it has a specific check for
hugepages, though I might add a mechanism to provide per-slice hash
mapping functions in the future.

The slice code provides a pair of "generic" get_unmapped_area()
(bottomup and topdown) functions that should work with any slice
size. There is some trickiness here, so I would appreciate it if
people had a look at the implementation of these and let me know if I
got something wrong.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
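To make the slice geometry described above concrete, here is a
minimal, self-contained C sketch of the address-to-slice mapping. It
is not code from this patch: the constant names, the 44-bit user
address space, and the addr_to_slice() helper are assumptions made
for illustration, derived only from the description in the commit
message (256MB slices below 4GB, 1TB slices above, 16 of each).

#include <stdio.h>

/*
 * Illustrative sketch only -- not code from this patch. Constants
 * are assumed from the description: 256MB (1 << 28) slices below the
 * 4GB boundary, 1TB (1UL << 40) slices above, 16 of each for a
 * 44-bit user address space.
 */
#define SLICE_LOW_SHIFT		28
#define SLICE_HIGH_SHIFT	40
#define SLICE_LOW_TOP		0x100000000UL	/* 4GB boundary */

static unsigned int addr_to_slice(unsigned long addr, int *is_high)
{
	if (addr < SLICE_LOW_TOP) {
		*is_high = 0;
		return addr >> SLICE_LOW_SHIFT;	/* low slice 0..15 */
	}
	/* Special case from the text: high slice 0 spans 4GB..1TB. */
	*is_high = 1;
	return addr >> SLICE_HIGH_SHIFT;	/* high slice 0..15 */
}

int main(void)
{
	unsigned long samples[] = {
		0x10000000UL,		/* 256MB -> low slice 1   */
		0xf0000000UL,		/* ~4GB  -> low slice 15  */
		0x100000000UL,		/* 4GB   -> high slice 0  */
		0x20000000000UL,	/* 2TB   -> high slice 2  */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int high;
		unsigned int s = addr_to_slice(samples[i], &high);

		printf("%#lx -> %s slice %u\n", samples[i],
		       high ? "high" : "low", s);
	}
	return 0;
}

With 16 slices per class, slice s maps naturally to bit (1u << s) of
a 16-bit mask, which is consistent with the u16 low_htlb_areas /
high_htlb_areas bitmask fields being removed in the diff below.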
Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	548
1 file changed, 6 insertions(+), 542 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fb959264c10..92a1b16fb7e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -91,7 +91,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pgd_t *pg;
pud_t *pu;
- BUG_ON(! in_hugepage_area(mm->context, addr));
+ BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
addr &= HPAGE_MASK;
@@ -119,7 +119,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
pud_t *pu;
hugepd_t *hpdp = NULL;
- BUG_ON(! in_hugepage_area(mm->context, addr));
+ BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
addr &= HPAGE_MASK;
@@ -302,7 +302,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
start = addr;
pgd = pgd_offset((*tlb)->mm, addr);
do {
- BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
+ BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
@@ -331,203 +331,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return __pte(old);
}
-struct slb_flush_info {
- struct mm_struct *mm;
- u16 newareas;
-};
-
-static void flush_low_segments(void *parm)
-{
- struct slb_flush_info *fi = parm;
- unsigned long i;
-
- BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
-
- if (current->active_mm != fi->mm)
- return;
-
- /* Only need to do anything if this CPU is working in the same
- * mm as the one which has changed */
-
- /* update the paca copy of the context struct */
- get_paca()->context = current->active_mm->context;
-
- asm volatile("isync" : : : "memory");
- for (i = 0; i < NUM_LOW_AREAS; i++) {
- if (! (fi->newareas & (1U << i)))
- continue;
- asm volatile("slbie %0"
- : : "r" ((i << SID_SHIFT) | SLBIE_C));
- }
- asm volatile("isync" : : : "memory");
-}
-
-static void flush_high_segments(void *parm)
-{
- struct slb_flush_info *fi = parm;
- unsigned long i, j;
-
-
- BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
-
- if (current->active_mm != fi->mm)
- return;
-
- /* Only need to do anything if this CPU is working in the same
- * mm as the one which has changed */
-
- /* update the paca copy of the context struct */
- get_paca()->context = current->active_mm->context;
-
- asm volatile("isync" : : : "memory");
- for (i = 0; i < NUM_HIGH_AREAS; i++) {
- if (! (fi->newareas & (1U << i)))
- continue;
- for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
- asm volatile("slbie %0"
- :: "r" (((i << HTLB_AREA_SHIFT)
- + (j << SID_SHIFT)) | SLBIE_C));
- }
- asm volatile("isync" : : : "memory");
-}
-
-static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
- unsigned long start = area << SID_SHIFT;
- unsigned long end = (area+1) << SID_SHIFT;
- struct vm_area_struct *vma;
-
- BUG_ON(area >= NUM_LOW_AREAS);
-
- /* Check no VMAs are in the region */
- vma = find_vma(mm, start);
- if (vma && (vma->vm_start < end))
- return -EBUSY;
-
- return 0;
-}
-
-static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
-{
- unsigned long start = area << HTLB_AREA_SHIFT;
- unsigned long end = (area+1) << HTLB_AREA_SHIFT;
- struct vm_area_struct *vma;
-
- BUG_ON(area >= NUM_HIGH_AREAS);
-
- /* Hack, so that each addresses is controlled by exactly one
- * of the high or low area bitmaps, the first high area starts
- * at 4GB, not 0 */
- if (start == 0)
- start = 0x100000000UL;
-
- /* Check no VMAs are in the region */
- vma = find_vma(mm, start);
- if (vma && (vma->vm_start < end))
- return -EBUSY;
-
- return 0;
-}
-
-static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
- unsigned long i;
- struct slb_flush_info fi;
-
- BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
- BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
-
- newareas &= ~(mm->context.low_htlb_areas);
- if (! newareas)
- return 0; /* The segments we want are already open */
-
- for (i = 0; i < NUM_LOW_AREAS; i++)
- if ((1 << i) & newareas)
- if (prepare_low_area_for_htlb(mm, i) != 0)
- return -EBUSY;
-
- mm->context.low_htlb_areas |= newareas;
-
- /* the context change must make it to memory before the flush,
- * so that further SLB misses do the right thing. */
- mb();
-
- fi.mm = mm;
- fi.newareas = newareas;
- on_each_cpu(flush_low_segments, &fi, 0, 1);
-
- return 0;
-}
-
-static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
-{
- struct slb_flush_info fi;
- unsigned long i;
-
- BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
- BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
- != NUM_HIGH_AREAS);
-
- newareas &= ~(mm->context.high_htlb_areas);
- if (! newareas)
- return 0; /* The areas we want are already open */
-
- for (i = 0; i < NUM_HIGH_AREAS; i++)
- if ((1 << i) & newareas)
- if (prepare_high_area_for_htlb(mm, i) != 0)
- return -EBUSY;
-
- mm->context.high_htlb_areas |= newareas;
-
- /* the context change must make it to memory before the flush,
- * so that further SLB misses do the right thing. */
- mb();
-
- fi.mm = mm;
- fi.newareas = newareas;
- on_each_cpu(flush_high_segments, &fi, 0, 1);
-
- return 0;
-}
-
-int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
-{
- int err = 0;
-
- if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
- return -EINVAL;
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
-
- if (addr < 0x100000000UL)
- err = open_low_hpage_areas(current->mm,
- LOW_ESID_MASK(addr, len));
- if ((addr + len) > 0x100000000UL)
- err = open_high_hpage_areas(current->mm,
- HTLB_AREA_MASK(addr, len));
-#ifdef CONFIG_SPE_BASE
- spu_flush_all_slbs(current->mm);
-#endif
- if (err) {
- printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
- " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
- addr, len,
- LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
- return err;
- }
-
- return 0;
-}
-
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
pte_t *ptep;
struct page *page;
- if (! in_hugepage_area(mm->context, address))
+ if (get_slice_psize(mm, address) != mmu_huge_psize)
return ERR_PTR(-EINVAL);
ptep = huge_pte_offset(mm, address);
@@ -551,359 +361,13 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
}
-/* Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions. */
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long start_addr;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /* handle fixed mapping: prevent overlap with huge pages */
- if (flags & MAP_FIXED) {
- if (is_hugepage_only_range(mm, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (((TASK_SIZE - len) >= addr)
- && (!vma || (addr+len) <= vma->vm_start)
- && !is_hugepage_only_range(mm, addr,len))
- return addr;
- }
- if (len > mm->cached_hole_size) {
- start_addr = addr = mm->free_area_cache;
- } else {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- }
-
-full_search:
- vma = find_vma(mm, addr);
- while (TASK_SIZE - len >= addr) {
- BUG_ON(vma && (addr >= vma->vm_end));
-
- if (touches_hugepage_low_range(mm, addr, len)) {
- addr = ALIGN(addr+1, 1<<SID_SHIFT);
- vma = find_vma(mm, addr);
- continue;
- }
- if (touches_hugepage_high_range(mm, addr, len)) {
- addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
- vma = find_vma(mm, addr);
- continue;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Remember the place where we stopped the search:
- */
- mm->free_area_cache = addr + len;
- return addr;
- }
- if (addr + mm->cached_hole_size < vma->vm_start)
- mm->cached_hole_size = vma->vm_start - addr;
- addr = vma->vm_end;
- vma = vma->vm_next;
- }
-
- /* Make sure we didn't miss any holes */
- if (start_addr != TASK_UNMAPPED_BASE) {
- start_addr = addr = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = 0;
- goto full_search;
- }
- return -ENOMEM;
-}
-
-/*
- * This mmap-allocator allocates new areas top-down from below the
- * stack's low limit (the base):
- *
- * Because we have an exclusive hugepage region which lies within the
- * normal user address space, we have to take special measures to make
- * non-huge mmap()s evade the hugepage reserved regions.
- */
-unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- const unsigned long len, const unsigned long pgoff,
- const unsigned long flags)
-{
- struct vm_area_struct *vma, *prev_vma;
- struct mm_struct *mm = current->mm;
- unsigned long base = mm->mmap_base, addr = addr0;
- unsigned long largest_hole = mm->cached_hole_size;
- int first_time = 1;
-
- /* requested length too big for entire address space */
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- /* handle fixed mapping: prevent overlap with huge pages */
- if (flags & MAP_FIXED) {
- if (is_hugepage_only_range(mm, addr, len))
- return -EINVAL;
- return addr;
- }
-
- /* dont allow allocations above current base */
- if (mm->free_area_cache > base)
- mm->free_area_cache = base;
-
- /* requesting a specific address */
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start)
- && !is_hugepage_only_range(mm, addr,len))
- return addr;
- }
-
- if (len <= largest_hole) {
- largest_hole = 0;
- mm->free_area_cache = base;
- }
-try_again:
- /* make sure it can fit in the remaining address space */
- if (mm->free_area_cache < len)
- goto fail;
-
- /* either no address requested or cant fit in requested address hole */
- addr = (mm->free_area_cache - len) & PAGE_MASK;
- do {
-hugepage_recheck:
- if (touches_hugepage_low_range(mm, addr, len)) {
- addr = (addr & ((~0) << SID_SHIFT)) - len;
- goto hugepage_recheck;
- } else if (touches_hugepage_high_range(mm, addr, len)) {
- addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
- goto hugepage_recheck;
- }
-
- /*
- * Lookup failure means no vma is above this address,
- * i.e. return with success:
- */
- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
- return addr;
-
- /*
- * new region fits between prev_vma->vm_end and
- * vma->vm_start, use it:
- */
- if (addr+len <= vma->vm_start &&
- (!prev_vma || (addr >= prev_vma->vm_end))) {
- /* remember the address as a hint for next time */
- mm->cached_hole_size = largest_hole;
- return (mm->free_area_cache = addr);
- } else {
- /* pull free_area_cache down to the first hole */
- if (mm->free_area_cache == vma->vm_end) {
- mm->free_area_cache = vma->vm_start;
- mm->cached_hole_size = largest_hole;
- }
- }
-
- /* remember the largest hole we saw so far */
- if (addr + largest_hole < vma->vm_start)
- largest_hole = vma->vm_start - addr;
-
- /* try just below the current vma->vm_start */
- addr = vma->vm_start-len;
- } while (len <= vma->vm_start);
-
-fail:
- /*
- * if hint left us with no space for the requested
- * mapping then try again:
- */
- if (first_time) {
- mm->free_area_cache = base;
- largest_hole = 0;
- first_time = 0;
- goto try_again;
- }
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- mm->free_area_cache = TASK_UNMAPPED_BASE;
- mm->cached_hole_size = ~0UL;
- addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- /*
- * Restore the topdown base:
- */
- mm->free_area_cache = base;
- mm->cached_hole_size = ~0UL;
-
- return addr;
-}
-
-static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
-{
- struct vm_area_struct *vma;
-
- vma = find_vma(current->mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || ((addr + len) <= vma->vm_start)))
- return 0;
-
- return -ENOMEM;
-}
-
-static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
-{
- unsigned long addr = 0;
- struct vm_area_struct *vma;
-
- vma = find_vma(current->mm, addr);
- while (addr + len <= 0x100000000UL) {
- BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
- if (! __within_hugepage_low_range(addr, len, segmask)) {
- addr = ALIGN(addr+1, 1<<SID_SHIFT);
- vma = find_vma(current->mm, addr);
- continue;
- }
-
- if (!vma || (addr + len) <= vma->vm_start)
- return addr;
- addr = ALIGN(vma->vm_end, HPAGE_SIZE);
- /* Depending on segmask this might not be a confirmed
- * hugepage region, so the ALIGN could have skipped
- * some VMAs */
- vma = find_vma(current->mm, addr);
- }
-
- return -ENOMEM;
-}
-
-static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
-{
- unsigned long addr = 0x100000000UL;
- struct vm_area_struct *vma;
-
- vma = find_vma(current->mm, addr);
- while (addr + len <= TASK_SIZE_USER64) {
- BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */
-
- if (! __within_hugepage_high_range(addr, len, areamask)) {
- addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
- vma = find_vma(current->mm, addr);
- continue;
- }
-
- if (!vma || (addr + len) <= vma->vm_start)
- return addr;
- addr = ALIGN(vma->vm_end, HPAGE_SIZE);
- /* Depending on segmask this might not be a confirmed
- * hugepage region, so the ALIGN could have skipped
- * some VMAs */
- vma = find_vma(current->mm, addr);
- }
-
- return -ENOMEM;
-}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- int lastshift;
- u16 areamask, curareas;
-
- if (HPAGE_SHIFT == 0)
- return -EINVAL;
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (!cpu_has_feature(CPU_FTR_16M_PAGE))
- return -EINVAL;
-
- /* Paranoia, caller should have dealt with this */
- BUG_ON((addr + len) < addr);
-
- /* Handle MAP_FIXED */
- if (flags & MAP_FIXED) {
- if (prepare_hugepage_range(addr, len, pgoff))
- return -EINVAL;
- return addr;
- }
-
- if (test_thread_flag(TIF_32BIT)) {
- curareas = current->mm->context.low_htlb_areas;
-
- /* First see if we can use the hint address */
- if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
- areamask = LOW_ESID_MASK(addr, len);
- if (open_low_hpage_areas(current->mm, areamask) == 0)
- return addr;
- }
-
- /* Next see if we can map in the existing low areas */
- addr = htlb_get_low_area(len, curareas);
- if (addr != -ENOMEM)
- return addr;
-
- /* Finally go looking for areas to open */
- lastshift = 0;
- for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
- ! lastshift; areamask >>=1) {
- if (areamask & 1)
- lastshift = 1;
-
- addr = htlb_get_low_area(len, curareas | areamask);
- if ((addr != -ENOMEM)
- && open_low_hpage_areas(current->mm, areamask) == 0)
- return addr;
- }
- } else {
- curareas = current->mm->context.high_htlb_areas;
-
- /* First see if we can use the hint address */
- /* We discourage 64-bit processes from doing hugepage
- * mappings below 4GB (must use MAP_FIXED) */
- if ((addr >= 0x100000000UL)
- && (htlb_check_hinted_area(addr, len) == 0)) {
- areamask = HTLB_AREA_MASK(addr, len);
- if (open_high_hpage_areas(current->mm, areamask) == 0)
- return addr;
- }
-
- /* Next see if we can map in the existing high areas */
- addr = htlb_get_high_area(len, curareas);
- if (addr != -ENOMEM)
- return addr;
-
- /* Finally go looking for areas to open */
- lastshift = 0;
- for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
- ! lastshift; areamask >>=1) {
- if (areamask & 1)
- lastshift = 1;
-
- addr = htlb_get_high_area(len, curareas | areamask);
- if ((addr != -ENOMEM)
- && open_high_hpage_areas(current->mm, areamask) == 0)
- return addr;
- }
- }
- printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
- " enough areas\n");
- return -ENOMEM;
+ return slice_get_unmapped_area(addr, len, flags,
+ mmu_huge_psize, 1, 0);
}
/*