-rw-r--r--   arch/powerpc/mm/hash_utils_64.c     6
-rw-r--r--   arch/powerpc/mm/hugetlbpage.c       6
-rw-r--r--   arch/powerpc/mm/slb_low.S          13
-rw-r--r--   include/asm-ppc64/pgtable-4k.h      3
-rw-r--r--   include/asm-ppc64/pgtable-64k.h     3
5 files changed, 25 insertions, 6 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b2f3dbca695..f15dfb92dec 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -329,12 +329,14 @@ static void __init htab_init_page_sizes(void)
*/
if (mmu_psize_defs[MMU_PAGE_16M].shift)
mmu_huge_psize = MMU_PAGE_16M;
+ /* With 4k/4level pagetables, we can't (for now) cope with a
+ * huge page size < PMD_SIZE */
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_huge_psize = MMU_PAGE_1M;
/* Calculate HPAGE_SHIFT and sanity check it */
- if (mmu_psize_defs[mmu_huge_psize].shift > 16 &&
- mmu_psize_defs[mmu_huge_psize].shift < 28)
+ if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
+ mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
else
HPAGE_SHIFT = 0; /* No huge pages dude ! */
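
The new bounds are easier to read than the magic numbers they replace: the
chosen huge page shift must be strictly greater than MIN_HUGEPTE_SHIFT (the
smallest mapping a huge PTE can represent at the page-table level where it
lives) and strictly less than SID_SHIFT (a full 256MB segment). A minimal
standalone sketch of that check, assuming a 4k base page configuration where
MIN_HUGEPTE_SHIFT comes out to a 2MB PMD; the function and constants here are
illustrative only, not kernel code:

    #include <stdio.h>

    #define MIN_HUGEPTE_SHIFT 21        /* assumed PMD_SHIFT, 4k base pages */
    #define SID_SHIFT         28        /* 256MB segments */

    /* Mirror of the sanity check in htab_init_page_sizes(): accept the
     * shift only if it lies strictly between the two limits. */
    static unsigned int pick_hpage_shift(unsigned int huge_shift)
    {
            if (huge_shift > MIN_HUGEPTE_SHIFT && huge_shift < SID_SHIFT)
                    return huge_shift;
            return 0;                   /* no huge pages */
    }

    int main(void)
    {
            printf("16M pages -> HPAGE_SHIFT %u\n", pick_hpage_shift(24));
            printf("1M pages  -> HPAGE_SHIFT %u\n", pick_hpage_shift(20));
            return 0;
    }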
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0073a04047e..426c269e552 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -212,6 +212,12 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
BUG_ON(area >= NUM_HIGH_AREAS);
+ /* Hack, so that each address is controlled by exactly one
+ * of the high or low area bitmaps, the first high area starts
+ * at 4GB, not 0 */
+ if (start == 0)
+ start = 0x100000000UL;
+
/* Check no VMAs are in the region */
vma = find_vma(mm, start);
if (vma && (vma->vm_start < end))
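
The added clamp makes the first high area begin at 4GB rather than 0, so every
address is owned by exactly one of the two hugepage-area bitmaps: the low-area
bitmap tracks the 256MB segments below 4GB, the high-area bitmap tracks
everything from 4GB up. A sketch of that split, assuming the 256MB low-area
granularity implied by SID_SHIFT; the names and layout are illustrative, not
the kernel's data structures:

    #include <stdint.h>
    #include <stdio.h>

    #define SID_SHIFT    28                     /* 256MB low areas */
    #define FIRST_HIGH   0x100000000UL          /* first high area starts at 4GB */

    /* Which bitmap is responsible for an address? */
    static const char *owning_bitmap(uint64_t addr)
    {
            return addr < FIRST_HIGH ? "low" : "high";
    }

    /* Bit index within the low-area bitmap (0..15 below 4GB). */
    static unsigned int low_area_index(uint64_t addr)
    {
            return (unsigned int)(addr >> SID_SHIFT);
    }

    int main(void)
    {
            uint64_t addr = 0x30000000UL;       /* 768MB, below 4GB */

            printf("addr 0x%llx -> %s bitmap, low index %u\n",
                   (unsigned long long)addr, owning_bitmap(addr),
                   low_area_index(addr));
            printf("addr 0x%llx -> %s bitmap\n",
                   (unsigned long long)FIRST_HIGH, owning_bitmap(FIRST_HIGH));
            return 0;
    }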
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 3e18241b6f3..950ffc5848c 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -80,12 +80,17 @@ _GLOBAL(slb_miss_kernel_load_virtual)
BEGIN_FTR_SECTION
b 1f
END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+ cmpldi r10,16
+
+ lhz r9,PACALOWHTLBAREAS(r13)
+ mr r11,r10
+ blt 5f
+
lhz r9,PACAHIGHHTLBAREAS(r13)
srdi r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
- srd r9,r9,r11
- lhz r11,PACALOWHTLBAREAS(r13)
- srd r11,r11,r10
- or. r9,r9,r11
+
+5: srd r9,r9,r11
+ andi. r9,r9,1
beq 1f
_GLOBAL(slb_miss_user_load_huge)
li r11,0
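
The old sequence shifted both bitmaps, or'ed the results and branched on
whether anything in the combined value was non-zero, so stray higher bits from
either bitmap could satisfy the test for a segment that is not a hugepage
area. The rewritten code first decides which bitmap applies (ESIDs below 16,
i.e. addresses below 4GB, use the low-area map), computes the matching bit
index, and then tests exactly that one bit via andi. r9,r9,1. A C rendering of
the new logic, as a sketch only; the HTLB_AREA_SHIFT value is an assumption
and the function is not a drop-in for the assembler:

    #include <stdint.h>

    #define SID_SHIFT        28
    #define HTLB_AREA_SHIFT  36         /* assumed high-area granularity */

    /* r10 in the assembler holds the ESID, i.e. addr >> SID_SHIFT; the two
     * halfword bitmaps stand in for PACALOWHTLBAREAS/PACAHIGHHTLBAREAS. */
    static int esid_in_huge_area(uint64_t esid,
                                 uint16_t low_areas, uint16_t high_areas)
    {
            uint64_t bitmap, index;

            if (esid < 16) {                    /* below 4GB: low bitmap */
                    bitmap = low_areas;
                    index  = esid;
            } else {                            /* 4GB and up: high bitmap */
                    bitmap = high_areas;
                    index  = esid >> (HTLB_AREA_SHIFT - SID_SHIFT);
            }

            /* Test exactly one bit, as andi. r9,r9,1 does after the shift. */
            return (int)((bitmap >> index) & 1);
    }

    int main(void)
    {
            /* Low area 3 marked huge: ESID 3 matches, ESID 4 does not. */
            return !(esid_in_huge_area(3, 1 << 3, 0) &&
                     !esid_in_huge_area(4, 1 << 3, 0));
    }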
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-ppc64/pgtable-4k.h
index c883a274855..e9590c06ad9 100644
--- a/include/asm-ppc64/pgtable-4k.h
+++ b/include/asm-ppc64/pgtable-4k.h
@@ -23,6 +23,9 @@
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT PMD_SHIFT
+
/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE (1UL << PUD_SHIFT)
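
Under the 4k base page layout a huge PTE is installed at the PMD level, so the
smallest huge page the page tables can express is one full PMD worth of base
pages. A short worked example, assuming the usual ppc64 4k index sizes (which
are not shown in this hunk):

    PMD_SHIFT         = PAGE_SHIFT (12) + PTE_INDEX_SIZE (9) = 21   (2MB)
    MIN_HUGEPTE_SHIFT = PMD_SHIFT = 21
    16M huge pages (shift 24): 24 > 21   -> usable
    1M  huge pages (shift 20): 20 < 21   -> rejected by the HPAGE_SHIFT check above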
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-ppc64/pgtable-64k.h
index c5f437c86b3..154f1840ece 100644
--- a/include/asm-ppc64/pgtable-64k.h
+++ b/include/asm-ppc64/pgtable-64k.h
@@ -14,6 +14,9 @@
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+/* With 64k base page size, hugepage PTEs go at the PTE level */
+#define MIN_HUGEPTE_SHIFT PAGE_SHIFT
+
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
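
With a 64k base page size the 16M huge page is smaller than a PMD (assuming
PTE_INDEX_SIZE of 12, PMD_SHIFT would be 28, a full 256MB segment), so huge
PTEs sit at the PTE level instead and the floor drops to PAGE_SHIFT.
Continuing the worked example under the same assumptions:

    MIN_HUGEPTE_SHIFT = PAGE_SHIFT = 16
    16M huge pages (shift 24): 16 < 24 < 28 (SID_SHIFT)  -> accepted as HPAGE_SHIFT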