path: arch/parisc/include/asm/pgtable.h
author:     Helge Deller <deller@gmx.de>        2009-01-18 19:16:16 +0100
committer:  Kyle McMartin <kyle@mcmartin.ca>    2009-03-31 02:51:33 +0000
commit:     48d27cb2299c0b2fc4d551bddb6a1005828dc0c6 (patch)
tree:       3f1d8df3a8a417264053425e706e4acce64116a9 /arch/parisc/include/asm/pgtable.h
parent:     15f7176eb1cccec0a332541285ee752b935c1c85 (diff)
parisc: fix usage of 32bit PTE page table entries on 32bit kernels
This patch fixes a long-standing bug on 32-bit parisc Linux kernels which prevented us from using 32-bit PTE page table entries (instead of 64-bit entries of which 32 bits were unused).

The problem was caused by this assembler statement in the L2_ptep macro in arch/parisc/kernel/entry.S:447:

    EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index

which expanded to

    extrw,u r8,9,11,r1

and which has undefined behavior, since the length value (11) extends beyond the leftmost bit (11-1 > 9). Interestingly, PA2.0 processors seem not to care and just zero-extend the value, while PA1.1 processors don't.

Fix this problem by detecting an address space overflow with ASM_BITS_PER_PGD and adjusting it accordingly. To prevent such problems in the future, compile-time sanity checks were added in arch/parisc/mm/init.c.

Since the page table now only consumes half of its old size, we can use the freed memory to harmonize 32- and 64-bit kernels and let both map 16MB for the initial page table.

Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
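For readers who want the arithmetic spelled out, here is a minimal userspace sketch (plain C, not kernel code) of the clamping this patch introduces. The constants are assumptions for illustration only, derived from the extrw,u expansion quoted above (position 9 implies PGDIR_SHIFT = 31 - 9 = 22, and the unclamped PGD index width is the length field, 11); the patch's actual check uses PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY, which equals PGDIR_SHIFT plus the unclamped BITS_PER_PGD.

    #include <stdio.h>

    #define BITS_PER_LONG           32   /* 32-bit kernel (illustrative assumption) */
    #define PGDIR_SHIFT             22   /* 31 - 9, from the quoted expansion */
    #define BITS_PER_PGD_UNCLAMPED  11   /* length field of the quoted extrw,u */

    int main(void)
    {
            int bits_per_pgd = BITS_PER_PGD_UNCLAMPED;

            /* Same idea as the check added in pgtable.h: the PGD index bits
             * plus PGDIR_SHIFT must not exceed the virtual address width. */
            if (PGDIR_SHIFT + bits_per_pgd > BITS_PER_LONG)
                    bits_per_pgd = BITS_PER_LONG - PGDIR_SHIFT;

            /* 22 + 11 = 33 > 32, so the index is clamped to 10 bits and the
             * field extracted in the L2_ptep macro fits inside the word. */
            printf("BITS_PER_PGD = %d\n", bits_per_pgd);
            return 0;
    }

Running the sketch prints BITS_PER_PGD = 10, matching the clamped value a 32-bit kernel would get from the new #if block in pgtable.h under these assumed constants.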
Diffstat (limited to 'arch/parisc/include/asm/pgtable.h')
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 470a4b88124..a27d2e200fb 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -50,11 +50,7 @@
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#ifdef CONFIG_64BIT
#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
-#else
-#define KERNEL_INITIAL_ORDER 23 /* 0 to 1<<23 = 8MB */
-#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
@@ -91,16 +87,25 @@
/* Definitions for 1st level */
#define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD)
+#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
+#define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT)
+#else
#define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
+#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD PTRS_PER_PGD
+#ifdef CONFIG_64BIT
#define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
-
#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
+#else
+#define MAX_ADDRBITS (BITS_PER_LONG)
+#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
+#define SPACEID_SHIFT 0
+#endif
/* This calculates the number of initial pages we need for the initial
* page tables */