author    Michal Simek <monstr@monstr.eu>    2012-07-31 11:30:57 +0200
committer Michal Simek <monstr@monstr.eu>    2012-10-04 14:47:01 +0200
commit    9f78d3b5ab97a22a7e836312c495804ee4bca4ab (patch)
tree      2f4173792ef4b0eda7283de4a604dd002bb60122 /arch/microblaze
parent    88d23b4462c9c9b8c8876d60f5b57d39b53c6227 (diff)
microblaze: Do not use hardcoded values in exception handler
Use predefined macros to support more page sizes.

Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze')
-rw-r--r--  arch/microblaze/include/asm/page.h             |  3
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S  | 53
2 files changed, 32 insertions(+), 24 deletions(-)
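With the default 4K page size, the macros introduced here reduce to exactly the constants the handler used to hardcode. A minimal sketch of that arithmetic (assuming PAGE_SHIFT = 12 and the PTE_SHIFT/PGDIR_SHIFT definitions restated below; not kernel code):

#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define PTE_SHIFT    (PAGE_SHIFT - 2)           /* 1024 ptes per page */
#define PGDIR_SHIFT  (PAGE_SHIFT + PTE_SHIFT)   /* one pgd entry maps 4 MB */

int main(void)
{
        /* L1 (pgdir) lookup: the handler used ">> 20" and "& 0xffc" */
        printf("PGDIR_SHIFT - 2 = %d   (was 20)\n", PGDIR_SHIFT - 2);
        printf("PAGE_SIZE - 4   = 0x%lx (was 0xffc)\n", PAGE_SIZE - 4);

        /* L2 (pte) lookup: the handler used ">> 10" and "& 0xfffff000" */
        printf("PTE_SHIFT       = %d   (was 10)\n", PTE_SHIFT);
        printf("PAGE_MASK       = 0x%lx (was 0xfffff000)\n",
               PAGE_MASK & 0xffffffffUL);
        return 0;
}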
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 287c5485d28..dd9ea9d6b76 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -37,6 +37,8 @@
#define LOAD_OFFSET ASM_CONST((CONFIG_KERNEL_START-CONFIG_KERNEL_BASE_ADDR))
+#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
+
#ifndef __ASSEMBLY__
/* MS be sure that SLAB allocates aligned objects */
@@ -71,7 +73,6 @@ extern unsigned int __page_offset;
* The basic type of a PTE - 32 bit physical addressing.
*/
typedef unsigned long pte_basic_t;
-#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
#define PTE_FMT "%.8lx"
#endif /* CONFIG_MMU */
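For orientation, the BSRLI/andi pairs in the three miss handlers below compute byte offsets into the pgd and the pte table. A rough C restatement of that index arithmetic (a sketch under the same 4K-page assumptions as above; the example address is arbitrary and this is not kernel code):

#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PTE_SHIFT    (PAGE_SHIFT - 2)
#define PGDIR_SHIFT  (PAGE_SHIFT + PTE_SHIFT)

int main(void)
{
        unsigned long vaddr = 0x10003000UL;     /* arbitrary example address */

        /* L1: pgd index scaled to a byte offset, kept inside one page
         * ("BSRLI r5,r3,PGDIR_SHIFT-2" + "andi r5,r5,PAGE_SIZE-4",
         * formerly ">> 20" and "& 0xffc"). */
        unsigned long l1_off = (vaddr >> (PGDIR_SHIFT - 2)) & (PAGE_SIZE - 4);

        /* L2: pte index scaled to a byte offset
         * ("BSRLI r6,r3,PTE_SHIFT" + "andi r6,r6,PAGE_SIZE-4",
         * formerly ">> 10" and "& 0xffc"). */
        unsigned long l2_off = (vaddr >> PTE_SHIFT) & (PAGE_SIZE - 4);

        printf("pgd offset 0x%lx, pte offset 0x%lx\n", l1_off, l2_off);

        /* The handler ORs these offsets onto the page-aligned pgd base and
         * onto the PAGE_MASK-extracted pte-table base to fetch the Linux
         * PTE; a zero table base bails out to ex2/ex7/ex10. */
        return 0;
}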
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index aa510f450ac..76a069dc13c 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -602,18 +602,19 @@ ex_handler_done:
lwi r4, r4, TASK_THREAD+PGDIR
ex4:
tophys(r4,r4)
- BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
- andi r5, r5, 0xffc
+ /* Create L1 (pgdir/pmd) address */
+ BSRLI(r5,r3, PGDIR_SHIFT - 2)
+ andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
- andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex2 /* Bail if no table */
tophys(r5,r5)
- BSRLI(r6,r3,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
+ BSRLI(r6,r3,PTE_SHIFT) /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
+ andi r5, r5, PAGE_MASK + 0x3
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
@@ -632,7 +633,9 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
- andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+/* Ignore the memory-coherent bit; only the ZSEL LSB plus EX/WR are used */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
ori r4, r4, _PAGE_HWEXEC /* make it executable */
/* find the TLB index that caused the fault. It has to be here*/
@@ -701,18 +704,19 @@ ex_handler_done:
lwi r4, r4, TASK_THREAD+PGDIR
ex6:
tophys(r4,r4)
- BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
- andi r5, r5, 0xffc
+ /* Create L1 (pgdir/pmd) address */
+ BSRLI(r5,r3, PGDIR_SHIFT - 2)
+ andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
- andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex7 /* Bail if no table */
tophys(r5,r5)
- BSRLI(r6,r3,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
+ BSRLI(r6,r3,PTE_SHIFT) /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
+ andi r5, r5, PAGE_MASK + 0x3
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
@@ -731,7 +735,8 @@ ex_handler_done:
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
- andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -771,18 +776,19 @@ ex_handler_done:
lwi r4, r4, TASK_THREAD+PGDIR
ex9:
tophys(r4,r4)
- BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */
- andi r5, r5, 0xffc
+ /* Create L1 (pgdir/pmd) address */
+ BSRLI(r5,r3, PGDIR_SHIFT - 2)
+ andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
- andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex10 /* Bail if no table */
tophys(r5,r5)
- BSRLI(r6,r3,10) /* Compute PTE address */
- andi r6, r6, 0xffc
- andi r5, r5, 0xfffff003
+ BSRLI(r6,r3,PTE_SHIFT) /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
+ andi r5, r5, PAGE_MASK + 0x3
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
@@ -801,7 +807,8 @@ ex_handler_done:
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
- andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -854,8 +861,8 @@ ex_handler_done:
* set of bits. These are size, valid, E, U0, and ensure
* bits 20 and 21 are zero.
*/
- andi r3, r3, 0xfffff000
- ori r3, r3, 0x0c0
+ andi r3, r3, PAGE_MASK
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
mts rtlbhi, r3 /* Load TLB HI */
nop
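For the TLB-miss paths above, and assuming the TLB_* encodings from asm/mmu.h (TLB_EX = 0x200, TLB_WR = 0x100, TLB_ZSEL(x) = (x & 0xf) << 4, TLB_ATTR_MASK = 0xf, TLB_VALID = 0x40, TLB_PAGESZ(x) = (x & 7) << 7, PAGESZ_4K = 1), the symbolic masks reproduce the old hardcoded constants for 4K pages; the only behavioural difference is that the memory-coherent bit (TLB_M) is no longer cleared, as the new comment notes. A quick check (a sketch, not kernel code):

#include <stdio.h>

#define PAGE_MASK       0xfffff000UL
#define TLB_EX          0x00000200UL
#define TLB_WR          0x00000100UL
#define TLB_ZSEL(x)     (((x) & 0xfUL) << 4)
#define TLB_ATTR_MASK   0x0000000fUL
#define TLB_M           0x00000002UL    /* memory coherent (assumed encoding) */
#define TLB_VALID       0x00000040UL
#define TLB_PAGESZ(x)   (((x) & 0x7UL) << 7)
#define PAGESZ_4K       1

int main(void)
{
        unsigned long new_keep = PAGE_MASK | TLB_EX | TLB_WR |
                                 TLB_ZSEL(1) | TLB_ATTR_MASK;
        unsigned long old_keep = ~0x0ce2UL & 0xffffffffUL;  /* "andni ...,0x0ce2" */

        printf("new keep mask 0x%08lx\n", new_keep);         /* 0xfffff31f */
        printf("old keep mask 0x%08lx\n", old_keep);         /* 0xfffff31d */
        printf("difference    0x%08lx (TLB_M? %d)\n",
               new_keep ^ old_keep, (new_keep ^ old_keep) == TLB_M);

        /* TLB HI attribute bits: "ori r3, r3, 0x0c0" becomes symbolic */
        printf("TLB_VALID | TLB_PAGESZ(PAGESZ_4K) = 0x%03lx\n",
               TLB_VALID | TLB_PAGESZ(PAGESZ_4K));           /* 0x0c0 */
        return 0;
}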