author     Graf Yang <graf.yang@analog.com>  2009-07-21 02:26:57 +0000
committer  Mike Frysinger <vapier@gentoo.org>  2009-09-16 21:31:55 -0400
commit     36b841288656b9b30b5d2add2fd136ac7eab5867 (patch)
tree       b3faec0b85a2814216c791de99435e74a43b6148 /arch/blackfin/include/asm
parent     8fc4dd9e876cbda4dfe09ca85e4e1520b36dce77 (diff)
Blackfin: fix MPU handling of invalid memory accesses
The protect_page() function was incorrectly setting up the hardware tables based on possible access capabilities rather than the actual requested values. This means we would grant more access to mmap-ed pages than we should have. Once we fix this, we need to tweak the signal generated by such accesses to align ourselves with other ports. This allows the LTP mmap0{5,6,7} cases to run properly.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
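As a rough standalone model of why this matters (illustrative only, not kernel code: the flag values, PAGE_SHIFT, and PAGE_MASK_NELTS below are hypothetical stand-ins for the kernel definitions), note that a read-only mmap typically carries VM_MAYWRITE, since mprotect() could later add write access, but not VM_WRITE. Keying the permission bitmaps off the actual VM_READ/VM_WRITE/VM_EXEC bits, as the patch below does, keeps the write bitmap clear for such a page:

/* Illustrative sketch of the corrected protect_page() behavior. */
#include <stdio.h>

#define PAGE_SHIFT       12
#define PAGE_MASK_NELTS  8   /* hypothetical words per per-permission bitmap */

#define VM_READ     0x1
#define VM_WRITE    0x2
#define VM_EXEC     0x4
#define VM_MAYREAD  0x10
#define VM_MAYWRITE 0x20
#define VM_MAYEXEC  0x40

/* mask points at three consecutive bitmaps: read, write, exec */
static void protect_page_sketch(unsigned long *mask, unsigned long addr,
                                unsigned long flags)
{
    unsigned long page = addr >> PAGE_SHIFT;
    unsigned long idx  = page >> 5;
    unsigned long bit  = 1UL << (page & 31);

    /* grant read only if the mapping actually requested read access */
    if (flags & VM_READ)
        mask[idx] |= bit;
    else
        mask[idx] &= ~bit;
    mask += PAGE_MASK_NELTS;

    /* same for write: VM_MAYWRITE alone must not set the bit */
    if (flags & VM_WRITE)
        mask[idx] |= bit;
    else
        mask[idx] &= ~bit;
    mask += PAGE_MASK_NELTS;

    if (flags & VM_EXEC)
        mask[idx] |= bit;
    else
        mask[idx] &= ~bit;
}

int main(void)
{
    unsigned long masks[3 * PAGE_MASK_NELTS] = { 0 };

    /* a PROT_READ-only mapping: VM_MAYWRITE set, VM_WRITE not set */
    protect_page_sketch(masks, 0x2000, VM_READ | VM_MAYREAD | VM_MAYWRITE);

    printf("read bit:  %lu\n", (masks[0] >> 2) & 1);               /* 1 */
    printf("write bit: %lu\n", (masks[PAGE_MASK_NELTS] >> 2) & 1); /* 0 */
    return 0;
}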
Diffstat (limited to 'arch/blackfin/include/asm')
-rw-r--r--  arch/blackfin/include/asm/mmu_context.h  6
 1 file changed, 3 insertions, 3 deletions
diff --git a/arch/blackfin/include/asm/mmu_context.h b/arch/blackfin/include/asm/mmu_context.h
index 944e29faae4..040410bb07e 100644
--- a/arch/blackfin/include/asm/mmu_context.h
+++ b/arch/blackfin/include/asm/mmu_context.h
@@ -127,17 +127,17 @@ static inline void protect_page(struct mm_struct *mm, unsigned long addr,
 	unsigned long idx = page >> 5;
 	unsigned long bit = 1 << (page & 31);
 
-	if (flags & VM_MAYREAD)
+	if (flags & VM_READ)
 		mask[idx] |= bit;
 	else
 		mask[idx] &= ~bit;
 	mask += page_mask_nelts;
-	if (flags & VM_MAYWRITE)
+	if (flags & VM_WRITE)
 		mask[idx] |= bit;
 	else
 		mask[idx] &= ~bit;
 	mask += page_mask_nelts;
-	if (flags & VM_MAYEXEC)
+	if (flags & VM_EXEC)
 		mask[idx] |= bit;
 	else
 		mask[idx] &= ~bit;