author	Dave Kleikamp <shaggy@linux.vnet.ibm.com>	2008-07-08 00:28:54 +1000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-09 16:30:45 +1000
commit	ef3d3246a0d06be622867d21af25f997aeeb105f (patch)
tree	9f0ae1913e0e637ec3aa104cc5e81557e5661b3c
parent	379070491e1e744a59e69e5bcf3765012d15ecb4 (diff)
powerpc/mm: Add Strong Access Ordering support
Allow an application to enable Strong Access Ordering on specific pages of memory on Power7 hardware. Currently, Power has a weaker memory model than x86; implementing a stronger memory model allows an emulator to translate x86 code into Power code more efficiently, resulting in faster code execution.

On Power7 hardware, storing 0b1110 in the WIMG bits of the hpte enables strong access ordering mode for that memory page. This patchset allows a user to specify which pages are enabled this way by passing a new protection bit through mmap() and mprotect(). I have defined PROT_SAO to be 0x10.

Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
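For reference, a rough sketch of the PTE flag that the WIMG encoding above corresponds to. The _PAGE_SAO definition comes from a companion patch in this series ("powerpc: Define flags for Strong Access Ordering"); the individual bit values shown here are taken from the ppc64 hash PTE flags of that era and are illustrative, not part of this diff:

	/* WIMG = 0b1110: W (write-through), I (cache-inhibited) and
	 * M (memory coherence) set, G (guarded) clear. A companion patch
	 * expresses that roughly as the following PTE flag combination. */
	#define _PAGE_GUARDED	0x0008	/* G: guarded (not set for SAO) */
	#define _PAGE_COHERENT	0x0010	/* M: enforce memory coherence */
	#define _PAGE_NO_CACHE	0x0020	/* I: cache inhibit */
	#define _PAGE_WRITETHRU	0x0040	/* W: cache write-through */
	#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)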
-rw-r--r--	arch/powerpc/kernel/syscalls.c	3
-rw-r--r--	include/asm-powerpc/mman.h	30
2 files changed, 33 insertions(+), 0 deletions(-)
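A minimal user-space sketch of the new ABI (a hypothetical test program, not part of this patch). It assumes a libc that does not yet know PROT_SAO, so the value 0x10 from the commit message is defined locally; on a kernel with this patch but without the SAO CPU feature, arch_validate_prot() rejects the flag and mmap() fails with EINVAL:

	#include <sys/mman.h>
	#include <stdio.h>

	#ifndef PROT_SAO
	#define PROT_SAO 0x10	/* powerpc-only bit added by this patchset */
	#endif

	int main(void)
	{
		/* Request a strongly access-ordered anonymous page. */
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_SAO,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap(PROT_SAO)");	/* expected on non-SAO hardware */
			return 1;
		}
		/* Accesses through p are strongly ordered on Power7. */
		munmap(p, 4096);
		return 0;
	}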
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 4fe69ca2448..c04832c4a02 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -143,6 +143,9 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
 	struct file * file = NULL;
 	unsigned long ret = -EINVAL;
 
+	if (!arch_validate_prot(prot))
+		goto out;
+
 	if (shift) {
 		if (off & ((1 << shift) - 1))
 			goto out;
diff --git a/include/asm-powerpc/mman.h b/include/asm-powerpc/mman.h
index 0c46bf2c7d5..f8a32e20ba0 100644
--- a/include/asm-powerpc/mman.h
+++ b/include/asm-powerpc/mman.h
@@ -1,7 +1,9 @@
 #ifndef _ASM_POWERPC_MMAN_H
 #define _ASM_POWERPC_MMAN_H
 
+#include <asm/cputable.h>
 #include <asm-generic/mman.h>
+#include <linux/mm.h>
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -26,4 +28,32 @@
 #define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
 #define MAP_NONBLOCK	0x10000		/* do not block on IO */
 
+#ifdef CONFIG_PPC64
+/*
+ * This file is included by linux/mman.h, so we can't use calc_vm_prot_bits()
+ * here.  How important is the optimization?
+ */
+static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+{
+	return (prot & PROT_SAO) ? VM_SAO : 0;
+}
+#define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
+{
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+}
+#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
+
+static inline int arch_validate_prot(unsigned long prot)
+{
+	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+		return 0;
+	if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
+		return 0;
+	return 1;
+}
+#define arch_validate_prot(prot) arch_validate_prot(prot)
+
+#endif /* CONFIG_PPC64 */
 #endif /* _ASM_POWERPC_MMAN_H */
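For context, a rough sketch (not part of this diff) of how the generic layer is expected to consume these hooks once the companion "mm: Allow architectures to define additional protection bits" patch from the same series is applied. The real code lives in include/linux/mman.h and is paraphrased here, not copied verbatim:

	/* Architectures that do not define the hook get a no-op default;
	 * calc_vm_prot_bits() then folds the arch-specific bits
	 * (VM_SAO on powerpc) into the vma flags. */
	#ifndef arch_calc_vm_prot_bits
	#define arch_calc_vm_prot_bits(prot)	0
	#endif

	static inline unsigned long calc_vm_prot_bits(unsigned long prot)
	{
		return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
		       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
		       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC)  |
		       arch_calc_vm_prot_bits(prot);
	}

	/* Similarly, vm_get_page_prot() ORs in arch_vm_get_page_prot(vm_flags),
	 * which is how VM_SAO ends up as _PAGE_SAO in the hashed page table. */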