path: root/include
author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-07-23 21:27:08 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-24 10:47:15 -0700
commit	a1f242ff460e4b50a045fa237c3c56cce9eabf83 (patch)
tree	657766b55251042b38967422dc9c3ea893b98747 /include
parent	7ae8ed5053a39082d224a3f48409e016baca9c16 (diff)
powerpc ioremap_prot
This adds ioremap_prot() and pte_pgprot() so that one can extract the protection bits from a PTE and pass them to ioremap_prot() (in order to support ptrace of VM_IO | VM_PFNMAP areas as per Rik's patch).

This moves a couple of flag checks around in the ioremap implementations of arch/powerpc. There is a side effect of allowing non-cacheable and non-guarded mappings on ppc32, which before would always have _PAGE_GUARDED set whenever _PAGE_NO_CACHE is. (Standard ioremap will still set _PAGE_GUARDED, but ioremap_prot will be capable of setting such a non-guarded mapping.)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Dave Airlie <airlied@linux.ie>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
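The intended consumer of these two hooks is the generic ptrace/access path for VM_IO | VM_PFNMAP mappings: look up the PTE behind the user address, pull its protection and cache-attribute bits out with pte_pgprot(), and hand them to ioremap_prot() so the temporary kernel mapping inherits the same attributes. The sketch below only illustrates that flow and is not the companion generic-mm patch; the PTE walker (follow_pte_sketch) is a hypothetical stand-in, and locking and partial-page handling are simplified.

/*
 * Hedged sketch, not part of this patch: combining pte_pgprot() and
 * ioremap_prot() to access a VM_IO page on behalf of ptrace.
 */
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/pgtable.h>

static int access_io_mapping(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	unsigned long offset = addr & ~PAGE_MASK;
	unsigned long prot;
	phys_addr_t phys;
	void __iomem *maddr;
	pte_t pte;

	if (follow_pte_sketch(vma->vm_mm, addr, &pte))	/* hypothetical PTE walker */
		return -EINVAL;

	prot = pte_pgprot(pte);			/* only PAGE_PROT_BITS survive */
	phys = (phys_addr_t)pte_pfn(pte) << PAGE_SHIFT;

	/* Re-map with the same cacheability/guarding as the user mapping. */
	maddr = ioremap_prot(phys, PAGE_SIZE, prot);
	if (!maddr)
		return -ENOMEM;

	if (write)
		memcpy_toio(maddr + offset, buf, len);
	else
		memcpy_fromio(buf, maddr + offset, len);
	iounmap(maddr);

	return len;
}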
Diffstat (limited to 'include')
-rw-r--r--	include/asm-powerpc/io.h	5
-rw-r--r--	include/asm-powerpc/pgtable-4k.h	3
-rw-r--r--	include/asm-powerpc/pgtable-ppc32.h	16
-rw-r--r--	include/asm-powerpc/pgtable-ppc64.h	8
4 files changed, 31 insertions(+), 1 deletions(-)
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 8b627823f5f..77c7fa025e6 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -617,7 +617,8 @@ static inline void iosync(void)
* and can be hooked by the platform via ppc_md
*
* * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md
+ * also be hooked by the platform via ppc_md. ioremap_prot is the exact
+ * same thing as ioremap_flags.
*
* * ioremap_nocache is identical to ioremap
*
@@ -639,6 +640,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
unsigned long flags);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
+#define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot))
+
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index fd2090dc1dc..c9601dfb4a1 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -51,6 +51,9 @@
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
_PAGE_SECONDARY | _PAGE_GROUP_IX)
+/* There is no 4K PFN hack on 4K pages */
+#define _PAGE_4K_PFN 0
+
/* PAGE_MASK gives the right answer below, but only by accident */
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 3a96d001cb7..bdbab72f3eb 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
#ifndef _PAGE_EXEC
#define _PAGE_EXEC 0
#endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN 0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT 0
+#endif
#ifndef _PMD_PRESENT_MASK
#define _PMD_PRESENT_MASK _PMD_PRESENT
#endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+ _PAGE_USER | _PAGE_ACCESSED | \
+ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+ _PAGE_EXEC | _PAGE_HWEXEC)
/*
* Note: the _PAGE_COHERENT bit automatically gets set in the hardware
* PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+ return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
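A small aside on the ppc32 pte_pgprot() just added (illustration only, variable names hypothetical): the zero fallbacks for _PAGE_ENDIAN and _PAGE_COHERENT let PAGE_PROT_BITS be written once for every 32-bit flavour, and pte_pgprot() then reduces to masking the raw PTE value, stripping the physical page number and hash bookkeeping bits while keeping the attributes that ioremap_prot()/ioremap_flags() understand.

	pte_t pte = *ptep;			/* ptep obtained by the caller */
	unsigned long prot = pte_pgprot(pte);
	/*
	 * prot now holds only attribute/permission bits such as
	 * _PAGE_NO_CACHE, _PAGE_GUARDED, _PAGE_WRITETHRU, _PAGE_COHERENT,
	 * _PAGE_USER, _PAGE_RW, _PAGE_DIRTY, _PAGE_ACCESSED and _PAGE_EXEC;
	 * the PFN and _PAGE_HASHPTE-style bookkeeping bits are masked off.
	 */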
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index ab98a9c80b2..ba8000352b9 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -117,6 +117,10 @@
#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP
+#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+ _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+ _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY 0x8
#define _PTEIDX_GROUP_IX 0x7
@@ -262,6 +266,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+ return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,