From c605782b1c3f1c18a55dc1a75b19ed0288f61ac3 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 10 Mar 2009 17:53:29 +0000
Subject: powerpc/mm: Split the various pgtable-* headers based on MMU type

This patch moves the definition of the PTE format for each MMU type to
separate files instead of all in one file. This improves overall
maintainability and will make it easier to add new types.

On 64-bit, additionally, I've separated the headers relative to the
format of the page table tree (3 vs. 4 levels for 64K vs 4K pages) from
the headers specific to the PTE format for hash based processors, this
will make it easier to add support for Book3 "E" 64-bit implementations.

There are still some type-related ifdef's in the generic headers, we
might remove them in the long run, but this patch shouldn't result in
any code change, -hopefully- just definitions being moved around.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/pte-8xx.h | 64 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 arch/powerpc/include/asm/pte-8xx.h

(limited to 'arch/powerpc/include/asm/pte-8xx.h')

diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
new file mode 100644
index 00000000000..b07acfd330b
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -0,0 +1,64 @@
+#ifndef _ASM_POWERPC_PTE_8xx_H
+#define _ASM_POWERPC_PTE_8xx_H
+#ifdef __KERNEL__
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access. The TLB does not have accessed nor write
+ * protect. We assume that if the TLB get loaded with an entry it is
+ * accessed, and overload the changed bit for write protect. We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators. Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded. We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added. We currently support two sizes, 4K and 8M.
+ * This also allows a TLB hander optimization because we can directly
+ * load the PMD into MD_TWC. The 8M pages are only used for kernel
+ * mapping of well known areas. The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT	0x0001	/* Page is valid */
+#define _PAGE_FILE	0x0002	/* when !present: nonlinear file mapping */
+#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
+#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */
+
+/* These five software bits must be masked out when the entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_EXEC	0x0008	/* software: i-cache coherency required */
+#define _PAGE_GUARDED	0x0010	/* software: guarded access */
+#define _PAGE_DIRTY	0x0020	/* software: page changed */
+#define _PAGE_RW	0x0040	/* software: user write access allowed */
+#define _PAGE_ACCESSED	0x0080	/* software: page referenced */
+
+/* Setting any bits in the nibble with the follow two controls will
+ * require a TLB exception handler change. It is assumed unused bits
+ * are always zero.
+ */
+#define _PAGE_HWWRITE	0x0100	/* h/w write enable: never set in Linux PTE */
+#define _PAGE_USER	0x0800	/* One of the PP bits, the other is USER&~RW */
+
+#define _PMD_PRESENT	0x0001
+#define _PMD_BAD	0x0ff0
+#define _PMD_PAGE_MASK	0x000c
+#define _PMD_PAGE_8M	0x000c
+
+#define _PTE_NONE_MASK	_PAGE_ACCESSED
+
+/* Until my rework is finished, 8xx still needs atomic PTE updates */
+#define PTE_ATOMIC_UPDATES	1
+
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_8xx_H */
-- cgit v1.2.3-70-g09d2
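Because this view is limited to pte-8xx.h, the other side of the split — the common 32-bit page-table header pulling in exactly one PTE-format header per MMU family — is not shown. A rough sketch of that selection pattern follows; the config symbols and header names here follow the split described above but are assumptions, not quoted from the tree:

    /* Illustrative sketch of the per-MMU include selection performed by the
     * common 32-bit page table header after this split.  Config symbols and
     * file names are assumptions for illustration, not quoted diff content. */
    #if defined(CONFIG_40x)
    #include <asm/pte-40x.h>
    #elif defined(CONFIG_44x)
    #include <asm/pte-44x.h>
    #elif defined(CONFIG_FSL_BOOKE)
    #include <asm/pte-fsl-booke.h>
    #elif defined(CONFIG_8xx)
    #include <asm/pte-8xx.h>
    #else                   /* classic hash-based 6xx/7xx/7xxx */
    #include <asm/pte-hash32.h>
    #endif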
From 8d1cf34e7ad5c7738ce20d20bd7f002f562cb8b5 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 19 Mar 2009 19:34:08 +0000
Subject: powerpc/mm: Tweak PTE bit combination definitions

This patch tweaks the way some PTE bit combinations are defined, in such
a way that the 32 and 64-bit variants become almost identical and that
will make it easier to bring in a new common pte-* file for the new
variant of the Book3-E support.

The combination of bits defining access to kernel pages is now clearly
separated from the combination used by userspace and the core VM. The
resulting generated code should remain identical unless I made a mistake.

Note: While at it, I removed a non-sensical statement related to
CONFIG_KGDB in ppc_mmu_32.c which could cause kernel mappings to be
user accessible when that option is enabled. Probably something that
bitrotted.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/fixmap.h        |  2 +-
 arch/powerpc/include/asm/pgtable-ppc32.h | 41 ++++++++++++++--------------
 arch/powerpc/include/asm/pgtable-ppc64.h | 46 +++++++++++++++++++------------
 arch/powerpc/include/asm/pgtable.h       |  4 +++
 arch/powerpc/include/asm/pte-8xx.h       |  3 ++
 arch/powerpc/include/asm/pte-hash32.h    |  1 -
 arch/powerpc/include/asm/pte-hash64-4k.h |  3 --
 arch/powerpc/include/asm/pte-hash64.h    | 47 ++++++++++++++++++--------------
 arch/powerpc/mm/fsl_booke_mmu.c          |  2 +-
 arch/powerpc/mm/pgtable_32.c             |  4 +--
 arch/powerpc/mm/ppc_mmu_32.c             | 10 ++-----
 arch/powerpc/sysdev/cpm_common.c         |  2 +-
 12 files changed, 89 insertions(+), 76 deletions(-)

(limited to 'arch/powerpc/include/asm/pte-8xx.h')

diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 8428b38a3d3..d60fd18f428 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -61,7 +61,7 @@ extern void __set_fixmap (enum fixed_addresses idx,
  * Some hardware wants to get fixmapped without caching.
  */
 #define set_fixmap_nocache(idx, phys) \
-	__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
+	__set_fixmap(idx, phys, PAGE_KERNEL_NCG)
 
 #define clear_fixmap(idx) \
 	__set_fixmap(idx, 0, __pgprot(0))
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 67ceffc01b4..7ce331e51f9 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -144,6 +144,13 @@ extern int icache_44x_need_flush;
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 
+#ifndef _PAGE_KERNEL_RO
+#define _PAGE_KERNEL_RO	0
+#endif
+#ifndef _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#endif
+
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
 /* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
@@ -186,30 +193,25 @@ extern int icache_44x_need_flush;
 #else
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #endif
-#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
-
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
-#define _PAGE_KERNEL	(_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
-#define _PAGE_KERNEL_NC	(_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
-
-#ifdef CONFIG_PPC_STD_MMU
-/* On standard PPC MMU, no user access implies kernel read/write access,
- * so to write-protect kernel memory we must turn on user access */
-#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
-#else
-#define _PAGE_KERNEL_RO	(_PAGE_BASE | _PAGE_SHARED)
-#endif
-
-#define _PAGE_IO	(_PAGE_KERNEL_NC | _PAGE_GUARDED)
-#define _PAGE_RAM	(_PAGE_KERNEL | _PAGE_HWEXEC)
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
 
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
 	defined(CONFIG_KPROBES)
 /* We want the debuggers to be able to set breakpoints anywhere, so
  * don't write protect the kernel text */
-#define _PAGE_RAM_TEXT	_PAGE_RAM
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
 #else
-#define _PAGE_RAM_TEXT	(_PAGE_KERNEL_RO | _PAGE_HWEXEC)
+#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
 #endif
 
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
@@ -220,9 +222,6 @@ extern int icache_44x_need_flush;
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 
-#define PAGE_KERNEL		__pgprot(_PAGE_RAM)
-#define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)
-
 /*
  * The PowerPC can only do execute protection on a segment (256MB) basis,
  * not on a page basis. So we consider execute permission the same as read.
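To make the new mask composition concrete, the small standalone program below recomputes PAGE_KERNEL_NCG from the 8xx bit values introduced in pte-8xx.h above together with the generic _PAGE_KERNEL_RW fallback added in this hunk. It assumes the usual non-strict pgprot representation where __pgprot(x) is simply the value x; it is an illustration, not kernel code:

    /* Recompute PAGE_KERNEL_NCG for an 8xx-style PTE from the bit values
     * quoted in the patches above.  Illustration only, not kernel code. */
    #include <stdio.h>

    #define _PAGE_PRESENT   0x0001
    #define _PAGE_NO_CACHE  0x0002
    #define _PAGE_GUARDED   0x0010
    #define _PAGE_DIRTY     0x0020
    #define _PAGE_RW        0x0040
    #define _PAGE_ACCESSED  0x0080
    #define _PAGE_HWWRITE   0x0100

    #define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
    #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)

    int main(void)
    {
            unsigned long ncg = _PAGE_BASE_NC | _PAGE_KERNEL_RW |
                                _PAGE_NO_CACHE | _PAGE_GUARDED;

            /* Prints 0x01f3: valid, referenced, dirty, writable,
             * hardware-writable, cache-inhibited and guarded. */
            printf("PAGE_KERNEL_NCG (8xx bits) = 0x%04lx\n", ncg);
            return 0;
    }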
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 542073836b2..5a575f2905f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -81,11 +81,6 @@
  */
 #include <asm/pte-hash64.h>
 
-/* To make some generic powerpc code happy */
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC	0
-#endif
-
 /* Some other useful definitions */
 #define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))

diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
 #endif
+/* Special mapping for AGP */
+#define PAGE_AGP	(PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
 #ifndef __ASSEMBLY__
 
 /* Insert a PTE, top-level function is out of line. It uses an inline
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index b07acfd330b..8c6e3125103 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -59,6 +59,9 @@
 /* Until my rework is finished, 8xx still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
 
+/* We need to add _PAGE_SHARED to kernel pages */
+#define _PAGE_KERNEL_RO	(_PAGE_SHARED)
+#define _PAGE_KERNEL_RW	(_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PTE_8xx_H */
diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
index 6afe22b02f2..16e571c7f9e 100644
--- a/arch/powerpc/include/asm/pte-hash32.h
+++ b/arch/powerpc/include/asm/pte-hash32.h
@@ -44,6 +44,5 @@
 /* Hash table based platforms need atomic updates of the linux PTE */
 #define PTE_ATOMIC_UPDATES	1
 
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PTE_HASH32_H */
diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h
index 29fdc158fe3..c134e809aac 100644
--- a/arch/powerpc/include/asm/pte-hash64-4k.h
+++ b/arch/powerpc/include/asm/pte-hash64-4k.h
@@ -8,9 +8,6 @@
 #define _PAGE_F_GIX	_PAGE_GROUP_IX
 #define _PAGE_SPECIAL	0x10000 /* software: special page */
 
-/* There is no 4K PFN hack on 4K pages */
-#define _PAGE_4K_PFN	0
-
 /* PTE flags to conserve for HPTE identification */
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
 			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
index 62766636cc1..b61b7e4a18d 100644
--- a/arch/powerpc/include/asm/pte-hash64.h
+++ b/arch/powerpc/include/asm/pte-hash64.h
@@ -6,36 +6,41 @@
  * Common bits between 4K and 64K pages in a linux-style PTE.
  * These match the bits in the (hardware-defined) PowerPC PTE as closely
  * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ *
+ * Note: We only support user read/write permissions. Supervisor always
+ * have full read/write to pages above PAGE_OFFSET (pages below that
+ * always use the user access permissions).
+ *
+ * We could create separate kernel read-only if we used the 3 PP bits
+ * combinations that newer processors provide but we currently don't.
  */
-#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
-#define _PAGE_USER	0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED	0x0008
-#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY	0x0080 /* C: page changed */
-#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
-#define _PAGE_RW	0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
+#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
+#define _PAGE_USER	0x0002 /* matches one of the PP bits */
+#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
+#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED	0x0008
+#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
+#define _PAGE_DIRTY	0x0080 /* C: page changed */
+#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
+#define _PAGE_RW	0x0200 /* software: user write access allowed */
+#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
+
+/* No separate kernel read-only */
+#define _PAGE_KERNEL_RW	(_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+#define _PAGE_KERNEL_RO	_PAGE_KERNEL_RW
 
 /* Strong Access Ordering */
-#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
 
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE	0
 
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
 
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/pte-hash64-64k.h>
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 985b6c361ab..bb3d65998e6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(void)
 	phys_addr_t phys = memstart_addr;
 
 	while (cam[tlbcam_index] && tlbcam_index < ARRAY_SIZE(cam)) {
-		settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], _PAGE_KERNEL, 0);
+		settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], PAGE_KERNEL_X, 0);
 		virt += cam[tlbcam_index];
 		phys += cam[tlbcam_index];
 		tlbcam_index++;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 0f8c4371dfa..430d0908fa5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -164,7 +164,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
 	/* Make sure we have the base flags */
 	if ((flags & _PAGE_PRESENT) == 0)
-		flags |= _PAGE_KERNEL;
+		flags |= PAGE_KERNEL;
 
 	/* Non-cacheable page cannot be coherent */
 	if (flags & _PAGE_NO_CACHE)
@@ -296,7 +296,7 @@ void __init mapin_ram(void)
 	p = memstart_addr + s;
 	for (; s < total_lowmem; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
-		f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM;
+		f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
 		map_page(v, p, f);
 #ifdef CONFIG_PPC_STD_MMU_32
 		if (ktext)
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index fe65c405412..2d2a87e1015 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -74,9 +74,6 @@ unsigned long p_mapped_by_bats(phys_addr_t pa)
 
 unsigned long __init mmu_mapin_ram(void)
 {
-#ifdef CONFIG_POWER4
-	return 0;
-#else
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
 
@@ -95,7 +92,7 @@ unsigned long __init mmu_mapin_ram(void)
 			break;
 	}
 
-	setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
+	setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X);
 	done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
 	if ((done < tot) && !bat_addrs[3].limit) {
 		/* use BAT3 to cover a bit more */
@@ -103,12 +100,11 @@ unsigned long __init mmu_mapin_ram(void)
 		for (bl = 128<<10; bl < max_size; bl <<= 1)
 			if (bl * 2 > tot)
 				break;
-		setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
+		setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X);
 		done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
 	}
 
 	return done;
-#endif
 }
 
 /*
@@ -136,9 +132,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
 		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
 		bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
 		bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
-#ifndef CONFIG_KGDB /* want user access for breakpoints */
 		if (flags & _PAGE_USER)
-#endif
 			bat[1].batu |= 1;	/* Vp = 1 */
 		if (flags & _PAGE_GUARDED) {
 			/* G bit must be zero in IBATs */
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 00d3d17c84a..e4b6d66d93d 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -56,7 +56,7 @@ void __init udbg_init_cpm(void)
 {
 	if (cpm_udbg_txdesc) {
 #ifdef CONFIG_CPM2
-		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, _PAGE_IO);
+		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
 #endif
 		udbg_putc = udbg_putc_cpm;
 	}
-- cgit v1.2.3-70-g09d2
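Callers of the fixmap API are unaffected by the rename in fixmap.h above: set_fixmap_nocache() keeps its signature, and only the pgprot it expands to changes from PAGE_KERNEL_NOCACHE to PAGE_KERNEL_NCG. A caller-side sketch follows; the fixmap index, function name and physical address are made up for illustration and are not from the tree:

    /* Sketch only: FIX_EXAMPLE_IO and the address are hypothetical. */
    #include <asm/fixmap.h>

    static void __init map_example_window(void)
    {
            /* Expands to __set_fixmap(FIX_EXAMPLE_IO, 0xff000000, PAGE_KERNEL_NCG):
             * a cache-inhibited, guarded, kernel read/write mapping. */
            set_fixmap_nocache(FIX_EXAMPLE_IO, 0xff000000);
    }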