From 0a26b1364f14852bc9a51db0ca63c5250c775627 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Tue, 28 Mar 2006 10:22:10 +1100
Subject: ppc: Remove CHRP, POWER3 and POWER4 support from arch/ppc

32-bit CHRP machines are now supported only in arch/powerpc, as are
all 64-bit PowerPC processors.  This means that we don't use Open
Firmware on any platform in arch/ppc any more.

This makes PReP support a single-platform option like every other
platform support option in arch/ppc now, thus CONFIG_PPC_MULTIPLATFORM
is gone from arch/ppc.  CONFIG_PPC_PREP is the option that selects
PReP support and is generally what has replaced
CONFIG_PPC_MULTIPLATFORM within arch/ppc.

_machine is all but dead now, being #defined to 0.

Updated Makefiles, comments and Kconfig options generally to reflect
these changes.

Signed-off-by: Paul Mackerras
---
 arch/ppc/mm/hashtable.S   | 34 ----------------------------------
 arch/ppc/mm/init.c        | 13 -------------
 arch/ppc/mm/mmu_context.c |  2 +-
 arch/ppc/mm/pgtable.c     |  2 +-
 arch/ppc/mm/ppc_mmu.c     | 28 +---------------------------
 5 files changed, 3 insertions(+), 76 deletions(-)

(limited to 'arch/ppc/mm')

diff --git a/arch/ppc/mm/hashtable.S b/arch/ppc/mm/hashtable.S
index f09fa88db35..31d0a924317 100644
--- a/arch/ppc/mm/hashtable.S
+++ b/arch/ppc/mm/hashtable.S
@@ -74,12 +74,6 @@ _GLOBAL(hash_page_sync)
  */
 	.text
 _GLOBAL(hash_page)
-#ifdef CONFIG_PPC64BRIDGE
-	mfmsr	r0
-	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
-	MTMSRD(r0)
-	isync
-#endif
 	tophys(r7,0)			/* gets -KERNELBASE into r7 */
 #ifdef CONFIG_SMP
 	addis	r8,r7,mmu_hash_lock@h
@@ -303,7 +297,6 @@ Hash_base = 0xc0180000
 Hash_bits = 12				/* e.g. 256kB hash table */
 Hash_msk = (((1 << Hash_bits) - 1) * 64)

-#ifndef CONFIG_PPC64BRIDGE
 /* defines for the PTE format for 32-bit PPCs */
 #define PTE_SIZE	8
 #define PTEG_SIZE	64
@@ -317,21 +310,6 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 #define SET_V(r)	oris r,r,PTE_V@h
 #define CLR_V(r,t)	rlwinm r,r,0,1,31

-#else
-/* defines for the PTE format for 64-bit PPCs */
-#define PTE_SIZE	16
-#define PTEG_SIZE	128
-#define LG_PTEG_SIZE	7
-#define LDPTEu		ldu
-#define STPTE		std
-#define CMPPTE		cmpd
-#define PTE_H		2
-#define PTE_V		1
-#define TST_V(r)	andi. r,r,PTE_V
-#define SET_V(r)	ori r,r,PTE_V
-#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
-#endif /* CONFIG_PPC64BRIDGE */
-
 #define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
 #define HASH_RIGHT	31-LG_PTEG_SIZE

@@ -349,14 +327,8 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)

 /* Construct the high word of the PPC-style PTE (r5) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r5,r3,7,1,24	/* put VSID in 0x7fffff80 bits */
 	rlwimi	r5,r4,10,26,31	/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8		/* reduce vsid to 24 bits */
-	sldi	r5,r3,12	/* shift vsid into position */
-	rlwimi	r5,r4,16,20,24	/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r5)		/* set V (valid) bit */

 /* Get the address of the primary PTE group in the hash table (r3) */
@@ -540,14 +512,8 @@ _GLOBAL(flush_hash_pages)
 	add	r3,r3,r0	/* note code below trims to 24 bits */

 /* Construct the high word of the PPC-style PTE (r11) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r11,r3,7,1,24	/* put VSID in 0x7fffff80 bits */
 	rlwimi	r11,r4,10,26,31	/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8		/* reduce vsid to 24 bits */
-	sldi	r11,r3,12	/* shift vsid into position */
-	rlwimi	r11,r4,16,20,24	/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r11)		/* set V (valid) bit */

 #ifdef CONFIG_SMP
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index cb1c294fb93..386e000bcb7 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -412,14 +412,6 @@ void __init mem_init(void)
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */

-#ifdef CONFIG_PPC_OF
-	/* mark the RTAS pages as reserved */
-	if ( rtas_data )
-		for (addr = (ulong)__va(rtas_data);
-		     addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
-		     addr += PAGE_SIZE)
-			SetPageReserved(virt_to_page(addr));
-#endif
 	for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
 	     addr += PAGE_SIZE) {
 		if (!PageReserved(virt_to_page(addr)))
@@ -494,11 +486,6 @@ set_phys_avail(unsigned long total_memory)
 				  initrd_end - initrd_start, 1);
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_PPC_OF
-	/* remove the RTAS pages from the available memory */
-	if (rtas_data)
-		mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
-#endif
 }

 /* Mark some memory as reserved by removing it from phys_avail. */
diff --git a/arch/ppc/mm/mmu_context.c b/arch/ppc/mm/mmu_context.c
index a8816e0f6a8..b4a4b3f02a1 100644
--- a/arch/ppc/mm/mmu_context.c
+++ b/arch/ppc/mm/mmu_context.c
@@ -2,7 +2,7 @@
  * This file contains the routines for handling the MMU on those
  * PowerPC implementations where the MMU substantially follows the
  * architecture specification.  This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * 8260, and 83xx implementations but excludes the 8xx and 4xx.
  * -- paulus
  *
  * Derived from arch/ppc/mm/init.c:
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index 6ea9185fd12..a1924876cad 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -39,7 +39,7 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
 int io_bat_index;

-#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
+#if defined(CONFIG_6xx)
 #define HAVE_BATS	1
 #endif

diff --git a/arch/ppc/mm/ppc_mmu.c b/arch/ppc/mm/ppc_mmu.c
index 9a381ed5eb2..25bb6f3347c 100644
--- a/arch/ppc/mm/ppc_mmu.c
+++ b/arch/ppc/mm/ppc_mmu.c
@@ -2,7 +2,7 @@
  * This file contains the routines for handling the MMU on those
  * PowerPC implementations where the MMU substantially follows the
  * architecture specification.  This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * 8260, and 83xx implementations but excludes the 8xx and 4xx.
  * -- paulus
  *
  * Derived from arch/ppc/mm/init.c:
@@ -42,11 +42,7 @@ unsigned long _SDR1;

 union ubat {			/* BAT register values to be loaded */
 	BAT	bat;
-#ifdef CONFIG_PPC64BRIDGE
-	u64	word[2];
-#else
 	u32	word[2];
-#endif
 } BATS[4][2];			/* 4 pairs of IBAT, DBAT */

 struct batrange {		/* stores address ranges mapped by BATs */
@@ -83,9 +79,6 @@ unsigned long p_mapped_by_bats(unsigned long pa)

 unsigned long __init mmu_mapin_ram(void)
 {
-#ifdef CONFIG_POWER4
-	return 0;
-#else
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
 	unsigned long align;
@@ -122,7 +115,6 @@ unsigned long __init mmu_mapin_ram(void)
 	}

 	return done;
-#endif
 }

 /*
@@ -205,27 +197,10 @@ void __init MMU_init_hw(void)

 	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);

-#ifdef CONFIG_PPC64BRIDGE
-#define LG_HPTEG_SIZE	7		/* 128 bytes per HPTEG */
-#define SDR1_LOW_BITS	(lg_n_hpteg - 11)
-#define MIN_N_HPTEG	2048		/* min 256kB hash table */
-#else
 #define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
 #define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
 #define MIN_N_HPTEG	1024		/* min 64kB hash table */
-#endif
-
-#ifdef CONFIG_POWER4
-	/* The hash table has already been allocated and initialized
-	   in prom.c */
-	n_hpteg = Hash_size >> LG_HPTEG_SIZE;
-	lg_n_hpteg = __ilog2(n_hpteg);
-
-	/* Remove the hash table from the available memory */
-	if (Hash)
-		reserve_phys_mem(__pa(Hash), Hash_size);
-#else /* CONFIG_POWER4 */
 	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
@@ -248,7 +223,6 @@ void __init MMU_init_hw(void)
 	Hash = mem_pieces_find(Hash_size, Hash_size);
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
-#endif /* CONFIG_POWER4 */

 	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
--
cgit v1.2.3-70-g09d2

From bab70a4af737f623de5b034976a311055308ab86 Mon Sep 17 00:00:00 2001
From: Eugene Surovegin
Date: Tue, 28 Mar 2006 10:13:12 -0800
Subject: [PATCH] lock PTE before updating it in 440/BookE page fault handler

Fix the 44x and BookE page fault handler to correctly lock the PTE
before trying to pte_update() it; otherwise the PTE might be swapped
out after the pte_present() check but before the pte_update() call,
resulting in a corrupted PTE.  This can happen with preemption enabled
under low-memory conditions.
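
For reference, here is the race-free pattern the patch switches to,
condensed into a standalone sketch.  The helper name fixup_exec_fault()
is illustrative only; the real code lives in the do_page_fault() hunks
below and uses the mm APIs of this kernel (get_pteptr(), pte_lockptr(),
pte_update(), pte_unmap_unlock()):

	/* Sketch: only touch the PTE while holding its page-table lock. */
	static int fixup_exec_fault(struct mm_struct *mm, unsigned long address)
	{
		pte_t *ptep = NULL;
		pmd_t *pmdp;
		spinlock_t *ptl;

		if (!get_pteptr(mm, address, &ptep, &pmdp))
			return 0;

		/* Take the PTE lock *before* looking at the PTE, so the
		 * page cannot be swapped out between the pte_present()
		 * check and the pte_update() below.
		 */
		ptl = pte_lockptr(mm, pmdp);
		spin_lock(ptl);
		if (pte_present(*ptep)) {
			pte_update(ptep, 0, _PAGE_HWEXEC);
			_tlbie(address);
		}
		pte_unmap_unlock(ptep, ptl);
		return 1;
	}
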
Signed-off-by: Eugene Surovegin
Signed-off-by: Paul Mackerras
---
 arch/powerpc/mm/fault.c      | 30 +++++++++++++++++-------------
 arch/powerpc/mm/pgtable_32.c |  6 ++++--
 arch/ppc/mm/fault.c          | 30 +++++++++++++++++-------------
 arch/ppc/mm/pgtable.c        |  6 ++++--
 include/asm-ppc/pgtable.h    |  3 ++-
 5 files changed, 44 insertions(+), 31 deletions(-)

(limited to 'arch/ppc/mm')

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ec4adcb4bc2..5aea0909a5e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -267,25 +267,29 @@ good_area:
 #endif
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 		pte_t *ptep;
+		pmd_t *pmdp;

 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a write */
 	} else if (is_write) {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d296eb6b454..90628601fac 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
 	pmd_t	*pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;

 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 0217188ef46..8e08ca32531 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -202,6 +202,7 @@ good_area:
 	/* an exec  - 4xx/Book-E allows for per-page execute permission */
 	} else if (TRAP(regs) == 0x400) {
 		pte_t *ptep;
+		pmd_t *pmdp;

 #if 0
 		/* It would be nice to actually enforce the VM execute
@@ -215,21 +216,24 @@ good_area:
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a read */
 	} else {
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index a1924876cad..706bca8eb14 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
 	pmd_t	*pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 		if (pte) {
 			retval = 1;
 			*ptep = pte;
+			if (pmdp)
+				*pmdp = pmd;
 			/* XXX caller needs to do pte_unmap, yuck */
 		}
 	}
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;

 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index e1c62da12e7..570b355162f 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -837,7 +837,8 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
  */
 #define pgtable_cache_init()	do { } while (0)

-extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+		      pmd_t **pmdp);

 #include <asm-generic/pgtable.h>
--
cgit v1.2.3-70-g09d2
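
A side note on why get_pteptr() has to hand back the pmd at all: with
split page-table locks (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS), the PTE
lock lives in the struct page of the page-table page, which
pte_lockptr() locates through the pmd entry; otherwise it falls back to
the per-mm page_table_lock.  A simplified sketch of the two cases (not
the verbatim kernel definition):

	#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	/* One lock per page-table page: find it through the pmd. */
	#define pte_lockptr(mm, pmd)	(&pmd_page(*(pmd))->ptl)
	#else
	/* One lock per mm: the pmd argument is unused. */
	#define pte_lockptr(mm, pmd)	(&(mm)->page_table_lock)
	#endif

Callers that do not need the lock, like iopa() above, simply pass NULL
for the new pmdp argument.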