author     Rusty Russell <rusty@rustcorp.com.au>   2008-12-30 08:02:35 +1030
committer  Rusty Russell <rusty@rustcorp.com.au>   2008-12-30 08:02:35 +1030
commit     33edcf133ba93ecba2e4b6472e97b689895d805c (patch)
tree       327d7a20acef64005e7c5ccbfa1265be28aeb6ac /arch/powerpc/mm/mmu_decl.h
parent     be4d638c1597580ed2294d899d9f1a2cd10e462c (diff)
parent     3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/powerpc/mm/mmu_decl.h')
-rw-r--r--  arch/powerpc/mm/mmu_decl.h  65
1 file changed, 48 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index fab3cfad409..4314b39b6fa 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
+#ifdef CONFIG_PPC_MMU_NOHASH
+
+/*
+ * On 40x and 8xx, we directly inline tlbia and tlbivax
+ */
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+static inline void _tlbil_all(void)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+static inline void _tlbil_pid(unsigned int pid)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else /* CONFIG_40x || CONFIG_8xx */
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+#endif /* !(CONFIG_40x || CONFIG_8xx) */
+
+/*
+ * On 8xx, we directly inline tlbie, on others, it's extern
+ */
+#ifdef CONFIG_8xx
+static inline void _tlbil_va(unsigned long address, unsigned int pid)
+{
+	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#else /* CONFIG_8xx */
+extern void _tlbil_va(unsigned long address, unsigned int pid);
+#endif /* CONFIG_8xx */
+
+/*
+ * As of today, we don't support tlbivax broadcast on any
+ * implementation. When that becomes the case, this will be
+ * an extern.
+ */
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+{
+	BUG();
+}
+
+#else /* CONFIG_PPC_MMU_NOHASH */
+
 extern void hash_preload(struct mm_struct *mm, unsigned long ea,
 			 unsigned long access, unsigned long trap);
 
 
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#endif /* CONFIG_PPC_MMU_NOHASH */
+
 #ifdef CONFIG_PPC32
 extern void mapin_ram(void);
 extern int map_page(unsigned long va, phys_addr_t pa, int flags);
@@ -58,17 +106,14 @@ extern phys_addr_t lowmem_end_addr;
  * architectures. -- Dan
  */
 #if defined(CONFIG_8xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va, 0 /* 8xx doesn't care about PID */)
 #define MMU_init_hw()		do { } while(0)
 #define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
-#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 
 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);
@@ -77,18 +122,4 @@ extern void adjust_total_lowmem(void);
 /* anything 32-bit except 4xx or 8xx */
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
-
-/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
- * which includes all new 82xx processors.  We need tlbie/tlbsync here
- * in that case (I think). -- Dan.
- */
-static inline void flush_HPTE(unsigned context, unsigned long va,
-			      unsigned long pdval)
-{
-	if ((Hash != 0) &&
-	    cpu_has_feature(CPU_FTR_HPTE_TABLE))
-		flush_hash_pages(0, va, pdval, 1);
-	else
-		_tlbie(va);
-}
 #endif
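
For readers skimming the hunks above: the patch removes the old per-platform `flush_HPTE()` macros and introduces a small family of low-level TLB-invalidation primitives (`_tlbil_all()`, `_tlbil_pid()`, `_tlbil_va()`, `_tlbivax_bcast()`) that are inlined on 40x/8xx and external elsewhere. The sketch below illustrates how a caller might drive that API; it is a hypothetical, standalone mock-up, not code from this patch: `demo_flush_page()` and `demo_flush_context()` are invented names, the `printf` bodies stand in for the real inline assembly, and 4 KiB pages are assumed.

```c
#include <stdio.h>

/* Stand-ins for the 40x/8xx inline variants from the hunk above;
 * the real bodies are the asm sequences shown in the diff. */
static inline void _tlbil_all(void)
{
	/* Real version: asm volatile ("sync; tlbia; isync" : : : "memory"); */
	printf("tlbia: invalidate the entire TLB\n");
}

static inline void _tlbil_va(unsigned long address, unsigned int pid)
{
	/* Real version: asm volatile ("tlbie %0; sync" : : "r" (address) : "memory"); */
	printf("tlbie: invalidate VA 0x%lx (pid %u is ignored on 8xx)\n",
	       address, pid);
}

/* Hypothetical caller: drop the translation for a single page. */
static void demo_flush_page(unsigned long va, unsigned int pid)
{
	_tlbil_va(va & ~0xfffUL, pid);	/* align to a 4 KiB page boundary */
}

/* Hypothetical caller: tear down a whole address space. On 40x/8xx
 * there is no per-PID invalidate, so _tlbil_pid() (and this sketch)
 * falls back to flushing everything, which is always correct, just slow. */
static void demo_flush_context(void)
{
	_tlbil_all();
}

int main(void)
{
	demo_flush_page(0x10002345UL, 7);
	demo_flush_context();
	return 0;
}
```

The design point the diff encodes is visible even in the mock-up: callers program against one API, and the #ifdef ladder decides whether a call compiles down to a single inlined instruction (40x/8xx) or an out-of-line assembly helper (other non-hash MMUs).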