author     Stephen Rothwell <sfr@canb.auug.org.au>   2005-11-04 16:58:59 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>   2005-11-04 16:58:59 +1100
commit     1970282f3b453b7aac3b192a44705dcb5277fd82 (patch)
tree       03993bd7b9e69619313da19c137cdde2df9416c5 /include
parent     9a0f78f63bc05d195aab781486ed57d1418d5f59 (diff)
powerpc: merge tlbflush.h

Merge the asm-ppc and asm-ppc64 copies of tlbflush.h into a single
include/asm-powerpc/tlbflush.h, selecting between the 64-bit hash-table
batching code and the 32-bit software-TLB code with CONFIG_PPC64.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Diffstat (limited to 'include')
-rw-r--r--   include/asm-powerpc/tlbflush.h | 146 ++++++++++++++
-rw-r--r--   include/asm-ppc/tlbflush.h     | 115 ----------
-rw-r--r--   include/asm-ppc64/tlbflush.h   |  52 -----
3 files changed, 146 insertions(+), 167 deletions(-)
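
[Editor's note] The interesting part of the merge is the 64-bit side, where
hash-table invalidations are not issued one at a time but queued per CPU in a
ppc64_tlb_batch and drained by flush_tlb_pending(). What follows is a minimal
user-space sketch of that batching pattern, for orientation only: pte_t, the
single static batch, and the printf-based invalidate are hypothetical
stand-ins for the kernel's per-CPU data and low-level flush primitives, not
real kernel APIs.

/* Minimal user-space model of the ppc64_tlb_batch deferred-flush
 * pattern from the diff below. Not kernel code. */
#include <stdio.h>

#define PPC64_TLB_BATCH_NR 192

typedef unsigned long pte_t;            /* stand-in for the kernel's pte_t */

struct ppc64_tlb_batch {
        unsigned long index;
        pte_t pte[PPC64_TLB_BATCH_NR];
        unsigned long vaddr[PPC64_TLB_BATCH_NR];
};

static struct ppc64_tlb_batch tlb_batch; /* one per CPU in the kernel */

/* Stand-in for __flush_tlb_pending(): invalidate every queued entry. */
static void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
        unsigned long i;

        for (i = 0; i < batch->index; i++)
                printf("invalidate va=%#lx\n", batch->vaddr[i]);
        batch->index = 0;
}

/* Queue one invalidation, draining the batch when it fills up. */
static void batch_add(unsigned long va, pte_t pte)
{
        struct ppc64_tlb_batch *batch = &tlb_batch;

        batch->pte[batch->index] = pte;
        batch->vaddr[batch->index] = va;
        if (++batch->index == PPC64_TLB_BATCH_NR)
                __flush_tlb_pending(batch);
}

/* Equivalent of flush_tlb_pending(): drain whatever is queued. */
static void flush_tlb_pending(void)
{
        if (tlb_batch.index)
                __flush_tlb_pending(&tlb_batch);
}

int main(void)
{
        batch_add(0x1000, 1);
        batch_add(0x2000, 2);
        flush_tlb_pending();    /* what flush_tlb_mm() boils down to on ppc64 */
        return 0;
}

Batching amortizes the cost of hash-table invalidation: flush_tlb_mm(),
flush_tlb_range() and flush_tlb_kernel_range() all just drain the pending
batch rather than walking pages individually.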
diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
new file mode 100644
index 00000000000..ca3655672bb
--- /dev/null
+++ b/include/asm-powerpc/tlbflush.h
@@ -0,0 +1,146 @@
+#ifndef _ASM_POWERPC_TLBFLUSH_H
+#define _ASM_POWERPC_TLBFLUSH_H
+/*
+ * TLB flushing:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
+ *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+struct mm_struct;
+
+#ifdef CONFIG_PPC64
+
+#include <linux/percpu.h>
+#include <asm/page.h>
+
+#define PPC64_TLB_BATCH_NR 192
+
+struct ppc64_tlb_batch {
+	unsigned long index;
+	struct mm_struct *mm;
+	pte_t pte[PPC64_TLB_BATCH_NR];
+	unsigned long vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int large;
+};
+DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
+
+extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
+
+static inline void flush_tlb_pending(void)
+{
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+
+	if (batch->index)
+		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
+}
+
+extern void flush_hash_page(unsigned long va, pte_t pte, int local);
+void flush_hash_range(unsigned long number, int local);
+
+#else /* CONFIG_PPC64 */
+
+#include <linux/mm.h>
+
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+/*
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
+#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
+#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
+#define flush_tlb_pending()	_tlbia()
+#endif
+
+/*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+#endif /* CONFIG_PPC64 */
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
+	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	flush_tlb_pending();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+#ifdef CONFIG_PPC64
+	flush_tlb_pending();
+#else
+	_tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+#ifndef CONFIG_PPC64
+	_tlbie(vmaddr);
+#endif
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	flush_tlb_pending();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_pending();
+}
+
+#else /* 6xx, 7xx, 7xxx cpus */
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#endif
+
+/*
+ * This is called in munmap when we have freed up some page-table
+ * pages.  We don't need to do anything here, there's nothing special
+ * about our page-table pages.  -- paulus
+ */
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
+
+#endif /*__KERNEL__ */
+#endif /* _ASM_POWERPC_TLBFLUSH_H */
diff --git a/include/asm-ppc/tlbflush.h b/include/asm-ppc/tlbflush.h
deleted file mode 100644
index 9afee4ffc83..00000000000
--- a/include/asm-ppc/tlbflush.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * include/asm-ppc/tlbflush.h
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-#ifndef _PPC_TLBFLUSH_H
-#define _PPC_TLBFLUSH_H
-
-#include <linux/config.h>
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-#if defined(CONFIG_4xx)
-
-#ifndef CONFIG_44x
-#define __tlbia()	asm volatile ("sync; tlbia; isync" : : : "memory")
-#else
-#define __tlbia	_tlbia
-#endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-
-#elif defined(CONFIG_FSL_BOOKE)
-
-/* TODO: determine if flush_tlb_range & flush_tlb_kernel_range
- * are best implemented as tlbia vs specific tlbie's */
-
-#define __tlbia()	_tlbia()
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-
-#elif defined(CONFIG_8xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
-				unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				unsigned long start, unsigned long end)
-	{ __tlbia(); }
-static inline void flush_tlb_kernel_range(unsigned long start,
-				unsigned long end)
-	{ __tlbia(); }
-
-#else /* 6xx, 7xx, 7xxx cpus */
-struct mm_struct;
-struct vm_area_struct;
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-#endif
-
-/*
- * This is called in munmap when we have freed up some page-table
- * pages.  We don't need to do anything here, there's nothing special
- * about our page-table pages.  -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* _PPC_TLBFLUSH_H */
-#endif /*__KERNEL__ */
diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h
deleted file mode 100644
index 626f505c6ee..00000000000
--- a/include/asm-ppc64/tlbflush.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _PPC64_TLBFLUSH_H
-#define _PPC64_TLBFLUSH_H
-
-/*
- * TLB flushing:
- *
- *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
- *  - flush_tlb_page(vma, vmaddr) flushes one page
- *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
- *  - flush_tlb_range(vma, start, end) flushes a range of pages
- *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
- */
-
-#include <linux/percpu.h>
-#include <asm/page.h>
-
-#define PPC64_TLB_BATCH_NR 192
-
-struct mm_struct;
-struct ppc64_tlb_batch {
-	unsigned long index;
-	struct mm_struct *mm;
-	pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-	unsigned int large;
-};
-DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
-
-extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-
-static inline void flush_tlb_pending(void)
-{
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
-
-	if (batch->index)
-		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
-}
-
-#define flush_tlb_mm(mm)			flush_tlb_pending()
-#define flush_tlb_page(vma, addr)		flush_tlb_pending()
-#define flush_tlb_page_nohash(vma, addr)	do { } while (0)
-#define flush_tlb_range(vma, start, end) \
-	do { (void)(start); flush_tlb_pending(); } while (0)
-#define flush_tlb_kernel_range(start, end)	flush_tlb_pending()
-#define flush_tlb_pgtables(mm, start, end)	do { } while (0)
-
-extern void flush_hash_page(unsigned long va, pte_t pte, int local);
-void flush_hash_range(unsigned long number, int local);
-
-#endif /* _PPC64_TLBFLUSH_H */
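
[Editor's note] The compile-time dispatch the merged header sets up can be
modelled in a few lines of ordinary C. This is a hedged sketch, not kernel
code: flush_tlb_pending() and _tlbie() are printf stand-ins for the real
primitives, and CONFIG_PPC64 is supplied with -DCONFIG_PPC64 at compile time
rather than by Kconfig.

/* Model of flush_tlb_page() from the merged header: on 64-bit the
 * per-page flush defers to the pending batch; on 32-bit software-TLB
 * parts the page is invalidated immediately with tlbie. */
#include <stdio.h>

static void flush_tlb_pending(void)
{
        puts("drain batched hash-table flushes");
}

static void _tlbie(unsigned long addr)
{
        printf("tlbie %#lx\n", addr);
}

static void flush_tlb_page(unsigned long vmaddr)
{
#ifdef CONFIG_PPC64
        (void)vmaddr;
        flush_tlb_pending();    /* 64-bit: rely on the pending batch */
#else
        _tlbie(vmaddr);         /* 32-bit SW-loaded TLB: flush just this page */
#endif
}

int main(void)
{
        flush_tlb_page(0xc0000000UL);
        return 0;
}

Building once with and once without -DCONFIG_PPC64 exercises both branches,
which is exactly the choice the single merged header now encodes in place of
the two separate asm-ppc and asm-ppc64 files.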