author     Russell King <rmk@dyn-67.arm.linux.org.uk>   2008-08-02 10:55:55 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2008-08-02 21:32:35 +0100
commit     4baa9922430662431231ac637adedddbb0cfb2d7
tree       e8fb765ce3e41c01f33de34a0bc9494f0ae19818 /include/asm-arm/tlb.h
parent     ff4db0a043a5dee7180bdffd178e61cd02812c68
[ARM] move include/asm-arm to arch/arm/include/asm
Move the platform-independent header files to arch/arm/include/asm,
leaving those in asm/arch* and asm/plat* alone.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
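
(Illustrative note, not part of the commit: in-kernel users reference these
headers through the asm/ namespace, so relocating the files changes no
include line in callers; only the build's header search path determines
which copy is found. A minimal sketch, assuming the 2.6.26-era setup:)

	#include <asm/tlb.h>	/* resolves to include/asm-arm/tlb.h before
				   this series, arch/arm/include/asm/tlb.h
				   after it -- callers are untouched */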
Diffstat (limited to 'include/asm-arm/tlb.h')
-rw-r--r--	include/asm-arm/tlb.h	94
1 file changed, 0 insertions(+), 94 deletions(-)
diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
deleted file mode 100644
index 36bd402a21c..00000000000
--- a/include/asm-arm/tlb.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * linux/include/asm-arm/tlb.h
- *
- * Copyright (C) 2002 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Experimentation shows that on a StrongARM, it appears to be faster
- * to use the "invalidate whole tlb" rather than "invalidate single
- * tlb" for this.
- *
- * This appears true for both the process fork+exit case, as well as
- * the munmap-large-area case.
- */
-#ifndef __ASMARM_TLB_H
-#define __ASMARM_TLB_H
-
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-#ifndef CONFIG_MMU
-
-#include <linux/pagemap.h>
-#include <asm-generic/tlb.h>
-
-#else /* !CONFIG_MMU */
-
-#include <asm/pgalloc.h>
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-	struct mm_struct	*mm;
-	unsigned int		fullmm;
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
-	tlb->mm = mm;
-	tlb->fullmm = full_mm_flush;
-
-	return tlb;
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
-
-#define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm)
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
-	if (!tlb->fullmm)
-		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
-}
-
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep)	pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp)	pmd_free((tlb)->mm, pmdp)
-
-#define tlb_migrate_finish(mm)	do { } while (0)
-
-#endif /* CONFIG_MMU */
-#endif
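
(For context, the mmu_gather interface deleted above is driven by the
generic mm code. The following is a minimal sketch, not code from this
patch: example_unmap is a hypothetical name and the page-table walk is
elided, but the tlb_* calls and their ordering follow the header shown
above.)

	#include <linux/mm.h>
	#include <asm/tlb.h>

	/* Sketch of a partial (munmap-style) teardown via this interface. */
	static void example_unmap(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
	{
		/* 0 = partial flush; exit_mmap() would pass 1 (fullmm) */
		struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

		tlb_start_vma(tlb, vma);  /* flush_cache_range() unless fullmm */

		/*
		 * ... walk the page tables over [start, end), clearing each
		 * PTE and handing its page to tlb_remove_page(tlb, page),
		 * which on this ARM implementation frees it immediately via
		 * free_page_and_swap_cache() ...
		 */

		tlb_end_vma(tlb, vma);    /* flush_tlb_range() unless fullmm */
		tlb_finish_mmu(tlb, start, end);  /* fullmm: flush_tlb_mm() */
	}

(Because fullmm is set only for whole-address-space teardown, the per-VMA
range flushes are skipped in that case and a single flush_tlb_mm() in
tlb_finish_mmu() does the work -- the StrongARM observation recorded in the
file's header comment.)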