author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2012-09-10 02:52:57 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2012-09-17 16:31:53 +1000
commit     78f1dbde9fd020419313c2a0c3b602ea2427118f
tree       9a5c34555b6fba6809ec33798e1aa53ce1ce0f53  /arch/powerpc/include
parent     f033d659c3b931d8b2a16625155e20304e173c9f
powerpc/mm: Make some of the PGTABLE_RANGE dependency explicit
The slice array size and the slice mask size both depend on PGTABLE_RANGE. Derive SLICE_ARRAY_SIZE from PGTABLE_RANGE directly instead of hard-coding a matching value, document the constraint on SLICE_MASK_SIZE, and move a few declarations between headers so that mmu-hash64.h can include pgtable-ppc64.h (which provides PGTABLE_RANGE) without creating a circular include.
Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
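
For reference, the arithmetic the patch makes explicit works out as follows: with the 64TB (2^46) PGTABLE_RANGE this series targets there are 64 one-terabyte high slices, so at 4 bits of page-size encoding per slice the array needs PGTABLE_RANGE >> 41 = 32 bytes, and at 1 bit per slice the mask fits exactly in a u64 (8 bytes). A standalone user-space sketch of the derivation (illustrative only, not part of the patch):

	#include <stdio.h>

	#define PGTABLE_RANGE		(1UL << 46)	/* 64TB */
	/* 4 bits per 1TB slice, two slices per byte: 2^46 >> 41 = 32 */
	#define SLICE_ARRAY_SIZE	(PGTABLE_RANGE >> 41)
	/* 1 bit per 1TB slice: 64 slices fit exactly in a u64 */
	#define SLICE_MASK_SIZE		8

	int main(void)
	{
		printf("high slices:      %lu\n", PGTABLE_RANGE >> 40);		/* 64 */
		printf("SLICE_ARRAY_SIZE: %lu bytes\n", SLICE_ARRAY_SIZE);	/* 32 */
		printf("SLICE_MASK_SIZE:  %d bytes\n", SLICE_MASK_SIZE);	/* 8 */
		return 0;
	}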
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--   arch/powerpc/include/asm/mmu-hash64.h     | 15
-rw-r--r--   arch/powerpc/include/asm/mmu.h            |  9
-rw-r--r--   arch/powerpc/include/asm/page_64.h        | 12
-rw-r--r--   arch/powerpc/include/asm/pgtable-ppc64.h  | 17
-rw-r--r--   arch/powerpc/include/asm/pgtable.h        | 10
-rw-r--r--   arch/powerpc/include/asm/tlbflush.h       |  3
6 files changed, 31 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 3e887467a6d..9673f73eb8d 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -16,6 +16,13 @@
 #include <asm/page.h>
 
 /*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slices related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/pgtable-ppc64.h>
+
+/*
  * Segment table
  */
 
@@ -414,6 +421,8 @@ extern void slb_set_size(u16 size);
 	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
 	add	rt,rt,rx
 
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
 
 #ifndef __ASSEMBLY__
 
@@ -458,11 +467,7 @@ typedef struct {
 
 #ifdef CONFIG_PPC_MM_SLICES
 	u64 low_slices_psize;	/* SLB page size encodings */
-	/*
-	 * Right now we support 64TB and 4 bits for each
-	 * 1TB slice we need 32 bytes for 64TB.
-	 */
-	unsigned char high_slices_psize[32];  /* 4 bits per slice for now */
+	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
 #else
 	u16 sllp;		/* SLB page size encoding */
 #endif
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e8a26db2e8f..5e38eedea21 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 extern u64 ppc64_rma_size;
 #endif /* CONFIG_PPC64 */
 
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 6c9bef4cb6a..cd915d6b093 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,14 +78,18 @@ extern u64 ppc64_pft_size;
 #define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
+/*
+ * 1 bit per slice and we have one slice per 1TB
+ * Right now we support only 64TB.
+ * IF we change this we will have to change the type
+ * of high_slices
+ */
+#define SLICE_MASK_SIZE 8
+
 #ifndef __ASSEMBLY__
 struct slice_mask {
 	u16 low_slices;
-	/*
-	 * This should be derived out of PGTABLE_RANGE. For the current
-	 * max 64TB, u64 should be ok.
-	 */
 	u64 high_slices;
 };
 
 struct mm_struct;
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8af1cf27fd4..0182c203e41 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
 
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
-/* Some sanity checking */
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
-#endif
-
 /*
  * Define the address range of the kernel non-linear virtual area
  */
@@ -117,9 +106,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2e0e4110f7a..a9cbd3ba5c3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@
 
 struct mm_struct;
 
-#ifdef CONFIG_DEBUG_VM
-extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
-#else /* CONFIG_DEBUG_VM */
-static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
-{
-}
-#endif /* !CONFIG_DEBUG_VM */
-
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 
 #ifndef __ASSEMBLY__
 
+#include <asm/tlbflush.h>
+
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index fc02d1dee95..61a59271665 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
-			    pte_t *ptep, unsigned long pte, int huge);
-
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
 static inline void arch_enter_lazy_mmu_mode(void)
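
The packed high_slices_psize array that SLICE_ARRAY_SIZE now sizes is consumed half a byte per slice: slice 0 lives in the low nibble of byte 0, slice 1 in the high nibble, and so on, which is why PGTABLE_RANGE is shifted by 41 (40 bits for the 1TB slice size plus 1 for two slices per byte). A minimal user-space model of the lookup, with hypothetical names patterned on the indexing in arch/powerpc/mm/slice.c:

	#include <stdio.h>

	#define SLICE_HIGH_SHIFT	40			/* one slice per 1TB */
	#define SLICE_ARRAY_SIZE	((1UL << 46) >> 41)	/* 32 bytes for 64TB */

	/* model of mm->context.high_slices_psize: 4 bits per slice */
	static unsigned char high_slices_psize[SLICE_ARRAY_SIZE];

	/* return the 4-bit page-size encoding of the slice holding addr */
	static unsigned int get_high_slice_psize(unsigned long addr)
	{
		unsigned long index = addr >> SLICE_HIGH_SHIFT;

		return (high_slices_psize[index >> 1] >> ((index & 1) * 4)) & 0xf;
	}

	int main(void)
	{
		/* mark the second 1TB slice (index 1) with encoding 0x5 */
		high_slices_psize[0] |= 0x5 << 4;
		printf("psize @ 1TB: %#x\n", get_high_slice_psize(1UL << 40));
		return 0;
	}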