author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-12-02 09:28:35 +1100
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-12-02 09:28:35 +1100
commit		5a7b4193e564d1611ecf1cd859aed60d5612d78f
tree		8831669121df3d50845718b848d7c6e4bc51be26 /arch/powerpc/include/asm
parent		86f9e097f340fd0fbd37afe92bd5453f5a84cbca
Revert "powerpc/mm: Fix bug in pagetable cache cleanup with CONFIG_PPC_SUBPAGE_PROT"
This reverts commit c045256d146800ea1d741a8e9e377dada6b7e195.
It breaks the build when CONFIG_PPC_SUBPAGE_PROT is not set. I will
commit a fixed version separately.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
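
The two declarations removed from mmu-hash64.h below hint at the breakage: with CONFIG_PPC_SUBPAGE_PROT set, subpage_prot_free() takes a struct mm_struct *, while the !CONFIG_PPC_SUBPAGE_PROT stub still takes a pgd_t *. A minimal sketch of that mismatch follows; the caller is hypothetical, and the exact failing call site and compiler error are not recorded in this commit, so treat the sketch as an assumption rather than the documented failure.

/* Sketch only: reproduces the mismatched declarations this revert
 * removes.  example_destroy_context() is a hypothetical caller that
 * merely illustrates how a call written against the mm_struct variant
 * would presumably trip the build when CONFIG_PPC_SUBPAGE_PROT is
 * not set. */
struct mm_struct;				/* opaque, as in the kernel headers */
typedef struct { unsigned long pgd; } pgd_t;	/* stand-in for the asm/page.h type */

#ifdef CONFIG_PPC_SUBPAGE_PROT
extern void subpage_prot_free(struct mm_struct *mm);	/* reverted signature */
#else
static inline void subpage_prot_free(pgd_t *pgd) {}	/* stale stub */
#endif

static inline void example_destroy_context(struct mm_struct *mm)
{
	subpage_prot_free(mm);	/* passes a struct mm_struct * where the stub
				 * expects a pgd_t * -> incompatible pointer
				 * types once the #else branch is compiled */
}
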
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--	arch/powerpc/include/asm/mmu-hash64.h		35
-rw-r--r--	arch/powerpc/include/asm/pgalloc-64.h		 5
-rw-r--r--	arch/powerpc/include/asm/pte-hash64-64k.h	37
3 files changed, 42 insertions, 35 deletions
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 9d9551840f4..7514ec2f854 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -373,38 +373,6 @@ extern void slb_set_size(u16 size);
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-/*
- * For the sub-page protection option, we extend the PGD with one of
- * these. Basically we have a 3-level tree, with the top level being
- * the protptrs array. To optimize speed and memory consumption when
- * only addresses < 4GB are being protected, pointers to the first
- * four pages of sub-page protection words are stored in the low_prot
- * array.
- * Each page of sub-page protection words protects 1GB (4 bytes
- * protects 64k). For the 3-level tree, each page of pointers then
- * protects 8TB.
- */
-struct subpage_prot_table {
-	unsigned long maxaddr;	/* only addresses < this are protected */
-	unsigned int **protptrs[2];
-	unsigned int *low_prot[4];
-};
-
-#define SBP_L1_BITS	(PAGE_SHIFT - 2)
-#define SBP_L2_BITS	(PAGE_SHIFT - 3)
-#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
-#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
-#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
-#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
-
-extern void subpage_prot_free(struct mm_struct *mm);
-extern void subpage_prot_init_new_context(struct mm_struct *mm);
-#else
-static inline void subpage_prot_free(pgd_t *pgd) {}
-static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
-
 typedef unsigned long mm_context_id_t;
 
 typedef struct {
@@ -418,9 +386,6 @@ typedef struct {
 	u16 sllp;		/* SLB page size encoding */
 #endif
 	unsigned long vdso_base;
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-	struct subpage_prot_table spt;
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
 } mm_context_t;
 
 
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 605f5c5398d..5c1cd73dafa 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -28,6 +28,10 @@
  */
 #define MAX_PGTABLE_INDEX_SIZE	0xf
 
+#ifndef CONFIG_PPC_SUBPAGE_PROT
+static inline void subpage_prot_free(pgd_t *pgd) {}
+#endif
+
 extern struct kmem_cache *pgtable_cache[];
 #define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
 
@@ -38,6 +42,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
+	subpage_prot_free(pgd);
 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index c4490f9c67c..82b72207c51 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -76,4 +76,41 @@
 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,	\
 			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
 
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+	unsigned long maxaddr;	/* only addresses < this are protected */
+	unsigned int **protptrs[2];
+	unsigned int *low_prot[4];
+};
+
+#undef PGD_TABLE_SIZE
+#define PGD_TABLE_SIZE ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
+			sizeof(struct subpage_prot_table))
+
+#define SBP_L1_BITS	(PAGE_SHIFT - 2)
+#define SBP_L2_BITS	(PAGE_SHIFT - 3)
+#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
+#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
+
+extern void subpage_prot_free(pgd_t *pgd);
+
+static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
+{
+	return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
+}
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
 #endif /* __ASSEMBLY__ */
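
The restored block above tacks struct subpage_prot_table straight onto the end of the PGD: PGD_TABLE_SIZE is enlarged to cover it, and pgd_subpage_prot() simply points past the PTRS_PER_PGD entries. For orientation, here is a sketch of how the SBP_* constants split an effective address into the 3-level walk described in the restored comment, assuming 64K pages (PAGE_SHIFT == 16) and a 64-bit unsigned long; the function name subpage_prot_lookup() and the freestanding definitions are illustrative assumptions, not code from this commit.

#define PAGE_SHIFT	16				/* 64K pages assumed */
#define SBP_L1_BITS	(PAGE_SHIFT - 2)		/* 16K u32 words per page */
#define SBP_L2_BITS	(PAGE_SHIFT - 3)		/* 8K pointers per page */
#define SBP_L1_COUNT	(1UL << SBP_L1_BITS)
#define SBP_L2_COUNT	(1UL << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)	/* one page of words = 1GB */
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)	/* one page of pointers = 8TB */

struct subpage_prot_table {
	unsigned long maxaddr;		/* only addresses < this are protected */
	unsigned int **protptrs[2];	/* top level: one entry per 8TB */
	unsigned int *low_prot[4];	/* shortcut for the first 4GB */
};

/* Return the protection word for the 64K page containing 'ea', or 0
 * (no extra restriction) when no table covers that address.  'ea' is
 * assumed to be a valid user address below 16TB. */
static unsigned int subpage_prot_lookup(struct subpage_prot_table *spt,
					unsigned long ea)
{
	unsigned int **pointers;
	unsigned int *prot_page;

	if (ea >= spt->maxaddr)
		return 0;

	if (ea < 0x100000000UL) {
		/* addresses below 4GB use low_prot[] directly */
		pointers = spt->low_prot;
	} else {
		/* level 3: pick the page of pointers for this 8TB slice */
		pointers = spt->protptrs[ea >> SBP_L3_SHIFT];
		if (!pointers)
			return 0;
	}

	/* level 2: pick the page of protection words for this 1GB slice */
	prot_page = pointers[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!prot_page)
		return 0;

	/* level 1: one 4-byte word per 64K page */
	return prot_page[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}

Keeping the table appended to the PGD, rather than embedding it in mm_context_t as the reverted commit did, leaves the existing pgd_free() path responsible for tearing it down, which is why the revert also re-adds the subpage_prot_free(pgd) call in pgalloc-64.h.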