Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--   include/asm-powerpc/hugetlb.h        | 10
-rw-r--r--   include/asm-powerpc/io.h             |  5
-rw-r--r--   include/asm-powerpc/mmu-hash64.h     |  6
-rw-r--r--   include/asm-powerpc/page.h           |  3
-rw-r--r--   include/asm-powerpc/page_64.h        |  1
-rw-r--r--   include/asm-powerpc/pgalloc-64.h     |  4
-rw-r--r--   include/asm-powerpc/pgtable-4k.h     |  3
-rw-r--r--   include/asm-powerpc/pgtable-64k.h    |  2
-rw-r--r--   include/asm-powerpc/pgtable-ppc32.h  | 16
-rw-r--r--   include/asm-powerpc/pgtable-ppc64.h  |  8
10 files changed, 45 insertions, 13 deletions
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index be32ff02f4a..26f0d0ab27a 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -7,7 +7,7 @@
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
@@ -21,11 +21,13 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+static inline int prepare_hugepage_range(struct file *file,
+			unsigned long addr, unsigned long len)
 {
-	if (len & ~HPAGE_MASK)
+	struct hstate *h = hstate_file(file);
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
-	if (addr & ~HPAGE_MASK)
+	if (addr & ~huge_page_mask(h))
 		return -EINVAL;
 	return 0;
 }
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 8b627823f5f..77c7fa025e6 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -617,7 +617,8 @@ static inline void iosync(void)
  * and can be hooked by the platform via ppc_md
  *
  * * ioremap_flags allows to specify the page flags as an argument and can
- * also be hooked by the platform via ppc_md
+ * also be hooked by the platform via ppc_md. ioremap_prot is the exact
+ * same thing as ioremap_flags.
  *
  * * ioremap_nocache is identical to ioremap
  *
@@ -639,6 +640,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
 extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size,
 				   unsigned long flags);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_prot(addr, size, prot)	ioremap_flags((addr), (size), (prot))
+
 extern void iounmap(volatile void __iomem *addr);
 
 extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index d1dc16afb11..19c7a940349 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -194,9 +194,9 @@ extern int mmu_ci_restrictions;
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
- * The page size index of the huge pages for use by hugetlbfs
+ * The page size indexes of the huge pages for use by hugetlbfs
  */
-extern int mmu_huge_psize;
+extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];
 
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -281,6 +281,8 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			     unsigned long pstart, unsigned long mode,
 			     int psize, int ssize);
 extern void set_huge_psize(int psize);
+extern void add_gpage(unsigned long addr, unsigned long page_size,
+		      unsigned long number_of_pages);
 extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
 
 extern void htab_initialize(void);
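The hugetlb.h hunk above drops the single global HPAGE_MASK check in favour of a per-hstate mask, which is what allows hugetlbfs files to be backed by different page sizes (tracked by the new mmu_huge_psizes[] array). A minimal user-space sketch of that alignment rule follows; struct hstate here is a simplified stand-in, and the kernel's hstate_file() lookup and real field layout are not reproduced:

/*
 * Sketch: addr and len must be multiples of the huge page size of the
 * hstate backing the file, not of one compile-time HPAGE_SIZE.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define EINVAL		22

struct hstate {
	unsigned int order;	/* log2 of base pages per huge page */
};

static unsigned long huge_page_mask(const struct hstate *h)
{
	return ~((1UL << (h->order + PAGE_SHIFT)) - 1);
}

static int prepare_hugepage_range(const struct hstate *h,
				  unsigned long addr, unsigned long len)
{
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct hstate h_16m = { .order = 12 };	/* 16MB pages on a 4K base */
	struct hstate h_64k = { .order = 4 };	/* 64K pages */

	/* A 64K-aligned request passes for the 64K hstate but is
	 * rejected with -EINVAL for the 16MB one. */
	printf("%d %d\n",
	       prepare_hugepage_range(&h_64k, 0x10000, 0x10000),
	       prepare_hugepage_range(&h_16m, 0x10000, 0x10000));
	return 0;
}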
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index cffdf0eb0df..e088545cb3f 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -119,9 +119,6 @@ extern phys_addr_t kernstart_addr;
 /* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
 
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
-
 /*
  * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
  * "kernelness", use is_kernel_addr() - it should do what you want.
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 02fd80710e9..043bfdfe4f7 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -90,6 +90,7 @@ extern unsigned int HPAGE_SHIFT;
 
 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE		3
 
 #endif /* __ASSEMBLY__ */
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 68980990f62..812a1d8f35c 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -22,7 +22,7 @@ extern struct kmem_cache *pgtable_cache[];
 #define PUD_CACHE_NUM		1
 #define PMD_CACHE_NUM		1
 #define HUGEPTE_CACHE_NUM	2
-#define PTE_NONCACHE_NUM	3  /* from GFP rather than kmem_cache */
+#define PTE_NONCACHE_NUM	7  /* from GFP rather than kmem_cache */
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -119,7 +119,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-#define PGF_CACHENUM_MASK	0x3
+#define PGF_CACHENUM_MASK	0x7
 
 typedef struct pgtable_free {
 	unsigned long val;
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index fd2090dc1dc..c9601dfb4a1 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -51,6 +51,9 @@
 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
			 _PAGE_SECONDARY | _PAGE_GROUP_IX)
 
+/* There is no 4K PFN hack on 4K pages */
+#define _PAGE_4K_PFN	0
+
 /* PAGE_MASK gives the right answer below, but only by accident */
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index c5007712473..7e54adb3559 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -138,7 +138,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
 	unsigned __split = (psize == MMU_PAGE_4K ||			\
			    psize == MMU_PAGE_64K_AP);			\
 	shift = mmu_psize_defs[psize].shift;				\
-	for (index = 0; va < __end; index++, va += (1 << shift)) {	\
+	for (index = 0; va < __end; index++, va += (1L << shift)) {	\
		if (!__split || __rpte_sub_valid(rpte, index)) do {	\
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
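The pgtable-64k.h hunk above widens the per-subpage stride from "1 << shift" to "1L << shift". The likely motivation, stated here as an assumption, is page shifts of 32 or more (for instance the 16GB gigantic pages that add_gpage() registers, with shift 34), where left-shifting the int constant 1 is undefined. A standalone demonstration, assuming a 64-bit long as on ppc64 and an illustrative shift value:

/*
 * Shifting the int constant 1 by >= 32 bits is undefined behaviour;
 * the stride for a large huge page must be computed in a long.
 */
#include <stdio.h>

int main(void)
{
	unsigned int shift = 34;		/* e.g. a 16GB huge page */
	unsigned long stride = 1UL << shift;	/* correct 64-bit stride */

	/* With "1 << shift" the shift would exceed the width of int,
	 * which is undefined and in practice yields a bogus stride, so
	 * the subpage iterator would never walk the range correctly. */
	printf("stride = %#lx\n", stride);	/* 0x400000000 */
	return 0;
}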
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 3a96d001cb7..bdbab72f3eb 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -395,6 +395,12 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_EXEC
 #define _PAGE_EXEC	0
 #endif
+#ifndef _PAGE_ENDIAN
+#define _PAGE_ENDIAN	0
+#endif
+#ifndef _PAGE_COHERENT
+#define _PAGE_COHERENT	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #endif
@@ -405,6 +411,12 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+				 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
+				 _PAGE_USER | _PAGE_ACCESSED | \
+				 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
+				 _PAGE_EXEC | _PAGE_HWEXEC)
 /*
  * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
  * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -538,6 +550,10 @@ static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index ab98a9c80b2..ba8000352b9 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -117,6 +117,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
+#define PAGE_PROT_BITS	__pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \
+				 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+				 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
+				 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
 /* PTEIDX nibble */
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
@@ -262,6 +266,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
 	return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
 	return pte; }
+static inline unsigned long pte_pgprot(pte_t pte)
+{
+	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
+}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
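The new pte_pgprot() helpers in pgtable-ppc32.h and pgtable-ppc64.h mask a PTE down to the bits named in PAGE_PROT_BITS, so the protection of an existing mapping can be reused, for example by the ioremap_prot() alias added in io.h. A simplified sketch of that masking with illustrative bit values, not the real powerpc PTE layout:

/*
 * Sketch: extract only the protection/attribute bits from a PTE,
 * discarding the physical frame number and bookkeeping bits.
 */
#include <stdio.h>

typedef unsigned long pte_t;

#define _PAGE_USER	0x002UL		/* illustrative values */
#define _PAGE_RW	0x004UL
#define _PAGE_DIRTY	0x080UL
#define _PAGE_ACCESSED	0x100UL

#define PAGE_PROT_BITS	(_PAGE_USER | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

static unsigned long pte_pgprot(pte_t pte)
{
	/* keep the protection bits, drop the frame number */
	return pte & PAGE_PROT_BITS;
}

int main(void)
{
	pte_t pte = (0x1234UL << 12) | _PAGE_USER | _PAGE_RW | _PAGE_ACCESSED;

	printf("prot bits: %#lx\n", pte_pgprot(pte));	/* prints 0x106 */
	return 0;
}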