From bf72aeba2ffef599d1d386425c9e46b82be657cd Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Thu, 15 Jun 2006 10:45:18 +1000
Subject: powerpc: Use 64k pages without needing cache-inhibited large pages

Some POWER5+ machines can do 64k hardware pages for normal memory but
not for cache-inhibited pages.  This patch lets us use 64k hardware
pages for most user processes on such machines (assuming the kernel
has been configured with CONFIG_PPC_64K_PAGES=y).  User processes
start out using 64k pages and get switched to 4k pages if they use
any non-cacheable mappings.

With this, we use 64k pages for the vmalloc region and 4k pages for
the imalloc region.  If anything creates a non-cacheable mapping in
the vmalloc region, the vmalloc region will get switched to 4k pages.
I don't know of any driver other than the DRM that would do this,
though, and these machines don't have AGP.

When a region gets switched from 64k pages to 4k pages, we do not have
to clear out all the 64k HPTEs from the hash table immediately.  We
use the _PAGE_COMBO bit in the Linux PTE to indicate whether the page
was hashed in as a 64k page or a set of 4k pages.  If hash_page is
trying to insert a 4k page for a Linux PTE and it sees that it has
already been inserted as a 64k page, it first invalidates the 64k
HPTE before inserting the 4k HPTE.  The hash invalidation routines
also use the _PAGE_COMBO bit, to determine whether to look for a 64k
HPTE or a set of 4k HPTEs to remove.  With those two changes, we can
tolerate a mix of 4k and 64k HPTEs in the hash table, and they will
all get removed when the address space is torn down.

Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/mmu.h | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 88539742010..3a5ebe229af 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -165,6 +165,16 @@ struct mmu_psize_def
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
@@ -256,6 +266,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
 
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
 #endif /* __ASSEMBLY__ */
@@ -359,6 +370,8 @@ typedef unsigned long mm_context_id_t;
 
 typedef struct {
 	mm_context_id_t id;
+	u16 user_psize;		/* page size index */
+	u16 sllp;		/* SLB entry page size encoding */
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
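
To make the demotion policy and the _PAGE_COMBO bookkeeping concrete, here
is a minimal user-space sketch in C.  It is an illustration only, not the
kernel's implementation: the struct, the flag values, and the helpers
map_page(), hash_fault() and demote_to_4k() are invented for this example;
only _PAGE_COMBO, user_psize and mmu_ci_restrictions correspond to
identifiers added by this patch.  In the kernel, the demotion path would
also call slb_flush_and_rebolt() (declared above) so that new SLB entries
carry the 4k page size encoding.

/*
 * Hypothetical sketch, NOT kernel code: models the policy described
 * in the commit message.  Flag values and helper names are made up.
 */
#include <stdio.h>

#define MMU_PAGE_4K	0
#define MMU_PAGE_64K	1		/* page size indices */

#define _PAGE_NO_CACHE	0x020		/* illustrative bit values */
#define _PAGE_COMBO	0x10000000	/* hashed as a set of 4k HPTEs */

static int mmu_ci_restrictions = 1;	/* CPU can't do 64k CI pages */

struct mm_sketch {
	unsigned int user_psize;	/* mirrors mm->context.user_psize */
};

/* Demote the whole address space to 4k pages.  In the kernel this is
 * where slb_flush_and_rebolt() would run; stale 64k HPTEs are left in
 * the hash table and evicted lazily (see hash_fault below). */
static void demote_to_4k(struct mm_sketch *mm)
{
	mm->user_psize = MMU_PAGE_4K;
	printf("demoted mm to 4k pages\n");
}

/* Called when a mapping is created with the given PTE flags. */
static void map_page(struct mm_sketch *mm, unsigned long flags)
{
	if (mmu_ci_restrictions && (flags & _PAGE_NO_CACHE) &&
	    mm->user_psize == MMU_PAGE_64K)
		demote_to_4k(mm);
}

/* Simplified hash-fault path: tolerate a leftover 64k HPTE. */
static void hash_fault(struct mm_sketch *mm, unsigned long *pte)
{
	if (mm->user_psize == MMU_PAGE_4K && !(*pte & _PAGE_COMBO)) {
		/* PTE was last hashed in as a single 64k HPTE:
		 * invalidate it before any 4k HPTE goes in. */
		printf("invalidating stale 64k HPTE\n");
		*pte |= _PAGE_COMBO;	/* now tracked as a 4k set */
	}
	printf("inserting 4k HPTE\n");
}

int main(void)
{
	struct mm_sketch mm = { .user_psize = MMU_PAGE_64K };
	unsigned long pte = 0;		/* previously hashed as 64k */

	map_page(&mm, 0);		/* cacheable: stays 64k */
	map_page(&mm, _PAGE_NO_CACHE);	/* CI mapping: demote to 4k */
	hash_fault(&mm, &pte);		/* evict 64k HPTE, set combo */
	hash_fault(&mm, &pte);		/* already combo: 4k only */
	return 0;
}

The lazy scheme the commit message describes is visible in hash_fault():
a stale 64k HPTE is only evicted the first time the corresponding PTE is
re-hashed after demotion, so the demotion itself stays cheap and a mix of
4k and 64k HPTEs can coexist until the address space is torn down.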