author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2007-05-08 16:27:27 +1000
committer  Paul Mackerras <paulus@samba.org>                   2007-05-09 16:35:00 +1000
commit     16f1c746755836aa823658000493cdab8ce7b098 (patch)
tree       32f534c771b69400839f5795f979c47eac51f923 /arch/powerpc/mm
parent     b15f792fafb7e0524907ddd9e035d73dddeed89c (diff)
[POWERPC] Small fixes & cleanups in segment page size demotion
The code for demoting segments to 4K had some issues: for example, when
using the _PAGE_4K_PFN flag, the first CPU to hit it would do the
demotion, but other CPUs hitting the same page wouldn't properly flush
their SLBs if mmu_ci_restrictions isn't set.  There are also potential
issues with hash_preload not handling _PAGE_4K_PFN.  All of these are
non-issues on current hardware but might bite us in the future.

This patch thus fixes it by:

 - Taking the test comparing the mm and current CPU context page
   sizes to decide to flush SLBs out of the mmu_ci_restrictions test,
   since that can also be triggered by _PAGE_4K_PFN pages

 - Due to the above being done all the time, demote_segment_4k
   doesn't need to update the context and flush the SLB

 - demote_segment_4k can be static and doesn't need an EXPORT_SYMBOL

 - Making hash_preload ignore anything that has either _PAGE_4K_PFN
   or _PAGE_NO_CACHE set, thus avoiding duplication of the complicated
   logic in hash_page() (and possibly making hash_preload a little bit
   faster for the normal case)

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
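Before the diff itself, here is a small, self-contained illustration of the first point above. It is a toy model in plain C, not kernel code: the names mm_model, paca_model, fault_model and demote_segment_4k_model are invented for this sketch, and the booleans merely stand in for slb_flush_and_rebolt() and the paca's cached context. It shows why taking the page-size comparison out of the mmu_ci_restrictions test matters: once the comparison runs on every fault, a CPU that did not perform the demotion itself still notices that its cached page size no longer matches the mm and flushes its SLB.

#include <stdio.h>
#include <stdbool.h>

/* Toy model: two "CPUs" share one mm.  CPU 0 demotes the mm to 4K
 * pages (as hash_page() does for a _PAGE_4K_PFN mapping); CPU 1 only
 * notices via the unconditional page-size comparison on its next fault.
 */

enum psize { MMU_PAGE_4K, MMU_PAGE_64K };

struct mm_model   { enum psize user_psize; };  /* stands in for mm->context     */
struct paca_model {                            /* stands in for this CPU's paca */
	enum psize user_psize;
	bool slb_flushed;
};

static void demote_segment_4k_model(struct mm_model *mm)
{
	if (mm->user_psize == MMU_PAGE_4K)
		return;
	/* After this patch, demotion only updates the mm; flushing the
	 * SLB is left to the common comparison below. */
	mm->user_psize = MMU_PAGE_4K;
}

static void fault_model(struct paca_model *cpu, struct mm_model *mm,
			bool is_4k_pfn_mapping)
{
	if (is_4k_pfn_mapping)
		demote_segment_4k_model(mm);

	/* Post-patch behaviour: this check is no longer nested under
	 * mmu_ci_restrictions, so every CPU touching the mm runs it. */
	if (cpu->user_psize != mm->user_psize) {
		cpu->user_psize = mm->user_psize;
		cpu->slb_flushed = true;       /* models slb_flush_and_rebolt() */
	}
}

int main(void)
{
	struct mm_model mm = { MMU_PAGE_64K };
	struct paca_model cpu0 = { MMU_PAGE_64K, false };
	struct paca_model cpu1 = { MMU_PAGE_64K, false };

	fault_model(&cpu0, &mm, true);   /* CPU 0 triggers the demotion  */
	fault_model(&cpu1, &mm, false);  /* CPU 1 faults on the same mm  */

	printf("cpu0 flushed: %d, cpu1 flushed: %d\n",
	       cpu0.slb_flushed, cpu1.slb_flushed);
	return 0;
}

Running the model prints "cpu0 flushed: 1, cpu1 flushed: 1". With the pre-patch ordering, where the comparison only happened inside the mmu_ci_restrictions block, cpu1 would never flush when mmu_ci_restrictions is 0, which is the stale-SLB scenario the message above describes.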
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c |  87
1 file changed, 46 insertions(+), 41 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 9b226fa7006..71092c2f65c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -596,22 +596,18 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
  * Demote a segment to using 4k pages.
  * For now this makes the whole process use 4k pages.
  */
-void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
-{
 #ifdef CONFIG_PPC_64K_PAGES
+static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
+{
 	if (mm->context.user_psize == MMU_PAGE_4K)
 		return;
 	mm->context.user_psize = MMU_PAGE_4K;
 	mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
-	get_paca()->context = mm->context;
-	slb_flush_and_rebolt();
 #ifdef CONFIG_SPE_BASE
 	spu_flush_all_slbs(mm);
 #endif
-#endif
 }
-
-EXPORT_SYMBOL_GPL(demote_segment_4k);
+#endif /* CONFIG_PPC_64K_PAGES */
 
 /* Result code is:
  * 0 - handled
@@ -711,40 +707,42 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		psize = MMU_PAGE_4K;
 	}
 
-	if (mmu_ci_restrictions) {
-		/* If this PTE is non-cacheable, switch to 4k */
-		if (psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
-			if (user_region) {
-				demote_segment_4k(mm, ea);
-				psize = MMU_PAGE_4K;
-			} else if (ea < VMALLOC_END) {
-				/*
-				 * some driver did a non-cacheable mapping
-				 * in vmalloc space, so switch vmalloc
-				 * to 4k pages
-				 */
-				printk(KERN_ALERT "Reducing vmalloc segment "
-				       "to 4kB pages because of "
-				       "non-cacheable mapping\n");
-				psize = mmu_vmalloc_psize = MMU_PAGE_4K;
-			}
+	/* If this PTE is non-cacheable and we have restrictions on
+	 * using non cacheable large pages, then we switch to 4k
+	 */
+	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
+	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+		if (user_region) {
+			demote_segment_4k(mm, ea);
+			psize = MMU_PAGE_4K;
+		} else if (ea < VMALLOC_END) {
+			/*
+			 * some driver did a non-cacheable mapping
+			 * in vmalloc space, so switch vmalloc
+			 * to 4k pages
+			 */
+			printk(KERN_ALERT "Reducing vmalloc segment "
+			       "to 4kB pages because of "
+			       "non-cacheable mapping\n");
+			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
 #ifdef CONFIG_SPE_BASE
 			spu_flush_all_slbs(mm);
 #endif
 		}
-		if (user_region) {
-			if (psize != get_paca()->context.user_psize) {
-				get_paca()->context = mm->context;
-				slb_flush_and_rebolt();
-			}
-		} else if (get_paca()->vmalloc_sllp !=
-			   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
-			get_paca()->vmalloc_sllp =
-				mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	}
+	if (user_region) {
+		if (psize != get_paca()->context.user_psize) {
+			get_paca()->context.user_psize =
+				mm->context.user_psize;
 			slb_flush_and_rebolt();
 		}
+	} else if (get_paca()->vmalloc_sllp !=
+		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+		get_paca()->vmalloc_sllp =
+			mmu_psize_defs[mmu_vmalloc_psize].sllp;
+		slb_flush_and_rebolt();
 	}
+
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else
@@ -780,13 +778,26 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
" trap=%lx\n", mm, mm->pgd, ea, access, trap);
- /* Get PTE, VSID, access mask */
+ /* Get Linux PTE if available */
pgdir = mm->pgd;
if (pgdir == NULL)
return;
ptep = find_linux_pte(pgdir, ea);
if (!ptep)
return;
+
+#ifdef CONFIG_PPC_64K_PAGES
+ /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
+ * a 64K kernel), then we don't preload, hash_page() will take
+ * care of it once we actually try to access the page.
+ * That way we don't have to duplicate all of the logic for segment
+ * page size demotion here
+ */
+ if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
+ return;
+#endif /* CONFIG_PPC_64K_PAGES */
+
+ /* Get VSID */
vsid = get_vsid(mm->context.id, ea);
/* Hash it in */
@@ -797,12 +808,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 #ifndef CONFIG_PPC_64K_PAGES
 	__hash_page_4K(ea, access, vsid, ptep, trap, local);
 #else
-	if (mmu_ci_restrictions) {
-		/* If this PTE is non-cacheable, switch to 4k */
-		if (mm->context.user_psize == MMU_PAGE_64K &&
-		    (pte_val(*ptep) & _PAGE_NO_CACHE))
-			demote_segment_4k(mm, ea);
-	}
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		__hash_page_64K(ea, access, vsid, ptep, trap, local);
 	else