Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig.debug    9
-rw-r--r--  arch/powerpc/mm/init_32.c     4
-rw-r--r--  arch/powerpc/mm/pgtable_32.c  52
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c  4
4 files changed, 68 insertions, 1 deletion
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 50f48f0c563..0f8bb86995b 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -18,6 +18,15 @@ config DEBUG_STACK_USAGE
 
           This option will slow down process creation somewhat.
 
+config DEBUG_PAGEALLOC
+        bool "Debug page memory allocations"
+        depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && PPC32
+        help
+          Unmap pages from the kernel linear mapping after free_pages().
+          This results in a large slowdown, but helps to find certain types
+          of memory corruptions.
+
+
 config HCALL_STATS
         bool "Hypervisor call instrumentation"
         depends on PPC_PSERIES && DEBUG_FS
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 0e53ca8f02f..5fce6ccecb8 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -115,6 +115,10 @@ void MMU_setup(void)
         if (strstr(cmd_line, "noltlbs")) {
                 __map_without_ltlbs = 1;
         }
+#ifdef CONFIG_DEBUG_PAGEALLOC
+        __map_without_bats = 1;
+        __map_without_ltlbs = 1;
+#endif
 }
 
 /*
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f75f2fc7bc7..8a2fc16ee0a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -451,3 +451,55 @@ exit:
         return ret;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+
+static int __change_page_attr(struct page *page, pgprot_t prot)
+{
+        pte_t *kpte;
+        pmd_t *kpmd;
+        unsigned long address;
+
+        BUG_ON(PageHighMem(page));
+        address = (unsigned long)page_address(page);
+
+        if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
+                return 0;
+        if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
+                return -EINVAL;
+        set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+        wmb();
+        flush_HPTE(0, address, pmd_val(*kpmd));
+        pte_unmap(kpte);
+
+        return 0;
+}
+
+/*
+ * Change the page attributes of an page in the linear mapping.
+ *
+ * THIS CONFLICTS WITH BAT MAPPINGS, DEBUG USE ONLY
+ */
+static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
+{
+        int i, err = 0;
+        unsigned long flags;
+
+        local_irq_save(flags);
+        for (i = 0; i < numpages; i++, page++) {
+                err = __change_page_attr(page, prot);
+                if (err)
+                        break;
+        }
+        local_irq_restore(flags);
+        return err;
+}
+
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+        if (PageHighMem(page))
+                return;
+
+        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 7cceb2c44cb..05066674a7a 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -85,8 +85,10 @@ unsigned long __init mmu_mapin_ram(void)
         unsigned long max_size = (256<<20);
         unsigned long align;
 
-        if (__map_without_bats)
+        if (__map_without_bats) {
+                printk(KERN_DEBUG "RAM mapped without BATs\n");
                 return 0;
+        }
 
         /* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
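
For context on how the pieces above fit together: the Kconfig help text describes the mechanism, and kernel_map_pages() is the hook the generic page allocator calls with enable=0 on free and enable=1 on allocation, so a freed lowmem page loses its PTE in the kernel linear mapping and any later access through a stale pointer should fault instead of silently corrupting whoever gets the page next. The fragment below is a minimal, hypothetical test-module sketch, not part of this patch; the module and function names are made up for illustration, and it assumes the page comes from lowmem (the PPC32 hook above deliberately skips highmem pages).

/*
 * Hypothetical sketch: a deliberately buggy module showing the class of
 * bug CONFIG_DEBUG_PAGEALLOC is meant to catch.  With the option enabled,
 * free_page() should leave the page unmapped in the linear mapping, so
 * the final store is expected to oops immediately rather than corrupt
 * reused memory.
 */
#include <linux/module.h>
#include <linux/gfp.h>

static int __init pagealloc_demo_init(void)
{
        unsigned long addr = __get_free_page(GFP_KERNEL);
        char *p = (char *)addr;

        if (!addr)
                return -ENOMEM;

        p[0] = 0x5a;            /* fine: the page is allocated and mapped */
        free_page(addr);        /* under DEBUG_PAGEALLOC the PTE is cleared here */
        p[0] = 0xa5;            /* use-after-free: expected to fault */

        return 0;
}

static void __exit pagealloc_demo_exit(void)
{
}

module_init(pagealloc_demo_init);
module_exit(pagealloc_demo_exit);
MODULE_LICENSE("GPL");

To try it, build a PPC32 kernel with CONFIG_DEBUG_KERNEL and CONFIG_DEBUG_PAGEALLOC enabled (per the depends line above, SOFTWARE_SUSPEND must be off), load a module along these lines, and the stale store should trigger an oops at load time.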