author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-04-27 21:22:55 +0000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-05-06 16:49:26 +1000
commit	75c1d539ea13117cbe95e2c343e52af67d735145 (patch)
tree	f45295016163b1e1914dc1f48c307e64215f5f2c /arch/powerpc
parent	aef40e87d866355ffd279ab21021de733242d0d5 (diff)
powerpc: Fix CONFIG_DEBUG_PAGEALLOC on 603/e300
So we tried to speed things up a bit using flush_hash_pages() directly, but that falls over on 603 of course, meaning we fail to flush the TLB properly and we may even end up having it corrupt memory randomly by accessing a hash table that doesn't exist.

This removes the "optimization" by always going through flush_tlb_page() for now at least.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	4
1 file changed, 0 insertions, 4 deletions
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 767b0cf17d3..9fc02dc72ce 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -393,11 +393,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 		return -EINVAL;
 	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
 	wmb();
-#ifdef CONFIG_PPC_STD_MMU
-	flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
 	flush_tlb_page(NULL, address);
-#endif
 	pte_unmap(kpte);
 	return 0;
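
For reference, here is a sketch of how __change_page_attr() in arch/powerpc/mm/pgtable_32.c reads with this patch applied. Only the lines inside the hunk above come verbatim from the diff; the declarations and the early checks (BUG_ON(), v_mapped_by_bats()/v_mapped_by_tlbcam(), get_pteptr()) are reconstructed from that era's file and should be treated as an approximation rather than an exact copy.

static int __change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	/* Pages mapped by BATs or TLBCAM entries are left alone. */
	if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	wmb();
	/*
	 * Always flush through flush_tlb_page(): on hash-table MMUs
	 * (604 and later) it flushes the hash PTE as needed, while on
	 * 603/e300, which have no hash table, it invalidates the
	 * software-loaded TLB entry directly.  The old
	 * flush_hash_pages() shortcut walked a hash table that does
	 * not exist on 603, which is what this patch removes.
	 */
	flush_tlb_page(NULL, address);
	pte_unmap(kpte);
	return 0;
}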