Diffstat (limited to 'arch/arm/mm/copypage-v6.c')
 arch/arm/mm/copypage-v6.c | 61
 1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c076758..2ea75d0f504 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/highmem.h>

-#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
@@ -33,9 +33,16 @@ static DEFINE_SPINLOCK(v6_lock);
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+ struct page *from, unsigned long vaddr)
{
+ void *kto, *kfrom;
+
+ kfrom = kmap_atomic(from, KM_USER0);
+ kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
+ kunmap_atomic(kto, KM_USER1);
+ kunmap_atomic(kfrom, KM_USER0);
}
/*
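[Editor's note on the atomic kmap pattern above: kmap_atomic() with explicit KM slots is the fixmap-based mapping API of this kernel era; mappings are released here in reverse order of creation, which is good practice (and a hard requirement of the later slotless kmap_atomic API). A minimal standalone sketch of the same pattern — copy_highpage_sketch is a hypothetical name, not part of this commit:]

/* Hypothetical sketch, assuming the two-argument KM-slot API. */
static void copy_highpage_sketch(struct page *to, struct page *from)
{
	/* Map source first, destination second. */
	void *vfrom = kmap_atomic(from, KM_USER0);
	void *vto = kmap_atomic(to, KM_USER1);

	copy_page(vto, vfrom);

	/* Unmap in reverse order of mapping. */
	kunmap_atomic(vto, KM_USER1);
	kunmap_atomic(vfrom, KM_USER0);
}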
@@ -48,26 +55,32 @@ static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
}
/*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
*/
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
{
- unsigned int offset = CACHE_COLOUR(vaddr);
- unsigned long from, to;
- struct page *page = virt_to_page(kfrom);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- __flush_dcache_page(page_mapping(page), page);
-
- /*
- * Discard data in the kernel mapping for the new page.
- * FIXME: needs this MCRR to be supported.
- */
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
"r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+ struct page *from, unsigned long vaddr)
+{
+ unsigned int offset = CACHE_COLOUR(vaddr);
+ unsigned long kfrom, kto;
+
+ if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
+
+ /* FIXME: not highmem safe */
+ discard_old_kernel_data(page_address(to));
/*
* Now copy the page using the same cache colour as the
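[Editor's note: the aliasing path hinges on CACHE_COLOUR(vaddr). On an aliasing VIPT cache, two virtual mappings of the same physical page only hit the same cache lines if they share a colour — the virtual-index bits above PAGE_SHIFT. For orientation, a sketch of the arithmetic, mirroring how the ARM tree defines it elsewhere (reproduced here as an assumption, not part of this diff):]

#include <asm/shmparam.h>	/* SHMLBA is 4 * PAGE_SIZE on aliasing ARMv6 caches */

/* Which of the SHMLBA/PAGE_SIZE alias slots a virtual address falls in. */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

[With 4K pages and SHMLBA = 16K there are four colours (0-3), so the 'offset << PAGE_SHIFT' above selects one of four page-sized slots within each remapping window.]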
@@ -75,16 +88,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
*/
spin_lock(&v6_lock);
- set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+ set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+ set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
- from = from_address + (offset << PAGE_SHIFT);
- to = to_address + (offset << PAGE_SHIFT);
+ kfrom = from_address + (offset << PAGE_SHIFT);
+ kto = to_address + (offset << PAGE_SHIFT);
- flush_tlb_kernel_page(from);
- flush_tlb_kernel_page(to);
+ flush_tlb_kernel_page(kfrom);
+ flush_tlb_kernel_page(kto);
- copy_page((void *)to, (void *)from);
+ copy_page((void *)kto, (void *)kfrom);
spin_unlock(&v6_lock);
}
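[Editor's note: from_address, to_address and TOP_PTE() are defined near the top of this file and do not appear in these hunks. They look roughly like the following (quoted from the surrounding file from memory, so treat as approximate): two fixed kernel virtual windows, remapped under v6_lock so only one CPU uses them at a time.]

#define from_address	(0xffff8000)
#define to_address	(0xffffc000)

/* PTE in the kernel page table covering the top of the address space. */
#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

[The flush_tlb_kernel_page() calls are needed because an earlier copy may have left TLB entries for these windows pointing at different physical pages.]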
@@ -124,14 +137,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page_nonaliasing,
- .cpu_copy_user_page = v6_copy_user_page_nonaliasing,
+ .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};
static int __init v6_userpage_init(void)
{
if (cache_is_vipt_aliasing()) {
cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
- cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+ cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
}
return 0;
}

core_initcall(v6_userpage_init);
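[Editor's note: the __initdata table above installs the non-aliasing handlers as the default; v6_userpage_init() swaps in the aliasing ones when the cache is VIPT-aliasing. Generic mm code reaches whichever handler is installed through copy_user_highpage(). A sketch of that dispatch, abbreviated and given as an assumption about this era's asm/page.h rather than a quotation:]

/* Filled in at boot from v6_user_fns (or another CPU's table). */
struct cpu_user_fns cpu_user;

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	cpu_user.cpu_copy_user_highpage(to, from, vaddr);
}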