author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2008-10-31 15:08:35 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2008-11-27 23:53:47 +0000
commit     063b0a4207e43acbeff3d4b09f43e750e0212b48 (patch)
tree       eb2a2c1faa732c763102040478830111fc13f2a5  /arch/arm/mm/copypage-v3.c
parent     d73e60b7144a86baf0fdfcc9537a70bb4f72e11c (diff)
[ARM] copypage: provide our own copy_user_highpage()
We used to override the copy_user_page() function. However, this is not only
inefficient, it also causes additional complexity for highmem support, since
we convert from a struct page to a kernel direct mapped address and back to a
struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap entries
for pages which we're going to remap.

So, push the kmapping down into the copypage implementation files where it's
required.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
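For clarity, this is the shape each copypage implementation takes after the
change: a small C wrapper that kmaps both pages and hands the resulting kernel
addresses to the existing copy routine. The body below mirrors the
v3_copy_user_highpage() added in the diff that follows, with explanatory
comments added; it is a readable sketch of the pattern, not a substitute for
the diff itself.

void v3_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr)
{
	void *kto, *kfrom;

	/* Map both pages into kernel space; for non-highmem pages these
	 * kmap_atomic() calls simply return the direct-mapped address. */
	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);

	/* The pre-existing ARMv3 assembly copy loop, which now takes only
	 * the two kernel addresses. */
	v3_copy_user_page(kto, kfrom);

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}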
Diffstat (limited to 'arch/arm/mm/copypage-v3.c')
-rw-r--r--  arch/arm/mm/copypage-v3.c  23
1 file changed, 17 insertions, 6 deletions
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 184911089e6..52df8f04d3f 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -8,16 +8,15 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
/*
- * ARMv3 optimised copy_user_page
+ * ARMv3 optimised copy_user_highpage
*
* FIXME: do we need to handle cache stuff...
*/
-void __attribute__((naked))
-v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
stmfd sp!, {r4, lr} @ 2\n\
@@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
}
+void v3_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
+{
+ void *kto, *kfrom;
+
+ kto = kmap_atomic(to, KM_USER0);
+ kfrom = kmap_atomic(from, KM_USER1);
+ v3_copy_user_page(kto, kfrom);
+ kunmap_atomic(kfrom, KM_USER1);
+ kunmap_atomic(kto, KM_USER0);
+}
+
/*
* ARMv3 optimised clear_user_page
*
@@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v3_user_fns __initdata = {
.cpu_clear_user_page = v3_clear_user_page,
- .cpu_copy_user_page = v3_copy_user_page,
+ .cpu_copy_user_highpage = v3_copy_user_highpage,
};
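The function pointer installed above is reached through the arch's
copy_user_highpage() hook. A minimal sketch of that dispatch, assuming the
multi-processor build where cpu_user is the per-CPU function table (the names
follow this series, but the code is not quoted verbatim from it):

#define __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	/* Indirect through the per-CPU function table; on ARMv3 cores this
	 * resolves to the v3_copy_user_highpage() registered above. */
	cpu_user.cpu_copy_user_highpage(to, from, vaddr);
}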