Diffstat (limited to 'arch/arm/mm/copypage-v4wt.c')
-rw-r--r-- | arch/arm/mm/copypage-v4wt.c | 23 | +++++++++++++++++------
1 file changed, 17 insertions, 6 deletions
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index d8ef39503ff..86c2cfdbde0 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -11,18 +11,17 @@
  *  the only supported cache operation.
  */
 #include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
 
 /*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
  *
  * Since we have writethrough caches, we don't have to worry about
  * dirty data in the cache.  However, we do have to ensure that
  * subsequent reads are up to date.
  */
-void __attribute__((naked))
-v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
 	stmfd	sp!, {r4, lr}			@ 2\n\
@@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 	: "I" (PAGE_SIZE / 64));
 }
 
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
 /*
  * ARMv4 optimised clear_user_page
  *
@@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr)
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
 	.cpu_clear_user_page	= v4wt_clear_user_page,
-	.cpu_copy_user_page	= v4wt_copy_user_page,
+	.cpu_copy_user_highpage	= v4wt_copy_user_highpage,
 };
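
For reference, the core of the change is the kmap_atomic()/kunmap_atomic() bracket that copy_user_highpage() implementations of this era use: map both struct page arguments to kernel virtual addresses (necessary once either page may live in highmem and have no permanent kernel mapping), run the CPU-tuned copy on those addresses, and unmap in reverse order. Below is a minimal sketch of that pattern; the function name is hypothetical and memcpy() merely stands in for the optimised assembly loop, while everything else mirrors the patch above.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical sketch of the copy_user_highpage() pattern adopted by
 * this patch.  The two-argument kmap_atomic(page, KM_USERx) form
 * matches kernels of this vintage; later kernels dropped the km_type
 * argument.  vaddr is accepted but unused here: with writethrough
 * caches there is no dirty cache alias to flush, as the comment in
 * the diff notes.
 */
static void sketch_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to, KM_USER0);	/* map destination page */
	kfrom = kmap_atomic(from, KM_USER1);	/* map source page */
	memcpy(kto, kfrom, PAGE_SIZE);		/* stand-in for the asm copy loop */
	kunmap_atomic(kfrom, KM_USER1);		/* unmap in reverse order */
	kunmap_atomic(kto, KM_USER0);
}

Making the naked v4wt_copy_user_page() static and dropping its unused vaddr argument, as the patch does, keeps the assembly routine a private helper; only the highpage wrapper is exposed through cpu_user_fns.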