| author | Ingo Molnar <mingo@elte.hu> | 2008-12-31 08:31:57 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-12-31 08:31:57 +0100 |
| commit | a9de18eb761f7c1c860964b2e5addc1a35c7e861 (patch) | |
| tree | 886e75fdfd09690cd262ca69cb7f5d1d42b48602 /arch/arm/mm/copypage-xsc3.c | |
| parent | b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40 (diff) | |
| parent | 6a94cb73064c952255336cc57731904174b2c58f (diff) | |
Merge branch 'linus' into stackprotector
Conflicts:
arch/x86/include/asm/pda.h
kernel/fork.c
Diffstat (limited to 'arch/arm/mm/copypage-xsc3.c')

| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | arch/arm/mm/copypage-xsc3.c | 113 |

1 file changed, 113 insertions, 0 deletions
```diff
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
new file mode 100644
index 00000000000..39a994542ca
--- /dev/null
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -0,0 +1,113 @@
+/*
+ * linux/arch/arm/mm/copypage-xsc3.S
+ *
+ * Copyright (C) 2004 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adapted for 3rd gen XScale core, no more mini-dcache
+ * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * General note:
+ * We don't really want write-allocate cache behaviour for these functions
+ * since that will just eat through 8K of the cache.
+ */
+
+/*
+ * XSC3 optimised copy_user_highpage
+ *  r0 = destination
+ *  r1 = source
+ *
+ * The source page may have some clean entries in the cache already, but we
+ * can safely ignore them - break_cow() will flush them out of the cache
+ * if we eventually end up using our copied page.
+ */
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, r5, lr}		\n\
+	mov	lr, %0				\n\
+						\n\
+	pld	[r1, #0]			\n\
+	pld	[r1, #32]			\n\
+1:	pld	[r1, #64]			\n\
+	pld	[r1, #96]			\n\
+						\n\
+2:	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	subs	lr, lr, #1			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	bgt	1b				\n\
+	beq	2b				\n\
+						\n\
+	ldmfd	sp!, {r4, r5, pc}"
+	:
+	: "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	xsc3_mc_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * XScale optimised clear_user_page
+ *  r0 = destination
+ *  r1 = virtual user address of ultimate destination page
+ */
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ invalidate line\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	strd	r2, [%0], #8			\n\
+	subs	r1, r1, #1			\n\
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns xsc3_mc_user_fns __initdata = {
+	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
+	.cpu_copy_user_highpage	= xsc3_mc_copy_user_highpage,
+};
```
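The heart of the patch is the `mcr p15, 0, ip, c7, c6, 1` issued on the destination address in each copy block: it invalidates the destination cache line just before the `strd` stores land, so the copy avoids pulling the destination page through the cache via write-allocation, the behaviour the file's "General note" warns against. For contrast, here is a minimal C sketch of what the same two hooks look like with no cache tuning at all; the `plain_*` names are hypothetical (not part of this patch), and the sketch assumes the same-era `kmap_atomic(page, KM_USERn)` API already used in the file:

```c
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical, untuned equivalents of the hooks above, for illustration. */
static void plain_copy_user_highpage(struct page *to, struct page *from,
				     unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	/*
	 * Straight memcpy: correct, but it streams both pages through
	 * the D-cache, write-allocating the destination - exactly the
	 * cost the XSC3 invalidate-before-store loop avoids.
	 */
	memcpy(kto, kfrom, PAGE_SIZE);

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

static void plain_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	/* memset in place of the strd/invalidate loop. */
	memset(kaddr, 0, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
```

The `cpu_user_fns` table at the end of the patch is the dispatch point for such pairs: each ARM core type registers its own optimised `cpu_copy_user_highpage`/`cpu_clear_user_highpage` implementation, and the other copypage-*.c files under arch/arm/mm supply the variants for other cores.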