From 17968fbbd19f1bb281ee4eb2548764ac5664c4ec Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sun, 27 May 2012 19:54:03 +0000
Subject: powerpc: 64bit optimised __clear_user

I noticed __clear_user high up in a profile of one of my RAID stress
tests. The testcase was doing a dd from /dev/zero which ends up
calling __clear_user.

__clear_user is basically a loop with a single 4 byte store which is
horribly slow. We can do much better by aligning the destination and
doing 32 bytes of 8 byte stores in a loop.

The following testcase was used to verify the patch:

http://ozlabs.org/~anton/junkcode/stress_clear_user.c

To show the improvement in performance I ran a dd from /dev/zero to
/dev/null on a POWER7 box:

Before:

# dd if=/dev/zero of=/dev/null bs=1M count=10000
10485760000 bytes (10 GB) copied, 3.72379 s, 2.8 GB/s

After:

# time dd if=/dev/zero of=/dev/null bs=1M count=10000
10485760000 bytes (10 GB) copied, 0.728318 s, 14.4 GB/s

Over 5x faster.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/lib/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/lib/Makefile')

diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 7735a2c2e6d..f049e339e45 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_HAS_IOMEM)	+= devres.o
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
 			   checksum_wrappers_64.o hweight_64.o \
-			   copyuser_power7.o
+			   copyuser_power7.o string_64.o
 obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
 obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
--
cgit v1.2.3-70-g09d2

From 6f7839e542ee18770288be75114bd2e6771e1421 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Tue, 29 May 2012 19:31:24 +0000
Subject: powerpc: Rename copyuser_power7_vmx.c to vmx-helper.c

Subsequent patches will add more VMX library functions and it makes
sense to keep all the c-code helper functions in the one file.
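A note on the first patch above (64bit optimised __clear_user): the rewritten routine itself lives in string_64.S, which this Makefile-limited listing does not show. The approach the commit message describes (align the destination, then clear 32 bytes per iteration with 8-byte stores) looks roughly like the illustrative C below; the real routine is hand-written powerpc64 assembly and also has to handle user-access faults.

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch only, not the kernel's __clear_user. */
static void clear_mem_sketch(void *to, size_t n)
{
	unsigned char *p = to;

	/* Byte stores until the destination is 8-byte aligned. */
	while (n && ((uintptr_t)p & 7)) {
		*p++ = 0;
		n--;
	}

	/* Main loop: 32 bytes per iteration using 8-byte stores. */
	while (n >= 32) {
		uint64_t *q = (uint64_t *)p;

		q[0] = 0;
		q[1] = 0;
		q[2] = 0;
		q[3] = 0;
		p += 32;
		n -= 32;
	}

	/* Trailing bytes. */
	while (n--)
		*p++ = 0;
}
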
Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/lib/Makefile | 2 +- arch/powerpc/lib/copyuser_power7.S | 8 +++--- arch/powerpc/lib/copyuser_power7_vmx.c | 51 ---------------------------------- arch/powerpc/lib/vmx-helper.c | 51 ++++++++++++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 56 deletions(-) delete mode 100644 arch/powerpc/lib/copyuser_power7_vmx.c create mode 100644 arch/powerpc/lib/vmx-helper.c (limited to 'arch/powerpc/lib/Makefile') diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index f049e339e45..1eb94c7af6c 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o ifeq ($(CONFIG_PPC64),y) obj-$(CONFIG_SMP) += locks.o -obj-$(CONFIG_ALTIVEC) += copyuser_power7_vmx.o +obj-$(CONFIG_ALTIVEC) += vmx-helper.o endif obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 9c982cdec3c..f560f83a3ab 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -61,7 +61,7 @@ ld r15,STK_REG(r15)(r1) ld r14,STK_REG(r14)(r1) .Ldo_err3: - bl .exit_vmx_copy + bl .exit_vmx_usercopy ld r0,STACKFRAMESIZE+16(r1) mtlr r0 b .Lexit @@ -290,7 +290,7 @@ err1; stb r0,0(r3) mflr r0 std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_copy + bl .enter_vmx_usercopy cmpwi r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) @@ -507,7 +507,7 @@ err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_copy /* tail call optimise */ + b .exit_vmx_usercopy /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ @@ -710,5 +710,5 @@ err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_copy /* tail call optimise */ + b .exit_vmx_usercopy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ diff --git a/arch/powerpc/lib/copyuser_power7_vmx.c b/arch/powerpc/lib/copyuser_power7_vmx.c deleted file mode 100644 index bf2654f2b68..00000000000 --- a/arch/powerpc/lib/copyuser_power7_vmx.c +++ /dev/null @@ -1,51 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2011 - * - * Authors: Sukadev Bhattiprolu - * Anton Blanchard - */ -#include -#include -#include - -int enter_vmx_copy(void) -{ - if (in_interrupt()) - return 0; - - /* This acts as preempt_disable() as well and will make - * enable_kernel_altivec(). We need to disable page faults - * as they can call schedule and thus make us lose the VMX - * context. So on page faults, we just fail which will cause - * a fallback to the normal non-vmx copy. 
- */ - pagefault_disable(); - - enable_kernel_altivec(); - - return 1; -} - -/* - * This function must return 0 because we tail call optimise when calling - * from __copy_tofrom_user_power7 which returns 0 on success. - */ -int exit_vmx_copy(void) -{ - pagefault_enable(); - return 0; -} diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c new file mode 100644 index 00000000000..753a839f4a1 --- /dev/null +++ b/arch/powerpc/lib/vmx-helper.c @@ -0,0 +1,51 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2011 + * + * Authors: Sukadev Bhattiprolu + * Anton Blanchard + */ +#include +#include +#include + +int enter_vmx_usercopy(void) +{ + if (in_interrupt()) + return 0; + + /* This acts as preempt_disable() as well and will make + * enable_kernel_altivec(). We need to disable page faults + * as they can call schedule and thus make us lose the VMX + * context. So on page faults, we just fail which will cause + * a fallback to the normal non-vmx copy. + */ + pagefault_disable(); + + enable_kernel_altivec(); + + return 1; +} + +/* + * This function must return 0 because we tail call optimise when calling + * from __copy_tofrom_user_power7 which returns 0 on success. + */ +int exit_vmx_usercopy(void) +{ + pagefault_enable(); + return 0; +} -- cgit v1.2.3-70-g09d2 From fde69282b7ba2701560764b81ebb756deb98cf2b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 29 May 2012 19:33:12 +0000 Subject: powerpc: POWER7 optimised copy_page using VMX and enhanced prefetch Implement a POWER7 optimised copy_page using VMX and enhanced prefetch instructions. We use enhanced prefetch hints to prefetch both the load and store side. We copy a cacheline at a time and fall back to regular loads and stores if we are unable to use VMX (eg we are in an interrupt). 
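As a rough illustration of that flow (not the kernel code), copypage_power7 boils down to the C below. vmx_usable() and vmx_done() are stand-ins for the enter_vmx_copy()/exit_vmx_copy() helpers added to vmx-helper.c further down in this patch; the real vector path uses lvx/stvx and the real fallback uses unrolled 64-bit loads and stores, with the enhanced-touch prefetch streams set up before either loop runs.

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096	/* illustrative; the kernel uses PAGE_SIZE (4K or 64K) */

static int vmx_usable(void) { return 0; }	/* stand-in for enter_vmx_copy(); fails in interrupt context */
static void vmx_done(void) { }			/* stand-in for exit_vmx_copy() */

static void copy_page_sketch(void *dstp, const void *srcp)
{
	uint64_t *dst = dstp;
	const uint64_t *src = srcp;
	size_t lines = SKETCH_PAGE_SIZE / 128;	/* one 128-byte cacheline per iteration */
	int used_vmx = vmx_usable();

	/* With VMX this is eight lvx/stvx pairs per cacheline; the fallback
	 * uses sixteen 64-bit loads and stores.  Plain 64-bit copies stand
	 * in for both here. */
	while (lines--) {
		for (int i = 0; i < 16; i++)
			dst[i] = src[i];
		dst += 16;
		src += 16;
	}

	if (used_vmx)
		vmx_done();	/* re-enable preemption */
}
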
The following microbenchmark was used to assess the impact of the patch: http://ozlabs.org/~anton/junkcode/page_fault_file.c We test MAP_PRIVATE page faults across a 1GB file, 100 times: # time ./page_fault_file -p -l 1G -i 100 Before: 22.25s After: 18.89s 17% faster Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/lib/Makefile | 2 +- arch/powerpc/lib/copypage_64.S | 4 + arch/powerpc/lib/copypage_power7.S | 168 +++++++++++++++++++++++++++++++++++++ arch/powerpc/lib/vmx-helper.c | 23 +++++ 4 files changed, 196 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/lib/copypage_power7.S (limited to 'arch/powerpc/lib/Makefile') diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 1eb94c7af6c..873805ec3ed 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ memcpy_64.o usercopy_64.o mem_64.o string.o \ checksum_wrappers_64.o hweight_64.o \ - copyuser_power7.o string_64.o + copyuser_power7.o string_64.o copypage_power7.o obj-$(CONFIG_XMON) += sstep.o ldstfp.o obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 53dcb6b1b70..9f9434a8526 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -17,7 +17,11 @@ PPC64_CACHES: .section ".text" _GLOBAL(copy_page) +BEGIN_FTR_SECTION lis r5,PAGE_SIZE@h +FTR_SECTION_ELSE + b .copypage_power7 +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION ld r10,PPC64_CACHES@toc(r2) diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S new file mode 100644 index 00000000000..01e2b5db325 --- /dev/null +++ b/arch/powerpc/lib/copypage_power7.S @@ -0,0 +1,168 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard + */ +#include +#include + +#define STACKFRAMESIZE 256 +#define STK_REG(i) (112 + ((i)-14)*8) + +_GLOBAL(copypage_power7) + /* + * We prefetch both the source and destination using enhanced touch + * instructions. We use a stream ID of 0 for the load side and + * 1 for the store side. Since source and destination are page + * aligned we don't need to clear the bottom 7 bits of either + * address. 
+ */ + ori r9,r3,1 /* stream=1 */ + +#ifdef CONFIG_PPC_64K_PAGES + lis r7,0x0E01 /* depth=7, units=512 */ +#else + lis r7,0x0E00 /* depth=7 */ + ori r7,r7,0x1000 /* units=32 */ +#endif + ori r10,r7,1 /* stream=1 */ + + lis r8,0x8000 /* GO=1 */ + clrldi r8,r8,32 + +.machine push +.machine "power4" + dcbt r0,r4,0b01000 + dcbt r0,r7,0b01010 + dcbtst r0,r9,0b01000 + dcbtst r0,r10,0b01010 + eieio + dcbt r0,r8,0b01010 /* GO */ +.machine pop + +#ifdef CONFIG_ALTIVEC + mflr r0 + std r3,48(r1) + std r4,56(r1) + std r0,16(r1) + stdu r1,-STACKFRAMESIZE(r1) + bl .enter_vmx_copy + cmpwi r3,0 + ld r0,STACKFRAMESIZE+16(r1) + ld r3,STACKFRAMESIZE+48(r1) + ld r4,STACKFRAMESIZE+56(r1) + mtlr r0 + + li r0,(PAGE_SIZE/128) + mtctr r0 + + beq .Lnonvmx_copy + + addi r1,r1,STACKFRAMESIZE + + li r6,16 + li r7,32 + li r8,48 + li r9,64 + li r10,80 + li r11,96 + li r12,112 + + .align 5 +1: lvx vr7,r0,r4 + lvx vr6,r4,r6 + lvx vr5,r4,r7 + lvx vr4,r4,r8 + lvx vr3,r4,r9 + lvx vr2,r4,r10 + lvx vr1,r4,r11 + lvx vr0,r4,r12 + addi r4,r4,128 + stvx vr7,r0,r3 + stvx vr6,r3,r6 + stvx vr5,r3,r7 + stvx vr4,r3,r8 + stvx vr3,r3,r9 + stvx vr2,r3,r10 + stvx vr1,r3,r11 + stvx vr0,r3,r12 + addi r3,r3,128 + bdnz 1b + + b .exit_vmx_copy /* tail call optimise */ + +#else + li r0,(PAGE_SIZE/128) + mtctr r0 + + stdu r1,-STACKFRAMESIZE(r1) +#endif + +.Lnonvmx_copy: + std r14,STK_REG(r14)(r1) + std r15,STK_REG(r15)(r1) + std r16,STK_REG(r16)(r1) + std r17,STK_REG(r17)(r1) + std r18,STK_REG(r18)(r1) + std r19,STK_REG(r19)(r1) + std r20,STK_REG(r20)(r1) + +1: ld r0,0(r4) + ld r5,8(r4) + ld r6,16(r4) + ld r7,24(r4) + ld r8,32(r4) + ld r9,40(r4) + ld r10,48(r4) + ld r11,56(r4) + ld r12,64(r4) + ld r14,72(r4) + ld r15,80(r4) + ld r16,88(r4) + ld r17,96(r4) + ld r18,104(r4) + ld r19,112(r4) + ld r20,120(r4) + addi r4,r4,128 + std r0,0(r3) + std r5,8(r3) + std r6,16(r3) + std r7,24(r3) + std r8,32(r3) + std r9,40(r3) + std r10,48(r3) + std r11,56(r3) + std r12,64(r3) + std r14,72(r3) + std r15,80(r3) + std r16,88(r3) + std r17,96(r3) + std r18,104(r3) + std r19,112(r3) + std r20,120(r3) + addi r3,r3,128 + bdnz 1b + + ld r14,STK_REG(r14)(r1) + ld r15,STK_REG(r15)(r1) + ld r16,STK_REG(r16)(r1) + ld r17,STK_REG(r17)(r1) + ld r18,STK_REG(r18)(r1) + ld r19,STK_REG(r19)(r1) + ld r20,STK_REG(r20)(r1) + addi r1,r1,STACKFRAMESIZE + blr diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c index 753a839f4a1..3cf529ceec5 100644 --- a/arch/powerpc/lib/vmx-helper.c +++ b/arch/powerpc/lib/vmx-helper.c @@ -49,3 +49,26 @@ int exit_vmx_usercopy(void) pagefault_enable(); return 0; } + +int enter_vmx_copy(void) +{ + if (in_interrupt()) + return 0; + + preempt_disable(); + + enable_kernel_altivec(); + + return 1; +} + +/* + * All calls to this function will be optimised into tail calls. We are + * passed a pointer to the destination which we return as required by a + * memcpy implementation. + */ +void *exit_vmx_copy(void *dest) +{ + preempt_enable(); + return dest; +} -- cgit v1.2.3-70-g09d2 From b3f271e86e5a440713716bb222e1aa1227994c50 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 30 May 2012 20:22:09 +0000 Subject: powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch Implement a POWER7 optimised memcpy using VMX and enhanced prefetch instructions. This is a copy of the POWER7 optimised copy_to_user/copy_from_user loop. Detailed implementation and performance details can be found in commit a66086b8197d (powerpc: POWER7 optimised copy_to_user/copy_from_user using VMX). 
I noticed memcpy issues when profiling a RAID6 workload: .memcpy .async_memcpy .async_copy_data .__raid_run_ops .handle_stripe .raid5d .md_thread I created a simplified testcase by building a RAID6 array with 4 1GB ramdisks (booting with brd.rd_size=1048576): # mdadm -CR -e 1.2 /dev/md0 --level=6 -n4 /dev/ram[0-3] I then timed how long it took to write to the entire array: # dd if=/dev/zero of=/dev/md0 bs=1M Before: 892 MB/s After: 999 MB/s A 12% improvement. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/lib/Makefile | 3 +- arch/powerpc/lib/memcpy_64.S | 4 + arch/powerpc/lib/memcpy_power7.S | 650 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 656 insertions(+), 1 deletion(-) create mode 100644 arch/powerpc/lib/memcpy_power7.S (limited to 'arch/powerpc/lib/Makefile') diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 873805ec3ed..746e0c895cd 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -17,7 +17,8 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ memcpy_64.o usercopy_64.o mem_64.o string.o \ checksum_wrappers_64.o hweight_64.o \ - copyuser_power7.o string_64.o copypage_power7.o + copyuser_power7.o string_64.o copypage_power7.o \ + memcpy_power7.o obj-$(CONFIG_XMON) += sstep.o ldstfp.o obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index 82fea3963e1..d2bbbc8d7dc 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -11,7 +11,11 @@ .align 7 _GLOBAL(memcpy) +BEGIN_FTR_SECTION std r3,48(r1) /* save destination pointer for return value */ +FTR_SECTION_ELSE + b memcpy_power7 +ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) PPC_MTOCRF(0x01,r5) cmpldi cr1,r5,16 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S new file mode 100644 index 00000000000..84674d89793 --- /dev/null +++ b/arch/powerpc/lib/memcpy_power7.S @@ -0,0 +1,650 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard + */ +#include + +#define STACKFRAMESIZE 256 +#define STK_REG(i) (112 + ((i)-14)*8) + +_GLOBAL(memcpy_power7) +#ifdef CONFIG_ALTIVEC + cmpldi r5,16 + cmpldi cr1,r5,4096 + + std r3,48(r1) + + blt .Lshort_copy + bgt cr1,.Lvmx_copy +#else + cmpldi r5,16 + + std r3,48(r1) + + blt .Lshort_copy +#endif + +.Lnonvmx_copy: + /* Get the source 8B aligned */ + neg r6,r4 + mtocrf 0x01,r6 + clrldi r6,r6,(64-3) + + bf cr7*4+3,1f + lbz r0,0(r4) + addi r4,r4,1 + stb r0,0(r3) + addi r3,r3,1 + +1: bf cr7*4+2,2f + lhz r0,0(r4) + addi r4,r4,2 + sth r0,0(r3) + addi r3,r3,2 + +2: bf cr7*4+1,3f + lwz r0,0(r4) + addi r4,r4,4 + stw r0,0(r3) + addi r3,r3,4 + +3: sub r5,r5,r6 + cmpldi r5,128 + blt 5f + + mflr r0 + stdu r1,-STACKFRAMESIZE(r1) + std r14,STK_REG(r14)(r1) + std r15,STK_REG(r15)(r1) + std r16,STK_REG(r16)(r1) + std r17,STK_REG(r17)(r1) + std r18,STK_REG(r18)(r1) + std r19,STK_REG(r19)(r1) + std r20,STK_REG(r20)(r1) + std r21,STK_REG(r21)(r1) + std r22,STK_REG(r22)(r1) + std r0,STACKFRAMESIZE+16(r1) + + srdi r6,r5,7 + mtctr r6 + + /* Now do cacheline (128B) sized loads and stores. */ + .align 5 +4: + ld r0,0(r4) + ld r6,8(r4) + ld r7,16(r4) + ld r8,24(r4) + ld r9,32(r4) + ld r10,40(r4) + ld r11,48(r4) + ld r12,56(r4) + ld r14,64(r4) + ld r15,72(r4) + ld r16,80(r4) + ld r17,88(r4) + ld r18,96(r4) + ld r19,104(r4) + ld r20,112(r4) + ld r21,120(r4) + addi r4,r4,128 + std r0,0(r3) + std r6,8(r3) + std r7,16(r3) + std r8,24(r3) + std r9,32(r3) + std r10,40(r3) + std r11,48(r3) + std r12,56(r3) + std r14,64(r3) + std r15,72(r3) + std r16,80(r3) + std r17,88(r3) + std r18,96(r3) + std r19,104(r3) + std r20,112(r3) + std r21,120(r3) + addi r3,r3,128 + bdnz 4b + + clrldi r5,r5,(64-7) + + ld r14,STK_REG(r14)(r1) + ld r15,STK_REG(r15)(r1) + ld r16,STK_REG(r16)(r1) + ld r17,STK_REG(r17)(r1) + ld r18,STK_REG(r18)(r1) + ld r19,STK_REG(r19)(r1) + ld r20,STK_REG(r20)(r1) + ld r21,STK_REG(r21)(r1) + ld r22,STK_REG(r22)(r1) + addi r1,r1,STACKFRAMESIZE + + /* Up to 127B to go */ +5: srdi r6,r5,4 + mtocrf 0x01,r6 + +6: bf cr7*4+1,7f + ld r0,0(r4) + ld r6,8(r4) + ld r7,16(r4) + ld r8,24(r4) + ld r9,32(r4) + ld r10,40(r4) + ld r11,48(r4) + ld r12,56(r4) + addi r4,r4,64 + std r0,0(r3) + std r6,8(r3) + std r7,16(r3) + std r8,24(r3) + std r9,32(r3) + std r10,40(r3) + std r11,48(r3) + std r12,56(r3) + addi r3,r3,64 + + /* Up to 63B to go */ +7: bf cr7*4+2,8f + ld r0,0(r4) + ld r6,8(r4) + ld r7,16(r4) + ld r8,24(r4) + addi r4,r4,32 + std r0,0(r3) + std r6,8(r3) + std r7,16(r3) + std r8,24(r3) + addi r3,r3,32 + + /* Up to 31B to go */ +8: bf cr7*4+3,9f + ld r0,0(r4) + ld r6,8(r4) + addi r4,r4,16 + std r0,0(r3) + std r6,8(r3) + addi r3,r3,16 + +9: clrldi r5,r5,(64-4) + + /* Up to 15B to go */ +.Lshort_copy: + mtocrf 0x01,r5 + bf cr7*4+0,12f + lwz r0,0(r4) /* Less chance of a reject with word ops */ + lwz r6,4(r4) + addi r4,r4,8 + stw r0,0(r3) + stw r6,4(r3) + addi r3,r3,8 + +12: bf cr7*4+1,13f + lwz r0,0(r4) + addi r4,r4,4 + stw r0,0(r3) + addi r3,r3,4 + +13: bf cr7*4+2,14f + lhz r0,0(r4) + addi r4,r4,2 + sth r0,0(r3) + addi r3,r3,2 + +14: bf cr7*4+3,15f + lbz r0,0(r4) + stb r0,0(r3) + +15: ld r3,48(r1) + blr + +.Lunwind_stack_nonvmx_copy: + addi r1,r1,STACKFRAMESIZE + b .Lnonvmx_copy + +#ifdef CONFIG_ALTIVEC +.Lvmx_copy: + mflr r0 + std r4,56(r1) + std r5,64(r1) + std r0,16(r1) + stdu r1,-STACKFRAMESIZE(r1) + bl .enter_vmx_copy + cmpwi r3,0 + ld r0,STACKFRAMESIZE+16(r1) + ld r3,STACKFRAMESIZE+48(r1) + ld r4,STACKFRAMESIZE+56(r1) + ld r5,STACKFRAMESIZE+64(r1) 
+ mtlr r0 + + /* + * We prefetch both the source and destination using enhanced touch + * instructions. We use a stream ID of 0 for the load side and + * 1 for the store side. + */ + clrrdi r6,r4,7 + clrrdi r9,r3,7 + ori r9,r9,1 /* stream=1 */ + + srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */ + cmpldi cr1,r7,0x3FF + ble cr1,1f + li r7,0x3FF +1: lis r0,0x0E00 /* depth=7 */ + sldi r7,r7,7 + or r7,r7,r0 + ori r10,r7,1 /* stream=1 */ + + lis r8,0x8000 /* GO=1 */ + clrldi r8,r8,32 + +.machine push +.machine "power4" + dcbt r0,r6,0b01000 + dcbt r0,r7,0b01010 + dcbtst r0,r9,0b01000 + dcbtst r0,r10,0b01010 + eieio + dcbt r0,r8,0b01010 /* GO */ +.machine pop + + beq .Lunwind_stack_nonvmx_copy + + /* + * If source and destination are not relatively aligned we use a + * slower permute loop. + */ + xor r6,r4,r3 + rldicl. r6,r6,0,(64-4) + bne .Lvmx_unaligned_copy + + /* Get the destination 16B aligned */ + neg r6,r3 + mtocrf 0x01,r6 + clrldi r6,r6,(64-4) + + bf cr7*4+3,1f + lbz r0,0(r4) + addi r4,r4,1 + stb r0,0(r3) + addi r3,r3,1 + +1: bf cr7*4+2,2f + lhz r0,0(r4) + addi r4,r4,2 + sth r0,0(r3) + addi r3,r3,2 + +2: bf cr7*4+1,3f + lwz r0,0(r4) + addi r4,r4,4 + stw r0,0(r3) + addi r3,r3,4 + +3: bf cr7*4+0,4f + ld r0,0(r4) + addi r4,r4,8 + std r0,0(r3) + addi r3,r3,8 + +4: sub r5,r5,r6 + + /* Get the desination 128B aligned */ + neg r6,r3 + srdi r7,r6,4 + mtocrf 0x01,r7 + clrldi r6,r6,(64-7) + + li r9,16 + li r10,32 + li r11,48 + + bf cr7*4+3,5f + lvx vr1,r0,r4 + addi r4,r4,16 + stvx vr1,r0,r3 + addi r3,r3,16 + +5: bf cr7*4+2,6f + lvx vr1,r0,r4 + lvx vr0,r4,r9 + addi r4,r4,32 + stvx vr1,r0,r3 + stvx vr0,r3,r9 + addi r3,r3,32 + +6: bf cr7*4+1,7f + lvx vr3,r0,r4 + lvx vr2,r4,r9 + lvx vr1,r4,r10 + lvx vr0,r4,r11 + addi r4,r4,64 + stvx vr3,r0,r3 + stvx vr2,r3,r9 + stvx vr1,r3,r10 + stvx vr0,r3,r11 + addi r3,r3,64 + +7: sub r5,r5,r6 + srdi r6,r5,7 + + std r14,STK_REG(r14)(r1) + std r15,STK_REG(r15)(r1) + std r16,STK_REG(r16)(r1) + + li r12,64 + li r14,80 + li r15,96 + li r16,112 + + mtctr r6 + + /* + * Now do cacheline sized loads and stores. By this stage the + * cacheline stores are also cacheline aligned. 
+ */ + .align 5 +8: + lvx vr7,r0,r4 + lvx vr6,r4,r9 + lvx vr5,r4,r10 + lvx vr4,r4,r11 + lvx vr3,r4,r12 + lvx vr2,r4,r14 + lvx vr1,r4,r15 + lvx vr0,r4,r16 + addi r4,r4,128 + stvx vr7,r0,r3 + stvx vr6,r3,r9 + stvx vr5,r3,r10 + stvx vr4,r3,r11 + stvx vr3,r3,r12 + stvx vr2,r3,r14 + stvx vr1,r3,r15 + stvx vr0,r3,r16 + addi r3,r3,128 + bdnz 8b + + ld r14,STK_REG(r14)(r1) + ld r15,STK_REG(r15)(r1) + ld r16,STK_REG(r16)(r1) + + /* Up to 127B to go */ + clrldi r5,r5,(64-7) + srdi r6,r5,4 + mtocrf 0x01,r6 + + bf cr7*4+1,9f + lvx vr3,r0,r4 + lvx vr2,r4,r9 + lvx vr1,r4,r10 + lvx vr0,r4,r11 + addi r4,r4,64 + stvx vr3,r0,r3 + stvx vr2,r3,r9 + stvx vr1,r3,r10 + stvx vr0,r3,r11 + addi r3,r3,64 + +9: bf cr7*4+2,10f + lvx vr1,r0,r4 + lvx vr0,r4,r9 + addi r4,r4,32 + stvx vr1,r0,r3 + stvx vr0,r3,r9 + addi r3,r3,32 + +10: bf cr7*4+3,11f + lvx vr1,r0,r4 + addi r4,r4,16 + stvx vr1,r0,r3 + addi r3,r3,16 + + /* Up to 15B to go */ +11: clrldi r5,r5,(64-4) + mtocrf 0x01,r5 + bf cr7*4+0,12f + ld r0,0(r4) + addi r4,r4,8 + std r0,0(r3) + addi r3,r3,8 + +12: bf cr7*4+1,13f + lwz r0,0(r4) + addi r4,r4,4 + stw r0,0(r3) + addi r3,r3,4 + +13: bf cr7*4+2,14f + lhz r0,0(r4) + addi r4,r4,2 + sth r0,0(r3) + addi r3,r3,2 + +14: bf cr7*4+3,15f + lbz r0,0(r4) + stb r0,0(r3) + +15: addi r1,r1,STACKFRAMESIZE + ld r3,48(r1) + b .exit_vmx_copy /* tail call optimise */ + +.Lvmx_unaligned_copy: + /* Get the destination 16B aligned */ + neg r6,r3 + mtocrf 0x01,r6 + clrldi r6,r6,(64-4) + + bf cr7*4+3,1f + lbz r0,0(r4) + addi r4,r4,1 + stb r0,0(r3) + addi r3,r3,1 + +1: bf cr7*4+2,2f + lhz r0,0(r4) + addi r4,r4,2 + sth r0,0(r3) + addi r3,r3,2 + +2: bf cr7*4+1,3f + lwz r0,0(r4) + addi r4,r4,4 + stw r0,0(r3) + addi r3,r3,4 + +3: bf cr7*4+0,4f + lwz r0,0(r4) /* Less chance of a reject with word ops */ + lwz r7,4(r4) + addi r4,r4,8 + stw r0,0(r3) + stw r7,4(r3) + addi r3,r3,8 + +4: sub r5,r5,r6 + + /* Get the desination 128B aligned */ + neg r6,r3 + srdi r7,r6,4 + mtocrf 0x01,r7 + clrldi r6,r6,(64-7) + + li r9,16 + li r10,32 + li r11,48 + + lvsl vr16,0,r4 /* Setup permute control vector */ + lvx vr0,0,r4 + addi r4,r4,16 + + bf cr7*4+3,5f + lvx vr1,r0,r4 + vperm vr8,vr0,vr1,vr16 + addi r4,r4,16 + stvx vr8,r0,r3 + addi r3,r3,16 + vor vr0,vr1,vr1 + +5: bf cr7*4+2,6f + lvx vr1,r0,r4 + vperm vr8,vr0,vr1,vr16 + lvx vr0,r4,r9 + vperm vr9,vr1,vr0,vr16 + addi r4,r4,32 + stvx vr8,r0,r3 + stvx vr9,r3,r9 + addi r3,r3,32 + +6: bf cr7*4+1,7f + lvx vr3,r0,r4 + vperm vr8,vr0,vr3,vr16 + lvx vr2,r4,r9 + vperm vr9,vr3,vr2,vr16 + lvx vr1,r4,r10 + vperm vr10,vr2,vr1,vr16 + lvx vr0,r4,r11 + vperm vr11,vr1,vr0,vr16 + addi r4,r4,64 + stvx vr8,r0,r3 + stvx vr9,r3,r9 + stvx vr10,r3,r10 + stvx vr11,r3,r11 + addi r3,r3,64 + +7: sub r5,r5,r6 + srdi r6,r5,7 + + std r14,STK_REG(r14)(r1) + std r15,STK_REG(r15)(r1) + std r16,STK_REG(r16)(r1) + + li r12,64 + li r14,80 + li r15,96 + li r16,112 + + mtctr r6 + + /* + * Now do cacheline sized loads and stores. By this stage the + * cacheline stores are also cacheline aligned. 
+ */ + .align 5 +8: + lvx vr7,r0,r4 + vperm vr8,vr0,vr7,vr16 + lvx vr6,r4,r9 + vperm vr9,vr7,vr6,vr16 + lvx vr5,r4,r10 + vperm vr10,vr6,vr5,vr16 + lvx vr4,r4,r11 + vperm vr11,vr5,vr4,vr16 + lvx vr3,r4,r12 + vperm vr12,vr4,vr3,vr16 + lvx vr2,r4,r14 + vperm vr13,vr3,vr2,vr16 + lvx vr1,r4,r15 + vperm vr14,vr2,vr1,vr16 + lvx vr0,r4,r16 + vperm vr15,vr1,vr0,vr16 + addi r4,r4,128 + stvx vr8,r0,r3 + stvx vr9,r3,r9 + stvx vr10,r3,r10 + stvx vr11,r3,r11 + stvx vr12,r3,r12 + stvx vr13,r3,r14 + stvx vr14,r3,r15 + stvx vr15,r3,r16 + addi r3,r3,128 + bdnz 8b + + ld r14,STK_REG(r14)(r1) + ld r15,STK_REG(r15)(r1) + ld r16,STK_REG(r16)(r1) + + /* Up to 127B to go */ + clrldi r5,r5,(64-7) + srdi r6,r5,4 + mtocrf 0x01,r6 + + bf cr7*4+1,9f + lvx vr3,r0,r4 + vperm vr8,vr0,vr3,vr16 + lvx vr2,r4,r9 + vperm vr9,vr3,vr2,vr16 + lvx vr1,r4,r10 + vperm vr10,vr2,vr1,vr16 + lvx vr0,r4,r11 + vperm vr11,vr1,vr0,vr16 + addi r4,r4,64 + stvx vr8,r0,r3 + stvx vr9,r3,r9 + stvx vr10,r3,r10 + stvx vr11,r3,r11 + addi r3,r3,64 + +9: bf cr7*4+2,10f + lvx vr1,r0,r4 + vperm vr8,vr0,vr1,vr16 + lvx vr0,r4,r9 + vperm vr9,vr1,vr0,vr16 + addi r4,r4,32 + stvx vr8,r0,r3 + stvx vr9,r3,r9 + addi r3,r3,32 + +10: bf cr7*4+3,11f + lvx vr1,r0,r4 + vperm vr8,vr0,vr1,vr16 + addi r4,r4,16 + stvx vr8,r0,r3 + addi r3,r3,16 + + /* Up to 15B to go */ +11: clrldi r5,r5,(64-4) + addi r4,r4,-16 /* Unwind the +16 load offset */ + mtocrf 0x01,r5 + bf cr7*4+0,12f + lwz r0,0(r4) /* Less chance of a reject with word ops */ + lwz r6,4(r4) + addi r4,r4,8 + stw r0,0(r3) + stw r6,4(r3) + addi r3,r3,8 + +12: bf cr7*4+1,13f + lwz r0,0(r4) + addi r4,r4,4 + stw r0,0(r3) + addi r3,r3,4 + +13: bf cr7*4+2,14f + lhz r0,0(r4) + addi r4,r4,2 + sth r0,0(r3) + addi r3,r3,2 + +14: bf cr7*4+3,15f + lbz r0,0(r4) + stb r0,0(r3) + +15: addi r1,r1,STACKFRAMESIZE + ld r3,48(r1) + b .exit_vmx_copy /* tail call optimise */ +#endif /* CONFiG_ALTIVEC */ -- cgit v1.2.3-70-g09d2
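
Taken together, the size-based dispatch in memcpy_power7 above can be summarised by the C pseudocode below. The 16-byte and 4096-byte thresholds are read off the cmpldi instructions at the top of the routine; when CONFIG_ALTIVEC is not set only the scalar path is built. The helper names are illustrative stand-ins, not kernel symbols, and the copy loops are plain memcpy placeholders.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int enter_vmx(void) { return 0; }		/* stands in for enter_vmx_copy(); 0 means no VMX (e.g. interrupt) */
static void *exit_vmx(void *dest) { return dest; }	/* stands in for exit_vmx_copy() */

/* Placeholders for the three copy loops in the assembly above. */
static void short_or_scalar_copy(void *d, const void *s, size_t n) { memcpy(d, s, n); }
static void vmx_aligned_copy(void *d, const void *s, size_t n)     { memcpy(d, s, n); }
static void vmx_unaligned_copy(void *d, const void *s, size_t n)   { memcpy(d, s, n); }

void *memcpy_power7_sketch(void *dest, const void *src, size_t n)
{
	/* Small or mid-sized copies, or VMX unavailable: .Lshort_copy /
	 * .Lnonvmx_copy, i.e. byte, halfword and word fix-ups plus an
	 * unrolled 128-byte ld/std loop. */
	if (n <= 4096 || !enter_vmx()) {
		short_or_scalar_copy(dest, src, n);
		return dest;
	}

	/* Large copies with VMX: enhanced-touch prefetch streams are set up,
	 * then one of two lvx/stvx loops is chosen depending on whether
	 * source and destination are relatively 16-byte aligned. */
	if ((((uintptr_t)dest ^ (uintptr_t)src) & 15) == 0)
		vmx_aligned_copy(dest, src, n);		/* 128 bytes per iteration */
	else
		vmx_unaligned_copy(dest, src, n);	/* lvsl/vperm realignment */

	return exit_vmx(dest);	/* tail-called in the real code; returns dest */
}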