Diffstat (limited to 'arch/sparc64/lib')
 arch/sparc64/lib/GENbzero.S          | 160
 arch/sparc64/lib/GENcopy_from_user.S |  34
 arch/sparc64/lib/GENcopy_to_user.S   |  38
 arch/sparc64/lib/GENmemcpy.S         | 121
 arch/sparc64/lib/GENpage.S           |  77
 arch/sparc64/lib/GENpatch.S          |  33
 arch/sparc64/lib/Makefile            |   6
 arch/sparc64/lib/NG2copy_from_user.S |  40
 arch/sparc64/lib/NG2copy_to_user.S   |  49
 arch/sparc64/lib/NG2memcpy.S         | 520
 arch/sparc64/lib/NG2page.S           |  61
 arch/sparc64/lib/NG2patch.S          |  33
 arch/sparc64/lib/NGcopy_from_user.S  |   8
 arch/sparc64/lib/NGcopy_to_user.S    |   8
 arch/sparc64/lib/NGmemcpy.S          | 371
 arch/sparc64/lib/NGpage.S            |   1
 16 files changed, 1393 insertions(+), 167 deletions(-)
diff --git a/arch/sparc64/lib/GENbzero.S b/arch/sparc64/lib/GENbzero.S
new file mode 100644
index 00000000000..f9c71d64eba
--- /dev/null
+++ b/arch/sparc64/lib/GENbzero.S
@@ -0,0 +1,160 @@
+/* GENbzero.S: Generic sparc64 memset/clear_user.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+#include <asm/asi.h>
+
+#define EX_ST(x,y)		\
+98:	x,y;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	mov	%o1, %o0;	\
+	.section __ex_table;	\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+	.align	32
+	.text
+
+	.globl	GENmemset
+	.type	GENmemset, #function
+GENmemset:	/* %o0=buf, %o1=pat, %o2=len */
+	and	%o1, 0xff, %o3
+	mov	%o2, %o1
+	sllx	%o3, 8, %g1
+	or	%g1, %o3, %o2
+	sllx	%o2, 16, %g1
+	or	%g1, %o2, %o2
+	sllx	%o2, 32, %g1
+	ba,pt	%xcc, 1f
+	or	%g1, %o2, %o2
+
+	.globl	GENbzero
+	.type	GENbzero, #function
+GENbzero:
+	clr	%o2
+1:	brz,pn	%o1, GENbzero_return
+	mov	%o0, %o3
+
+	/* %o5: saved %asi, restored at GENbzero_done
+	 * %o4: store %asi to use
+	 */
+	rd	%asi, %o5
+	mov	ASI_P, %o4
+	wr	%o4, 0x0, %asi
+
+GENbzero_from_clear_user:
+	cmp	%o1, 15
+	bl,pn	%icc, GENbzero_tiny
+	andcc	%o0, 0x7, %g1
+	be,pt	%xcc, 2f
+	mov	8, %g2
+	sub	%g2, %g1, %g1
+	sub	%o1, %g1, %o1
+1:	EX_ST(stba %o2, [%o0 + 0x00] %asi)
+	subcc	%g1, 1, %g1
+	bne,pt	%xcc, 1b
+	add	%o0, 1, %o0
+2:	cmp	%o1, 128
+	bl,pn	%icc, GENbzero_medium
+	andcc	%o0, (64 - 1), %g1
+	be,pt	%xcc, GENbzero_pre_loop
+	mov	64, %g2
+	sub	%g2, %g1, %g1
+	sub	%o1, %g1, %o1
+1:	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	subcc	%g1, 8, %g1
+	bne,pt	%xcc, 1b
+	add	%o0, 8, %o0
+
+GENbzero_pre_loop:
+	andn	%o1, (64 - 1), %g1
+	sub	%o1, %g1, %o1
+GENbzero_loop:
+	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x08] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x10] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x18] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x20] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x28] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x30] %asi)
+	EX_ST(stxa %o2, [%o0 + 0x38] %asi)
+	subcc	%g1, 64, %g1
+	bne,pt	%xcc, GENbzero_loop
+	add	%o0, 64, %o0
+
+	membar	#Sync
+	wr	%o4, 0x0, %asi
+	brz,pn	%o1, GENbzero_done
+GENbzero_medium:
+	andncc	%o1, 0x7, %g1
+	be,pn	%xcc, 2f
+	sub	%o1, %g1, %o1
+1:	EX_ST(stxa %o2, [%o0 + 0x00] %asi)
+	subcc	%g1, 8, %g1
+	bne,pt	%xcc, 1b
+	add	%o0, 8, %o0
+2:	brz,pt	%o1, GENbzero_done
+	nop
+
+GENbzero_tiny:
+1:	EX_ST(stba %o2, [%o0 + 0x00] %asi)
+	subcc	%o1, 1, %o1
+	bne,pt	%icc, 1b
+	add	%o0, 1, %o0
+
+	/* fallthrough */
+
+GENbzero_done:
+	wr	%o5, 0x0, %asi
+
+GENbzero_return:
+	retl
+	mov	%o3, %o0
+	.size	GENbzero, .-GENbzero
+	.size	GENmemset, .-GENmemset
+
+	.globl	GENclear_user
+	.type	GENclear_user, #function
+GENclear_user:	/* %o0=buf, %o1=len */
+	rd	%asi, %o5
+	brz,pn	%o1, GENbzero_done
+	clr	%o3
+	cmp	%o5, ASI_AIUS
+	bne,pn	%icc, GENbzero
+	clr	%o2
+	ba,pt	%xcc, GENbzero_from_clear_user
+	mov	ASI_AIUS, %o4
+	.size	GENclear_user, .-GENclear_user
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define GEN_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	generic_patch_bzero
+	.type	generic_patch_bzero,#function
+generic_patch_bzero:
+	GEN_DO_PATCH(memset, GENmemset)
+	GEN_DO_PATCH(__bzero, GENbzero)
+	GEN_DO_PATCH(__clear_user, GENclear_user)
+	retl
+	nop
+	.size	generic_patch_bzero,.-generic_patch_bzero
diff --git a/arch/sparc64/lib/GENcopy_from_user.S b/arch/sparc64/lib/GENcopy_from_user.S
new file mode 100644
index 00000000000..2b9df99e87f
--- /dev/null
+++ b/arch/sparc64/lib/GENcopy_from_user.S
@@ -0,0 +1,34 @@
+/* GENcopy_from_user.S: Generic sparc64 copy from userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#define FUNC_NAME	GENcopy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] ASI_AIUS, dest
+#define EX_RETVAL(x)	0
+
+#ifdef __KERNEL__
+#define PREAMBLE				\
+	rd	%asi, %g1;			\
+	cmp	%g1, ASI_AIUS;			\
+	bne,pn	%icc, memcpy_user_stub;		\
+	nop
+#endif
+
+#include "GENmemcpy.S"
diff --git a/arch/sparc64/lib/GENcopy_to_user.S b/arch/sparc64/lib/GENcopy_to_user.S
new file mode 100644
index 00000000000..bb3f7084daf
--- /dev/null
+++ b/arch/sparc64/lib/GENcopy_to_user.S
@@ -0,0 +1,38 @@
+/* GENcopy_to_user.S: Generic sparc64 copy to userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	retl;			\
+	mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#define FUNC_NAME	GENcopy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define EX_RETVAL(x)	0
+
+#ifdef __KERNEL__
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE				\
+	rd	%asi, %g1;			\
+	cmp	%g1, ASI_AIUS;			\
+	bne,pn	%icc, memcpy_user_stub;		\
+	nop
+#endif
+
+#include "GENmemcpy.S"
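GENmemset's prologue above is a standard byte-splat: the pattern byte is masked to 8 bits, then doubled three times with sllx/or until it fills a 64-bit word that the stxa loops can store directly. A C rendering of that computation, for illustration only (the function name is mine, not part of the patch):

	#include <stdint.h>

	/* Replicate the low byte of `pat` across a 64-bit word, as
	 * GENmemset does with its three sllx/or pairs:
	 * 0xab -> 0xabab -> 0xabababab -> 0xabababababababab.
	 */
	static uint64_t splat_byte(uint64_t pat)
	{
		pat &= 0xff;		/* and  %o1, 0xff, %o3 */
		pat |= pat << 8;	/* sllx 8;  or */
		pat |= pat << 16;	/* sllx 16; or */
		pat |= pat << 32;	/* sllx 32; or */
		return pat;
	}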
diff --git a/arch/sparc64/lib/GENmemcpy.S b/arch/sparc64/lib/GENmemcpy.S
new file mode 100644
index 00000000000..89358ee9485
--- /dev/null
+++ b/arch/sparc64/lib/GENmemcpy.S
@@ -0,0 +1,121 @@
+/* GENmemcpy.S: Generic sparc64 memcpy.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifdef __KERNEL__
+#define GLOBAL_SPARE	%g7
+#else
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef STORE
+#define STORE(type,src,addr)	type src, [addr]
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	GENmemcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align	64
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx	%o2, 31, %g2
+	cmp	%g2, 0
+	tne	%XCC, 5
+	PREAMBLE
+	mov	%o0, GLOBAL_SPARE
+
+	cmp	%o2, 0
+	be,pn	%XCC, 85f
+	or	%o0, %o1, %o3
+	cmp	%o2, 16
+	blu,a,pn %XCC, 80f
+	or	%o3, %o2, %o3
+
+	xor	%o0, %o1, %o4
+	andcc	%o4, 0x7, %g0
+	bne,a,pn %XCC, 90f
+	sub	%o0, %o1, %o3
+
+	and	%o0, 0x7, %o4
+	sub	%o4, 0x8, %o4
+	sub	%g0, %o4, %o4
+	sub	%o2, %o4, %o2
+1:	subcc	%o4, 1, %o4
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o0))
+	add	%o1, 1, %o1
+	bne,pt	%XCC, 1b
+	add	%o0, 1, %o0
+
+	andn	%o2, 0x7, %g1
+	sub	%o2, %g1, %o2
+1:	subcc	%g1, 0x8, %g1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	EX_ST(STORE(stx, %g2, %o0))
+	add	%o1, 0x8, %o1
+	bne,pt	%XCC, 1b
+	add	%o0, 0x8, %o0
+
+	brz,pt	%o2, 85f
+	sub	%o0, %o1, %o3
+	ba,a,pt	%XCC, 90f
+
+	.align	64
+80: /* 0 < len <= 16 */
+	andcc	%o3, 0x3, %g0
+	bne,pn	%XCC, 90f
+	sub	%o0, %o1, %o3
+
+1:
+	subcc	%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt	%XCC, 1b
+	add	%o1, 4, %o1
+
+85:	retl
+	mov	EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.align	32
+90:
+	subcc	%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt	%XCC, 90b
+	add	%o1, 1, %o1
+	retl
+	mov	EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.size	FUNC_NAME, .-FUNC_NAME
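GENmemcpy deliberately avoids any CPU-specific instructions: it takes the 8-byte path only when dst and src are congruent modulo 8 (the xor/andcc test above), aligns the destination byte by byte, streams ldx/stx pairs, and otherwise falls back to the 4-byte or 1-byte loops at labels 80 and 90. A rough C model of that control flow, as a sketch only (the real routine additionally threads EX_LD/EX_ST fault fixups through every access):

	#include <stddef.h>
	#include <stdint.h>

	static void *gen_memcpy(void *dstp, const void *srcp, size_t len)
	{
		unsigned char *dst = dstp;
		const unsigned char *src = srcp;

		/* xor %o0, %o1, %o4; andcc %o4, 0x7, %g0 */
		if (((uintptr_t)dst ^ (uintptr_t)src) & 7)
			goto bytewise;			/* label 90 */

		while (len && ((uintptr_t)dst & 7)) {	/* align head */
			*dst++ = *src++;
			len--;
		}
		while (len >= 8) {			/* ldx/stx body */
			*(uint64_t *)dst = *(const uint64_t *)src;
			dst += 8;
			src += 8;
			len -= 8;
		}
	bytewise:
		while (len--)				/* tail / fallback */
			*dst++ = *src++;
		return dstp;	/* original dst, kept in GLOBAL_SPARE */
	}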
diff --git a/arch/sparc64/lib/GENpage.S b/arch/sparc64/lib/GENpage.S
new file mode 100644
index 00000000000..2ef9d05f21b
--- /dev/null
+++ b/arch/sparc64/lib/GENpage.S
@@ -0,0 +1,77 @@
+/* GENpage.S: Generic clear and copy page.
+ *
+ * Copyright (C) 2007 (davem@davemloft.net)
+ */
+#include <asm/page.h>
+
+	.text
+	.align	32
+
+GENcopy_user_page:
+	set	PAGE_SIZE, %g7
+1:	ldx	[%o1 + 0x00], %o2
+	ldx	[%o1 + 0x08], %o3
+	ldx	[%o1 + 0x10], %o4
+	ldx	[%o1 + 0x18], %o5
+	stx	%o2, [%o0 + 0x00]
+	stx	%o3, [%o0 + 0x08]
+	stx	%o4, [%o0 + 0x10]
+	stx	%o5, [%o0 + 0x18]
+	ldx	[%o1 + 0x20], %o2
+	ldx	[%o1 + 0x28], %o3
+	ldx	[%o1 + 0x30], %o4
+	ldx	[%o1 + 0x38], %o5
+	stx	%o2, [%o0 + 0x20]
+	stx	%o3, [%o0 + 0x28]
+	stx	%o4, [%o0 + 0x30]
+	stx	%o5, [%o0 + 0x38]
+	subcc	%g7, 64, %g7
+	add	%o1, 64, %o1
+	bne,pt	%xcc, 1b
+	add	%o0, 64, %o0
+	retl
+	nop
+
+GENclear_page:
+GENclear_user_page:
+	set	PAGE_SIZE, %g7
+1:	stx	%g0, [%o0 + 0x00]
+	stx	%g0, [%o0 + 0x08]
+	stx	%g0, [%o0 + 0x10]
+	stx	%g0, [%o0 + 0x18]
+	stx	%g0, [%o0 + 0x20]
+	stx	%g0, [%o0 + 0x28]
+	stx	%g0, [%o0 + 0x30]
+	stx	%g0, [%o0 + 0x38]
+	subcc	%g7, 64, %g7
+	bne,pt	%xcc, 1b
+	add	%o0, 64, %o0
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define GEN_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	generic_patch_pageops
+	.type	generic_patch_pageops,#function
+generic_patch_pageops:
+	GEN_DO_PATCH(copy_user_page, GENcopy_user_page)
+	GEN_DO_PATCH(_clear_page, GENclear_page)
+	GEN_DO_PATCH(clear_user_page, GENclear_user_page)
+	retl
+	nop
+	.size	generic_patch_pageops,.-generic_patch_pageops
diff --git a/arch/sparc64/lib/GENpatch.S b/arch/sparc64/lib/GENpatch.S
new file mode 100644
index 00000000000..fab9e89f16b
--- /dev/null
+++ b/arch/sparc64/lib/GENpatch.S
@@ -0,0 +1,33 @@
+/* GENpatch.S: Patch Ultra-I routines with generic variant.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define GEN_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	generic_patch_copyops
+	.type	generic_patch_copyops,#function
+generic_patch_copyops:
+	GEN_DO_PATCH(memcpy, GENmemcpy)
+	GEN_DO_PATCH(___copy_from_user, GENcopy_from_user)
+	GEN_DO_PATCH(___copy_to_user, GENcopy_to_user)
+	retl
+	nop
+	.size	generic_patch_copyops,.-generic_patch_copyops
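All of the *patch.S files use the same redirection trick: the first two instructions of the old routine are overwritten with a branch-always to the new routine plus a nop for the delay slot. BRANCH_ALWAYS (0x10680000) is "ba,pt %xcc" with an empty 19-bit displacement field, and the sll 11/srl 13 pair masks the word displacement into it, so this only works while both routines sit within the +-1MB reach of disp19 (true within the kernel image). A C rendering of the encoding, for illustration; the function itself is hypothetical:

	#include <stdint.h>

	/* What GEN_DO_PATCH computes before its stw/flush pair. */
	static void do_patch(uint32_t *old, uint32_t *new)
	{
		int64_t delta = (char *)new - (char *)old;

		/* sll %g1, 11; srl %g1, 11 + 2 == (delta >> 2) & disp19 */
		old[0] = 0x10680000u | (((uint64_t)delta >> 2) & 0x7ffff);
		old[1] = 0x01000000u;	/* NOP in the delay slot */
		/* the assembly then issues "flush" to sync the I-cache */
	}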
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index c4a6d6e7d03..f095e13910b 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.25 2000/12/14 22:57:25 davem Exp $
+#
 # Makefile for Sparc64 library files..
 #
 
@@ -13,6 +13,10 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
 	 NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
 	 NGpage.o NGbzero.o \
+	 NG2memcpy.o NG2copy_from_user.o NG2copy_to_user.o NG2patch.o \
+	 NG2page.o \
+	 GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o GENpatch.o \
+	 GENpage.o GENbzero.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o
diff --git a/arch/sparc64/lib/NG2copy_from_user.S b/arch/sparc64/lib/NG2copy_from_user.S
new file mode 100644
index 00000000000..c77ef5f2210
--- /dev/null
+++ b/arch/sparc64/lib/NG2copy_from_user.S
@@ -0,0 +1,40 @@
+/* NG2copy_from_user.S: Niagara-2 optimized copy from userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_LD(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	wr	%g0, ASI_AIUS, %asi;\
+	retl;			\
+	mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#ifndef ASI_BLK_AIUS_4V
+#define ASI_BLK_AIUS_4V	0x17
+#endif
+
+#define FUNC_NAME	NG2copy_from_user
+#define LOAD(type,addr,dest)	type##a [addr] %asi, dest
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_AIUS_4V, dest
+#define EX_RETVAL(x)	0
+
+#ifdef __KERNEL__
+#define PREAMBLE				\
+	rd	%asi, %g1;			\
+	cmp	%g1, ASI_AIUS;			\
+	bne,pn	%icc, memcpy_user_stub;		\
+	nop
+#endif
+
+#include "NG2memcpy.S"
diff --git a/arch/sparc64/lib/NG2copy_to_user.S b/arch/sparc64/lib/NG2copy_to_user.S
new file mode 100644
index 00000000000..4bd4093acbb
--- /dev/null
+++ b/arch/sparc64/lib/NG2copy_to_user.S
@@ -0,0 +1,49 @@
+/* NG2copy_to_user.S: Niagara-2 optimized copy to userspace.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#define EX_ST(x)		\
+98:	x;			\
+	.section .fixup;	\
+	.align 4;		\
+99:	wr	%g0, ASI_AIUS, %asi;\
+	retl;			\
+	mov	1, %o0;		\
+	.section __ex_table,"a";\
+	.align 4;		\
+	.word 98b, 99b;		\
+	.text;			\
+	.align 4;
+
+#ifndef ASI_AIUS
+#define ASI_AIUS	0x11
+#endif
+
+#ifndef ASI_BLK_AIUS_4V
+#define ASI_BLK_AIUS_4V	0x17
+#endif
+
+#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23
+#endif
+
+#define FUNC_NAME	NG2copy_to_user
+#define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
+#define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_AIUS
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_AIUS_4V
+#define EX_RETVAL(x)	0
+
+#ifdef __KERNEL__
+	/* Writing to %asi is _expensive_ so we hardcode it.
+	 * Reading %asi to check for KERNEL_DS is comparatively
+	 * cheap.
+	 */
+#define PREAMBLE				\
+	rd	%asi, %g1;			\
+	cmp	%g1, ASI_AIUS;			\
+	bne,pn	%icc, memcpy_user_stub;		\
+	nop
+#endif
+
+#include "NG2memcpy.S"
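As in the GEN wrappers, each EX_LD/EX_ST here wraps exactly one faulting user-space access and emits an __ex_table pair: the address of the instruction (98b) and of its fixup (99b), which restores %asi before returning the error value. A sketch of the table entry and the fault-time lookup this implies; the struct and function names are illustrative, not the kernel's actual search code:

	#include <stdint.h>

	struct ex_entry {
		uint32_t insn;	/* .word 98b */
		uint32_t fixup;	/* .word 99b */
	};

	/* On a fault, the trap handler scans the table for the
	 * faulting PC and, if found, resumes at the fixup stub. */
	static uint32_t find_fixup(const struct ex_entry *tbl, int n,
				   uint32_t fault_pc)
	{
		for (int i = 0; i < n; i++)
			if (tbl[i].insn == fault_pc)
				return tbl[i].fixup;
		return 0;	/* no entry: genuine kernel fault */
	}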
diff --git a/arch/sparc64/lib/NG2memcpy.S b/arch/sparc64/lib/NG2memcpy.S
new file mode 100644
index 00000000000..0aed75653b5
--- /dev/null
+++ b/arch/sparc64/lib/NG2memcpy.S
@@ -0,0 +1,520 @@
+/* NG2memcpy.S: Niagara-2 optimized memcpy.
+ *
+ * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
+ */
+
+#ifdef __KERNEL__
+#include <asm/visasm.h>
+#include <asm/asi.h>
+#define GLOBAL_SPARE	%g7
+#else
+#define ASI_PNF	0x82
+#define ASI_BLK_P 0xf0
+#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
+#define FPRS_FEF  0x04
+#ifdef MEMCPY_DEBUG
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
+		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#else
+#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
+#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
+#endif
+#define GLOBAL_SPARE	%g5
+#endif
+
+#ifndef STORE_ASI
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
+#define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
+#else
+#define STORE_ASI	0x80		/* ASI_P */
+#endif
+#endif
+
+#ifndef EX_LD
+#define EX_LD(x)	x
+#endif
+
+#ifndef EX_ST
+#define EX_ST(x)	x
+#endif
+
+#ifndef EX_RETVAL
+#define EX_RETVAL(x)	x
+#endif
+
+#ifndef LOAD
+#define LOAD(type,addr,dest)	type [addr], dest
+#endif
+
+#ifndef LOAD_BLK
+#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
+#endif
+
+#ifndef STORE
+#ifndef MEMCPY_DEBUG
+#define STORE(type,src,addr)	type src, [addr]
+#else
+#define STORE(type,src,addr)	type##a src, [addr] 0x80
+#endif
+#endif
+
+#ifndef STORE_BLK
+#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
+#endif
+
+#ifndef STORE_INIT
+#define STORE_INIT(src,addr)	stxa src, [addr] STORE_ASI
+#endif
+
+#ifndef FUNC_NAME
+#define FUNC_NAME	NG2memcpy
+#endif
+
+#ifndef PREAMBLE
+#define PREAMBLE
+#endif
+
+#ifndef XCC
+#define XCC xcc
+#endif
+
+#define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
+	faligndata	%x0, %x1, %f0; \
+	faligndata	%x1, %x2, %f2; \
+	faligndata	%x2, %x3, %f4; \
+	faligndata	%x3, %x4, %f6; \
+	faligndata	%x4, %x5, %f8; \
+	faligndata	%x5, %x6, %f10; \
+	faligndata	%x6, %x7, %f12; \
+	faligndata	%x7, %x8, %f14;
+
+#define FREG_MOVE_1(x0) \
+	fmovd	%x0, %f0;
+#define FREG_MOVE_2(x0, x1) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2;
+#define FREG_MOVE_3(x0, x1, x2) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2; \
+	fmovd	%x2, %f4;
+#define FREG_MOVE_4(x0, x1, x2, x3) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2; \
+	fmovd	%x2, %f4; \
+	fmovd	%x3, %f6;
+#define FREG_MOVE_5(x0, x1, x2, x3, x4) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2; \
+	fmovd	%x2, %f4; \
+	fmovd	%x3, %f6; \
+	fmovd	%x4, %f8;
+#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2; \
+	fmovd	%x2, %f4; \
+	fmovd	%x3, %f6; \
+	fmovd	%x4, %f8; \
+	fmovd	%x5, %f10;
+#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2; \
+	fmovd	%x2, %f4; \
+	fmovd	%x3, %f6; \
+	fmovd	%x4, %f8; \
+	fmovd	%x5, %f10; \
+	fmovd	%x6, %f12;
+#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
+	fmovd	%x0, %f0; \
+	fmovd	%x1, %f2; \
+	fmovd	%x2, %f4; \
+	fmovd	%x3, %f6; \
+	fmovd	%x4, %f8; \
+	fmovd	%x5, %f10; \
+	fmovd	%x6, %f12; \
+	fmovd	%x7, %f14;
+#define FREG_LOAD_1(base, x0) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0))
+#define FREG_LOAD_2(base, x0, x1) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1));
+#define FREG_LOAD_3(base, x0, x1, x2) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2));
+#define FREG_LOAD_4(base, x0, x1, x2, x3) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3));
+#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3)); \
+	EX_LD(LOAD(ldd, base + 0x20, %x4));
+#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3)); \
+	EX_LD(LOAD(ldd, base + 0x20, %x4)); \
+	EX_LD(LOAD(ldd, base + 0x28, %x5));
+#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
+	EX_LD(LOAD(ldd, base + 0x00, %x0)); \
+	EX_LD(LOAD(ldd, base + 0x08, %x1)); \
+	EX_LD(LOAD(ldd, base + 0x10, %x2)); \
+	EX_LD(LOAD(ldd, base + 0x18, %x3)); \
+	EX_LD(LOAD(ldd, base + 0x20, %x4)); \
+	EX_LD(LOAD(ldd, base + 0x28, %x5)); \
+	EX_LD(LOAD(ldd, base + 0x30, %x6));
+
+	.register	%g2,#scratch
+	.register	%g3,#scratch
+
+	.text
+	.align	64
+
+	.globl	FUNC_NAME
+	.type	FUNC_NAME,#function
+FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+	srlx	%o2, 31, %g2
+	cmp	%g2, 0
+	tne	%xcc, 5
+	PREAMBLE
+	mov	%o0, GLOBAL_SPARE
+	cmp	%o2, 0
+	be,pn	%XCC, 85f
+	or	%o0, %o1, %o3
+	cmp	%o2, 16
+	blu,a,pn %XCC, 80f
+	or	%o3, %o2, %o3
+
+	/* 2 blocks (128 bytes) is the minimum we can do the block
+	 * copy with.  We need to ensure that we'll iterate at least
+	 * once in the block copy loop.  At worst we'll need to align
+	 * the destination to a 64-byte boundary which can chew up
+	 * to (64 - 1) bytes from the length before we perform the
+	 * block copy loop.
+	 *
+	 * However, the cut-off point, performance wise, is around
+	 * 4 64-byte blocks.
+	 */
+	cmp	%o2, (4 * 64)
+	blu,pt	%XCC, 75f
+	andcc	%o3, 0x7, %g0
+
+	/* %o0:	dst
+	 * %o1:	src
+	 * %o2:	len  (known to be >= 128)
+	 *
+	 * The block copy loops can use %o4, %g2, %g3 as
+	 * temporaries while copying the data.  %o5 must
+	 * be preserved between VISEntryHalf and VISExitHalf
+	 */
+
+	LOAD(prefetch, %o1 + 0x000, #one_read)
+	LOAD(prefetch, %o1 + 0x040, #one_read)
+	LOAD(prefetch, %o1 + 0x080, #one_read)
+
+	/* Align destination on 64-byte boundary.  */
+	andcc	%o0, (64 - 1), %o4
+	be,pt	%XCC, 2f
+	sub	%o4, 64, %o4
+	sub	%g0, %o4, %o4	! bytes to align dst
+	sub	%o2, %o4, %o2
+1:	subcc	%o4, 1, %o4
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o0))
+	add	%o1, 1, %o1
+	bne,pt	%XCC, 1b
+	add	%o0, 1, %o0
+
+2:
+	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
+	 * o5 from here until we hit VISExitHalf.
+	 */
+	VISEntryHalf
+
+	alignaddr	%o1, %g0, %g0
+
+	add	%o1, (64 - 1), %o4
+	andn	%o4, (64 - 1), %o4
+	andn	%o2, (64 - 1), %g1
+	sub	%o2, %g1, %o2
+
+	and	%o1, (64 - 1), %g2
+	add	%o1, %g1, %o1
+	sub	%o0, %o4, %g3
+	brz,pt	%g2, 190f
+	cmp	%g2, 32
+	blu,a	5f
+	cmp	%g2, 16
+	cmp	%g2, 48
+	blu,a	4f
+	cmp	%g2, 40
+	cmp	%g2, 56
+	blu	170f
+	nop
+	ba,a,pt	%xcc, 180f
+
+4:	/* 32 <= low bits < 48 */
+	blu	150f
+	nop
+	ba,a,pt	%xcc, 160f
+5:	/* 0 < low bits < 32 */
+	blu,a	6f
+	cmp	%g2, 8
+	cmp	%g2, 24
+	blu	130f
+	nop
+	ba,a,pt	%xcc, 140f
+6:	/* 0 < low bits < 16 */
+	bgeu	120f
+	nop
+	/* fall through for 0 < low bits < 8 */
+110:	sub	%o4, 64, %g2
+	EX_LD(LOAD_BLK(%g2, %f0))
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+120:	sub	%o4, 56, %g2
+	FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+130:	sub	%o4, 48, %g2
+	FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+140:	sub	%o4, 40, %g2
+	FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_5(f22, f24, f26, f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+150:	sub	%o4, 32, %g2
+	FREG_LOAD_4(%g2, f0, f2, f4, f6)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_4(f24, f26, f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+160:	sub	%o4, 24, %g2
+	FREG_LOAD_3(%g2, f0, f2, f4)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_3(f26, f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+170:	sub	%o4, 16, %g2
+	FREG_LOAD_2(%g2, f0, f2)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_2(f28, f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+180:	sub	%o4, 8, %g2
+	FREG_LOAD_1(%g2, f0)
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	EX_LD(LOAD_BLK(%o4, %f16))
+	FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	FREG_MOVE_1(f30)
+	subcc	%g1, 64, %g1
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+	ba,pt	%xcc, 195f
+	nop
+
+190:
+1:	EX_ST(STORE_INIT(%g0, %o4 + %g3))
+	subcc	%g1, 64, %g1
+	EX_LD(LOAD_BLK(%o4, %f0))
+	EX_ST(STORE_BLK(%f0, %o4 + %g3))
+	add	%o4, 64, %o4
+	bne,pt	%xcc, 1b
+	LOAD(prefetch, %o4 + 64, #one_read)
+
+195:
+	add	%o4, %g3, %o0
+	membar	#Sync
+
+	VISExitHalf
+
+	/* %o2 contains any final bytes still needed to be copied
+	 * over. If anything is left, we copy it one byte at a time.
+	 */
+	brz,pt	%o2, 85f
+	sub	%o0, %o1, %o3
+	ba,a,pt	%XCC, 90f
+
+	.align	64
+75: /* 16 < len <= 64 */
+	bne,pn	%XCC, 75f
+	sub	%o0, %o1, %o3
+
+72:
+	andn	%o2, 0xf, %o4
+	and	%o2, 0xf, %o2
+1:	subcc	%o4, 0x10, %o4
+	EX_LD(LOAD(ldx, %o1, %o5))
+	add	%o1, 0x08, %o1
+	EX_LD(LOAD(ldx, %o1, %g1))
+	sub	%o1, 0x08, %o1
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add	%o1, 0x8, %o1
+	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	bgu,pt	%XCC, 1b
+	add	%o1, 0x8, %o1
+73:	andcc	%o2, 0x8, %g0
+	be,pt	%XCC, 1f
+	nop
+	sub	%o2, 0x8, %o2
+	EX_LD(LOAD(ldx, %o1, %o5))
+	EX_ST(STORE(stx, %o5, %o1 + %o3))
+	add	%o1, 0x8, %o1
+1:	andcc	%o2, 0x4, %g0
+	be,pt	%XCC, 1f
+	nop
+	sub	%o2, 0x4, %o2
+	EX_LD(LOAD(lduw, %o1, %o5))
+	EX_ST(STORE(stw, %o5, %o1 + %o3))
+	add	%o1, 0x4, %o1
+1:	cmp	%o2, 0
+	be,pt	%XCC, 85f
+	nop
+	ba,pt	%xcc, 90f
+	nop
+
+75:
+	andcc	%o0, 0x7, %g1
+	sub	%g1, 0x8, %g1
+	be,pn	%icc, 2f
+	sub	%g0, %g1, %g1
+	sub	%o2, %g1, %o2
+
+1:	subcc	%g1, 1, %g1
+	EX_LD(LOAD(ldub, %o1, %o5))
+	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	bgu,pt	%icc, 1b
+	add	%o1, 1, %o1
+
+2:	add	%o1, %o3, %o0
+	andcc	%o1, 0x7, %g1
+	bne,pt	%icc, 8f
+	sll	%g1, 3, %g1
+
+	cmp	%o2, 16
+	bgeu,pt	%icc, 72b
+	nop
+	ba,a,pt	%xcc, 73b
+
+8:	mov	64, %o3
+	andn	%o1, 0x7, %o1
+	EX_LD(LOAD(ldx, %o1, %g2))
+	sub	%o3, %g1, %o3
+	andn	%o2, 0x7, %o4
+	sllx	%g2, %g1, %g2
+1:	add	%o1, 0x8, %o1
+	EX_LD(LOAD(ldx, %o1, %g3))
+	subcc	%o4, 0x8, %o4
+	srlx	%g3, %o3, %o5
+	or	%o5, %g2, %o5
+	EX_ST(STORE(stx, %o5, %o0))
+	add	%o0, 0x8, %o0
+	bgu,pt	%icc, 1b
+	sllx	%g3, %g1, %g2
+
+	srl	%g1, 3, %g1
+	andcc	%o2, 0x7, %o2
+	be,pn	%icc, 85f
+	add	%o1, %g1, %o1
+	ba,pt	%xcc, 90f
+	sub	%o0, %o1, %o3
+
+	.align	64
+80: /* 0 < len <= 16 */
+	andcc	%o3, 0x3, %g0
+	bne,pn	%XCC, 90f
+	sub	%o0, %o1, %o3
+
+1:
+	subcc	%o2, 4, %o2
+	EX_LD(LOAD(lduw, %o1, %g1))
+	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	bgu,pt	%XCC, 1b
+	add	%o1, 4, %o1
+
+85:	retl
+	mov	EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.align	32
+90:
+	subcc	%o2, 1, %o2
+	EX_LD(LOAD(ldub, %o1, %g1))
+	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	bgu,pt	%XCC, 90b
+	add	%o1, 1, %o1
+	retl
+	mov	EX_RETVAL(GLOBAL_SPARE), %o0
+
+	.size	FUNC_NAME, .-FUNC_NAME
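The numbered blocks 110-180 above all perform the same job for different source offsets: each 64-byte output block is stitched together by faligndata from registers left over from the previous iteration plus one fresh block load (NGmemcpy below does the equivalent with integer shifts in its MIX_THREE_WORDS macro). A scalar C model of producing one aligned output word from two adjacent aligned input words, illustrative only and assuming a misalignment of 1-7 bytes (offset 0 takes the direct path at label 190):

	#include <stdint.h>

	static uint64_t align_pair(uint64_t lo, uint64_t hi, unsigned off)
	{
		unsigned post = off * 8;	/* bits to discard from lo */
		unsigned pre  = 64 - post;	/* bits to take from hi */

		/* SPARC is big-endian: earlier bytes are the high bits */
		return (lo << post) | (hi >> pre);
	}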
diff --git a/arch/sparc64/lib/NG2page.S b/arch/sparc64/lib/NG2page.S
new file mode 100644
index 00000000000..73b6b7c72cb
--- /dev/null
+++ b/arch/sparc64/lib/NG2page.S
@@ -0,0 +1,61 @@
+/* NG2page.S: Niagara-2 optimized clear and copy page.
+ *
+ * Copyright (C) 2007 (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/visasm.h>
+
+	.text
+	.align	32
+
+	/* This is heavily simplified from the sun4u variants
+	 * because Niagara-2 does not have any D-cache aliasing issues.
+	 */
+NG2copy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
+	prefetch	[%o1 + 0x00], #one_read
+	prefetch	[%o1 + 0x40], #one_read
+	VISEntryHalf
+	set		PAGE_SIZE, %g7
+	sub		%o0, %o1, %g3
+1:	stxa		%g0, [%o1 + %g3] ASI_BLK_INIT_QUAD_LDD_P
+	subcc		%g7, 64, %g7
+	ldda		[%o1] ASI_BLK_P, %f0
+	stda		%f0, [%o1 + %g3] ASI_BLK_P
+	add		%o1, 64, %o1
+	bne,pt		%xcc, 1b
+	prefetch	[%o1 + 0x40], #one_read
+	membar		#Sync
+	VISExitHalf
+	retl
+	nop
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara2_patch_pageops
+	.type	niagara2_patch_pageops,#function
+niagara2_patch_pageops:
+	NG_DO_PATCH(copy_user_page, NG2copy_user_page)
+	NG_DO_PATCH(_clear_page, NGclear_page)
+	NG_DO_PATCH(clear_user_page, NGclear_user_page)
+	retl
+	nop
+	.size	niagara2_patch_pageops,.-niagara2_patch_pageops
diff --git a/arch/sparc64/lib/NG2patch.S b/arch/sparc64/lib/NG2patch.S
new file mode 100644
index 00000000000..28c36f06a6d
--- /dev/null
+++ b/arch/sparc64/lib/NG2patch.S
@@ -0,0 +1,33 @@
+/* NG2patch.S: Patch Ultra-I routines with Niagara-2 variant.
+ *
+ * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
+ */
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define NG_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1; \
+	or	%g1, %lo(NEW), %g1; \
+	sethi	%hi(OLD), %g2; \
+	or	%g2, %lo(OLD), %g2; \
+	sub	%g1, %g2, %g1; \
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	sll	%g1, 11, %g1; \
+	srl	%g1, 11 + 2, %g1; \
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3; \
+	stw	%g3, [%g2]; \
+	sethi	%hi(NOP), %g3; \
+	or	%g3, %lo(NOP), %g3; \
+	stw	%g3, [%g2 + 0x4]; \
+	flush	%g2;
+
+	.globl	niagara2_patch_copyops
+	.type	niagara2_patch_copyops,#function
+niagara2_patch_copyops:
+	NG_DO_PATCH(memcpy, NG2memcpy)
+	NG_DO_PATCH(___copy_from_user, NG2copy_from_user)
+	NG_DO_PATCH(___copy_to_user, NG2copy_to_user)
+	retl
+	nop
+	.size	niagara2_patch_copyops,.-niagara2_patch_copyops
diff --git a/arch/sparc64/lib/NGcopy_from_user.S b/arch/sparc64/lib/NGcopy_from_user.S
index 2d93456f76d..e7f433f71b4 100644
--- a/arch/sparc64/lib/NGcopy_from_user.S
+++ b/arch/sparc64/lib/NGcopy_from_user.S
@@ -1,6 +1,6 @@
 /* NGcopy_from_user.S: Niagara optimized copy from userspace.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #define EX_LD(x)		\
@@ -8,8 +8,8 @@
 	.section .fixup;	\
 	.align 4;		\
 99:	wr	%g0, ASI_AIUS, %asi;\
-	retl;			\
-	mov	1, %o0;		\
+	ret;			\
+	restore %g0, 1, %o0;	\
 	.section __ex_table,"a";\
 	.align 4;		\
 	.word 98b, 99b;		\
@@ -24,7 +24,7 @@
 #define LOAD(type,addr,dest)	type##a [addr] ASI_AIUS, dest
 #define LOAD_TWIN(addr_reg,dest0,dest1)	\
 	ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
-#define EX_RETVAL(x)	0
+#define EX_RETVAL(x)	%g0
 
 #ifdef __KERNEL__
 #define PREAMBLE					\
diff --git a/arch/sparc64/lib/NGcopy_to_user.S b/arch/sparc64/lib/NGcopy_to_user.S
index 34112d5054e..6ea01c5532a 100644
--- a/arch/sparc64/lib/NGcopy_to_user.S
+++ b/arch/sparc64/lib/NGcopy_to_user.S
@@ -1,6 +1,6 @@
 /* NGcopy_to_user.S: Niagara optimized copy to userspace.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #define EX_ST(x)		\
@@ -8,8 +8,8 @@
 	.section .fixup;	\
 	.align 4;		\
 99:	wr	%g0, ASI_AIUS, %asi;\
-	retl;			\
-	mov	1, %o0;		\
+	ret;			\
+	restore %g0, 1, %o0;	\
 	.section __ex_table,"a";\
 	.align 4;		\
 	.word 98b, 99b;		\
@@ -23,7 +23,7 @@
 #define FUNC_NAME	NGcopy_to_user
 #define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
 #define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_AIUS
-#define EX_RETVAL(x)	0
+#define EX_RETVAL(x)	%g0
 
 #ifdef __KERNEL__
 	/* Writing to %asi is _expensive_ so we hardcode it.
diff --git a/arch/sparc64/lib/NGmemcpy.S b/arch/sparc64/lib/NGmemcpy.S
index 66063a9a66b..96a14caf696 100644
--- a/arch/sparc64/lib/NGmemcpy.S
+++ b/arch/sparc64/lib/NGmemcpy.S
@@ -1,6 +1,6 @@
 /* NGmemcpy.S: Niagara optimized memcpy.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #ifdef __KERNEL__
@@ -16,6 +16,12 @@
 	wr	%g0, ASI_PNF, %asi
 #endif
 
+#ifdef __sparc_v9__
+#define SAVE_AMOUNT	128
+#else
+#define SAVE_AMOUNT	64
+#endif
+
 #ifndef STORE_ASI
 #define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
 #endif
@@ -50,7 +56,11 @@
 #endif
 
 #ifndef STORE_INIT
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
 #define STORE_INIT(src,addr)	stxa src, [addr] %asi
+#else
+#define STORE_INIT(src,addr)	stx src, [addr + 0x00]
+#endif
 #endif
 
 #ifndef FUNC_NAME
@@ -73,18 +83,19 @@
 
 	.globl	FUNC_NAME
 	.type	FUNC_NAME,#function
-FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
-	srlx	%o2, 31, %g2
+FUNC_NAME:	/* %i0=dst, %i1=src, %i2=len */
+	PREAMBLE
+	save	%sp, -SAVE_AMOUNT, %sp
+	srlx	%i2, 31, %g2
 	cmp	%g2, 0
 	tne	%xcc, 5
-	PREAMBLE
-	mov	%o0, GLOBAL_SPARE
-	cmp	%o2, 0
+	mov	%i0, %o0
+	cmp	%i2, 0
 	be,pn	%XCC, 85f
-	or	%o0, %o1, %o3
-	cmp	%o2, 16
+	or	%o0, %i1, %i3
+	cmp	%i2, 16
 	blu,a,pn %XCC, 80f
-	or	%o3, %o2, %o3
+	or	%i3, %i2, %i3
 
 	/* 2 blocks (128 bytes) is the minimum we can do the block
 	 * copy with.  We need to ensure that we'll iterate at least
@@ -93,31 +104,31 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	 * to (64 - 1) bytes from the length before we perform the
 	 * block copy loop.
 	 */
-	cmp	%o2, (2 * 64)
+	cmp	%i2, (2 * 64)
 	blu,pt	%XCC, 70f
-	andcc	%o3, 0x7, %g0
+	andcc	%i3, 0x7, %g0
 
 	/* %o0:	dst
-	 * %o1:	src
-	 * %o2:	len  (known to be >= 128)
+	 * %i1:	src
+	 * %i2:	len  (known to be >= 128)
 	 *
-	 * The block copy loops will use %o4/%o5,%g2/%g3 as
+	 * The block copy loops will use %i4/%i5,%g2/%g3 as
 	 * temporaries while copying the data.
 	 */
 
-	LOAD(prefetch, %o1, #one_read)
+	LOAD(prefetch, %i1, #one_read)
 	wr	%g0, STORE_ASI, %asi
 
 	/* Align destination on 64-byte boundary.  */
-	andcc	%o0, (64 - 1), %o4
+	andcc	%o0, (64 - 1), %i4
 	be,pt	%XCC, 2f
-	sub	%o4, 64, %o4
-	sub	%g0, %o4, %o4	! bytes to align dst
-	sub	%o2, %o4, %o2
-1:	subcc	%o4, 1, %o4
-	EX_LD(LOAD(ldub, %o1, %g1))
+	sub	%i4, 64, %i4
+	sub	%g0, %i4, %i4	! bytes to align dst
+	sub	%i2, %i4, %i2
+1:	subcc	%i4, 1, %i4
+	EX_LD(LOAD(ldub, %i1, %g1))
 	EX_ST(STORE(stb, %g1, %o0))
-	add	%o1, 1, %o1
+	add	%i1, 1, %i1
 	bne,pt	%XCC, 1b
 	add	%o0, 1, %o0
@@ -136,111 +147,155 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	 * aligned store data at a time, this is easy to ensure.
 	 */
 2:
-	andcc	%o1, (16 - 1), %o4
-	andn	%o2, (64 - 1), %g1	! block copy loop iterator
-	sub	%o2, %g1, %o2		! final sub-block copy bytes
+	andcc	%i1, (16 - 1), %i4
+	andn	%i2, (64 - 1), %g1	! block copy loop iterator
 	be,pt	%XCC, 50f
-	cmp	%o4, 8
-	be,a,pt	%XCC, 10f
-	sub	%o1, 0x8, %o1
+	sub	%i2, %g1, %i2		! final sub-block copy bytes
+
+	cmp	%i4, 8
+	be,pt	%XCC, 10f
+	sub	%i1, %i4, %i1
 
 	/* Neither 8-byte nor 16-byte aligned, shift and mask.  */
-	mov	%g1, %o4
-	and	%o1, 0x7, %g1
-	sll	%g1, 3, %g1
-	mov	64, %o3
-	andn	%o1, 0x7, %o1
-	EX_LD(LOAD(ldx, %o1, %g2))
-	sub	%o3, %g1, %o3
-	sllx	%g2, %g1, %g2
+	and	%i4, 0x7, GLOBAL_SPARE
+	sll	GLOBAL_SPARE, 3, GLOBAL_SPARE
+	mov	64, %i5
+	EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+	sub	%i5, GLOBAL_SPARE, %i5
+	mov	16, %o4
+	mov	32, %o5
+	mov	48, %o7
+	mov	64, %i3
+
+	bg,pn	%XCC, 9f
+	nop
 
-#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\
-	EX_LD(LOAD(ldx, SRC, TMP1)); \
-	srlx	TMP1, PRE_SHIFT, TMP2; \
-	or	TMP2, PRE_VAL, TMP2; \
-	EX_ST(STORE_INIT(TMP2, DST)); \
-	sllx	TMP1, POST_SHIFT, PRE_VAL;
-
-1:	add	%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00)
-	add	%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08)
-	add	%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10)
-	add	%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18)
-	add	%o1, 32, %o1
-	LOAD(prefetch, %o1, #one_read)
-	sub	%o1, 32 - 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20)
-	add	%o1, 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28)
-	add	%o1, 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30)
-	add	%o1, 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38)
-	subcc	%o4, 64, %o4
-	bne,pt	%XCC, 1b
+#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
+	sllx	WORD1, POST_SHIFT, WORD1; \
+	srlx	WORD2, PRE_SHIFT, TMP; \
+	sllx	WORD2, POST_SHIFT, WORD2; \
+	or	WORD1, TMP, WORD1; \
+	srlx	WORD3, PRE_SHIFT, TMP; \
+	or	WORD2, TMP, WORD2;
+
+8:	EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+	MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+	LOAD(prefetch, %i1 + %i3, #one_read)
+
+	EX_ST(STORE_INIT(%g2, %o0 + 0x00))
+	EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+
+	EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+	MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%g2, %o0 + 0x20))
+	EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+
+	EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+	add	%i1, 64, %i1
+	MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+
+	subcc	%g1, 64, %g1
+	bne,pt	%XCC, 8b
 	add	%o0, 64, %o0
 
-#undef SWIVEL_ONE_DWORD
+	ba,pt	%XCC, 60f
+	add	%i1, %i4, %i1
+
+9:	EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+	MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+	LOAD(prefetch, %i1 + %i3, #one_read)
+
+	EX_ST(STORE_INIT(%g3, %o0 + 0x00))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+
+	EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+	MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o3, %o0 + 0x10))
+	EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%g3, %o0 + 0x20))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+
+	EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+	add	%i1, 64, %i1
+	MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o3, %o0 + 0x30))
+	EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+
+	subcc	%g1, 64, %g1
+	bne,pt	%XCC, 9b
+	add	%o0, 64, %o0
 
-	srl	%g1, 3, %g1
 	ba,pt	%XCC, 60f
-	add	%o1, %g1, %o1
+	add	%i1, %i4, %i1
 
 10:	/* Destination is 64-byte aligned, source was only 8-byte
 	 * aligned but it has been subtracted by 8 and we perform
 	 * one twin load ahead, then add 8 back into source when
 	 * we finish the loop.
 	 */
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-1:	add	%o1, 16, %o1
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add	%o1, 16 + 32, %o1
-	LOAD(prefetch, %o1, #one_read)
-	sub	%o1, 32, %o1
+	EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+	mov	16, %o7
+	mov	32, %g2
+	mov	48, %g3
+	mov	64, %o1
+1:	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	LOAD(prefetch, %i1 + %o1, #one_read)
 	EX_ST(STORE_INIT(%o5, %o0 + 0x00))	! initializes cache line
-	EX_ST(STORE_INIT(%g2, %o0 + 0x08))
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	add	%o1, 16, %o1
-	EX_ST(STORE_INIT(%g3, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+	EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x10))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x18))
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add	%o1, 16, %o1
+	EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x20))
-	EX_ST(STORE_INIT(%g2, %o0 + 0x28))
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	EX_ST(STORE_INIT(%g3, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+	EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+	add	%i1, 64, %i1
+	EX_ST(STORE_INIT(%o3, %o0 + 0x30))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x38))
 	subcc	%g1, 64, %g1
 	bne,pt	%XCC, 1b
 	add	%o0, 64, %o0
 
 	ba,pt	%XCC, 60f
-	add	%o1, 0x8, %o1
+	add	%i1, 0x8, %i1
 
 50:	/* Destination is 64-byte aligned, and source is 16-byte
 	 * aligned.
 	 */
-1:	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	add	%o1, 16, %o1
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add	%o1, 16 + 32, %o1
-	LOAD(prefetch, %o1, #one_read)
-	sub	%o1, 32, %o1
+	mov	16, %o7
+	mov	32, %g2
+	mov	48, %g3
+	mov	64, %o1
+1:	EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	LOAD(prefetch, %i1 + %o1, #one_read)
 	EX_ST(STORE_INIT(%o4, %o0 + 0x00))	! initializes cache line
 	EX_ST(STORE_INIT(%o5, %o0 + 0x08))
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	add	%o1, 16, %o1
-	EX_ST(STORE_INIT(%g2, %o0 + 0x10))
-	EX_ST(STORE_INIT(%g3, %o0 + 0x18))
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add	%o1, 16, %o1
+	EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+	EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+	add	%i1, 64, %i1
 	EX_ST(STORE_INIT(%o4, %o0 + 0x20))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x28))
-	EX_ST(STORE_INIT(%g2, %o0 + 0x30))
-	EX_ST(STORE_INIT(%g3, %o0 + 0x38))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x38))
 	subcc	%g1, 64, %g1
 	bne,pt	%XCC, 1b
 	add	%o0, 64, %o0
@@ -249,47 +304,47 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 
 60:
 	membar	#Sync
 
-	/* %o2 contains any final bytes still needed to be copied
+	/* %i2 contains any final bytes still needed to be copied
 	 * over. If anything is left, we copy it one byte at a time.
 	 */
-	RESTORE_ASI(%o3)
-	brz,pt	%o2, 85f
-	sub	%o0, %o1, %o3
+	RESTORE_ASI(%i3)
+	brz,pt	%i2, 85f
+	sub	%o0, %i1, %i3
 	ba,a,pt	%XCC, 90f
 
 	.align	64
 70: /* 16 < len <= 64 */
 	bne,pn	%XCC, 75f
-	sub	%o0, %o1, %o3
+	sub	%o0, %i1, %i3
 
 72:
-	andn	%o2, 0xf, %o4
-	and	%o2, 0xf, %o2
-1:	subcc	%o4, 0x10, %o4
-	EX_LD(LOAD(ldx, %o1, %o5))
-	add	%o1, 0x08, %o1
-	EX_LD(LOAD(ldx, %o1, %g1))
-	sub	%o1, 0x08, %o1
-	EX_ST(STORE(stx, %o5, %o1 + %o3))
-	add	%o1, 0x8, %o1
-	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	andn	%i2, 0xf, %i4
+	and	%i2, 0xf, %i2
+1:	subcc	%i4, 0x10, %i4
+	EX_LD(LOAD(ldx, %i1, %o4))
+	add	%i1, 0x08, %i1
+	EX_LD(LOAD(ldx, %i1, %g1))
+	sub	%i1, 0x08, %i1
+	EX_ST(STORE(stx, %o4, %i1 + %i3))
+	add	%i1, 0x8, %i1
+	EX_ST(STORE(stx, %g1, %i1 + %i3))
 	bgu,pt	%XCC, 1b
-	add	%o1, 0x8, %o1
-73:	andcc	%o2, 0x8, %g0
+	add	%i1, 0x8, %i1
+73:	andcc	%i2, 0x8, %g0
 	be,pt	%XCC, 1f
 	nop
-	sub	%o2, 0x8, %o2
-	EX_LD(LOAD(ldx, %o1, %o5))
-	EX_ST(STORE(stx, %o5, %o1 + %o3))
-	add	%o1, 0x8, %o1
-1:	andcc	%o2, 0x4, %g0
+	sub	%i2, 0x8, %i2
+	EX_LD(LOAD(ldx, %i1, %o4))
+	EX_ST(STORE(stx, %o4, %i1 + %i3))
+	add	%i1, 0x8, %i1
+1:	andcc	%i2, 0x4, %g0
 	be,pt	%XCC, 1f
 	nop
-	sub	%o2, 0x4, %o2
-	EX_LD(LOAD(lduw, %o1, %o5))
-	EX_ST(STORE(stw, %o5, %o1 + %o3))
-	add	%o1, 0x4, %o1
-1:	cmp	%o2, 0
+	sub	%i2, 0x4, %i2
+	EX_LD(LOAD(lduw, %i1, %i5))
+	EX_ST(STORE(stw, %i5, %i1 + %i3))
+	add	%i1, 0x4, %i1
+1:	cmp	%i2, 0
 	be,pt	%XCC, 85f
 	nop
 	ba,pt	%xcc, 90f
@@ -300,71 +355,71 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	sub	%g1, 0x8, %g1
 	be,pn	%icc, 2f
 	sub	%g0, %g1, %g1
-	sub	%o2, %g1, %o2
+	sub	%i2, %g1, %i2
 
 1:	subcc	%g1, 1, %g1
-	EX_LD(LOAD(ldub, %o1, %o5))
-	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	EX_LD(LOAD(ldub, %i1, %i5))
+	EX_ST(STORE(stb, %i5, %i1 + %i3))
 	bgu,pt	%icc, 1b
-	add	%o1, 1, %o1
+	add	%i1, 1, %i1
 
-2:	add	%o1, %o3, %o0
-	andcc	%o1, 0x7, %g1
+2:	add	%i1, %i3, %o0
+	andcc	%i1, 0x7, %g1
 	bne,pt	%icc, 8f
 	sll	%g1, 3, %g1
 
-	cmp	%o2, 16
+	cmp	%i2, 16
 	bgeu,pt	%icc, 72b
 	nop
 	ba,a,pt	%xcc, 73b
 
-8:	mov	64, %o3
-	andn	%o1, 0x7, %o1
-	EX_LD(LOAD(ldx, %o1, %g2))
-	sub	%o3, %g1, %o3
-	andn	%o2, 0x7, %o4
+8:	mov	64, %i3
+	andn	%i1, 0x7, %i1
+	EX_LD(LOAD(ldx, %i1, %g2))
+	sub	%i3, %g1, %i3
+	andn	%i2, 0x7, %i4
 	sllx	%g2, %g1, %g2
-1:	add	%o1, 0x8, %o1
-	EX_LD(LOAD(ldx, %o1, %g3))
-	subcc	%o4, 0x8, %o4
-	srlx	%g3, %o3, %o5
-	or	%o5, %g2, %o5
-	EX_ST(STORE(stx, %o5, %o0))
+1:	add	%i1, 0x8, %i1
+	EX_LD(LOAD(ldx, %i1, %g3))
+	subcc	%i4, 0x8, %i4
+	srlx	%g3, %i3, %i5
+	or	%i5, %g2, %i5
+	EX_ST(STORE(stx, %i5, %o0))
 	add	%o0, 0x8, %o0
 	bgu,pt	%icc, 1b
 	sllx	%g3, %g1, %g2
 
 	srl	%g1, 3, %g1
-	andcc	%o2, 0x7, %o2
+	andcc	%i2, 0x7, %i2
 	be,pn	%icc, 85f
-	add	%o1, %g1, %o1
+	add	%i1, %g1, %i1
 	ba,pt	%xcc, 90f
-	sub	%o0, %o1, %o3
+	sub	%o0, %i1, %i3
 
 	.align	64
 80: /* 0 < len <= 16 */
-	andcc	%o3, 0x3, %g0
+	andcc	%i3, 0x3, %g0
 	bne,pn	%XCC, 90f
-	sub	%o0, %o1, %o3
+	sub	%o0, %i1, %i3
 
 1:
-	subcc	%o2, 4, %o2
-	EX_LD(LOAD(lduw, %o1, %g1))
-	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	subcc	%i2, 4, %i2
+	EX_LD(LOAD(lduw, %i1, %g1))
+	EX_ST(STORE(stw, %g1, %i1 + %i3))
 	bgu,pt	%XCC, 1b
-	add	%o1, 4, %o1
+	add	%i1, 4, %i1
 
-85:	retl
-	mov	EX_RETVAL(GLOBAL_SPARE), %o0
+85:	ret
+	restore	EX_RETVAL(%i0), %g0, %o0
 
 	.align	32
 90:
-	subcc	%o2, 1, %o2
-	EX_LD(LOAD(ldub, %o1, %g1))
-	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	subcc	%i2, 1, %i2
+	EX_LD(LOAD(ldub, %i1, %g1))
+	EX_ST(STORE(stb, %g1, %i1 + %i3))
 	bgu,pt	%XCC, 90b
-	add	%o1, 1, %o1
-	retl
-	mov	EX_RETVAL(GLOBAL_SPARE), %o0
+	add	%i1, 1, %i1
+	ret
+	restore	EX_RETVAL(%i0), %g0, %o0
 
 	.size	FUNC_NAME, .-FUNC_NAME
diff --git a/arch/sparc64/lib/NGpage.S b/arch/sparc64/lib/NGpage.S
index 8ce3a0c9c53..428920de05b 100644
--- a/arch/sparc64/lib/NGpage.S
+++ b/arch/sparc64/lib/NGpage.S
@@ -45,6 +45,7 @@ NGcopy_user_page:	/* %o0=dest, %o1=src, %o2=vaddr */
 	retl
 	nop
 
+	.globl	NGclear_page, NGclear_user_page
 NGclear_page:		/* %o0=dest */
 NGclear_user_page:	/* %o0=dest, %o1=vaddr */
 	mov	8, %g1
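None of the new routines are reached through their own symbols at runtime; generic_patch_*() and niagara2_patch_*() run once during boot, after CPU identification, and redirect the default Ultra-I entry points in place. A hypothetical C-level view of that selection; the enum, function, and call structure are invented for the sketch (the real dispatch lives in the sparc64 setup code):

	extern void generic_patch_copyops(void);
	extern void generic_patch_bzero(void);
	extern void generic_patch_pageops(void);
	extern void niagara2_patch_copyops(void);

	enum cpu_kind { CPU_ULTRA1, CPU_SUN4V_GENERIC, CPU_NIAGARA2 };

	static void patch_mem_routines(enum cpu_kind cpu)
	{
		switch (cpu) {
		case CPU_NIAGARA2:
			niagara2_patch_copyops();
			break;
		case CPU_SUN4V_GENERIC:
			generic_patch_copyops();
			generic_patch_bzero();
			generic_patch_pageops();
			break;
		default:
			break;	/* keep the default Ultra-I routines */
		}
	}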