author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-05-05 20:33:10 +1000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-05-05 20:57:12 +1000
commit		f6869e7fe657bd977e72954cd78c5871a6a4f71d (patch)
tree		1e34a4bb36cde4b541d7344599e65b541bb4bee7 /arch/powerpc/lib
parent		5a4e58bc693f04aa650219784e5e339e0db6b902 (diff)
parent		cec4b7eaf09d330e94e8e94133d360e6f1855974 (diff)
Merge remote-tracking branch 'anton/abiv2' into next
This series adds support for building the powerpc 64-bit LE kernel
using the new ABI v2. We already supported running ABI v2 userspace
programs, but this adds support for building the kernel itself using
the new ABI.
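
Most of the churn in this merge is mechanical and follows from two ELFv2 changes. First, functions no longer have a separate function-descriptor symbol, so the `.name` dot symbols disappear and branches like `b .copypage_power7` become `b copypage_power7`. Second, a function that can be entered with a caller's different TOC pointer (for example, from a module) needs a global entry point that recomputes r2, which is what the new _GLOBAL_TOC() macro on memcpy and __copy_tofrom_user provides. Below is a rough sketch of what _GLOBAL_TOC(memcpy) expands to, based on the ppc_asm.h of this era; treat the exact directives and layout as illustrative rather than authoritative:

	.globl	memcpy
memcpy:
0:	addis	r2,r12,(.TOC.-0b)@ha	/* global entry: rebuild the TOC      */
	addi	r2,r2,(.TOC.-0b)@l	/* pointer from our own address (r12) */
	.localentry memcpy,.-memcpy	/* callers sharing our TOC enter here */
	/* ... function body may now use r2-relative (TOC) accesses ... */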
Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--	arch/powerpc/lib/copypage_64.S		|  2
-rw-r--r--	arch/powerpc/lib/copypage_power7.S	| 12
-rw-r--r--	arch/powerpc/lib/copyuser_64.S		|  2
-rw-r--r--	arch/powerpc/lib/copyuser_power7.S	| 32
-rw-r--r--	arch/powerpc/lib/hweight_64.S		|  8
-rw-r--r--	arch/powerpc/lib/mem_64.S		|  4
-rw-r--r--	arch/powerpc/lib/memcpy_64.S		| 10
-rw-r--r--	arch/powerpc/lib/memcpy_power7.S	| 26
8 files changed, 48 insertions, 48 deletions
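
The recurring offset change in the diffs below (48/56/64 bytes off r1 becoming -STACKFRAMESIZE+STK_REG(Rn) off r1) exists because the ELFv2 ABI no longer guarantees a parameter save area in the caller's stack frame: a store to 48(r1) could clobber a frame the caller never allocated. The registers are instead saved in the volatile storage below the stack pointer, which becomes part of the function's own frame once it runs stdu. A worked example of the arithmetic, assuming STACKFRAMESIZE = 256 and STK_REG(R31) = 112 + (31-14)*8 = 248 (values taken from the ELFv1-era ppc_asm.h, shown only to illustrate the pattern):

	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* 248-256 = -8(r1): below the   */
							/* stack pointer, safe in either ABI */
	stdu	r1,-STACKFRAMESIZE(r1)			/* push our frame; that slot is  */
							/* now simply STK_REG(R31)(r1)   */
	/* ... */
	ld	r3,STK_REG(R31)(r1)			/* reload the saved value        */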
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 9f9434a8526..e59c9c2ebe9 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -20,7 +20,7 @@ _GLOBAL(copy_page)
 BEGIN_FTR_SECTION
 	lis	r5,PAGE_SIZE@h
 FTR_SECTION_ELSE
-	b	.copypage_power7
+	b	copypage_power7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
 	ori	r5,r5,PAGE_SIZE@l
 BEGIN_FTR_SECTION
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index 395c594722a..d7dafb3777a 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -56,15 +56,15 @@ _GLOBAL(copypage_power7)
 #ifdef CONFIG_ALTIVEC
 	mflr	r0
-	std	r3,48(r1)
-	std	r4,56(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
 	mtlr	r0
 	li	r0,(PAGE_SIZE/128)
@@ -103,7 +103,7 @@ _GLOBAL(copypage_power7)
 	addi	r3,r3,128
 	bdnz	1b
-	b	.exit_vmx_copy		/* tail call optimise */
+	b	exit_vmx_copy		/* tail call optimise */
 #else
 	li	r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 596a285c075..0860ee46013 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -18,7 +18,7 @@
 #endif
 	.align	7
-_GLOBAL(__copy_tofrom_user)
+_GLOBAL_TOC(__copy_tofrom_user)
 BEGIN_FTR_SECTION
 	nop
 FTR_SECTION_ELSE
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index e8e9c36dc78..c46c876ac96 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -66,7 +66,7 @@
 	ld	r15,STK_REG(R15)(r1)
 	ld	r14,STK_REG(R14)(r1)
 .Ldo_err3:
-	bl	.exit_vmx_usercopy
+	bl	exit_vmx_usercopy
 	ld	r0,STACKFRAMESIZE+16(r1)
 	mtlr	r0
 	b	.Lexit
@@ -85,9 +85,9 @@
 .Lexit:
 	addi	r1,r1,STACKFRAMESIZE
 .Ldo_err1:
-	ld	r3,48(r1)
-	ld	r4,56(r1)
-	ld	r5,64(r1)
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	ld	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	ld	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	b	__copy_tofrom_user_base
@@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7)
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
-	std	r3,48(r1)
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
-	std	r3,48(r1)
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	blt	.Lshort_copy
 #endif
@@ -295,12 +295,12 @@ err1;	stb	r0,0(r3)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_usercopy
+	bl	enter_vmx_usercopy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
-	ld	r5,STACKFRAMESIZE+64(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 	/*
@@ -514,7 +514,7 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	.exit_vmx_usercopy	/* tail call optimise */
+	b	exit_vmx_usercopy	/* tail call optimise */
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -717,5 +717,5 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	.exit_vmx_usercopy	/* tail call optimise */
+	b	exit_vmx_usercopy	/* tail call optimise */
 #endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 9b96ff2ecd4..19e66001a4f 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -24,7 +24,7 @@
 _GLOBAL(__arch_hweight8)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight8
+	b	__sw_hweight8
 	nop
 	nop
 FTR_SECTION_ELSE
@@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
 _GLOBAL(__arch_hweight16)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight16
+	b	__sw_hweight16
 	nop
 	nop
 	nop
@@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
 _GLOBAL(__arch_hweight32)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight32
+	b	__sw_hweight32
 	nop
 	nop
 	nop
@@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
 _GLOBAL(__arch_hweight64)
 BEGIN_FTR_SECTION
-	b	.__sw_hweight64
+	b	__sw_hweight64
 	nop
 	nop
 	nop
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index f4fcb0bc656..0738f96befb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -79,8 +79,8 @@ _GLOBAL(memset)
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
-	bgt	.backwards_memcpy
-	b	.memcpy
+	bgt	backwards_memcpy
+	b	memcpy
 _GLOBAL(backwards_memcpy)
 	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index dc4ba7953b9..32a06ec395d 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -10,12 +10,12 @@
 #include <asm/ppc_asm.h>
 	.align	7
-_GLOBAL(memcpy)
+_GLOBAL_TOC(memcpy)
 BEGIN_FTR_SECTION
 #ifdef __LITTLE_ENDIAN__
 	cmpdi	cr7,r5,0
 #else
-	std	r3,48(r1)	/* save destination pointer for return value */
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* save destination pointer for return value */
 #endif
 FTR_SECTION_ELSE
 #ifndef SELFTEST
@@ -88,7 +88,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 2:	bf	cr7*4+3,3f
 	lbz	r9,8(r4)
 	stb	r9,0(r3)
-3:	ld	r3,48(r1)	/* return dest pointer */
+3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 .Lsrc_unaligned:
@@ -171,7 +171,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 2:	bf	cr7*4+3,3f
 	rotldi	r9,r9,8
 	stb	r9,0(r3)
-3:	ld	r3,48(r1)	/* return dest pointer */
+3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 .Ldst_unaligned:
@@ -216,6 +216,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 3:	bf	cr7*4+3,4f
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
-4:	ld	r3,48(r1)	/* return dest pointer */
+4:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 #endif
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index e4177dbea6b..2ff5c142f87 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7)
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
-	std	r3,48(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
-	std	r3,48(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blt	.Lshort_copy
 #endif
@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7)
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
-15:	ld	r3,48(r1)
+15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blr
 .Lunwind_stack_nonvmx_copy:
@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7)
 #ifdef CONFIG_ALTIVEC
 .Lvmx_copy:
 	mflr	r0
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
-	ld	r5,STACKFRAMESIZE+64(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 	/*
@@ -447,8 +447,8 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	exit_vmx_copy		/* tail call optimise */
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	exit_vmx_copy		/* tail call optimise */
 #endif /* CONFIG_ALTIVEC */
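
One detail worth calling out in the exit paths above: the /* tail call optimise */ branches use b, not bl, so exit_vmx_copy and exit_vmx_usercopy return straight to the original caller through the untouched link register. Schematically (a restatement of the pattern already in the diff, not new code):

15:	addi	r1,r1,STACKFRAMESIZE			/* pop our stack frame         */
	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* r3 = saved dest pointer,    */
							/* memcpy's return value       */
	b	exit_vmx_copy				/* tail call: no bl, so LR     */
							/* still holds the caller's    */
							/* return address              */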