55 files changed, 902 insertions, 680 deletions
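Most of the churn in this diff comes from letting ppc64 build with the ELFv2 ("ABIv2") ABI on little-endian while keeping ELFv1 working, which is why so many hunks test _CALL_ELF == 2. The core difference is how a function's address is represented. A minimal sketch of the two conventions (not from the patch; opd_entry and code_address are illustrative names):

    #include <stdint.h>

    /* ELFv1 (ABIv1): a function pointer refers to an "official procedure
     * descriptor" in the .opd section, not to the code itself. */
    struct opd_entry {
            uint64_t entry;         /* address of the first instruction */
            uint64_t toc;           /* TOC base the callee expects in r2 */
            uint64_t env;           /* environment pointer, unused by C */
    };

    /* ELFv2 (ABIv2): the pointer is the code address itself; there is no
     * descriptor, and the callee derives its own TOC from r12 instead. */
    static inline uint64_t code_address(void *fn, int is_elfv2)
    {
            return is_elfv2 ? (uint64_t)fn : ((struct opd_entry *)fn)->entry;
    }

This is the distinction ppc_function_entry() below switches on, and it is why the dot-symbol and .opd boilerplate disappears from ppc_asm.h.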
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 31b96942c0a..68cde2f8a12 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -113,8 +113,13 @@ else endif endif -CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc -CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) +CFLAGS-$(CONFIG_PPC64) := -mtraceback=no +ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,-mcall-aixdesc) +AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) +else +CFLAGS-$(CONFIG_PPC64) += -mcall-aixdesc +endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD) @@ -151,7 +156,7 @@ endif CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) KBUILD_CPPFLAGS += -Iarch/$(ARCH) -KBUILD_AFLAGS += -Iarch/$(ARCH) +KBUILD_AFLAGS += -Iarch/$(ARCH) $(AFLAGS-y) KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y) CPP = $(CC) -E $(KBUILD_CFLAGS) diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S index 6636b1d7821..243b8497d58 100644 --- a/arch/powerpc/boot/util.S +++ b/arch/powerpc/boot/util.S @@ -45,7 +45,7 @@ udelay: mfspr r4,SPRN_PVR srwi r4,r4,16 cmpwi 0,r4,1 /* 601 ? */ - bne .udelay_not_601 + bne .Ludelay_not_601 00: li r0,86 /* Instructions / microsecond? */ mtctr r0 10: addi r0,r0,0 /* NOP */ @@ -54,7 +54,7 @@ udelay: bne 00b blr -.udelay_not_601: +.Ludelay_not_601: mulli r4,r3,1000 /* nanoseconds */ /* Change r4 to be the number of ticks using: * (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 97e02f985df..37991e154ef 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -42,15 +42,47 @@ void __patch_exception(int exc, unsigned long addr); } while (0) #endif +#define OP_RT_RA_MASK 0xffff0000UL +#define LIS_R2 0x3c400000UL +#define ADDIS_R2_R12 0x3c4c0000UL +#define ADDI_R2_R2 0x38420000UL + static inline unsigned long ppc_function_entry(void *func) { -#ifdef CONFIG_PPC64 +#if defined(CONFIG_PPC64) +#if defined(_CALL_ELF) && _CALL_ELF == 2 + u32 *insn = func; + + /* + * A PPC64 ABIv2 function may have a local and a global entry + * point. We need to use the local entry point when patching + * functions, so identify and step over the global entry point + * sequence. + * + * The global entry point sequence is always of the form: + * + * addis r2,r12,XXXX + * addi r2,r2,XXXX + * + * A linker optimisation may convert the addis to lis: + * + * lis r2,XXXX + * addi r2,r2,XXXX + */ + if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) || + ((*insn & OP_RT_RA_MASK) == LIS_R2)) && + ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2)) + return (unsigned long)(insn + 2); + else + return (unsigned long)func; +#else /* - * On PPC64 the function pointer actually points to the function's - * descriptor. The first entry in the descriptor is the address - * of the function text. + * On PPC64 ABIv1 the function pointer actually points to the + * function's descriptor. The first entry in the descriptor is the + * address of the function text.
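The OP_RT_RA_MASK constants defined at the top of this hunk fall straight out of the D-form instruction layout: a 6-bit opcode, 5-bit RT, 5-bit RA and a 16-bit immediate, so masking with 0xffff0000 compares everything except the immediate. A quick sketch of the arithmetic (D_FORM is an illustrative macro, not a kernel one):

    #include <stdint.h>

    #define D_FORM(opcd, rt, ra) \
            (((uint32_t)(opcd) << 26) | ((uint32_t)(rt) << 21) | ((uint32_t)(ra) << 16))

    static const uint32_t addis_r2_r12 = D_FORM(15, 2, 12); /* 0x3c4c0000 */
    static const uint32_t addi_r2_r2   = D_FORM(14, 2, 2);  /* 0x38420000 */
    /* lis r2,N is addis r2,0,N, i.e. RA = 0: */
    static const uint32_t lis_r2       = D_FORM(15, 2, 0);  /* 0x3c400000 */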
*/ return ((func_descr_t *)func)->entry; +#endif #else return (unsigned long)func; #endif diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h index b6f5a33b8ee..40014921fff 100644 --- a/arch/powerpc/include/asm/context_tracking.h +++ b/arch/powerpc/include/asm/context_tracking.h @@ -2,9 +2,9 @@ #define _ASM_POWERPC_CONTEXT_TRACKING_H #ifdef CONFIG_CONTEXT_TRACKING -#define SCHEDULE_USER bl .schedule_user +#define SCHEDULE_USER bl schedule_user #else -#define SCHEDULE_USER bl .schedule +#define SCHEDULE_USER bl schedule #endif #endif diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a563d9afd17..a8b52b61043 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h @@ -174,10 +174,10 @@ exc_##label##_book3e: mtlr r16; #define TLB_MISS_STATS_D(name) \ addi r9,r13,MMSTAT_DSTATS+name; \ - bl .tlb_stat_inc; + bl tlb_stat_inc; #define TLB_MISS_STATS_I(name) \ addi r9,r13,MMSTAT_ISTATS+name; \ - bl .tlb_stat_inc; + bl tlb_stat_inc; #define TLB_MISS_STATS_X(name) \ ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \ cmpdi cr2,r8,-1; \ @@ -185,7 +185,7 @@ exc_##label##_book3e: addi r9,r13,MMSTAT_DSTATS+name; \ b 62f; \ 61: addi r9,r13,MMSTAT_ISTATS+name; \ -62: bl .tlb_stat_inc; +62: bl tlb_stat_inc; #define TLB_MISS_STATS_SAVE_INFO \ std r14,EX_TLB_ESR(r12); /* save ESR */ #define TLB_MISS_STATS_SAVE_INFO_BOLTED \ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index aeaa56cd9b5..8f35cd7d59c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -517,7 +517,7 @@ label##_relon_hv: \ #define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11) #define ADD_NVGPRS \ - bl .save_nvgprs + bl save_nvgprs #define RUNLATCH_ON \ BEGIN_FTR_SECTION \ diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index 169d039ed40..e3661872fbe 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h @@ -61,6 +61,7 @@ struct dyn_arch_ftrace { #endif #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) +#if !defined(_CALL_ELF) || _CALL_ELF != 2 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) { @@ -72,6 +73,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name */ return !strcmp(sym + 4, name + 3); } +#endif #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_FTRACE */ diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index f51a5580bfd..e20eb95429a 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h @@ -20,9 +20,9 @@ */ #define TRACE_WITH_FRAME_BUFFER(func) \ mflr r0; \ - stdu r1, -32(r1); \ + stdu r1, -STACK_FRAME_OVERHEAD(r1); \ std r0, 16(r1); \ - stdu r1, -32(r1); \ + stdu r1, -STACK_FRAME_OVERHEAD(r1); \ bl func; \ ld r1, 0(r1); \ ld r1, 0(r1); @@ -36,8 +36,8 @@ * have to call a C function so call a wrapper that saves all the * C-clobbered registers. 
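The switch above from hard-coded 32-byte frames to STACK_FRAME_OVERHEAD, and the STK_GOT/__STK_PARAM redefinitions in ppc_asm.h further down, both track the smaller ELFv2 stack frame header. Roughly, per the 64-bit ELF ABI supplements (the struct names are illustrative):

    /* ELFv1 frame header: 48 bytes before the parameter save area. */
    struct frame_elfv1 {
            unsigned long backchain;   /*  0 */
            unsigned long cr_save;     /*  8 */
            unsigned long lr_save;     /* 16 */
            unsigned long compiler;    /* 24, reserved for the compiler */
            unsigned long linker;      /* 32, reserved for the linker */
            unsigned long toc_save;    /* 40 -> STK_GOT = 40 */
            unsigned long parm[8];     /* 48 -> __STK_PARAM(r3) = 48 */
    };

    /* ELFv2 drops the two reserved doublewords and makes the parameter
     * save area optional, so only 32 bytes are mandatory. */
    struct frame_elfv2 {
            unsigned long backchain;   /*  0 */
            unsigned long cr_save;     /*  8 */
            unsigned long lr_save;     /* 16 */
            unsigned long toc_save;    /* 24 -> STK_GOT = 24 */
            unsigned long parm[8];     /* 32 -> __STK_PARAM(r3) = 32 */
    };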
*/ -#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) -#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) +#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on) +#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off) /* * This is used by assembly code to soft-disable interrupts first and diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index 7b6feab6fd2..af15d4d8d60 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h @@ -30,6 +30,7 @@ #include <linux/ptrace.h> #include <linux/percpu.h> #include <asm/probes.h> +#include <asm/code-patching.h> #define __ARCH_WANT_KPROBES_INSN_SLOT @@ -56,9 +57,9 @@ typedef ppc_opcode_t kprobe_opcode_t; if ((colon = strchr(name, ':')) != NULL) { \ colon++; \ if (*colon != '\0' && *colon != '.') \ - addr = *(kprobe_opcode_t **)addr; \ + addr = (kprobe_opcode_t *)ppc_function_entry(addr); \ } else if (name[0] != '.') \ - addr = *(kprobe_opcode_t **)addr; \ + addr = (kprobe_opcode_t *)ppc_function_entry(addr); \ } else { \ char dot_name[KSYM_NAME_LEN]; \ dot_name[0] = '.'; \ diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h index b36f650a13f..e3ad5c72724 100644 --- a/arch/powerpc/include/asm/linkage.h +++ b/arch/powerpc/include/asm/linkage.h @@ -2,6 +2,7 @@ #define _ASM_POWERPC_LINKAGE_H #ifdef CONFIG_PPC64 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 #define cond_syscall(x) \ asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \ "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n") @@ -9,5 +10,6 @@ asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) #endif +#endif #endif /* _ASM_POWERPC_LINKAGE_H */ diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 49fa55bfbac..dcfcad139bc 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -35,6 +35,7 @@ struct mod_arch_specific { #ifdef __powerpc64__ unsigned int stubs_section; /* Index of stubs section in module */ unsigned int toc_section; /* What section is the TOC? */ + bool toc_fixed; /* Have we fixed up .TOC.? */ #ifdef CONFIG_DYNAMIC_FTRACE unsigned long toc; unsigned long tramp; @@ -77,6 +78,9 @@ struct mod_arch_specific { # endif /* MODULE */ #endif +bool is_module_trampoline(u32 *insns); +int module_trampoline_target(struct module *mod, u32 *trampoline, + unsigned long *target); struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6586a40a46c..6400f1814fe 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -57,7 +57,7 @@ BEGIN_FW_FTR_SECTION; \ LDX_BE r10,0,r10; /* get log write index */ \ cmpd cr1,r11,r10; \ beq+ cr1,33f; \ - bl .accumulate_stolen_time; \ + bl accumulate_stolen_time; \ ld r12,_MSR(r1); \ andi. 
r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \ 33: \ @@ -189,57 +189,53 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) #define __STK_REG(i) (112 + ((i)-14)*8) #define STK_REG(i) __STK_REG(__REG_##i) +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define STK_GOT 24 +#define __STK_PARAM(i) (32 + ((i)-3)*8) +#else +#define STK_GOT 40 #define __STK_PARAM(i) (48 + ((i)-3)*8) +#endif #define STK_PARAM(i) __STK_PARAM(__REG_##i) -#define XGLUE(a,b) a##b -#define GLUE(a,b) XGLUE(a,b) +#if defined(_CALL_ELF) && _CALL_ELF == 2 #define _GLOBAL(name) \ .section ".text"; \ .align 2 ; \ + .type name,@function; \ .globl name; \ - .globl GLUE(.,name); \ - .section ".opd","aw"; \ -name: \ - .quad GLUE(.,name); \ - .quad .TOC.@tocbase; \ - .quad 0; \ - .previous; \ - .type GLUE(.,name),@function; \ -GLUE(.,name): +name: -#define _INIT_GLOBAL(name) \ - __REF; \ +#define _GLOBAL_TOC(name) \ + .section ".text"; \ .align 2 ; \ + .type name,@function; \ .globl name; \ - .globl GLUE(.,name); \ - .section ".opd","aw"; \ name: \ - .quad GLUE(.,name); \ - .quad .TOC.@tocbase; \ - .quad 0; \ - .previous; \ - .type GLUE(.,name),@function; \ -GLUE(.,name): +0: addis r2,r12,(.TOC.-0b)@ha; \ + addi r2,r2,(.TOC.-0b)@l; \ + .localentry name,.-name #define _KPROBE(name) \ .section ".kprobes.text","a"; \ .align 2 ; \ + .type name,@function; \ .globl name; \ - .globl GLUE(.,name); \ - .section ".opd","aw"; \ -name: \ - .quad GLUE(.,name); \ - .quad .TOC.@tocbase; \ - .quad 0; \ - .previous; \ - .type GLUE(.,name),@function; \ -GLUE(.,name): +name: + +#define DOTSYM(a) a + +#else + +#define XGLUE(a,b) a##b +#define GLUE(a,b) XGLUE(a,b) -#define _STATIC(name) \ +#define _GLOBAL(name) \ .section ".text"; \ .align 2 ; \ + .globl name; \ + .globl GLUE(.,name); \ .section ".opd","aw"; \ name: \ .quad GLUE(.,name); \ @@ -249,9 +245,13 @@ name: \ .type GLUE(.,name),@function; \ GLUE(.,name): -#define _INIT_STATIC(name) \ - __REF; \ +#define _GLOBAL_TOC(name) _GLOBAL(name) + +#define _KPROBE(name) \ + .section ".kprobes.text","a"; \ .align 2 ; \ + .globl name; \ + .globl GLUE(.,name); \ .section ".opd","aw"; \ name: \ .quad GLUE(.,name); \ @@ -261,6 +261,10 @@ name: \ .type GLUE(.,name),@function; \ GLUE(.,name): +#define DOTSYM(a) GLUE(.,a) + +#endif + #else /* 32-bit */ #define _ENTRY(n) \ diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index d0e784e0ff4..d1bb96d5a29 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -39,6 +39,7 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end) (unsigned long)_stext < end; } +#if !defined(_CALL_ELF) || _CALL_ELF != 2 #undef dereference_function_descriptor static inline void *dereference_function_descriptor(void *ptr) { @@ -49,6 +50,7 @@ static inline void *dereference_function_descriptor(void *ptr) ptr = p; return ptr; } +#endif #endif diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 3ddf7027670..ac062f50473 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -62,7 +62,7 @@ COMPAT_SYS_SPU(fcntl) SYSCALL(ni_syscall) SYSCALL_SPU(setpgid) SYSCALL(ni_syscall) -SYSX(sys_ni_syscall,sys_olduname, sys_olduname) +SYSX(sys_ni_syscall,sys_olduname,sys_olduname) SYSCALL_SPU(umask) SYSCALL_SPU(chroot) COMPAT_SYS(ustat) @@ -258,7 +258,7 @@ SYSCALL_SPU(tgkill) COMPAT_SYS_SPU(utimes) COMPAT_SYS_SPU(statfs64) COMPAT_SYS_SPU(fstatfs64) -SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64) 
+SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64) PPC_SYS_SPU(rtas) OLDSYS(debug_setcontext) SYSCALL(ni_syscall) @@ -295,7 +295,7 @@ SYSCALL_SPU(mkdirat) SYSCALL_SPU(mknodat) SYSCALL_SPU(fchownat) COMPAT_SYS_SPU(futimesat) -SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64) +SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64) SYSCALL_SPU(unlinkat) SYSCALL_SPU(renameat) SYSCALL_SPU(linkat) diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h index 7e39c9146a7..59dad113897 100644 --- a/arch/powerpc/include/uapi/asm/elf.h +++ b/arch/powerpc/include/uapi/asm/elf.h @@ -291,9 +291,17 @@ do { \ #define R_PPC64_DTPREL16_HIGHERA 104 /* half16 (sym+add)@dtprel@highera */ #define R_PPC64_DTPREL16_HIGHEST 105 /* half16 (sym+add)@dtprel@highest */ #define R_PPC64_DTPREL16_HIGHESTA 106 /* half16 (sym+add)@dtprel@highesta */ +#define R_PPC64_TLSGD 107 +#define R_PPC64_TLSLD 108 +#define R_PPC64_TOCSAVE 109 + +#define R_PPC64_REL16 249 +#define R_PPC64_REL16_LO 250 +#define R_PPC64_REL16_HI 251 +#define R_PPC64_REL16_HA 252 /* Keep this the last entry. */ -#define R_PPC64_NUM 107 +#define R_PPC64_NUM 253 /* There's actually a third entry here, but it's unused */ struct ppc64_opd_entry diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index cc2d8962e09..4f1393d2007 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle) _GLOBAL(__setup_cpu_e6500) mflr r6 #ifdef CONFIG_PPC64 - bl .setup_altivec_ivors + bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_lrat_ivor + bl setup_lrat_ivor 1: #endif bl setup_pw20_idle @@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500) #ifdef CONFIG_PPC_BOOK3E_64 _GLOBAL(__restore_cpu_e6500) mflr r5 - bl .setup_altivec_ivors + bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_lrat_ivor + bl setup_lrat_ivor 1: - bl .setup_pw20_idle - bl .setup_altivec_idle + bl setup_pw20_idle + bl setup_altivec_idle bl __restore_cpu_e5500 mtlr r5 blr @@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500) mflr r4 bl __e500_icache_setup bl __e500_dcache_setup - bl .__setup_base_ivors - bl .setup_perfmon_ivor - bl .setup_doorbell_ivors + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine @@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500) mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_ehv_ivors + bl setup_ehv_ivors 1: mtlr r4 blr @@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500) mflr r5 bl __e500_icache_setup bl __e500_dcache_setup - bl .__setup_base_ivors - bl .setup_perfmon_ivor - bl .setup_doorbell_ivors + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine @@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500) mfspr r10,SPRN_MMUCFG rlwinm. 
r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_ehv_ivors + bl setup_ehv_ivors b 2f 1: ld r10,CPU_SPEC_FEATURES(r4) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 662c6dd9807..9fde8a1bf1e 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -39,8 +39,8 @@ * System calls. */ .section ".toc","aw" -.SYS_CALL_TABLE: - .tc .sys_call_table[TC],.sys_call_table +SYS_CALL_TABLE: + .tc sys_call_table[TC],sys_call_table /* This value is used to mark exception frames on the stack. */ exception_marker: @@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION LDX_BE r10,0,r10 /* get log write index */ cmpd cr1,r11,r10 beq+ cr1,33f - bl .accumulate_stolen_time + bl accumulate_stolen_time REST_GPR(0,r1) REST_4GPRS(3,r1) REST_2GPRS(7,r1) @@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) std r10,SOFTE(r1) #ifdef SHOW_SYSCALLS - bl .do_show_syscall + bl do_show_syscall REST_GPR(0,r1) REST_4GPRS(3,r1) REST_2GPRS(7,r1) @@ -162,7 +162,7 @@ system_call: /* label this so stack traces look sane */ * Need to vector to 32 Bit or default sys_call_table here, * based on caller's run-mode / personality. */ - ld r11,.SYS_CALL_TABLE@toc(2) + ld r11,SYS_CALL_TABLE@toc(2) andi. r10,r10,_TIF_32BIT beq 15f addi r11,r11,8 /* use 32-bit syscall entries */ @@ -174,14 +174,14 @@ system_call: /* label this so stack traces look sane */ clrldi r8,r8,32 15: slwi r0,r0,4 - ldx r10,r11,r0 /* Fetch system call handler [ptr] */ - mtctr r10 + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ + mtctr r12 bctrl /* Call handler */ syscall_exit: std r3,RESULT(r1) #ifdef SHOW_SYSCALLS - bl .do_show_syscall_exit + bl do_show_syscall_exit ld r3,RESULT(r1) #endif CURRENT_THREAD_INFO(r12, r1) @@ -248,9 +248,9 @@ syscall_error: /* Traced system call support */ syscall_dotrace: - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_syscall_trace_enter + bl do_syscall_trace_enter /* * Restore argument registers possibly just changed. * We use the return value of do_syscall_trace_enter @@ -308,7 +308,7 @@ syscall_exit_work: 4: /* Anything else left to do? */ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) - beq .ret_from_except_lite + beq ret_from_except_lite /* Re-enable interrupts */ #ifdef CONFIG_PPC_BOOK3E @@ -319,10 +319,10 @@ syscall_exit_work: mtmsrd r10,1 #endif /* CONFIG_PPC_BOOK3E */ - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_syscall_trace_leave - b .ret_from_except + bl do_syscall_trace_leave + b ret_from_except /* Save non-volatile GPRs, if not already saved. 
*/ _GLOBAL(save_nvgprs) @@ -345,42 +345,44 @@ _GLOBAL(save_nvgprs) */ _GLOBAL(ppc_fork) - bl .save_nvgprs - bl .sys_fork + bl save_nvgprs + bl sys_fork b syscall_exit _GLOBAL(ppc_vfork) - bl .save_nvgprs - bl .sys_vfork + bl save_nvgprs + bl sys_vfork b syscall_exit _GLOBAL(ppc_clone) - bl .save_nvgprs - bl .sys_clone + bl save_nvgprs + bl sys_clone b syscall_exit _GLOBAL(ppc32_swapcontext) - bl .save_nvgprs - bl .compat_sys_swapcontext + bl save_nvgprs + bl compat_sys_swapcontext b syscall_exit _GLOBAL(ppc64_swapcontext) - bl .save_nvgprs - bl .sys_swapcontext + bl save_nvgprs + bl sys_swapcontext b syscall_exit _GLOBAL(ret_from_fork) - bl .schedule_tail + bl schedule_tail REST_NVGPRS(r1) li r3,0 b syscall_exit _GLOBAL(ret_from_kernel_thread) - bl .schedule_tail + bl schedule_tail REST_NVGPRS(r1) - ld r14, 0(r14) mtlr r14 mr r3,r15 +#if defined(_CALL_ELF) && _CALL_ELF == 2 + mr r12,r14 +#endif blrl li r3,0 b syscall_exit @@ -611,7 +613,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) _GLOBAL(ret_from_except) ld r11,_TRAP(r1) andi. r0,r11,1 - bne .ret_from_except_lite + bne ret_from_except_lite REST_NVGPRS(r1) _GLOBAL(ret_from_except_lite) @@ -661,23 +663,23 @@ _GLOBAL(ret_from_except_lite) #endif 1: andi. r0,r4,_TIF_NEED_RESCHED beq 2f - bl .restore_interrupts + bl restore_interrupts SCHEDULE_USER - b .ret_from_except_lite + b ret_from_except_lite 2: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM bne 3f /* only restore TM if nothing else to do */ addi r3,r1,STACK_FRAME_OVERHEAD - bl .restore_tm_state + bl restore_tm_state b restore 3: #endif - bl .save_nvgprs - bl .restore_interrupts + bl save_nvgprs + bl restore_interrupts addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_notify_resume - b .ret_from_except + bl do_notify_resume + b ret_from_except resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ @@ -730,7 +732,7 @@ resume_kernel: * sure we are soft-disabled first and reconcile irq state. */ RECONCILE_IRQ_STATE(r3,r4) -1: bl .preempt_schedule_irq +1: bl preempt_schedule_irq /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) @@ -792,7 +794,7 @@ restore_no_replay: */ do_restore: #ifdef CONFIG_PPC_BOOK3E - b .exception_return_book3e + b exception_return_book3e #else /* * Clear the reservation. If we know the CPU tracks the address of @@ -907,7 +909,7 @@ restore_check_irq_replay: * * Still, this might be useful for things like hash_page */ - bl .__check_irq_replay + bl __check_irq_replay cmpwi cr0,r3,0 beq restore_no_replay @@ -928,13 +930,13 @@ restore_check_irq_replay: cmpwi cr0,r3,0x500 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .do_IRQ - b .ret_from_except + bl do_IRQ + b ret_from_except 1: cmpwi cr0,r3,0x900 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .timer_interrupt - b .ret_from_except + bl timer_interrupt + b ret_from_except #ifdef CONFIG_PPC_DOORBELL 1: #ifdef CONFIG_PPC_BOOK3E @@ -948,14 +950,14 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .doorbell_exception - b .ret_from_except + bl doorbell_exception + b ret_from_except #endif /* CONFIG_PPC_DOORBELL */ -1: b .ret_from_except /* What else to do here ? */ +1: b ret_from_except /* What else to do here ? 
*/ unrecov_restore: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b unrecov_restore #ifdef CONFIG_PPC_RTAS @@ -1021,7 +1023,7 @@ _GLOBAL(enter_rtas) std r6,PACASAVEDMSR(r13) /* Setup our real return addr */ - LOAD_REG_ADDR(r4,.rtas_return_loc) + LOAD_REG_ADDR(r4,rtas_return_loc) clrldi r4,r4,2 /* convert to realmode address */ mtlr r4 @@ -1045,7 +1047,7 @@ _GLOBAL(enter_rtas) rfid b . /* prevent speculative execution */ -_STATIC(rtas_return_loc) +rtas_return_loc: FIXUP_ENDIAN /* relocation is off at this point */ @@ -1054,7 +1056,7 @@ _STATIC(rtas_return_loc) bcl 20,31,$+4 0: mflr r3 - ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ + ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ mfmsr r6 li r0,MSR_RI @@ -1071,9 +1073,9 @@ _STATIC(rtas_return_loc) b . /* prevent speculative execution */ .align 3 -1: .llong .rtas_restore_regs +1: .llong rtas_restore_regs -_STATIC(rtas_restore_regs) +rtas_restore_regs: /* relocation is on at this point */ REST_GPR(2, r1) /* Restore the TOC */ REST_GPR(13, r1) /* Restore paca */ @@ -1173,7 +1175,7 @@ _GLOBAL(mcount) _GLOBAL(_mcount) blr -_GLOBAL(ftrace_caller) +_GLOBAL_TOC(ftrace_caller) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) @@ -1197,10 +1199,7 @@ _GLOBAL(ftrace_graph_stub) _GLOBAL(ftrace_stub) blr #else -_GLOBAL(mcount) - blr - -_GLOBAL(_mcount) +_GLOBAL_TOC(_mcount) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) @@ -1238,7 +1237,7 @@ _GLOBAL(ftrace_graph_caller) ld r11, 112(r1) addi r3, r11, 16 - bl .prepare_ftrace_return + bl prepare_ftrace_return nop ld r0, 128(r1) @@ -1254,7 +1253,7 @@ _GLOBAL(return_to_handler) mr r31, r1 stdu r1, -112(r1) - bl .ftrace_return_to_handler + bl ftrace_return_to_handler nop /* return value has real return address */ @@ -1284,7 +1283,7 @@ _GLOBAL(mod_return_to_handler) */ ld r2, PACATOC(r13) - bl .ftrace_return_to_handler + bl ftrace_return_to_handler nop /* return value has real return address */ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index c1bee3ce9d1..771b4e92e5d 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -499,7 +499,7 @@ exc_##n##_bad_stack: \ CHECK_NAPPING(); \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ - b .ret_from_except_lite; + b ret_from_except_lite; /* This value is used to mark exception frames on the stack. 
*/ .section ".toc","aw" @@ -550,11 +550,11 @@ interrupt_end_book3e: CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x100) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* Machine Check Interrupt */ @@ -562,11 +562,11 @@ interrupt_end_book3e: MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_MC(0x000) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception + bl machine_check_exception b ret_from_mc_except /* Data Storage Interrupt */ @@ -591,7 +591,7 @@ interrupt_end_book3e: /* External Input Interrupt */ MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, - external_input, .do_IRQ, ACK_NONE) + external_input, do_IRQ, ACK_NONE) /* Alignment */ START_EXCEPTION(alignment); @@ -612,9 +612,9 @@ interrupt_end_book3e: std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) - bl .save_nvgprs - bl .program_check_exception - b .ret_from_except + bl save_nvgprs + bl program_check_exception + b ret_from_except /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); @@ -625,13 +625,13 @@ interrupt_end_book3e: ld r12,_MSR(r1) andi. r0,r12,MSR_PR; beq- 1f - bl .load_up_fpu + bl load_up_fpu b fast_exception_return 1: INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_fp_unavailable_exception - b .ret_from_except + bl kernel_fp_unavailable_exception + b ret_from_except /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); @@ -644,16 +644,16 @@ BEGIN_FTR_SECTION ld r12,_MSR(r1) andi. 
r0,r12,MSR_PR; beq- 1f - bl .load_up_altivec + bl load_up_altivec b fast_exception_return 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_exception - b .ret_from_except + bl altivec_unavailable_exception + b ret_from_except /* AltiVec Assist */ START_EXCEPTION(altivec_assist); @@ -662,39 +662,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220) INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION - bl .altivec_assist_exception + bl altivec_assist_exception END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #else - bl .unknown_exception + bl unknown_exception #endif - b .ret_from_except + b ret_from_except /* Decrementer Interrupt */ MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, - decrementer, .timer_interrupt, ACK_DEC) + decrementer, timer_interrupt, ACK_DEC) /* Fixed Interval Timer Interrupt */ MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, - fixed_interval, .unknown_exception, ACK_FIT) + fixed_interval, unknown_exception, ACK_FIT) /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x9f0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_BOOKE_WDT - bl .WatchdogException + bl WatchdogException #else - bl .unknown_exception + bl unknown_exception #endif b ret_from_crit_except @@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20) INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); @@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mr r4,r14 ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) - bl .save_nvgprs - bl .DebugException - b .ret_from_except + bl save_nvgprs + bl DebugException + b ret_from_except kernel_dbg_exc: b . 
/* NYI */ @@ -839,9 +839,9 @@ kernel_dbg_exc: mr r4,r14 ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) - bl .save_nvgprs - bl .DebugException - b .ret_from_except + bl save_nvgprs + bl DebugException + b ret_from_except START_EXCEPTION(perfmon); NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, @@ -850,23 +850,23 @@ kernel_dbg_exc: INTS_DISABLE CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD - bl .performance_monitor_exception - b .ret_from_except_lite + bl performance_monitor_exception + b ret_from_except_lite /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, - doorbell, .doorbell_exception, ACK_NONE) + doorbell, doorbell_exception, ACK_NONE) /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2a0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* @@ -878,21 +878,21 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2e0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* Hypervisor call */ @@ -901,10 +901,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Embedded Hypervisor priviledged */ START_EXCEPTION(ehpriv); @@ -912,10 +912,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* LRAT Error interrupt */ START_EXCEPTION(lrat_error); @@ -1014,16 +1014,16 @@ storage_fault_common: mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl .do_page_fault + bl do_page_fault cmpdi r3,0 bne- 1f - b .ret_from_except_lite -1: bl .save_nvgprs + b ret_from_except_lite +1: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* * Alignment exception doesn't fit entirely in the 0x100 bytes so it @@ -1035,10 +1035,10 @@ alignment_more: addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .alignment_exception - b .ret_from_except + bl alignment_exception + b ret_from_except /* * We branch here from entry_64.S for the last stage of the exception @@ -1172,7 +1172,7 @@ bad_stack_book3e: std r12,0(r11) ld r2,PACATOC(r13) 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_bad_stack + bl kernel_bad_stack b 1b /* @@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e) * and always use AS 0, so we just set it up to match our link * address and never use 0 based addresses. 
*/ - bl .initial_tlb_book3e + bl initial_tlb_book3e /* Init global core bits */ - bl .init_core_book3e + bl init_core_book3e /* Init per-thread bits */ - bl .init_thread_book3e + bl init_thread_book3e /* Return to common init code */ tovirt(r28,r28) @@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e) */ _GLOBAL(book3e_secondary_core_init_tlb_set) li r4,1 - b .generic_secondary_smp_init + b generic_secondary_smp_init _GLOBAL(book3e_secondary_core_init) mflr r28 @@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init) bne 2f /* Setup TLB for this core */ - bl .initial_tlb_book3e + bl initial_tlb_book3e /* We can return from the above running at a different * address, so recalculate r2 (TOC) */ - bl .relative_toc + bl relative_toc /* Init global core bits */ -2: bl .init_core_book3e +2: bl init_core_book3e /* Init per-thread bits */ -3: bl .init_thread_book3e +3: bl init_thread_book3e /* Return to common init code at proper virtual address. * @@ -1596,14 +1596,14 @@ _GLOBAL(book3e_secondary_thread_init) mflr r28 b 3b -_STATIC(init_core_book3e) +init_core_book3e: /* Establish the interrupt vector base */ LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) mtspr SPRN_IVPR,r3 sync blr -_STATIC(init_thread_book3e) +init_thread_book3e: lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h mtspr SPRN_EPCR,r3 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3afd3915921..20f11eb4dff 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -132,12 +132,12 @@ BEGIN_FTR_SECTION #endif beq cr1,2f - b .power7_wakeup_noloss -2: b .power7_wakeup_loss + b power7_wakeup_noloss +2: b power7_wakeup_loss /* Fast Sleep wakeup on PowerNV */ 8: GET_PACA(r13) - b .power7_wakeup_tb_loss + b power7_wakeup_tb_loss 9: END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) @@ -211,16 +211,16 @@ data_access_slb_pSeries: #endif /* __DISABLED__ */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else /* - * We can't just use a direct branch to .slb_miss_realmode + * We can't just use a direct branch to slb_miss_realmode * because the distance from here to there depends on where * the kernel ends up being put. 
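The mfctr/LOAD_HANDLER/bctr sequence that follows (and recurs at each SLB-miss vector) exists because of CONFIG_RELOCATABLE: the vectors sit at fixed real addresses, but the handler may have moved with the kernel image, so its address is rebuilt from the runtime base instead of being branched to directly. Schematically (handler_address is an illustrative name; PACAKBASE holds the runtime kernel base):

    /* LOAD_HANDLER(reg, label) amounts to
     *   reg = PACAKBASE + (label - _stext)
     * i.e. the handler's link-time offset rebased onto wherever the
     * kernel actually landed; the vector then does mtctr reg; bctr. */
    static unsigned long handler_address(unsigned long kbase,
                                         unsigned long label_offset)
    {
            return kbase + label_offset;
    }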
*/ mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -243,11 +243,11 @@ instruction_access_slb_pSeries: #endif /* __DISABLED__ */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -524,7 +524,7 @@ do_stab_bolted_pSeries: std r12,PACA_EXSLB+EX_R12(r13) GET_SCRATCH0(r10) std r10,PACA_EXSLB+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) + EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) @@ -769,38 +769,38 @@ kvmppc_skip_Hinterrupt: /*** Common interrupt handlers ***/ - STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) + STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception) STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) - STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) - STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) + STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt) + STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt) #ifdef CONFIG_PPC_DOORBELL - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception) #else - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception) #endif - STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) - STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) - STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) - STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) + STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) + STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception) + STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt) + STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception) #ifdef CONFIG_PPC_DOORBELL - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception) #else - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception) #endif - STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) - STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) - STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception) + STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception) + STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception) #ifdef CONFIG_ALTIVEC - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) + STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception) #else - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) + STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception) #endif #ifdef CONFIG_CBE_RAS - STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) - STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, 
.cbe_maintenance_exception) - STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) + STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception) + STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception) + STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception) #endif /* CONFIG_CBE_RAS */ /* @@ -829,16 +829,16 @@ data_access_slb_relon_pSeries: mfspr r3,SPRN_DAR mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else /* - * We can't just use a direct branch to .slb_miss_realmode + * We can't just use a direct branch to slb_miss_realmode * because the distance from here to there depends on where * the kernel ends up being put. */ mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -854,11 +854,11 @@ instruction_access_slb_relon_pSeries: mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -966,7 +966,7 @@ system_call_entry: b system_call_common ppc64_runlatch_on_trampoline: - b .__ppc64_runlatch_on + b __ppc64_runlatch_on /* * Here we have detected that the kernel stack pointer is bad. @@ -1025,7 +1025,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r12,RESULT(r1) std r11,STACK_FRAME_OVERHEAD-16(r1) 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_bad_stack + bl kernel_bad_stack b 1b /* @@ -1046,7 +1046,7 @@ data_access_common: ld r3,PACA_EXGEN+EX_DAR(r13) lwz r4,PACA_EXGEN+EX_DSISR(r13) li r5,0x300 - b .do_hash_page /* Try to handle as hpte fault */ + b do_hash_page /* Try to handle as hpte fault */ .align 7 .globl h_data_storage_common @@ -1056,11 +1056,11 @@ h_data_storage_common: mfspr r10,SPRN_HDSISR stw r10,PACA_EXGEN+EX_DSISR(r13) EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except .align 7 .globl instruction_access_common @@ -1071,9 +1071,9 @@ instruction_access_common: ld r3,_NIP(r1) andis. 
r4,r12,0x5820 li r5,0x400 - b .do_hash_page /* Try to handle as hpte fault */ + b do_hash_page /* Try to handle as hpte fault */ - STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) /* * Here is the common SLB miss user that is used when going to virtual @@ -1088,7 +1088,7 @@ slb_miss_user_common: stw r9,PACA_EXGEN+EX_CCR(r13) std r10,PACA_EXGEN+EX_LR(r13) std r11,PACA_EXGEN+EX_SRR0(r13) - bl .slb_allocate_user + bl slb_allocate_user ld r10,PACA_EXGEN+EX_LR(r13) ld r3,PACA_EXGEN+EX_R3(r13) @@ -1131,9 +1131,9 @@ slb_miss_fault: unrecov_user_slb: EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) DISABLE_INTS - bl .save_nvgprs + bl save_nvgprs 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b #endif /* __DISABLED__ */ @@ -1158,10 +1158,10 @@ machine_check_common: lwz r4,PACA_EXGEN+EX_DSISR(r13) std r3,_DAR(r1) std r4,_DSISR(r1) - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception - b .ret_from_except + bl machine_check_exception + b ret_from_except .align 7 .globl alignment_common @@ -1175,31 +1175,31 @@ alignment_common: lwz r4,PACA_EXGEN+EX_DSISR(r13) std r3,_DAR(r1) std r4,_DSISR(r1) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .alignment_exception - b .ret_from_except + bl alignment_exception + b ret_from_except .align 7 .globl program_check_common program_check_common: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .program_check_exception - b .ret_from_except + bl program_check_exception + b ret_from_except .align 7 .globl fp_unavailable_common fp_unavailable_common: EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) bne 1f /* if from user, just load it up */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_fp_unavailable_exception + bl kernel_fp_unavailable_exception BUG_OPCODE 1: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1211,15 +1211,15 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif - bl .load_up_fpu + bl load_up_fpu b fast_exception_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .fp_unavailable_tm - b .ret_from_except + bl fp_unavailable_tm + b ret_from_except #endif .align 7 .globl altivec_unavailable_common @@ -1237,24 +1237,24 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) #endif - bl .load_up_altivec + bl load_up_altivec b fast_exception_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_tm - b .ret_from_except + bl altivec_unavailable_tm + b ret_from_except #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_exception - b .ret_from_except + bl altivec_unavailable_exception + b ret_from_except .align 7 .globl vsx_unavailable_common @@ -1272,26 +1272,26 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) #endif - b .load_up_vsx + b load_up_vsx #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl 
.vsx_unavailable_tm - b .ret_from_except + bl vsx_unavailable_tm + b ret_from_except #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .vsx_unavailable_exception - b .ret_from_except + bl vsx_unavailable_exception + b ret_from_except - STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) - STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception) .align 7 .globl __end_handlers @@ -1386,9 +1386,9 @@ _GLOBAL(opal_mc_secondary_handler) machine_check_handle_early: std r0,GPR0(r1) /* Save r0 */ EXCEPTION_PROLOG_COMMON_3(0x200) - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_early + bl machine_check_early ld r12,_MSR(r1) #ifdef CONFIG_PPC_P7_NAP /* @@ -1408,11 +1408,11 @@ machine_check_handle_early: /* Supervisor state loss */ li r0,1 stb r0,PACA_NAPSTATELOST(r13) -3: bl .machine_check_queue_event +3: bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP GET_PACA(r13) ld r1,PACAR1(r13) - b .power7_enter_nap_mode + b power7_enter_nap_mode 4: #endif /* @@ -1444,7 +1444,7 @@ machine_check_handle_early: andi. r11,r12,MSR_RI bne 2f 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b 2: /* @@ -1452,7 +1452,7 @@ machine_check_handle_early: * Queue up the MCE event so that we can log it later, while * returning from kernel or opal call. */ - bl .machine_check_queue_event + bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP rfid 9: @@ -1468,7 +1468,7 @@ machine_check_handle_early: * r3 is saved in paca->slb_r3 * We assume we aren't going to take any exceptions during this procedure. */ -_GLOBAL(slb_miss_realmode) +slb_miss_realmode: mflr r10 #ifdef CONFIG_RELOCATABLE mtctr r11 @@ -1477,7 +1477,7 @@ _GLOBAL(slb_miss_realmode) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - bl .slb_allocate_realmode + bl slb_allocate_realmode /* All done -- return from exception. 
*/ @@ -1517,9 +1517,9 @@ _GLOBAL(slb_miss_realmode) unrecov_slb: EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) DISABLE_INTS - bl .save_nvgprs + bl save_nvgprs 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b @@ -1536,7 +1536,7 @@ power4_fixup_nap: * Hash table stuff */ .align 7 -_STATIC(do_hash_page) +do_hash_page: std r3,_DAR(r1) std r4,_DSISR(r1) @@ -1573,7 +1573,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) * * at return r3 = 0 for success, 1 for page fault, negative for error */ - bl .hash_page /* build HPTE if possible */ + bl hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if hash_page succeeded */ /* Success */ @@ -1587,35 +1587,35 @@ handle_page_fault: 11: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_page_fault + bl do_page_fault cmpdi r3,0 beq+ 12f - bl .save_nvgprs + bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* We have a data breakpoint exception - handle it */ handle_dabr_fault: - bl .save_nvgprs + bl save_nvgprs ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_break -12: b .ret_from_except_lite + bl do_break +12: b ret_from_except_lite /* We have a page fault that hash_page could handle but HV refused * the PTE insertion */ -13: bl .save_nvgprs +13: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) - bl .low_hash_fault - b .ret_from_except + bl low_hash_fault + b ret_from_except /* * We come here as a result of a DSI at a point where we don't want @@ -1624,16 +1624,16 @@ handle_dabr_fault: * were soft-disabled. We want to invoke the exception handler for * the access, or panic if there isn't a handler. */ -77: bl .save_nvgprs +77: bl save_nvgprs mr r4,r3 addi r3,r1,STACK_FRAME_OVERHEAD li r5,SIGSEGV - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* here we have a segment miss */ do_ste_alloc: - bl .ste_allocate /* try to insert stab entry */ + bl ste_allocate /* try to insert stab entry */ cmpdi r3,0 bne- handle_page_fault b fast_exception_return @@ -1646,7 +1646,7 @@ do_ste_alloc: * We assume (DAR >> 60) == 0xc. */ .align 7 -_GLOBAL(do_stab_bolted) +do_stab_bolted: stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ mfspr r11,SPRN_DAR /* ea */ diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 6a014c763cc..f202d0731b0 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -105,11 +105,9 @@ __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned int op; - unsigned int jmp[5]; unsigned long ptr; unsigned long ip = rec->ip; - unsigned long tramp; - int offset; + void *tramp; /* read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) @@ -122,96 +120,41 @@ __ftrace_make_nop(struct module *mod, } /* lets find where the pointer goes */ - tramp = find_bl_target(ip, op); - - /* - * On PPC64 the trampoline looks like: - * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high> - * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low> - * Where the bytes 2,3,6 and 7 make up the 32bit offset - * to the TOC that holds the pointer. - * to jump to. - * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) - * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) - * The actually address is 32 bytes from the offset - * into the TOC. 
- * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) - */ - - pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); - - /* Find where the trampoline jumps to */ - if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { - printk(KERN_ERR "Failed to read %lx\n", tramp); - return -EFAULT; - } + tramp = (void *)find_bl_target(ip, op); - pr_devel(" %08x %08x", jmp[0], jmp[1]); + pr_devel("ip:%lx jumps to %p", ip, tramp); - /* verify that this is what we expect it to be */ - if (((jmp[0] & 0xffff0000) != 0x3d820000) || - ((jmp[1] & 0xffff0000) != 0x398c0000) || - (jmp[2] != 0xf8410028) || - (jmp[3] != 0xe96c0020) || - (jmp[4] != 0xe84c0028)) { + if (!is_module_trampoline(tramp)) { printk(KERN_ERR "Not a trampoline\n"); return -EINVAL; } - /* The bottom half is signed extended */ - offset = ((unsigned)((unsigned short)jmp[0]) << 16) + - (int)((short)jmp[1]); - - pr_devel(" %x ", offset); - - /* get the address this jumps too */ - tramp = mod->arch.toc + offset + 32; - pr_devel("toc: %lx", tramp); - - if (probe_kernel_read(jmp, (void *)tramp, 8)) { - printk(KERN_ERR "Failed to read %lx\n", tramp); + if (module_trampoline_target(mod, tramp, &ptr)) { + printk(KERN_ERR "Failed to get trampoline target\n"); return -EFAULT; } - pr_devel(" %08x %08x\n", jmp[0], jmp[1]); - -#ifdef __LITTLE_ENDIAN__ - ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; -#else - ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; -#endif + pr_devel("trampoline target %lx", ptr); /* This should match what was called */ if (ptr != ppc_function_entry((void *)addr)) { - printk(KERN_ERR "addr does not match %lx\n", ptr); + printk(KERN_ERR "addr %lx does not match expected %lx\n", + ptr, ppc_function_entry((void *)addr)); return -EINVAL; } /* - * We want to nop the line, but the next line is - * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) - * This needs to be turned to a nop too. - */ - if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) - return -EFAULT; - - if (op != 0xe8410028) { - printk(KERN_ERR "Next line is not ld! (%08x)\n", op); - return -EINVAL; - } - - /* - * Milton Miller pointed out that we can not blindly do nops. - * If a task was preempted when calling a trace function, - * the nops will remove the way to restore the TOC in r2 - * and the r2 TOC will get corrupted. - */ - - /* - * Replace: - * bl <tramp> <==== will be replaced with "b 1f" - * ld r2,40(r1) - * 1: + * Our original call site looks like: + * + * bl <tramp> + * ld r2,XX(r1) + * + * Milton Miller pointed out that we can not simply nop the branch. + * If a task was preempted when calling a trace function, the nops + * will remove the way to restore the TOC in r2 and the r2 TOC will + * get corrupted. + * + * Use a b +8 to jump over the load. */ op = 0x48000008; /* b +8 */ @@ -349,19 +292,24 @@ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned int op[2]; - unsigned long ip = rec->ip; + void *ip = (void *)rec->ip; /* read where this goes */ - if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) + if (probe_kernel_read(op, ip, sizeof(op))) return -EFAULT; /* - * It should be pointing to two nops or - * b +8; ld r2,40(r1) + * We expect to see: + * + * b +8 + * ld r2,XX(r1) + * + * The load offset is different depending on the ABI. For simplicity + * just mask it out when doing the compare. 
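The comparison described above is against two fixed encodings: "b +8" is I-form opcode 18 with LI = 8, and "ld r2,N(r1)" is DS-form opcode 58 with RT = 2 and RA = 1, giving 0xe841 in the top halfword regardless of N (N is 40 under ELFv1 and 24 under ELFv2). A sketch of the check (is_expected_call_site is an illustrative name):

    #include <stdint.h>

    static int is_expected_call_site(const uint32_t op[2])
    {
            if (op[0] != 0x48000008)        /* "b +8" */
                    return 0;

            /* "ld r2,N(r1)", with the displacement masked off since
             * N differs between the ABIs */
            return (op[1] & 0xffff0000) == 0xe8410000;
    }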
*/ - if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && - ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { - printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); + if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) { + printk(KERN_ERR "Unexpected call sequence: %x %x\n", + op[0], op[1]); return -EINVAL; } @@ -371,23 +319,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - /* create the branch to the trampoline */ - op[0] = create_branch((unsigned int *)ip, - rec->arch.mod->arch.tramp, BRANCH_SET_LINK); - if (!op[0]) { - printk(KERN_ERR "REL24 out of range!\n"); + /* Ensure branch is within 24 bits */ + if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + printk(KERN_ERR "Branch out of range\n"); return -EINVAL; } - /* ld r2,40(r1) */ - op[1] = 0xe8410028; - - pr_devel("write to %lx\n", rec->ip); - - if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) - return -EPERM; - - flush_icache_range(ip, ip + 8); + if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } return 0; } diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index b7363bd4245..a95145d7f61 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -70,16 +70,15 @@ _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION FIXUP_ENDIAN - b .__start_initialization_multiplatform + b __start_initialization_multiplatform END_FTR_SECTION(0, 1) /* Catch branch to 0 in real mode */ trap - /* Secondary processors spin on this value until it becomes nonzero. - * When it does it contains the real address of the descriptor - * of the function that the cpu should jump to to continue - * initialization. + /* Secondary processors spin on this value until it becomes non-zero. + * When non-zero, it contains the real address of the function the cpu + * should jump to. */ .balign 8 .globl __secondary_hold_spinloop @@ -140,16 +139,15 @@ __secondary_hold: tovirt(r26,r26) #endif /* All secondary cpus wait here until told to start.
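Note that the spin loop below now keeps the jump target in r12 and no longer dereferences a descriptor itself: the release side stores a ready-to-use code address (via ppc_function_entry(), which strips an ELFv1 descriptor or steps over an ELFv2 global entry point), and holding that address in r12 through mtctr/bctr is what lets an ELFv2 callee compute its own TOC. Roughly, on the writer side (a sketch; the actual writer is smp_release_cpus() in setup_64.c, outside this section):

    extern volatile unsigned long __secondary_hold_spinloop;

    /* Publish the real entry address of the secondary start routine;
     * the spinning cpu then does mtctr r12; bctr on whatever appears. */
    static void release_secondaries(void *fn)
    {
            __secondary_hold_spinloop = ppc_function_entry(fn);
    }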
*/ -100: ld r4,__secondary_hold_spinloop-_stext(r26) - cmpdi 0,r4,0 +100: ld r12,__secondary_hold_spinloop-_stext(r26) + cmpdi 0,r12,0 beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) #ifdef CONFIG_PPC_BOOK3E - tovirt(r4,r4) + tovirt(r12,r12) #endif - ld r4,0(r4) /* deref function descriptor */ - mtctr r4 + mtctr r12 mr r3,r24 /* * it may be the case that other platforms have r4 right to @@ -186,16 +184,16 @@ _GLOBAL(generic_secondary_thread_init) mr r24,r3 /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 - bl .book3e_secondary_thread_init + bl book3e_secondary_thread_init #endif b generic_secondary_common_init @@ -214,17 +212,17 @@ _GLOBAL(generic_secondary_smp_init) mr r25,r4 /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 mr r4,r25 - bl .book3e_secondary_core_init + bl book3e_secondary_core_init #endif generic_secondary_common_init: @@ -236,7 +234,7 @@ generic_secondary_common_init: ld r13,0(r13) /* Get base vaddr of paca array */ #ifndef CONFIG_SMP addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ - b .kexec_wait /* wait for next kernel if !SMP */ + b kexec_wait /* wait for next kernel if !SMP */ #else LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ lwz r7,0(r7) /* also the max paca allocated */ @@ -250,7 +248,7 @@ generic_secondary_common_init: blt 1b mr r3,r24 /* not found, copy phys to r3 */ - b .kexec_wait /* next kernel might do better */ + b kexec_wait /* next kernel might do better */ 2: SET_PACA(r13) #ifdef CONFIG_PPC_BOOK3E @@ -264,11 +262,13 @@ generic_secondary_common_init: /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) ld r23,0(r23) - ld r23,CPU_SPEC_RESTORE(r23) - cmpdi 0,r23,0 + ld r12,CPU_SPEC_RESTORE(r23) + cmpdi 0,r12,0 beq 3f - ld r23,0(r23) - mtctr r23 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r12) +#endif + mtctr r12 bctrl 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ @@ -299,7 +299,7 @@ generic_secondary_common_init: * Assumes we're mapped EA == RA if the MMU is on. */ #ifdef CONFIG_PPC_BOOK3S -_STATIC(__mmu_off) +__mmu_off: mfmsr r3 andi. 
r0,r3,MSR_IR|MSR_DR beqlr @@ -324,12 +324,12 @@ _STATIC(__mmu_off) * DT block, r4 is a physical pointer to the kernel itself * */ -_GLOBAL(__start_initialization_multiplatform) +__start_initialization_multiplatform: /* Make sure we are running in 64 bits mode */ - bl .enable_64b_mode + bl enable_64b_mode /* Get TOC pointer (current runtime address) */ - bl .relative_toc + bl relative_toc /* find out where we are now */ bcl 20,31,$+4 @@ -342,7 +342,7 @@ _GLOBAL(__start_initialization_multiplatform) */ cmpldi cr0,r5,0 beq 1f - b .__boot_from_prom /* yes -> prom */ + b __boot_from_prom /* yes -> prom */ 1: /* Save parameters */ mr r31,r3 @@ -354,8 +354,8 @@ _GLOBAL(__start_initialization_multiplatform) #endif #ifdef CONFIG_PPC_BOOK3E - bl .start_initialization_book3e - b .__after_prom_start + bl start_initialization_book3e + b __after_prom_start #else /* Setup some critical 970 SPRs before switching MMU off */ mfspr r0,SPRN_PVR @@ -368,15 +368,15 @@ _GLOBAL(__start_initialization_multiplatform) beq 1f cmpwi r0,0x45 /* 970GX */ bne 2f -1: bl .__cpu_preinit_ppc970 +1: bl __cpu_preinit_ppc970 2: /* Switch off MMU if not already off */ - bl .__mmu_off - b .__after_prom_start + bl __mmu_off + b __after_prom_start #endif /* CONFIG_PPC_BOOK3E */ -_INIT_STATIC(__boot_from_prom) +__boot_from_prom: #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE /* Save parameters */ mr r31,r3 @@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom) #ifdef CONFIG_RELOCATABLE /* Relocate code for where we are now */ mr r3,r26 - bl .relocate + bl relocate #endif /* Restore parameters */ @@ -407,14 +407,14 @@ _INIT_STATIC(__boot_from_prom) /* Do all of the interaction with OF client interface */ mr r8,r26 - bl .prom_init + bl prom_init #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ /* We never return. We also hit that trap if trying to boot * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ trap -_STATIC(__after_prom_start) +__after_prom_start: #ifdef CONFIG_RELOCATABLE /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ @@ -424,7 +424,7 @@ _STATIC(__after_prom_start) bne 1f add r25,r25,r26 1: mr r3,r25 - bl .relocate + bl relocate #endif /* @@ -464,12 +464,12 @@ _STATIC(__after_prom_start) lis r5,(copy_to_here - _stext)@ha addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ - bl .copy_and_flush /* copy the first n bytes */ + bl copy_and_flush /* copy the first n bytes */ /* this includes the code being */ /* executed here. 
*/ addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ - addi r8,r8,(4f - _stext)@l /* that we just made */ - mtctr r8 + addi r12,r8,(4f - _stext)@l /* that we just made */ + mtctr r12 bctr .balign 8 @@ -478,9 +478,9 @@ p_end: .llong _end - _stext 4: /* Now copy the rest of the kernel up to _end */ addis r5,r26,(p_end - _stext)@ha ld r5,(p_end - _stext)@l(r5) /* get _end */ -5: bl .copy_and_flush /* copy the rest */ +5: bl copy_and_flush /* copy the rest */ -9: b .start_here_multiplatform +9: b start_here_multiplatform /* * Copy routine used to copy the kernel to start at physical address 0 @@ -544,7 +544,7 @@ __secondary_start_pmac_0: _GLOBAL(pmac_secondary_start) /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode li r0,0 mfspr r3,SPRN_HID4 @@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start) slbia /* get TOC pointer (real address) */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) /* Copy some CPU settings from CPU 0 */ - bl .__restore_cpu_ppc970 + bl __restore_cpu_ppc970 /* pSeries do that early though I don't think we really need it */ mfmsr r3 @@ -619,7 +619,7 @@ __secondary_start: std r14,PACAKSAVE(r13) /* Do early setup for that CPU (stab, slb, hash table pointer) */ - bl .early_setup_secondary + bl early_setup_secondary /* * setup the new stack pointer, but *don't* use this until @@ -639,7 +639,7 @@ __secondary_start: stb r0,PACAIRQHAPPENED(r13) /* enable MMU and jump to start_secondary */ - LOAD_REG_ADDR(r3, .start_secondary_prolog) + LOAD_REG_ADDR(r3, start_secondary_prolog) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) mtspr SPRN_SRR0,r3 @@ -652,11 +652,11 @@ __secondary_start: * zero the stack back-chain pointer and get the TOC virtual address * before going into C code. */ -_GLOBAL(start_secondary_prolog) +start_secondary_prolog: ld r2,PACATOC(r13) li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ - bl .start_secondary + bl start_secondary b . /* * Reset stack pointer and call start_secondary @@ -667,14 +667,14 @@ _GLOBAL(start_secondary_resume) ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ - bl .start_secondary + bl start_secondary b . #endif /* * This subroutine clobbers r11 and r12 */ -_GLOBAL(enable_64b_mode) +enable_64b_mode: mfmsr r11 /* grab the current MSR */ #ifdef CONFIG_PPC_BOOK3E oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ @@ -715,9 +715,9 @@ p_toc: .llong __toc_start + 0x8000 - 0b /* * This is where the main kernel code starts. */ -_INIT_STATIC(start_here_multiplatform) +start_here_multiplatform: /* set up the TOC */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) /* Clear out the BSS. It may have been done in prom_init, @@ -776,9 +776,9 @@ _INIT_STATIC(start_here_multiplatform) /* Restore parameters passed from prom_init/kexec */ mr r3,r31 - bl .early_setup /* also sets r13 and SPRG_PACA */ + bl early_setup /* also sets r13 and SPRG_PACA */ - LOAD_REG_ADDR(r3, .start_here_common) + LOAD_REG_ADDR(r3, start_here_common) ld r4,PACAKMSR(r13) mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 @@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform) b . 
/* prevent speculative execution */ /* This is where all platforms converge execution */ -_INIT_GLOBAL(start_here_common) + +start_here_common: /* relocation is on at this point */ std r1,PACAKSAVE(r13) @@ -794,7 +795,7 @@ _INIT_GLOBAL(start_here_common) ld r2,PACATOC(r13) /* Do more system initializations in virtual mode */ - bl .setup_system + bl setup_system /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) @@ -805,7 +806,7 @@ _INIT_GLOBAL(start_here_common) stb r0,PACAIRQHAPPENED(r13) /* Generic kernel entry */ - bl .start_kernel + bl start_kernel /* Not reached */ BUG_OPCODE diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index bfb73cc209c..48c21acef91 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -43,7 +43,7 @@ _GLOBAL(\name) */ #ifdef CONFIG_TRACE_IRQFLAGS stdu r1,-128(r1) - bl .trace_hardirqs_on + bl trace_hardirqs_on addi r1,r1,128 #endif li r0,1 diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index e3edaa18991..f57a19348bd 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) mflr r0 std r0,16(r1) stdu r1,-128(r1) - bl .trace_hardirqs_on + bl trace_hardirqs_on addi r1,r1,128 ld r0,16(r1) mtlr r0 diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c3ab8697561..dca6e16c243 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common) /* Make sure FPU, VSX etc... are flushed as we may lose * state when going to nap mode */ - bl .discard_lazy_cpu_state + bl discard_lazy_cpu_state #endif /* CONFIG_SMP */ /* Hard disable interrupts */ @@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss) _GLOBAL(power7_wakeup_noloss) lbz r0,PACA_NAPSTATELOST(r13) cmpwi r0,0 - bne .power7_wakeup_loss + bne power7_wakeup_loss ld r1,PACAR1(r13) ld r4,_MSR(r1) ld r5,_NIP(r1) diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3d0249599d5..4e314b90c75 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq) std r0,16(r1) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) mr r1,r3 - bl .__do_softirq + bl __do_softirq ld r1,0(r1) ld r0,16(r1) mtlr r0 @@ -45,7 +45,7 @@ _GLOBAL(call_do_irq) std r0,16(r1) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 - bl .__do_irq + bl __do_irq ld r1,0(r1) ld r0,16(r1) mtlr r0 @@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait) stb r4,PACAKEXECSTATE(r13) SYNC - b .kexec_wait + b kexec_wait /* * switch to real mode (turn mmu off) @@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence) /* copy dest pages, flush whole dest image */ mr r3,r29 - bl .kexec_copy_flush /* (image) */ + bl kexec_copy_flush /* (image) */ /* turn off mmu */ bl real_mode @@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence) mr r4,r30 /* start, aka phys mem offset */ li r5,0x100 li r6,0 - bl .copy_and_flush /* (dest, src, copy limit, start offset) */ + bl copy_and_flush /* (dest, src, copy limit, start offset) */ 1: /* assume normal blr return */ /* release other cpus to the new kernel secondary start at 0x60 */ @@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence) stw r6,kexec_flag-1b(5) /* clear out hardware hash page table and tlb */ - ld r5,0(r27) /* deref function descriptor */ - mtctr r5 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r27) /* deref function descriptor */ +#else + mr 
r12,r27 +#endif + mtctr r12 bctrl /* ppc_md.hpte_clear_all(void); */ /* @@ -630,3 +634,31 @@ _GLOBAL(kexec_sequence) li r5,0 blr /* image->start(physid, image->start, 0); */ #endif /* CONFIG_KEXEC */ + +#ifdef CONFIG_MODULES +#if defined(_CALL_ELF) && _CALL_ELF == 2 + +#ifdef CONFIG_MODVERSIONS +.weak __crc_TOC. +.section "___kcrctab+TOC.","a" +.globl __kcrctab_TOC. +__kcrctab_TOC.: + .llong __crc_TOC. +#endif + +/* + * Export a fake .TOC. since both modpost and depmod will complain otherwise. + * Both modpost and depmod strip the leading . so we do the same here. + */ +.section "__ksymtab_strings","a" +__kstrtab_TOC.: + .asciz "TOC." + +.section "___ksymtab+TOC.","a" +/* This symbol name is important: it's used by modpost to find exported syms */ +.globl __ksymtab_TOC. +__ksymtab_TOC.: + .llong 0 /* .value */ + .llong __kstrtab_TOC. +#endif /* ELFv2 */ +#endif /* MODULES */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 12664c130d7..ef349d07712 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -22,6 +22,7 @@ #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/bug.h> +#include <linux/uaccess.h> #include <asm/module.h> #include <asm/firmware.h> #include <asm/code-patching.h> @@ -41,46 +42,170 @@ #define DEBUGP(fmt , ...) #endif +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define R2_STACK_OFFSET 24 + +/* An address is simply the address of the function. */ +typedef unsigned long func_desc_t; + +static func_desc_t func_desc(unsigned long addr) +{ + return addr; +} +static unsigned long func_addr(unsigned long addr) +{ + return addr; +} +static unsigned long stub_func_addr(func_desc_t func) +{ + return func; +} + +/* PowerPC64 specific values for the Elf64_Sym st_other field. */ +#define STO_PPC64_LOCAL_BIT 5 +#define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT) +#define PPC64_LOCAL_ENTRY_OFFSET(other) \ + (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2) + +static unsigned int local_entry_offset(const Elf64_Sym *sym) +{ + /* sym->st_other indicates offset to local entry point + * (otherwise it will assume r12 is the address of the start + * of the function and try to derive r2 from it). */ + return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other); +} +#else +#define R2_STACK_OFFSET 40 + +/* An address is address of the OPD entry, which contains address of fn. */ +typedef struct ppc64_opd_entry func_desc_t; + +static func_desc_t func_desc(unsigned long addr) +{ + return *(struct ppc64_opd_entry *)addr; +} +static unsigned long func_addr(unsigned long addr) +{ + return func_desc(addr).funcaddr; +} +static unsigned long stub_func_addr(func_desc_t func) +{ + return func.funcaddr; +} +static unsigned int local_entry_offset(const Elf64_Sym *sym) +{ + return 0; +} +#endif + /* Like PPC32, we need little trampolines to do > 24-bit jumps (into the kernel itself). But on PPC64, these need to be used for every jump, actually, to reset r2 (TOC+0x8000). */ struct ppc64_stub_entry { - /* 28 byte jump instruction sequence (7 instructions) */ - unsigned char jump[28]; - unsigned char unused[4]; + /* 28 byte jump instruction sequence (7 instructions). We only + * need 6 instructions on ABIv2 but we always allocate 7 so + * we don't have to modify the trampoline load instruction.
*/ + u32 jump[7]; + u32 unused; /* Data for the above code */ - struct ppc64_opd_entry opd; + func_desc_t funcdata; }; -/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external) - function which may be more than 24-bits away. We could simply - patch the new r2 value and function pointer into the stub, but it's - significantly shorter to put these values at the end of the stub - code, and patch the stub address (32-bits relative to the TOC ptr, - r2) into the stub. */ -static struct ppc64_stub_entry ppc64_stub = -{ .jump = { -#ifdef __LITTLE_ENDIAN__ - 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */ - 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */ - /* Save current r2 value in magic place on the stack. */ - 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ - 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ - 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ - 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ - 0x20, 0x04, 0x80, 0x4e /* bctr */ -#else - 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */ - 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */ /* + * PPC64 uses 24 bit jumps, but we need to jump into other modules or + * the kernel which may be further away. So we jump to a stub. + * + * For ELFv1 we need to use this to set up the new r2 value (aka TOC + * pointer). For ELFv2 it's the callee's responsibility to set up the + * new r2, but for both we need to save the old r2. + * + * We could simply patch the new r2 value and function pointer into + * the stub, but it's significantly shorter to put these values at the + * end of the stub code, and patch the stub address (32-bits relative + * to the TOC ptr, r2) into the stub. + */ + +static u32 ppc64_stub_insns[] = { + 0x3d620000, /* addis r11,r2, <high> */ + 0x396b0000, /* addi r11,r11, <low> */ /* Save current r2 value in magic place on the stack. */ - 0xf8, 0x41, 0x00, 0x28, /* std r2,40(r1) */ - 0xe9, 0x6c, 0x00, 0x20, /* ld r11,32(r12) */ - 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ - 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ - 0x4e, 0x80, 0x04, 0x20 /* bctr */ + 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ + 0xe98b0020, /* ld r12,32(r11) */ +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + /* Set up new r2 from function descriptor */ + 0xe84b0028, /* ld r2,40(r11) */ +#endif + 0x7d8903a6, /* mtctr r12 */ + 0x4e800420 /* bctr */ +}; + +#ifdef CONFIG_DYNAMIC_FTRACE + +static u32 ppc64_stub_mask[] = { + 0xffff0000, + 0xffff0000, + 0xffffffff, + 0xffffffff, +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + 0xffffffff, +#endif + 0xffffffff, + 0xffffffff +}; + +bool is_module_trampoline(u32 *p) +{ + unsigned int i; + u32 insns[ARRAY_SIZE(ppc64_stub_insns)]; + + BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask)); + + if (probe_kernel_read(insns, p, sizeof(insns))) + return false; + + for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) { + u32 insna = insns[i]; + u32 insnb = ppc64_stub_insns[i]; + u32 mask = ppc64_stub_mask[i]; + + if ((insna & mask) != (insnb & mask)) + return false; + } + + return true; +} + +int module_trampoline_target(struct module *mod, u32 *trampoline, + unsigned long *target) +{ + u32 buf[2]; + u16 upper, lower; + long offset; + void *toc_entry; + + if (probe_kernel_read(buf, trampoline, sizeof(buf))) + return -EFAULT; + + upper = buf[0] & 0xffff; + lower = buf[1] & 0xffff; + + /* perform the addis/addi, both signed */ + offset = ((short)upper << 16) + (short)lower; + + /* + * Now get the address this trampoline jumps to. This + * is always 32 bytes into our trampoline stub.
+ */ + toc_entry = (void *)mod->arch.toc + offset + 32; + + if (probe_kernel_read(target, toc_entry, sizeof(*target))) + return -EFAULT; + + return 0; +} + #endif -} }; /* Count how many different 24-bit relocations (different symbol, different addend) */ @@ -183,6 +308,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr, return relocs * sizeof(struct ppc64_stub_entry); } +/* Still needed for ELFv2, for .TOC. */ static void dedotify_versions(struct modversion_info *vers, unsigned long size) { @@ -193,7 +319,7 @@ static void dedotify_versions(struct modversion_info *vers, memmove(vers->name, vers->name+1, strlen(vers->name)); } -/* Undefined symbols which refer to .funcname, hack to funcname */ +/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) { unsigned int i; @@ -207,6 +333,24 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) } } +static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, + const char *strtab, + unsigned int symindex) +{ + unsigned int i, numsyms; + Elf64_Sym *syms; + + syms = (Elf64_Sym *)sechdrs[symindex].sh_addr; + numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); + + for (i = 1; i < numsyms; i++) { + if (syms[i].st_shndx == SHN_UNDEF + && strcmp(strtab + syms[i].st_name, "TOC.") == 0) + return &syms[i]; + } + return NULL; +} + int module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, char *secstrings, @@ -271,21 +415,12 @@ static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me) /* Patch stub to reference function and correct r2 value. */ static inline int create_stub(Elf64_Shdr *sechdrs, struct ppc64_stub_entry *entry, - struct ppc64_opd_entry *opd, + unsigned long addr, struct module *me) { - Elf64_Half *loc1, *loc2; long reladdr; - *entry = ppc64_stub; - -#ifdef __LITTLE_ENDIAN__ - loc1 = (Elf64_Half *)&entry->jump[0]; - loc2 = (Elf64_Half *)&entry->jump[4]; -#else - loc1 = (Elf64_Half *)&entry->jump[2]; - loc2 = (Elf64_Half *)&entry->jump[6]; -#endif + memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns)); /* Stub uses address relative to r2. */ reladdr = (unsigned long)entry - my_r2(sechdrs, me); @@ -296,35 +431,33 @@ static inline int create_stub(Elf64_Shdr *sechdrs, } DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr); - *loc1 = PPC_HA(reladdr); - *loc2 = PPC_LO(reladdr); - entry->opd.funcaddr = opd->funcaddr; - entry->opd.r2 = opd->r2; + entry->jump[0] |= PPC_HA(reladdr); + entry->jump[1] |= PPC_LO(reladdr); + entry->funcdata = func_desc(addr); return 1; } -/* Create stub to jump to function described in this OPD: we need the +/* Create stub to jump to function described in this OPD/ptr: we need the stub to set up the TOC ptr (r2) for the function. */ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs, - unsigned long opdaddr, + unsigned long addr, struct module *me) { struct ppc64_stub_entry *stubs; - struct ppc64_opd_entry *opd = (void *)opdaddr; unsigned int i, num_stubs; num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs); /* Find this stub, or if that fails, the next avail. 
entry */ stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr; - for (i = 0; stubs[i].opd.funcaddr; i++) { + for (i = 0; stub_func_addr(stubs[i].funcdata); i++) { BUG_ON(i >= num_stubs); - if (stubs[i].opd.funcaddr == opd->funcaddr) + if (stub_func_addr(stubs[i].funcdata) == func_addr(addr)) return (unsigned long)&stubs[i]; } - if (!create_stub(sechdrs, &stubs[i], opd, me)) + if (!create_stub(sechdrs, &stubs[i], addr, me)) return 0; return (unsigned long)&stubs[i]; @@ -339,7 +472,8 @@ static int restore_r2(u32 *instruction, struct module *me) me->name, *instruction); return 0; } - *instruction = 0xe8410028; /* ld r2,40(r1) */ + /* ld r2,R2_STACK_OFFSET(r1) */ + *instruction = 0xe8410000 | R2_STACK_OFFSET; return 1; } @@ -357,6 +491,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, DEBUGP("Applying ADD relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); + + /* First time we're called, we can fix up .TOC. */ + if (!me->arch.toc_fixed) { + sym = find_dot_toc(sechdrs, strtab, symindex); + /* It's theoretically possible that a module doesn't want a + * .TOC. so don't fail it just for that. */ + if (sym) + sym->st_value = my_r2(sechdrs, me); + me->arch.toc_fixed = true; + } + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -453,7 +598,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, return -ENOENT; if (!restore_r2((u32 *)location + 1, me)) return -ENOEXEC; - } + } else + value += local_entry_offset(sym); /* Convert value to relative */ value -= (unsigned long)location; @@ -474,6 +620,31 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, *location = value - (unsigned long)location; break; + case R_PPC64_TOCSAVE: + /* + * Marker reloc indicates we don't have to save r2. + * That would only save us one instruction, so ignore + * it. + */ + break; + + case R_PPC64_REL16_HA: + /* Subtract location pointer */ + value -= (unsigned long)location; + value = ((value + 0x8000) >> 16); + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + + case R_PPC64_REL16_LO: + /* Subtract location pointer */ + value -= (unsigned long)location; + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + default: printk("%s: Unknown ADD relocation: %lu\n", me->name, diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 31d021506d2..2ae1b99166c 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -54,6 +54,7 @@ #ifdef CONFIG_PPC64 #include <asm/firmware.h> #endif +#include <asm/code-patching.h> #include <linux/kprobes.h> #include <linux/kdebug.h> @@ -1108,7 +1109,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, struct thread_info *ti = (void *)task_stack_page(p); memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); - childregs->gpr[14] = usp; /* function */ + /* function */ + if (usp) + childregs->gpr[14] = ppc_function_entry((void *)usp); #ifdef CONFIG_PPC64 clear_tsk_thread_flag(p, TIF_32BIT); childregs->softe = 1; @@ -1187,17 +1190,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, if (cpu_has_feature(CPU_FTR_HAS_PPR)) p->thread.ppr = INIT_PPR; #endif - /* - * The PPC64 ABI makes use of a TOC to contain function - * pointers. The function (ret_from_except) is actually a pointer - * to the TOC entry. The first entry is a pointer to the actual - * function. 
- */ -#ifdef CONFIG_PPC64 - kregs->nip = *((unsigned long *)f); -#else - kregs->nip = (unsigned long)f; -#endif + kregs->nip = ppc_function_entry(f); return 0; } diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh index b0c263da219..77aa1e95e90 100644 --- a/arch/powerpc/kernel/prom_init_check.sh +++ b/arch/powerpc/kernel/prom_init_check.sh @@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 reloc_got2 kernstart_addr memstart_addr linux_banner _stext opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry boot_command_line __prom_init_toc_start __prom_init_toc_end -btext_setup_display" +btext_setup_display TOC." NM="$1" OBJ="$2" diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index fbe24377eda..90b532ace0d 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -341,7 +341,7 @@ void smp_release_cpus(void) ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop - PHYSICAL_START); - *ptr = __pa(generic_secondary_smp_init); + *ptr = ppc_function_entry(generic_secondary_smp_init); /* And wait a bit for them to catch up */ for (i = 0; i < 100000; i++) { diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 93219c34af3..895c50ca943 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -17,12 +17,12 @@ #include <asm/ppc_asm.h> #ifdef CONFIG_PPC64 -#define SYSCALL(func) .llong .sys_##func,.sys_##func -#define COMPAT_SYS(func) .llong .sys_##func,.compat_sys_##func -#define PPC_SYS(func) .llong .ppc_##func,.ppc_##func -#define OLDSYS(func) .llong .sys_ni_syscall,.sys_ni_syscall -#define SYS32ONLY(func) .llong .sys_ni_syscall,.compat_sys_##func -#define SYSX(f, f3264, f32) .llong .f,.f3264 +#define SYSCALL(func) .llong DOTSYM(sys_##func),DOTSYM(sys_##func) +#define COMPAT_SYS(func) .llong DOTSYM(sys_##func),DOTSYM(compat_sys_##func) +#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func) +#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall) +#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func) +#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264) #else #define SYSCALL(func) .long sys_##func #define COMPAT_SYS(func) .long sys_##func @@ -36,6 +36,8 @@ #define PPC_SYS_SPU(func) PPC_SYS(func) #define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32) +.section .rodata,"a" + #ifdef CONFIG_PPC64 #define sys_sigpending sys_ni_syscall #define sys_old_getrlimit sys_ni_syscall @@ -43,5 +45,7 @@ .p2align 3 #endif -_GLOBAL(sys_call_table) +.globl sys_call_table +sys_call_table: + #include <asm/systbl.h> diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 508c54b92fa..ee061c3715d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -42,7 +42,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \ /* Stack frame offsets for local variables. */ #define TM_FRAME_L0 TM_FRAME_SIZE-16 #define TM_FRAME_L1 TM_FRAME_SIZE-8 -#define STACK_PARAM(x) (48+((x)*8)) /* In order to access the TM SPRs, TM must be enabled. So, do so: */ @@ -109,12 +108,12 @@ _GLOBAL(tm_reclaim) mflr r0 stw r6, 8(r1) std r0, 16(r1) - std r2, 40(r1) + std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */ - std r3, STACK_PARAM(0)(r1) + std r3, STK_PARAM(R3)(r1) SAVE_NVGPRS(r1) /* We need to setup MSR for VSX register save instructions. 
Here we @@ -210,7 +209,7 @@ dont_backup_fp: /* Now get some more GPRS free */ std r7, GPR7(r1) /* Temporary stash */ std r12, GPR12(r1) /* '' '' '' */ - ld r12, STACK_PARAM(0)(r1) /* Param 0, thread_struct * */ + ld r12, STK_PARAM(R3)(r1) /* Param 0, thread_struct * */ std r11, THREAD_TM_PPR(r12) /* Store PPR and free r11 */ @@ -297,7 +296,7 @@ dont_backup_fp: ld r0, 16(r1) mtcr r4 mtlr r0 - ld r2, 40(r1) + ld r2, STK_GOT(r1) /* Load system default DSCR */ ld r4, DSCR_DEFAULT@toc(r2) @@ -320,7 +319,7 @@ _GLOBAL(__tm_recheckpoint) mflr r0 stw r5, 8(r1) std r0, 16(r1) - std r2, 40(r1) + std r2, STK_GOT(r1) stdu r1, -TM_FRAME_SIZE(r1) /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. @@ -478,7 +477,7 @@ restore_gprs: ld r0, 16(r1) mtcr r4 mtlr r0 - ld r2, 40(r1) + ld r2, STK_GOT(r1) /* Load system default DSCR */ ld r4, DSCR_DEFAULT@toc(r2) diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index e18e3cfc32d..8c86422a1e3 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -171,7 +171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) #endif /* CONFIG_SMP */ /* Jump to partition switch code */ - bl .kvmppc_hv_entry_trampoline + bl kvmppc_hv_entry_trampoline nop /* diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b031f932c0c..9f0ad718e47 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1658,7 +1658,7 @@ kvmppc_hdsi: /* Search the hash table. */ mr r3, r9 /* vcpu pointer */ li r7, 1 /* data fault */ - bl .kvmppc_hpte_hv_fault + bl kvmppc_hpte_hv_fault ld r9, HSTATE_KVM_VCPU(r13) ld r10, VCPU_PC(r9) ld r11, VCPU_MSR(r9) @@ -1732,7 +1732,7 @@ kvmppc_hisi: mr r4, r10 mr r6, r11 li r7, 0 /* instruction fault */ - bl .kvmppc_hpte_hv_fault + bl kvmppc_hpte_hv_fault ld r9, HSTATE_KVM_VCPU(r13) ld r10, VCPU_PC(r9) ld r11, VCPU_MSR(r9) @@ -1806,16 +1806,16 @@ hcall_real_fallback: .globl hcall_real_table hcall_real_table: .long 0 /* 0 - unused */ - .long .kvmppc_h_remove - hcall_real_table - .long .kvmppc_h_enter - hcall_real_table - .long .kvmppc_h_read - hcall_real_table + .long DOTSYM(kvmppc_h_remove) - hcall_real_table + .long DOTSYM(kvmppc_h_enter) - hcall_real_table + .long DOTSYM(kvmppc_h_read) - hcall_real_table .long 0 /* 0x10 - H_CLEAR_MOD */ .long 0 /* 0x14 - H_CLEAR_REF */ - .long .kvmppc_h_protect - hcall_real_table - .long .kvmppc_h_get_tce - hcall_real_table - .long .kvmppc_h_put_tce - hcall_real_table + .long DOTSYM(kvmppc_h_protect) - hcall_real_table + .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table + .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table .long 0 /* 0x24 - H_SET_SPRG0 */ - .long .kvmppc_h_set_dabr - hcall_real_table + .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table .long 0 /* 0x2c */ .long 0 /* 0x30 */ .long 0 /* 0x34 */ @@ -1831,11 +1831,11 @@ hcall_real_table: .long 0 /* 0x5c */ .long 0 /* 0x60 */ #ifdef CONFIG_KVM_XICS - .long .kvmppc_rm_h_eoi - hcall_real_table - .long .kvmppc_rm_h_cppr - hcall_real_table - .long .kvmppc_rm_h_ipi - hcall_real_table + .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table + .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table + .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table .long 0 /* 0x70 - H_IPOLL */ - .long .kvmppc_rm_h_xirr - hcall_real_table + .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table #else .long 0 /* 0x64 - H_EOI */ .long 0 /* 0x68 - H_CPPR */ @@ -1869,7 +1869,7 @@ hcall_real_table: .long 0 /* 0xd4 */ .long 0 /* 0xd8 */ .long 0 /* 
0xdc */ - .long .kvmppc_h_cede - hcall_real_table + .long DOTSYM(kvmppc_h_cede) - hcall_real_table .long 0 /* 0xe4 */ .long 0 /* 0xe8 */ .long 0 /* 0xec */ @@ -1886,11 +1886,11 @@ hcall_real_table: .long 0 /* 0x118 */ .long 0 /* 0x11c */ .long 0 /* 0x120 */ - .long .kvmppc_h_bulk_remove - hcall_real_table + .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table .long 0 /* 0x128 */ .long 0 /* 0x12c */ .long 0 /* 0x130 */ - .long .kvmppc_h_set_xdabr - hcall_real_table + .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table hcall_real_table_end: ignore_hdec: @@ -2115,7 +2115,7 @@ kvm_cede_exit: /* Try to handle a machine check in real mode */ machine_check_realmode: mr r3, r9 /* get vcpu pointer */ - bl .kvmppc_realmode_machine_check + bl kvmppc_realmode_machine_check nop cmpdi r3, 0 /* continue exiting from guest? */ ld r9, HSTATE_KVM_VCPU(r13) diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 9f9434a8526..e59c9c2ebe9 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S @@ -20,7 +20,7 @@ _GLOBAL(copy_page) BEGIN_FTR_SECTION lis r5,PAGE_SIZE@h FTR_SECTION_ELSE - b .copypage_power7 + b copypage_power7 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) ori r5,r5,PAGE_SIZE@l BEGIN_FTR_SECTION diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index 395c594722a..d7dafb3777a 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -56,15 +56,15 @@ _GLOBAL(copypage_power7) #ifdef CONFIG_ALTIVEC mflr r0 - std r3,48(r1) - std r4,56(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_copy + bl enter_vmx_copy cmpwi r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) + ld r3,STK_REG(R31)(r1) + ld r4,STK_REG(R30)(r1) mtlr r0 li r0,(PAGE_SIZE/128) @@ -103,7 +103,7 @@ _GLOBAL(copypage_power7) addi r3,r3,128 bdnz 1b - b .exit_vmx_copy /* tail call optimise */ + b exit_vmx_copy /* tail call optimise */ #else li r0,(PAGE_SIZE/128) diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 596a285c075..0860ee46013 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -18,7 +18,7 @@ #endif .align 7 -_GLOBAL(__copy_tofrom_user) +_GLOBAL_TOC(__copy_tofrom_user) BEGIN_FTR_SECTION nop FTR_SECTION_ELSE diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index e8e9c36dc78..c46c876ac96 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -66,7 +66,7 @@ ld r15,STK_REG(R15)(r1) ld r14,STK_REG(R14)(r1) .Ldo_err3: - bl .exit_vmx_usercopy + bl exit_vmx_usercopy ld r0,STACKFRAMESIZE+16(r1) mtlr r0 b .Lexit @@ -85,9 +85,9 @@ .Lexit: addi r1,r1,STACKFRAMESIZE .Ldo_err1: - ld r3,48(r1) - ld r4,56(r1) - ld r5,64(r1) + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + ld r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + ld r5,-STACKFRAMESIZE+STK_REG(R29)(r1) b __copy_tofrom_user_base @@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7) cmpldi r5,16 cmpldi cr1,r5,4096 - std r3,48(r1) - std r4,56(r1) - std r5,64(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) blt .Lshort_copy bgt cr1,.Lvmx_copy #else cmpldi r5,16 - std r3,48(r1) - std r4,56(r1) - std r5,64(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) blt 
.Lshort_copy #endif @@ -295,12 +295,12 @@ err1; stb r0,0(r3) mflr r0 std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_usercopy + bl enter_vmx_usercopy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) - ld r5,STACKFRAMESIZE+64(r1) + ld r3,STK_REG(R31)(r1) + ld r4,STK_REG(R30)(r1) + ld r5,STK_REG(R29)(r1) mtlr r0 /* @@ -514,7 +514,7 @@ err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_usercopy /* tail call optimise */ + b exit_vmx_usercopy /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ @@ -717,5 +717,5 @@ err3; lbz r0,0(r4) err3; stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - b .exit_vmx_usercopy /* tail call optimise */ + b exit_vmx_usercopy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S index 9b96ff2ecd4..19e66001a4f 100644 --- a/arch/powerpc/lib/hweight_64.S +++ b/arch/powerpc/lib/hweight_64.S @@ -24,7 +24,7 @@ _GLOBAL(__arch_hweight8) BEGIN_FTR_SECTION - b .__sw_hweight8 + b __sw_hweight8 nop nop FTR_SECTION_ELSE @@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) _GLOBAL(__arch_hweight16) BEGIN_FTR_SECTION - b .__sw_hweight16 + b __sw_hweight16 nop nop nop @@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) _GLOBAL(__arch_hweight32) BEGIN_FTR_SECTION - b .__sw_hweight32 + b __sw_hweight32 nop nop nop @@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) _GLOBAL(__arch_hweight64) BEGIN_FTR_SECTION - b .__sw_hweight64 + b __sw_hweight64 nop nop nop diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index f4fcb0bc656..0738f96befb 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S @@ -79,8 +79,8 @@ _GLOBAL(memset) _GLOBAL(memmove) cmplw 0,r3,r4 - bgt .backwards_memcpy - b .memcpy + bgt backwards_memcpy + b memcpy _GLOBAL(backwards_memcpy) rlwinm. 
r7,r5,32-3,3,31 /* r0 = r5 >> 3 */ diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index dc4ba7953b9..32a06ec395d 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S @@ -10,12 +10,12 @@ #include <asm/ppc_asm.h> .align 7 -_GLOBAL(memcpy) +_GLOBAL_TOC(memcpy) BEGIN_FTR_SECTION #ifdef __LITTLE_ENDIAN__ cmpdi cr7,r5,0 #else - std r3,48(r1) /* save destination pointer for return value */ + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* save destination pointer for return value */ #endif FTR_SECTION_ELSE #ifndef SELFTEST @@ -88,7 +88,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+3,3f lbz r9,8(r4) stb r9,0(r3) -3: ld r3,48(r1) /* return dest pointer */ +3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr .Lsrc_unaligned: @@ -171,7 +171,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 2: bf cr7*4+3,3f rotldi r9,r9,8 stb r9,0(r3) -3: ld r3,48(r1) /* return dest pointer */ +3: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr .Ldst_unaligned: @@ -216,6 +216,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) 3: bf cr7*4+3,4f lbz r0,0(r4) stb r0,0(r3) -4: ld r3,48(r1) /* return dest pointer */ +4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */ blr #endif diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index e4177dbea6b..2ff5c142f87 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7) cmpldi r5,16 cmpldi cr1,r5,4096 - std r3,48(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blt .Lshort_copy bgt cr1,.Lvmx_copy #else cmpldi r5,16 - std r3,48(r1) + std r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blt .Lshort_copy #endif @@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7) lbz r0,0(r4) stb r0,0(r3) -15: ld r3,48(r1) +15: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) blr .Lunwind_stack_nonvmx_copy: @@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7) #ifdef CONFIG_ALTIVEC .Lvmx_copy: mflr r0 - std r4,56(r1) - std r5,64(r1) + std r4,-STACKFRAMESIZE+STK_REG(R30)(r1) + std r5,-STACKFRAMESIZE+STK_REG(R29)(r1) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) - bl .enter_vmx_copy + bl enter_vmx_copy cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) - ld r3,STACKFRAMESIZE+48(r1) - ld r4,STACKFRAMESIZE+56(r1) - ld r5,STACKFRAMESIZE+64(r1) + ld r3,STK_REG(R31)(r1) + ld r4,STK_REG(R30)(r1) + ld r5,STK_REG(R29)(r1) mtlr r0 /* @@ -447,8 +447,8 @@ _GLOBAL(memcpy_power7) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - ld r3,48(r1) - b .exit_vmx_copy /* tail call optimise */ + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + b exit_vmx_copy /* tail call optimise */ .Lvmx_unaligned_copy: /* Get the destination 16B aligned */ @@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7) stb r0,0(r3) 15: addi r1,r1,STACKFRAMESIZE - ld r3,48(r1) - b .exit_vmx_copy /* tail call optimise */ + ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) + b exit_vmx_copy /* tail call optimise */ #endif /* CONFiG_ALTIVEC */ diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 1136d26a95a..057cbbb4c57 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S @@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) BEGIN_FTR_SECTION mr r4,r30 mr r5,r7 - bl .hash_page_do_lazy_icache + bl hash_page_do_lazy_icache END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in @@ -201,7 +201,8 @@ htab_insert_pte: li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual 
page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert1) +.globl htab_call_hpte_insert1 +htab_call_hpte_insert1: bl . /* Patched by htab_finish_init() */ cmpdi 0,r3,0 bge htab_pte_insert_ok /* Insertion successful */ @@ -225,7 +226,8 @@ _GLOBAL(htab_call_hpte_insert1) li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert2) +.globl htab_call_hpte_insert2 +htab_call_hpte_insert2: bl . /* Patched by htab_finish_init() */ cmpdi 0,r3,0 bge+ htab_pte_insert_ok /* Insertion successful */ @@ -242,7 +244,8 @@ _GLOBAL(htab_call_hpte_insert2) 2: and r0,r5,r27 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_remove */ -_GLOBAL(htab_call_hpte_remove) +.globl htab_call_hpte_remove +htab_call_hpte_remove: bl . /* Patched by htab_finish_init() */ /* Try all again */ @@ -296,7 +299,8 @@ htab_modify_pte: li r7,MMU_PAGE_4K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */ -_GLOBAL(htab_call_hpte_updatepp) +.globl htab_call_hpte_updatepp +htab_call_hpte_updatepp: bl . /* Patched by htab_finish_init() */ /* if we failed because typically the HPTE wasn't really here @@ -471,7 +475,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) BEGIN_FTR_SECTION mr r4,r30 mr r5,r7 - bl .hash_page_do_lazy_icache + bl hash_page_do_lazy_icache END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in @@ -526,7 +530,8 @@ htab_special_pfn: li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert1) +.globl htab_call_hpte_insert1 +htab_call_hpte_insert1: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge htab_pte_insert_ok /* Insertion successful */ @@ -554,7 +559,8 @@ _GLOBAL(htab_call_hpte_insert1) li r8,MMU_PAGE_4K /* page size */ li r9,MMU_PAGE_4K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(htab_call_hpte_insert2) +.globl htab_call_hpte_insert2 +htab_call_hpte_insert2: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge+ htab_pte_insert_ok /* Insertion successful */ @@ -571,7 +577,8 @@ _GLOBAL(htab_call_hpte_insert2) 2: and r0,r5,r27 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_remove */ -_GLOBAL(htab_call_hpte_remove) +.globl htab_call_hpte_remove +htab_call_hpte_remove: bl . /* patched by htab_finish_init() */ /* Try all again */ @@ -588,7 +595,7 @@ htab_inval_old_hpte: li r6,MMU_PAGE_64K /* psize */ ld r7,STK_PARAM(R9)(r1) /* ssize */ ld r8,STK_PARAM(R8)(r1) /* local */ - bl .flush_hash_page + bl flush_hash_page /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ lis r0,_PAGE_HPTE_SUB@h ori r0,r0,_PAGE_HPTE_SUB@l @@ -660,7 +667,8 @@ htab_modify_pte: li r7,MMU_PAGE_4K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */ -_GLOBAL(htab_call_hpte_updatepp) +.globl htab_call_hpte_updatepp +htab_call_hpte_updatepp: bl . 
/* patched by htab_finish_init() */ /* if we failed because typically the HPTE wasn't really here @@ -812,7 +820,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) BEGIN_FTR_SECTION mr r4,r30 mr r5,r7 - bl .hash_page_do_lazy_icache + bl hash_page_do_lazy_icache END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE) /* At this point, r3 contains new PP bits, save them in @@ -857,7 +865,8 @@ ht64_insert_pte: li r8,MMU_PAGE_64K li r9,MMU_PAGE_64K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(ht64_call_hpte_insert1) +.globl ht64_call_hpte_insert1 +ht64_call_hpte_insert1: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge ht64_pte_insert_ok /* Insertion successful */ @@ -881,7 +890,8 @@ _GLOBAL(ht64_call_hpte_insert1) li r8,MMU_PAGE_64K li r9,MMU_PAGE_64K /* actual page size */ ld r10,STK_PARAM(R9)(r1) /* segment size */ -_GLOBAL(ht64_call_hpte_insert2) +.globl ht64_call_hpte_insert2 +ht64_call_hpte_insert2: bl . /* patched by htab_finish_init() */ cmpdi 0,r3,0 bge+ ht64_pte_insert_ok /* Insertion successful */ @@ -898,7 +908,8 @@ _GLOBAL(ht64_call_hpte_insert2) 2: and r0,r5,r27 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ /* Call ppc_md.hpte_remove */ -_GLOBAL(ht64_call_hpte_remove) +.globl ht64_call_hpte_remove +ht64_call_hpte_remove: bl . /* patched by htab_finish_init() */ /* Try all again */ @@ -952,7 +963,8 @@ ht64_modify_pte: li r7,MMU_PAGE_64K /* actual page size */ ld r8,STK_PARAM(R9)(r1) /* segment size */ ld r9,STK_PARAM(R8)(r1) /* get "local" param */ -_GLOBAL(ht64_call_hpte_updatepp) +.globl ht64_call_hpte_updatepp +ht64_call_hpte_updatepp: bl . /* patched by htab_finish_init() */ /* if we failed because typically the HPTE wasn't really here diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 12f1a397f50..91666f07e60 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -622,47 +622,43 @@ int remove_section_mapping(unsigned long start, unsigned long end) } #endif /* CONFIG_MEMORY_HOTPLUG */ -#define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) +extern u32 htab_call_hpte_insert1[]; +extern u32 htab_call_hpte_insert2[]; +extern u32 htab_call_hpte_remove[]; +extern u32 htab_call_hpte_updatepp[]; +extern u32 ht64_call_hpte_insert1[]; +extern u32 ht64_call_hpte_insert2[]; +extern u32 ht64_call_hpte_remove[]; +extern u32 ht64_call_hpte_updatepp[]; static void __init htab_finish_init(void) { - extern unsigned int *htab_call_hpte_insert1; - extern unsigned int *htab_call_hpte_insert2; - extern unsigned int *htab_call_hpte_remove; - extern unsigned int *htab_call_hpte_updatepp; - #ifdef CONFIG_PPC_HAS_HASH_64K - extern unsigned int *ht64_call_hpte_insert1; - extern unsigned int *ht64_call_hpte_insert2; - extern unsigned int *ht64_call_hpte_remove; - extern unsigned int *ht64_call_hpte_updatepp; - patch_branch(ht64_call_hpte_insert1, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(ht64_call_hpte_insert2, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(ht64_call_hpte_remove, - FUNCTION_TEXT(ppc_md.hpte_remove), + ppc_function_entry(ppc_md.hpte_remove), BRANCH_SET_LINK); patch_branch(ht64_call_hpte_updatepp, - FUNCTION_TEXT(ppc_md.hpte_updatepp), + ppc_function_entry(ppc_md.hpte_updatepp), BRANCH_SET_LINK); - #endif /* CONFIG_PPC_HAS_HASH_64K */ patch_branch(htab_call_hpte_insert1, - FUNCTION_TEXT(ppc_md.hpte_insert), + 
ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(htab_call_hpte_insert2, - FUNCTION_TEXT(ppc_md.hpte_insert), + ppc_function_entry(ppc_md.hpte_insert), BRANCH_SET_LINK); patch_branch(htab_call_hpte_remove, - FUNCTION_TEXT(ppc_md.hpte_remove), + ppc_function_entry(ppc_md.hpte_remove), BRANCH_SET_LINK); patch_branch(htab_call_hpte_updatepp, - FUNCTION_TEXT(ppc_md.hpte_updatepp), + ppc_function_entry(ppc_md.hpte_updatepp), BRANCH_SET_LINK); } diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 9d1d33cd2be..4623366f82e 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -256,10 +256,13 @@ static inline void patch_slb_encoding(unsigned int *insn_addr, patch_instruction(insn_addr, insn); } +extern u32 slb_miss_kernel_load_linear[]; +extern u32 slb_miss_kernel_load_io[]; +extern u32 slb_compare_rr_to_size[]; +extern u32 slb_miss_kernel_load_vmemmap[]; + void slb_set_size(u16 size) { - extern unsigned int *slb_compare_rr_to_size; - if (mmu_slb_size == size) return; @@ -272,11 +275,7 @@ void slb_initialize(void) unsigned long linear_llp, vmalloc_llp, io_llp; unsigned long lflags, vflags; static int slb_encoding_inited; - extern unsigned int *slb_miss_kernel_load_linear; - extern unsigned int *slb_miss_kernel_load_io; - extern unsigned int *slb_compare_rr_to_size; #ifdef CONFIG_SPARSEMEM_VMEMMAP - extern unsigned int *slb_miss_kernel_load_vmemmap; unsigned long vmemmap_llp; #endif diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index e0b3cf42b05..736d18b3cef 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -59,7 +59,8 @@ _GLOBAL(slb_allocate_realmode) /* Linear mapping encoding bits, the "li" instruction below will * be patched by the kernel at boot */ -_GLOBAL(slb_miss_kernel_load_linear) +.globl slb_miss_kernel_load_linear +slb_miss_kernel_load_linear: li r11,0 /* * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 @@ -79,7 +80,8 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) /* Check virtual memmap region. To be patches at kernel boot */ cmpldi cr0,r9,0xf bne 1f -_GLOBAL(slb_miss_kernel_load_vmemmap) +.globl slb_miss_kernel_load_vmemmap +slb_miss_kernel_load_vmemmap: li r11,0 b 6f 1: @@ -95,7 +97,8 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) b 6f 5: /* IO mapping */ - _GLOBAL(slb_miss_kernel_load_io) +.globl slb_miss_kernel_load_io +slb_miss_kernel_load_io: li r11,0 6: /* @@ -250,7 +253,8 @@ slb_finish_load: 7: ld r10,PACASTABRR(r13) addi r10,r10,1 /* This gets soft patched on boot.
*/ -_GLOBAL(slb_compare_rr_to_size) +.globl slb_compare_rr_to_size +slb_compare_rr_to_size: cmpldi r10,0 blt+ 4f diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index 6382098d6f8..ba093f55367 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c @@ -27,6 +27,7 @@ #include <asm/cacheflush.h> #include <asm/dbell.h> #include <asm/fsl_guts.h> +#include <asm/code-patching.h> #include <sysdev/fsl_soc.h> #include <sysdev/mpic.h> @@ -267,7 +268,7 @@ out: flush_spin_table(spin_table); out_be32(&spin_table->pir, hw_cpu); out_be64((u64 *)(&spin_table->addr_h), - __pa((u64)*((unsigned long long *)generic_secondary_smp_init))); + __pa(ppc_function_entry(generic_secondary_smp_init))); flush_spin_table(spin_table); #endif diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 90745eaa45f..c8017a7bcab 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -40,6 +40,7 @@ #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/cputhreads.h> +#include <asm/code-patching.h> #include "interrupt.h" #include <asm/udbg.h> @@ -70,8 +71,8 @@ static cpumask_t of_spin_map; static inline int smp_startup_cpu(unsigned int lcpu) { int status; - unsigned long start_here = __pa((u32)*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + __pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; diff --git a/arch/powerpc/platforms/pasemi/powersave.S b/arch/powerpc/platforms/pasemi/powersave.S index 56f45adcd08..81ab555aa49 100644 --- a/arch/powerpc/platforms/pasemi/powersave.S +++ b/arch/powerpc/platforms/pasemi/powersave.S @@ -66,7 +66,7 @@ sleep_common: std r3, 48(r1) /* Only do power savings when in astate 0 */ - bl .check_astate + bl check_astate cmpwi r3,0 bne 1f diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S index 3cd262897c2..11a3169ee58 100644 --- a/arch/powerpc/platforms/powernv/opal-takeover.S +++ b/arch/powerpc/platforms/powernv/opal-takeover.S @@ -21,11 +21,13 @@ _GLOBAL(opal_query_takeover) mfcr r0 stw r0,8(r1) + stdu r1,-STACKFRAMESIZE(r1) std r3,STK_PARAM(R3)(r1) std r4,STK_PARAM(R4)(r1) li r3,H_HAL_TAKEOVER li r4,H_HAL_TAKEOVER_QUERY_MAGIC HVSC ld r10,STK_PARAM(R3)(r1) std r4,0(r10) ld r10,STK_PARAM(R4)(r1) + addi r1,r1,STACKFRAMESIZE diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index f531ffe35b3..b5ebc545a37 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -32,7 +32,7 @@ std r12,PACASAVEDMSR(r13); \ andc r12,r12,r0; \ mtmsrd r12,1; \ - LOAD_REG_ADDR(r0,.opal_return); \ + LOAD_REG_ADDR(r0,opal_return); \ mtlr r0; \ li r0,MSR_DR|MSR_IR|MSR_LE;\ andc r12,r12,r0; \ @@ -44,7 +44,7 @@ mtspr SPRN_HSRR0,r12; \ hrfid -_STATIC(opal_return) +opal_return: /* * Fixup endian on OPAL return...
we should be able to simplify * this by instead converting the below trampoline to a set of diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index bf5fcd45216..1601a1ea02c 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -31,6 +31,7 @@ #include <asm/xics.h> #include <asm/opal.h> #include <asm/runlatch.h> +#include <asm/code-patching.h> #include "powernv.h" @@ -50,8 +51,8 @@ static void pnv_smp_setup_cpu(int cpu) int pnv_smp_kick_cpu(int nr) { unsigned int pcpu = get_hard_smp_processor_id(nr); - unsigned long start_here = __pa(*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + __pa(ppc_function_entry(generic_secondary_smp_init)); long rc; BUG_ON(nr < 0 || nr >= NR_CPUS); diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 444fe7759e5..7891a86066e 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1); \ std r0,16(r1); \ addi r4,r1,STK_PARAM(FIRST_REG); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ - bl .__trace_hcall_entry; \ + bl __trace_hcall_entry; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r0,16(r1); \ ld r3,STK_PARAM(R3)(r1); \ @@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1); \ mr r3,r6; \ std r0,16(r1); \ stdu r1,-STACK_FRAME_OVERHEAD(r1); \ - bl .__trace_hcall_exit; \ + bl __trace_hcall_exit; \ addi r1,r1,STACK_FRAME_OVERHEAD; \ ld r0,16(r1); \ ld r3,STK_PARAM(R3)(r1); \ diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 24f58cb0a54..a3555b10c1a 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -44,6 +44,7 @@ #include <asm/xics.h> #include <asm/dbell.h> #include <asm/plpar_wrappers.h> +#include <asm/code-patching.h> #include "pseries.h" #include "offline_states.h" @@ -96,8 +97,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) static inline int smp_startup_cpu(unsigned int lcpu) { int status; - unsigned long start_here = __pa((u32)*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + __pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c index 268bc899c1f..8c79ce016cf 100644 --- a/arch/powerpc/platforms/wsp/scom_smp.c +++ b/arch/powerpc/platforms/wsp/scom_smp.c @@ -20,6 +20,7 @@ #include <asm/reg_a2.h> #include <asm/scom.h> #include <asm/udbg.h> +#include <asm/code-patching.h> #include "wsp.h" @@ -405,7 +406,7 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np) goto fail; } - start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init + start_here = ppc_function_entry(core_setup ? 
generic_secondary_smp_init : generic_secondary_thread_init); pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); diff --git a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h index ccd9c84c4e3..d1dc3742551 100644 --- a/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h +++ b/tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h @@ -46,12 +46,15 @@ #define R20 r20 #define R21 r21 #define R22 r22 +#define R29 r29 +#define R30 r30 +#define R31 r31 #define STACKFRAMESIZE 256 -#define STK_PARAM(i) (48 + ((i)-3)*8) #define STK_REG(i) (112 + ((i)-14)*8) #define _GLOBAL(A) FUNC_START(test_ ## A) +#define _GLOBAL_TOC(A) _GLOBAL(A) #define PPC_MTOCRF(A, B) mtocrf A, B
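
The module_64.c changes above derive the ELFv2 local entry point offset from the three st_other bits the ABI reserves for it. Below is a minimal user-space sketch of that decoding; the macro is taken from the hunk above, while the surrounding harness (main() and the loop) is illustrative only:

#include <stdio.h>

/* From the module_64.c hunk: the 3-bit field in st_other encodes the
 * distance between a function's global and local entry points as a
 * power of two, rounded down to a multiple of 4. Encodings 0 and 1
 * both decode to offset 0. */
#define STO_PPC64_LOCAL_BIT	5
#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other) \
	(((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)

int main(void)
{
	unsigned int field;

	for (field = 0; field < 8; field++) {
		unsigned char st_other = field << STO_PPC64_LOCAL_BIT;

		printf("st_other field %u -> local entry offset %u bytes\n",
		       field, PPC64_LOCAL_ENTRY_OFFSET(st_other));
	}
	return 0;
}

The common case is field value 3, giving offset 8: the two-instruction addis/addi global entry sequence that computes r2 from r12.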
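create_stub() ORs PPC_HA(reladdr) and PPC_LO(reladdr) into the stub's addis/addi pair, and module_trampoline_target() reverses that arithmetic when it reads a stub back ("perform the addis/addi, both signed"). The subtlety is the sign bias: addi sign-extends its immediate, so the high half has to be adjusted by 0x8000. Here is a self-contained round-trip check; lo(), ha() and recombine() are stand-in names for illustration, not the kernel helpers:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Split a 32-bit signed offset so that "addis rd,rb,ha; addi rd,rd,lo"
 * reconstructs it even though both immediates are sign-extended. The
 * test offsets stay well inside the 32-bit range the stubs deal with. */
static uint16_t lo(int32_t v) { return v & 0xffff; }
static uint16_t ha(int32_t v) { return (v + 0x8000) >> 16; }

/* The inverse; mirrors the kernel's ((short)upper << 16) + (short)lower. */
static int32_t recombine(uint16_t upper, uint16_t lower)
{
	return ((int32_t)(int16_t)upper << 16) + (int16_t)lower;
}

int main(void)
{
	static const int32_t offsets[] = {
		0, 4, -4, 0x7ffc, -0x8000, 0x12345678, -0x12345678
	};
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		assert(recombine(ha(offsets[i]), lo(offsets[i])) == offsets[i]);
		printf("%#10x -> ha %#6x lo %#6x\n",
		       (unsigned int)offsets[i], ha(offsets[i]), lo(offsets[i]));
	}
	return 0;
}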
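is_module_trampoline() recognises a stub by comparing the candidate instructions against ppc64_stub_insns under per-word masks, so the relocated 16-bit immediates in the addis/addi never affect the match. The same technique in miniature, with a hypothetical two-instruction template (the template, masks and test values here are made up for the demo):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* addis r11,r2,<any>; addi r11,r11,<any> -- compare everything but
 * the low 16 immediate bits of each word. */
static const uint32_t stub_template[2] = { 0x3d620000, 0x396b0000 };
static const uint32_t stub_mask[2]     = { 0xffff0000, 0xffff0000 };

static bool matches_template(const uint32_t *insns)
{
	unsigned int i;

	for (i = 0; i < 2; i++)
		if ((insns[i] & stub_mask[i]) != (stub_template[i] & stub_mask[i]))
			return false;
	return true;
}

int main(void)
{
	uint32_t stub[2]  = { 0x3d620123, 0x396b4567 }; /* same ops, different offsets */
	uint32_t other[2] = { 0x48000008, 0xe8410028 }; /* b +8; ld r2,40(r1) */

	printf("stub:  %s\n", matches_template(stub) ? "match" : "no match");
	printf("other: %s\n", matches_template(other) ? "match" : "no match");
	return 0;
}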
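Several hunks (setup_64.c and the pseries, cell, powernv and 85xx SMP code) replace open-coded descriptor dereferences such as *(unsigned long *)generic_secondary_smp_init with ppc_function_entry(). The ABI difference they paper over is sketched below, mirroring the func_desc_t helpers from the module_64.c hunk; ABIV2 is a stand-in for the kernel's _CALL_ELF == 2 test and the addresses are made up:

#include <stdio.h>

#ifdef ABIV2
/* ELFv2: a function pointer is the code address itself. */
typedef unsigned long func_desc_t;

static unsigned long func_addr(unsigned long addr)
{
	return addr;
}
#else
/* ELFv1: a function pointer points at an OPD entry; the first
 * doubleword is the code address, the second the TOC value. */
struct opd_entry {
	unsigned long funcaddr;
	unsigned long r2;
};
typedef struct opd_entry func_desc_t;

static unsigned long func_addr(unsigned long addr)
{
	return ((func_desc_t *)addr)->funcaddr;
}
#endif

int main(void)
{
#ifdef ABIV2
	printf("entry = %#lx\n", func_addr(0x2000));	/* the address itself */
#else
	struct opd_entry fake_opd = { 0x2000, 0xc0de };	/* made-up values */

	printf("entry = %#lx\n", func_addr((unsigned long)&fake_opd));
#endif
	return 0;
}

Either way the caller ends up with the real instruction address, which is why the spin-table and kexec paths above no longer need to know which ABI the kernel was built for.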