author     Daniel Vetter <daniel.vetter@ffwll.ch>    2014-07-07 10:17:56 +0200
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2014-07-07 10:17:56 +0200
commit     f1615bbe9be4def59c3b3eaddb60722efeed16c2 (patch)
tree       ca3020e65447576fc1826e819651e6ba072030b5 /arch/powerpc/include/asm/code-patching.h
parent     cfb3c0ab0903abb6ea5215b37eebd9c2a1f057eb (diff)
parent     cd3de83f147601356395b57a8673e9c5ff1e59d1 (diff)
Merge tag 'v3.16-rc4' into drm-intel-next-queued
Due to Dave's vacation, drm-next hasn't opened yet for 3.17, so I couldn't move my drm-intel-next queue forward like I usually do. Just pull in the latest upstream -rc to unblock patch merging - I don't want to needlessly rebase my current patch pile and void all the testing we've done already.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'arch/powerpc/include/asm/code-patching.h')
-rw-r--r--  arch/powerpc/include/asm/code-patching.h  |  51
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 97e02f985df..840a5509b3f 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -42,18 +42,61 @@ void __patch_exception(int exc, unsigned long addr);
 } while (0)
 #endif
 
+#define OP_RT_RA_MASK   0xffff0000UL
+#define LIS_R2          0x3c020000UL
+#define ADDIS_R2_R12    0x3c4c0000UL
+#define ADDI_R2_R2      0x38420000UL
+
 static inline unsigned long ppc_function_entry(void *func)
 {
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+        u32 *insn = func;
+
+        /*
+         * A PPC64 ABIv2 function may have a local and a global entry
+         * point. We need to use the local entry point when patching
+         * functions, so identify and step over the global entry point
+         * sequence.
+         *
+         * The global entry point sequence is always of the form:
+         *
+         * addis r2,r12,XXXX
+         * addi r2,r2,XXXX
+         *
+         * A linker optimisation may convert the addis to lis:
+         *
+         * lis r2,XXXX
+         * addi r2,r2,XXXX
+         */
+        if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+             ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+            ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+                return (unsigned long)(insn + 2);
+        else
+                return (unsigned long)func;
+#else
         /*
-         * On PPC64 the function pointer actually points to the function's
-         * descriptor. The first entry in the descriptor is the address
-         * of the function text.
+         * On PPC64 ABIv1 the function pointer actually points to the
+         * function's descriptor. The first entry in the descriptor is the
+         * address of the function text.
          */
         return ((func_descr_t *)func)->entry;
+#endif
 #else
         return (unsigned long)func;
 #endif
 }
 
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+        /* PPC64 ABIv2 the global entry point is at the address */
+        return (unsigned long)func;
+#else
+        /* All other cases there is no change vs ppc_function_entry() */
+        return ppc_function_entry(func);
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
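
For readers who want to try the new ABIv2 entry-point check outside the kernel, here is a minimal standalone C sketch (not part of the patch). It copies the four mask/opcode #defines from the hunk above and applies the same test to a hand-encoded fake prologue; the local_entry() helper and the fake_prologue[] array are made up purely for illustration, only the #define values come from the patch.

/*
 * Standalone illustration of the ELFv2 global-entry-point check added
 * above. fake_prologue[] hand-encodes (values chosen for this example):
 *   0x3c4c0012  addis r2,r12,0x12  -> matches ADDIS_R2_R12 under the mask
 *   0x38420abc  addi  r2,r2,0xabc  -> matches ADDI_R2_R2 under the mask
 *   0x60000000  nop                -> stands in for the function body
 */
#include <stdint.h>
#include <stdio.h>

#define OP_RT_RA_MASK   0xffff0000UL
#define LIS_R2          0x3c020000UL
#define ADDIS_R2_R12    0x3c4c0000UL
#define ADDI_R2_R2      0x38420000UL

/* Same test as the ABIv2 branch of ppc_function_entry(), on a buffer. */
static const uint32_t *local_entry(const uint32_t *insn)
{
        if ((((insn[0] & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
             ((insn[0] & OP_RT_RA_MASK) == LIS_R2)) &&
            ((insn[1] & OP_RT_RA_MASK) == ADDI_R2_R2))
                return insn + 2;        /* skip the TOC-setup (global entry) sequence */
        return insn;                    /* no TOC setup: local and global entry coincide */
}

int main(void)
{
        static const uint32_t fake_prologue[] = {
                0x3c4c0012, 0x38420abc, 0x60000000
        };

        /* Prints: local entry is 2 instructions past the global entry */
        printf("local entry is %ld instructions past the global entry\n",
               (long)(local_entry(fake_prologue) - fake_prologue));
        return 0;
}

The kernel helpers differ only in that they operate on a real function pointer and return an unsigned long; ppc_global_function_entry() deliberately skips this check so callers that need the TOC-setup sequence still get the unmodified address, as the comment in the hunk notes.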