From aaddd3eacaeaef3503035750b3f21ac2bfe97cbf Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:21 +1000
Subject: powerpc: Move code patching code into arch/powerpc/lib/code-patching.c

We currently have a few routines for patching code in asm/system.h,
because they didn't fit anywhere else. I'd like to clean them up a
little and add some more, so first move them into a dedicated C file;
they don't need to be inlined.

While we're moving the code, drop create_function_call(); its intended
caller never got merged, and it will be replaced in future with
something different.

Signed-off-by: Michael Ellerman
Acked-by: Kumar Gala
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/code-patching.h | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)
 create mode 100644 include/asm-powerpc/code-patching.h

diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
new file mode 100644
index 00000000000..0b91fdf944d
--- /dev/null
+++ b/include/asm-powerpc/code-patching.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_POWERPC_CODE_PATCHING_H
+#define _ASM_POWERPC_CODE_PATCHING_H
+
+/*
+ * Copyright 2008, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/* Flags for create_branch:
+ * "b"   == create_branch(addr, target, 0);
+ * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
+ * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
+ * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
+ */
+#define BRANCH_SET_LINK	0x1
+#define BRANCH_ABSOLUTE	0x2
+
+extern void create_branch(unsigned long addr, unsigned long target, int flags);
+extern void create_instruction(unsigned long addr, unsigned int instr);
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_H */
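A note on the flags: the two bits correspond one-for-one to the AA
(absolute address) and LK (set link register) bits of the PowerPC
I-form branch instruction, whose primary opcode is 18 (0x48000000).
Below is a minimal, standalone sketch of that mapping in user-space C,
not kernel code; encode_branch() and the sample addresses are invented
for illustration, mirroring the create_branch() logic that appears in
the next patch.

/*
 * Standalone illustration of how the two flag bits select the four
 * branch mnemonics listed in the header comment. Per the PowerPC
 * I-form encoding: primary opcode 18, AA = bit 0x2, LK = bit 0x1.
 */
#include <stdio.h>

#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2

static unsigned int encode_branch(unsigned long addr, unsigned long target,
                                  int flags)
{
        unsigned long offset = target;

        if (!(flags & BRANCH_ABSOLUTE))
                offset -= addr;         /* relative branches encode target - addr */

        return 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC);
}

int main(void)
{
        unsigned long addr = 0x1000, target = 0x2000;

        printf("b   %08x\n", encode_branch(addr, target, 0));
        printf("ba  %08x\n", encode_branch(addr, target, BRANCH_ABSOLUTE));
        printf("bl  %08x\n", encode_branch(addr, target, BRANCH_SET_LINK));
        printf("bla %08x\n", encode_branch(addr, target,
                                           BRANCH_ABSOLUTE | BRANCH_SET_LINK));
        return 0;
}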
From e7a57273c6407bb6903fbaddec8c2119bf318617 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:22 +1000
Subject: powerpc: Allow create_branch() to return errors

Currently create_branch() creates a branch instruction for you, and
patches it into the call site. In some circumstances it would be nice
to be able to create the instruction and patch it later, and also some
code might want to check for errors in the branch creation before
doing the patching. A future commit will change create_branch() to
check for errors.

For callers that don't care, replace create_branch() with
patch_branch(), which just creates the branch and patches it directly.

While we're touching all the callers, change to using unsigned int *,
as this seems to match usage better. That allows (and requires) us to
remove the volatile in the definition of vector in powermac/smp.c and
mpc86xx_smp.c. That is correct because, now that we're passing vector
as an unsigned int *, the compiler knows that its value might change
across the patch_branch() call.

Signed-off-by: Michael Ellerman
Acked-by: Kumar Gala
Acked-by: Jon Loeliger
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/crash_dump.c          |  6 ++++--
 arch/powerpc/lib/code-patching.c          | 20 ++++++++++++--------
 arch/powerpc/platforms/86xx/mpc86xx_smp.c |  5 ++---
 arch/powerpc/platforms/powermac/smp.c     |  5 ++---
 include/asm-powerpc/code-patching.h       |  6 ++++--
 5 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 35b9a668b0e..26648544d5e 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -34,6 +34,8 @@ void __init reserve_kdump_trampoline(void)

 static void __init create_trampoline(unsigned long addr)
 {
+	unsigned int *p = (unsigned int *)addr;
+
 	/* The maximum range of a single instruction branch, is the current
 	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
 	 * need to branch to current address + 32 MB. So we insert a nop at
@@ -42,8 +44,8 @@ static void __init create_trampoline(unsigned long addr)
 	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
 	 * two instructions it doesn't require any registers.
 	 */
-	create_instruction(addr, 0x60000000); /* nop */
-	create_branch(addr + 4, addr + PHYSICAL_START, 0);
+	patch_instruction(p, 0x60000000); /* nop */
+	patch_branch(++p, addr + PHYSICAL_START, 0);
 }

 void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 7afae88ed1d..638dde313cb 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -11,23 +11,27 @@

 #include <asm/code-patching.h>

-void create_instruction(unsigned long addr, unsigned int instr)
+void patch_instruction(unsigned int *addr, unsigned int instr)
 {
-	unsigned int *p;
-	p = (unsigned int *)addr;
-	*p = instr;
-	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (p));
+	*addr = instr;
+	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
 }

-void create_branch(unsigned long addr, unsigned long target, int flags)
+void patch_branch(unsigned int *addr, unsigned long target, int flags)
+{
+	patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+unsigned int create_branch(const unsigned int *addr,
+			   unsigned long target, int flags)
 {
 	unsigned int instruction;

 	if (! (flags & BRANCH_ABSOLUTE))
-		target = target - addr;
+		target = target - (unsigned long)addr;

 	/* Mask out the flags and target, so they don't step on each other. */
 	instruction = 0x48000000 | (flags & 0x3) | (target & 0x03FFFFFC);

-	create_instruction(addr, instruction);
+	return instruction;
 }
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 63f55853cd6..835f2dc24dc 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -57,8 +57,7 @@ smp_86xx_kick_cpu(int nr)
 	unsigned int save_vector;
 	unsigned long target, flags;
 	int n = 0;
-	volatile unsigned int *vector
-		 = (volatile unsigned int *)(KERNELBASE + 0x100);
+	unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);

 	if (nr < 0 || nr >= NR_CPUS)
 		return;
@@ -72,7 +71,7 @@ smp_86xx_kick_cpu(int nr)

 	/* Setup fake reset vector to call __secondary_start_mpc86xx. */
 	target = (unsigned long) __secondary_start_mpc86xx;
-	create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
+	patch_branch(vector, target, BRANCH_SET_LINK);

 	/* Kick that CPU */
 	smp_86xx_release_core(nr);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index bf202f7eadf..4ae3d00e0bd 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -787,8 +787,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 {
 	unsigned int save_vector;
 	unsigned long target, flags;
-	volatile unsigned int *vector
-		 = ((volatile unsigned int *)(KERNELBASE+0x100));
+	unsigned int *vector = (unsigned int *)(KERNELBASE+0x100);

 	if (nr < 0 || nr > 3)
 		return;
@@ -805,7 +804,7 @@ static void __devinit smp_core99_kick_cpu(int nr)
 	 * b __secondary_start_pmac_0 + nr*8 - KERNELBASE
 	 */
 	target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
-	create_branch((unsigned long)vector, target, BRANCH_SET_LINK);
+	patch_branch(vector, target, BRANCH_SET_LINK);

 	/* Put some life in our friend */
 	pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index 0b91fdf944d..fdb187cbc40 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -19,7 +19,9 @@
 #define BRANCH_SET_LINK	0x1
 #define BRANCH_ABSOLUTE	0x2

-extern void create_branch(unsigned long addr, unsigned long target, int flags);
-extern void create_instruction(unsigned long addr, unsigned int instr);
+unsigned int create_branch(const unsigned int *addr,
+			   unsigned long target, int flags);
+void patch_branch(unsigned int *addr, unsigned long target, int flags);
+void patch_instruction(unsigned int *addr, unsigned int instr);

 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
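A note on patch_instruction(): the store alone is not enough, because
the CPU may still fetch the stale instruction from its instruction
cache. The dcbst/sync/icbi/sync/isync sequence pushes the modified
word out of the data cache and invalidates the matching
instruction-cache block before it can be executed. As a rough
user-space analogue (an illustrative sketch only, assuming a GCC or
Clang toolchain; patch_instruction_user() is an invented name), the
compiler builtin emits the equivalent cache maintenance:

/*
 * User-space analogue of patch_instruction(): write the word, then
 * make the instruction cache coherent with the data cache. The kernel
 * does this with dcbst/sync/icbi/sync/isync; portable user code can
 * lean on the compiler builtin instead.
 */
#include <stdint.h>

static void patch_instruction_user(uint32_t *addr, uint32_t instr)
{
        *addr = instr;
        __builtin___clear_cache((char *)addr, (char *)(addr + 1));
}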
From 07630a37beefe8e4401c602f04e3e5bcbba50b31 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:28 +1000
Subject: powerpc: Add ppc_function_entry() which gets the entry point for a function

Because function pointers point to different things on 32-bit vs
64-bit, add a helper that deals with dereferencing the OPD on 64-bit.
The soon-to-be-merged ftrace code wants this, as well as other code I
am working on.

Signed-off-by: Michael Ellerman
Acked-by: Kumar Gala
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/code-patching.h | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index fdb187cbc40..a45a7ff7872 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -10,6 +10,8 @@
  * 2 of the License, or (at your option) any later version.
  */

+#include <asm/types.h>
+
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
  * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
@@ -24,4 +26,18 @@ unsigned int create_branch(const unsigned int *addr,
 void patch_branch(unsigned int *addr, unsigned long target, int flags);
 void patch_instruction(unsigned int *addr, unsigned int instr);

+static inline unsigned long ppc_function_entry(void *func)
+{
+#ifdef CONFIG_PPC64
+	/*
+	 * On PPC64 the function pointer actually points to the function's
+	 * descriptor. The first entry in the descriptor is the address
+	 * of the function text.
+	 */
+	return ((func_descr_t *)func)->entry;
+#else
+	return (unsigned long)func;
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
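Some background on the descriptor dereference: under the 64-bit ELF
ABI in use here (later known as ELFv1; the newer ELFv2 ABI dropped
descriptors), a function pointer addresses a three-word "official
procedure descriptor" (OPD) rather than the code itself. A standalone
sketch follows; the struct mirrors the kernel's func_descr_t from
asm/types.h, and the main() driver is purely illustrative:

/*
 * Standalone sketch of why ppc_function_entry() exists. On 64-bit
 * PowerPC (ELFv1), a function pointer addresses a descriptor whose
 * first word is the real entry point; on 32-bit it addresses the
 * code directly.
 */
#include <stdio.h>

typedef struct {
        unsigned long entry;    /* address of the function text */
        unsigned long toc;      /* TOC base for the function */
        unsigned long env;      /* environment pointer (unused by C) */
} func_descr_t;

static unsigned long ppc_function_entry(void *func)
{
#ifdef __powerpc64__
        return ((func_descr_t *)func)->entry;
#else
        return (unsigned long)func;
#endif
}

int main(void)
{
        printf("entry of main: %lx\n", ppc_function_entry((void *)main));
        return 0;
}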
From 411781a290b0d0a31fd73826b3ee110f1e3cc3b6 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:29 +1000
Subject: powerpc: Add new code patching routines

This commit adds some new routines for patching code, which will be
used in a following commit.

Signed-off-by: Michael Ellerman
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/code-patching.c    | 107 ++++++++++++++++++++++++++++++++
 include/asm-powerpc/code-patching.h |   8 +++
 2 files changed, 115 insertions(+)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 430f4c15d78..27957c4ea9e 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -41,3 +41,110 @@ unsigned int create_branch(const unsigned int *addr,

 	return instruction;
 }
+
+unsigned int create_cond_branch(const unsigned int *addr,
+				unsigned long target, int flags)
+{
+	unsigned int instruction;
+	long offset;
+
+	offset = target;
+	if (! (flags & BRANCH_ABSOLUTE))
+		offset = offset - (unsigned long)addr;
+
+	/* Check we can represent the target in the instruction format */
+	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+		return 0;
+
+	/* Mask out the flags and target, so they don't step on each other. */
+	instruction = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC);
+
+	return instruction;
+}
+
+static unsigned int branch_opcode(unsigned int instr)
+{
+	return (instr >> 26) & 0x3F;
+}
+
+static int instr_is_branch_iform(unsigned int instr)
+{
+	return branch_opcode(instr) == 18;
+}
+
+static int instr_is_branch_bform(unsigned int instr)
+{
+	return branch_opcode(instr) == 16;
+}
+
+int instr_is_relative_branch(unsigned int instr)
+{
+	if (instr & BRANCH_ABSOLUTE)
+		return 0;
+
+	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
+}
+
+static unsigned long branch_iform_target(const unsigned int *instr)
+{
+	signed long imm;
+
+	imm = *instr & 0x3FFFFFC;
+
+	/* If the top bit of the immediate value is set this is negative */
+	if (imm & 0x2000000)
+		imm -= 0x4000000;
+
+	if ((*instr & BRANCH_ABSOLUTE) == 0)
+		imm += (unsigned long)instr;
+
+	return (unsigned long)imm;
+}
+
+static unsigned long branch_bform_target(const unsigned int *instr)
+{
+	signed long imm;
+
+	imm = *instr & 0xFFFC;
+
+	/* If the top bit of the immediate value is set this is negative */
+	if (imm & 0x8000)
+		imm -= 0x10000;
+
+	if ((*instr & BRANCH_ABSOLUTE) == 0)
+		imm += (unsigned long)instr;
+
+	return (unsigned long)imm;
+}
+
+unsigned long branch_target(const unsigned int *instr)
+{
+	if (instr_is_branch_iform(*instr))
+		return branch_iform_target(instr);
+	else if (instr_is_branch_bform(*instr))
+		return branch_bform_target(instr);
+
+	return 0;
+}
+
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
+{
+	if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
+		return branch_target(instr) == addr;
+
+	return 0;
+}
+
+unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
+{
+	unsigned long target;
+
+	target = branch_target(src);
+
+	if (instr_is_branch_iform(*src))
+		return create_branch(dest, target, *src);
+	else if (instr_is_branch_bform(*src))
+		return create_cond_branch(dest, target, *src);
+
+	return 0;
+}
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index a45a7ff7872..40ad46b1dd9 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -23,9 +23,17 @@

 unsigned int create_branch(const unsigned int *addr,
 			   unsigned long target, int flags);
+unsigned int create_cond_branch(const unsigned int *addr,
+				unsigned long target, int flags);
 void patch_branch(unsigned int *addr, unsigned long target, int flags);
 void patch_instruction(unsigned int *addr, unsigned int instr);

+int instr_is_relative_branch(unsigned int instr);
+int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
+unsigned long branch_target(const unsigned int *instr);
+unsigned int translate_branch(const unsigned int *dest,
+			      const unsigned int *src);
+
 static inline unsigned long ppc_function_entry(void *func)
 {
 #ifdef CONFIG_PPC64
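The decoding helpers above are the inverse of the encoders: extract
the displacement field, sign-extend it, and for relative branches add
back the instruction's own address. A standalone sketch of the I-form
case in user-space C follows; the function copies the kernel logic,
and the test instruction and address are invented values:

/*
 * Standalone sketch of the sign-extension logic in
 * branch_iform_target(): mask out the 26-bit LI field, sign extend,
 * and add the branch's own address for relative branches.
 */
#include <stdio.h>

#define BRANCH_ABSOLUTE 0x2

static unsigned long branch_iform_target(unsigned long addr,
                                         unsigned int instr)
{
        long imm = instr & 0x3FFFFFC;

        if (imm & 0x2000000)            /* top bit set => negative offset */
                imm -= 0x4000000;

        if ((instr & BRANCH_ABSOLUTE) == 0)
                imm += addr;            /* relative: offset from the branch */

        return (unsigned long)imm;
}

int main(void)
{
        /* 0x4BFFFFF0 is "b .-16": LI = -16, relative; prints "ff0". */
        printf("%lx\n", branch_iform_target(0x1000, 0x4BFFFFF0));
        return 0;
}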
From b7bcda631e87eb3466d0baa9885650ba7d7ed89d Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:35 +1000
Subject: powerpc: Add PPC_NOP_INSTR, a hash define for the preferred nop instruction

A bunch of code has hard-coded the value for a "nop" instruction; it
would be nice to have a #define for it.

Signed-off-by: Michael Ellerman
Acked-by: Kumar Gala
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/cputable.c      | 3 ++-
 arch/powerpc/kernel/crash_dump.c    | 2 +-
 arch/powerpc/kernel/module_64.c     | 3 ++-
 include/asm-powerpc/code-patching.h | 2 ++
 4 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f8deb3761de..ba5b23f5476 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,6 +17,7 @@

 #include <asm/oprofile_impl.h>
 #include <asm/cputable.h>
+#include <asm/code-patching.h>
 #include <asm/prom.h>		/* for PTRRELOC on ARCH=ppc */

@@ -1663,7 +1664,7 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 		pend = ((unsigned int *)fcur) + (fcur->end_off / 4);

 		for (p = pstart; p < pend; p++) {
-			*p = 0x60000000u;
+			*p = PPC_NOP_INSTR;
 			asm volatile ("dcbst 0, %0" : : "r" (p));
 		}
 		asm volatile ("sync" : : : "memory");
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 26648544d5e..e0debcca0bf 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -44,7 +44,7 @@ static void __init create_trampoline(unsigned long addr)
 	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
 	 * two instructions it doesn't require any registers.
 	 */
-	patch_instruction(p, 0x60000000); /* nop */
+	patch_instruction(p, PPC_NOP_INSTR);
 	patch_branch(++p, addr + PHYSICAL_START, 0);
 }

diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 4803f2de98d..ee6a2982d56 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -24,6 +24,7 @@
 #include <asm/module.h>
 #include <asm/uaccess.h>
 #include <asm/firmware.h>
+#include <asm/code-patching.h>
 #include <linux/sort.h>

 #include "setup.h"
@@ -330,7 +331,7 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
    restore r2. */
 static int restore_r2(u32 *instruction, struct module *me)
 {
-	if (*instruction != 0x60000000) {
+	if (*instruction != PPC_NOP_INSTR) {
 		printk("%s: Expect noop after relocate, got %08x\n",
 		       me->name, *instruction);
 		return 0;
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index 40ad46b1dd9..ef3a5d156db 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -12,6 +12,8 @@

 #include <asm/types.h>

+#define PPC_NOP_INSTR 0x60000000
+
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
  * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
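For the curious: 0x60000000 is the architecturally preferred nop
because it decodes as "ori r0,r0,0", i.e. primary opcode 24 (ori) with
every field zero. A tiny standalone check, illustrative only:

/* Verify that "ori r0,r0,0" assembles to PPC_NOP_INSTR. */
#include <assert.h>

#define PPC_NOP_INSTR 0x60000000

int main(void)
{
        unsigned int opcode = 24, rs = 0, ra = 0, ui = 0;
        unsigned int ori = (opcode << 26) | (rs << 21) | (ra << 16) | ui;

        assert(ori == PPC_NOP_INSTR);   /* ori r0,r0,0 == nop */
        return 0;
}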
From 2d1b2027626d5151fff8ef7c06ca8e7876a1a510 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Wed, 2 Jul 2008 01:16:40 +1000
Subject: powerpc: Fixup lwsync at runtime

To allow a single kernel image to cover e500 v1/v2/mc we need to fix
up lwsync at runtime. On e500v1/v2, lwsync causes an illegal
instruction exception, so we need to patch the code. We default to
'sync', since that is always safe, and if the CPU is capable we
replace 'sync' with 'lwsync'.

We introduce CPU_FTR_LWSYNC as a way to determine at runtime if this
is needed. This flag could be moved elsewhere, since we don't really
use it for the normal CPU_FTR purpose.

Finally, we only store the relative offset in the fixup section, to
keep it as small as possible, rather than using a full fixup_entry.

Signed-off-by: Kumar Gala
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/module.c            |  6 ++++++
 arch/powerpc/kernel/setup_32.c          |  4 ++++
 arch/powerpc/kernel/setup_64.c          |  2 ++
 arch/powerpc/kernel/vdso.c              | 10 +++++++++
 arch/powerpc/kernel/vdso32/vdso32.lds.S |  3 +++
 arch/powerpc/kernel/vdso64/vdso64.lds.S |  3 +++
 arch/powerpc/kernel/vmlinux.lds.S       |  6 ++++++
 arch/powerpc/lib/feature-fixups-test.S  | 15 +++++++++++++
 arch/powerpc/lib/feature-fixups.c       | 36 +++++++++++++++++++++++++++++++
 include/asm-powerpc/code-patching.h     |  3 ++-
 include/asm-powerpc/cputable.h          | 21 +++++++++---------
 include/asm-powerpc/feature-fixups.h    | 10 +++++++++
 include/asm-powerpc/synch.h             | 38 ++++++++++++++++++++------------
 13 files changed, 131 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 40dd52d81c1..af07003573c 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -86,6 +86,12 @@ int module_finalize(const Elf_Ehdr *hdr,
 				  (void *)sect->sh_addr + sect->sh_size);
 #endif

+	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+	if (sect != NULL)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 (void *)sect->sh_addr,
+				 (void *)sect->sh_addr + sect->sh_size);
+
 	return 0;
 }

diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9e83add5429..0109e7f0ccf 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -101,6 +101,10 @@ unsigned long __init early_init(unsigned long dt_ptr)
 			  PTRRELOC(&__start___ftr_fixup),
 			  PTRRELOC(&__stop___ftr_fixup));

+	do_lwsync_fixups(spec->cpu_features,
+			 PTRRELOC(&__start___lwsync_fixup),
+			 PTRRELOC(&__stop___lwsync_fixup));
+
 	return KERNELBASE + offset;
 }

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 098fd96a394..04d8de9f0fc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -363,6 +363,8 @@ void __init setup_system(void)
 			  &__start___ftr_fixup, &__stop___ftr_fixup);
 	do_feature_fixups(powerpc_firmware_features,
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+	do_lwsync_fixups(cur_cpu_spec->cpu_features,
+			 &__start___lwsync_fixup, &__stop___lwsync_fixup);

 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ce245a850db..f177c60ea76 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -571,6 +571,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
 	if (start64)
 		do_feature_fixups(powerpc_firmware_features,
 				  start64, start64 + size64);
+
+	start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
+	if (start64)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 start64, start64 + size64);
 #endif /* CONFIG_PPC64 */

 	start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
@@ -585,6 +590,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
 				  start32, start32 + size32);
 #endif /* CONFIG_PPC64 */

+	start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
+	if (start32)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 start32, start32 + size32);
+
 	return 0;
 }

diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 271793577cd..be3b6a41dc0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -33,6 +33,9 @@ SECTIONS
 	. = ALIGN(8);
 	__ftr_fixup	: { *(__ftr_fixup) }

+	. = ALIGN(8);
+	__lwsync_fixup	: { *(__lwsync_fixup) }
+
 #ifdef CONFIG_PPC64
 	. = ALIGN(8);
 	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e608d1bd3bf..d0b2526dd38 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -34,6 +34,9 @@ SECTIONS
 	. = ALIGN(8);
 	__ftr_fixup	: { *(__ftr_fixup) }

+	. = ALIGN(8);
+	__lwsync_fixup	: { *(__lwsync_fixup) }
+
 	. = ALIGN(8);
 	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }

diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 3c07811989f..6856f6c1572 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -127,6 +127,12 @@ SECTIONS
 		*(__ftr_fixup)
 		__stop___ftr_fixup = .;
 	}
+	. = ALIGN(8);
+	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
+		__start___lwsync_fixup = .;
+		*(__lwsync_fixup)
+		__stop___lwsync_fixup = .;
+	}
 #ifdef CONFIG_PPC64
 	. = ALIGN(8);
 	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index 0549be04399..cb737484c5a 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -10,6 +10,7 @@

 #include <asm/feature-fixups.h>
 #include <asm/ppc_asm.h>
+#include <asm/synch.h>

 	.text
@@ -725,3 +726,17 @@ MAKE_MACRO_TEST_EXPECTED(FTR);
 MAKE_MACRO_TEST(FW_FTR);
 MAKE_MACRO_TEST_EXPECTED(FW_FTR);
 #endif
+
+globl(lwsync_fixup_test)
+1:	or	1,1,1
+	LWSYNC
+globl(end_lwsync_fixup_test)
+
+globl(lwsync_fixup_test_expected_LWSYNC)
+1:	or	1,1,1
+	lwsync
+
+globl(lwsync_fixup_test_expected_SYNC)
+1:	or	1,1,1
+	sync
+
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 48e1ed89052..4e43702b981 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -110,6 +110,22 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	}
 }

+void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+	unsigned int *start, *end, *dest;
+
+	if (!(value & CPU_FTR_LWSYNC))
+		return;
+
+	start = fixup_start;
+	end = fixup_end;
+
+	for (; start < end; start++) {
+		dest = (void *)start + *start;
+		patch_instruction(dest, PPC_LWSYNC_INSTR);
+	}
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST

 #define check(x)	\
@@ -295,6 +311,25 @@ static void test_fw_macros(void)
 #endif
 }

+static void test_lwsync_macros(void)
+{
+	extern void lwsync_fixup_test;
+	extern void end_lwsync_fixup_test;
+	extern void lwsync_fixup_test_expected_LWSYNC;
+	extern void lwsync_fixup_test_expected_SYNC;
+	unsigned long size = &end_lwsync_fixup_test -
+			     &lwsync_fixup_test;
+
+	/* The fixups have already been done for us during boot */
+	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
+		check(memcmp(&lwsync_fixup_test,
+			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
+	} else {
+		check(memcmp(&lwsync_fixup_test,
+			     &lwsync_fixup_test_expected_SYNC, size) == 0);
+	}
+}
+
 static int __init test_feature_fixups(void)
 {
 	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
@@ -307,6 +342,7 @@ static int __init test_feature_fixups(void)
 	test_alternative_case_with_external_branch();
 	test_cpu_macros();
 	test_fw_macros();
+	test_lwsync_macros();

 	return 0;
 }
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index ef3a5d156db..107d9b915e3 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -12,7 +12,8 @@

 #include <asm/types.h>

-#define PPC_NOP_INSTR 0x60000000
+#define PPC_NOP_INSTR	0x60000000
+#define PPC_LWSYNC_INSTR 0x7c2004ac

 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 4e4491cb9d3..3171ac904b9 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -156,6 +156,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_UNIFIED_ID_CACHE	ASM_CONST(0x0000000001000000)
 #define CPU_FTR_SPE			ASM_CONST(0x0000000002000000)
 #define CPU_FTR_NEED_PAIRED_STWCX	ASM_CONST(0x0000000004000000)
+#define CPU_FTR_LWSYNC			ASM_CONST(0x0000000008000000)

 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -369,43 +370,43 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 	    CPU_FTR_NODSISRALIGN)
 #define CPU_FTRS_E500MC	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
-	    CPU_FTR_L2CSR)
+	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)

 /* 64-bit CPUs */
-#define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
-#define CPU_FTRS_RS64	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_RS64	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
-#define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA)
-#define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
-#define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR)
-#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR)
-#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR)
-#define CPU_FTRS_CELL	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
-#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
+#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
 	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h
index ab30129dced..a1029967620 100644
--- a/include/asm-powerpc/feature-fixups.h
+++ b/include/asm-powerpc/feature-fixups.h
@@ -113,4 +113,14 @@ label##5:						\

 #endif /* __ASSEMBLY__ */

+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label)	label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect)		\
+label##2:						\
+	.pushsection sect,"a";				\
+	.align 2;					\
+label##3:						\
+	.long label##1b-label##3b;			\
+	.popsection;
+
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 42a1ef59069..45963e80f55 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -3,34 +3,42 @@
 #ifdef __KERNEL__

 #include <linux/stringify.h>
+#include <asm/feature-fixups.h>

-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+			     void *fixup_end);
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */

-#ifdef __SUBARCH_HAS_LWSYNC
+#if defined(__powerpc64__)
 #    define LWSYNC	lwsync
+#elif defined(CONFIG_E500)
+#    define LWSYNC					\
+	START_LWSYNC_SECTION(96);			\
+	sync;						\
+	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
 #else
 #    define LWSYNC	sync
 #endif

 #ifdef CONFIG_SMP
 #define ISYNC_ON_SMP	"\n\tisync\n"
-#define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP	stringify_in_c(LWSYNC) "\n"
 #else
 #define ISYNC_ON_SMP
 #define LWSYNC_ON_SMP
 #endif

-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYNCH_H */