| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-02-10 15:25:18 +0000 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-02-10 15:25:18 +0000 |
| commit | 4a9cb360197684a861bc06f06d33d5fcc8ffcbf5 (patch) | |
| tree | e83a6ecf2fd04ac3535a07a19fc645c1949f0698 /arch | |
| parent | 0193c00e85a35ceee7856c89c25b34d5752e0baa (diff) | |
ARM: fixup SMP alternatives in modules
With certain configurations, we inline the unlock functions in modules,
which results in SMP alternatives being created in modules. We need to
fix those up when loading a module to prevent undefined instruction
faults.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
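For readers following the diff below: the .alt.smp.init section built by the ALT_SMP()/ALT_UP() macros is a table of word pairs, each giving the address of an SMP-only instruction and the uniprocessor replacement to write over it, and the __do_fixup_smp_on_up routine in this patch simply walks that table. A minimal C sketch of that loop, not part of the commit itself — the names smp_alt_entry and fixup_walk_sketch are hypothetical, and a 32-bit ARM target is assumed so that unsigned long is one instruction word:

```c
/* Hypothetical sketch of the table walk done by __do_fixup_smp_on_up
 * (ldmia r4!, {r0, r6}; str r6, [r0, r3]). */
struct smp_alt_entry {
	unsigned long insn_addr;	/* address of the SMP instruction to patch */
	unsigned long up_insn;		/* uniprocessor replacement encoding */
};

/* offset is the delta between run-time and link-time addresses; the new
 * fixup_smp() entry point passes 0 because module sections have already
 * been relocated to their final addresses by the module loader. */
static void fixup_walk_sketch(const struct smp_alt_entry *start,
			      const struct smp_alt_entry *end,
			      unsigned long offset)
{
	const struct smp_alt_entry *e;

	for (e = start; e < end; e++)
		*(unsigned long *)(e->insn_addr + offset) = e->up_insn;
}
```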
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/kernel/head.S | 38 |
-rw-r--r-- | arch/arm/kernel/module.c | 22 |
2 files changed, 47 insertions, 13 deletions
```diff
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c0225da3fb2..f06ff9feb0d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
 	and	r3, r9, #0x000f0000	@ architecture version
 	teq	r3, #0x000f0000		@ CPU ID supported?
@@ -415,18 +416,7 @@ __fixup_smp_on_up:
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -440,7 +430,31 @@ smp_on_up:
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
+#endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
 #endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
 
 #include "head-common.S"
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b47..6d4105e6872 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@ struct mod_unwind_map {
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 			       maps[i].txt_sec->sh_addr,
 			       maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 
 	return 0;
 }
```
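A design note on the diff above (an observation, not part of the commit message): __do_fixup_smp_on_up stores each replacement at [r0, r3], where r3 is the delta between the link-time addresses recorded in the table and where the code actually runs. The boot-time caller __fixup_smp_on_up computes that delta before branching to the shared loop, while the new fixup_smp() entry point used for modules sets r3 to zero, since the module loader has already relocated .alt.smp.init to its final addresses. On the C side, module_finalize() only performs the patching when is_smp() (from the newly included <asm/smp_plat.h>) returns false, i.e. on a uniprocessor system, matching the condition under which the kernel image itself was fixed up at boot.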