From 932ded4b0b9bf111fbf9d176ec12152a0d29b0fd Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Fri, 28 Nov 2014 13:40:45 +0000
Subject: arm64: add module support for alternatives fixups

Currently the kernel patches all necessary instructions once at boot
time, so modules are not covered by this.
Change the apply_alternatives() function to take a beginning and an
end pointer and introduce a new variant (apply_alternatives_all()) to
cover the existing use case for the static kernel image section.
Add a module_finalize() function to arm64 to check for an alternatives
section in a module and patch only the instructions from that specific
area.
Since that module code is not touched before the module initialization
has ended, we don't need to halt the machine before doing the patching
in the module's code.

Signed-off-by: Andre Przywara
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/alternative.h |  3 ++-
 arch/arm64/kernel/alternative.c      | 29 +++++++++++++++++++++++++----
 arch/arm64/kernel/module.c           | 18 ++++++++++++++++++
 arch/arm64/kernel/smp.c              |  2 +-
 4 files changed, 46 insertions(+), 6 deletions(-)

(limited to 'arch')

diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index f6d206e7f9e..d261f01e2ba 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -13,7 +13,8 @@ struct alt_instr {
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
 
-void apply_alternatives(void);
+void apply_alternatives_all(void);
+void apply_alternatives(void *start, size_t length);
 void free_alternatives_memory(void);
 
 #define ALTINSTR_ENTRY(feature)						      \
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 1a3badab800..ad7821d64a1 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -28,12 +28,18 @@
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 
-static int __apply_alternatives(void *dummy)
+struct alt_region {
+	struct alt_instr *begin;
+	struct alt_instr *end;
+};
+
+static int __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
+	struct alt_region *region = alt_region;
 	u8 *origptr, *replptr;
 
-	for (alt = __alt_instructions; alt < __alt_instructions_end; alt++) {
+	for (alt = region->begin; alt < region->end; alt++) {
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;
 
@@ -51,10 +57,25 @@ static int __apply_alternatives(void *dummy)
 	return 0;
 }
 
-void apply_alternatives(void)
+void apply_alternatives_all(void)
 {
+	struct alt_region region = {
+		.begin	= __alt_instructions,
+		.end	= __alt_instructions_end,
+	};
+
 	/* better not try code patching on a live SMP system */
-	stop_machine(__apply_alternatives, NULL, NULL);
+	stop_machine(__apply_alternatives, &region, NULL);
+}
+
+void apply_alternatives(void *start, size_t length)
+{
+	struct alt_region region = {
+		.begin	= start,
+		.end	= start + length,
+	};
+
+	__apply_alternatives(&region);
 }
 
 void free_alternatives_memory(void)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1eb1cc95513..fd027b101de 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -26,6 +26,7 @@
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 #include <asm/insn.h>
+#include <asm/alternative.h>
 
 #define	AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
 #define	AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16
@@ -394,3 +395,20 @@ overflow:
 	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
 	return -ENOEXEC;
 }
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
+			apply_alternatives((void *)s->sh_addr, s->sh_size);
+			return 0;
+		}
+	}
+
+	return 0;
+}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 0ef87896e4a..7ae6ee08526 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -310,7 +310,7 @@ void cpu_die(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	apply_alternatives();
+	apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
-- 
cgit v1.2.3-70-g09d2
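
For context, a minimal sketch (not part of the commit above) of how a module
comes to contain the .altinstructions section that the new module_finalize()
patches: each use of the C-level ALTERNATIVE() macro from <asm/alternative.h>
(whose ALTINSTR_ENTRY helper is visible in the header hunk above) emits an
alt_instr record into that section, and module_finalize() hands the section to
apply_alternatives() at load time. ARM64_EXAMPLE_FEATURE below is a made-up
placeholder for a real cpufeature bit from <asm/cpufeature.h>; the module and
function names are likewise hypothetical.

/*
 * Illustrative sketch only -- not part of the patch.
 * ARM64_EXAMPLE_FEATURE stands in for a real capability index defined in
 * <asm/cpufeature.h>; 0 is used here purely as a placeholder value.
 */
#include <linux/module.h>
#include <asm/alternative.h>

#define ARM64_EXAMPLE_FEATURE	0	/* hypothetical cpufeature bit */

static int __init alt_example_init(void)
{
	/*
	 * Emits an alt_instr entry into this module's .altinstructions
	 * section. If the capability bit is set, module_finalize() ->
	 * apply_alternatives() rewrites the "nop" to "sevl" when the
	 * module is loaded; both instructions are 4 bytes, satisfying
	 * the alt_len <= orig_len rule from struct alt_instr.
	 */
	asm volatile(ALTERNATIVE("nop", "sevl", ARM64_EXAMPLE_FEATURE));
	return 0;
}
module_init(alt_example_init);

MODULE_LICENSE("GPL");

Because such a module is patched from its own module_finalize(), before any of
its code can run, the commit can safely skip the stop_machine() step that
apply_alternatives_all() still uses for the static kernel image.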