From 83a7a2ad2a9173dcabc05df0f01d1d85b7ba1c2c Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Thu, 10 Jun 2010 00:10:43 +0000
Subject: x86, alternatives: Use 16-bit numbers for cpufeature index

We already have cpufeature indices above 255, so use a 16-bit number
for the alternatives index.  This consumes a padding field and so
doesn't add any size, but it means that abusing the padding field to
create assembly errors on overflow no longer works.  We can retain the
test simply by redirecting it to the .discard section, however.

[ v3: updated to include open-coded locations ]

Signed-off-by: H. Peter Anvin
LKML-Reference:
Signed-off-by: H. Peter Anvin
---
 arch/x86/include/asm/alternative.h |  7 ++++---
 arch/x86/include/asm/cpufeature.h  | 14 ++++++++------
 arch/x86/kernel/entry_32.S         |  2 +-
 arch/x86/lib/clear_page_64.S       |  2 +-
 arch/x86/lib/copy_page_64.S        |  2 +-
 arch/x86/lib/memcpy_64.S           |  2 +-
 arch/x86/lib/memset_64.S           |  2 +-
 7 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 03b6bb5394a..bc6abb7bc7e 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -45,10 +45,9 @@
 struct alt_instr {
 	u8 *instr;		/* original instruction */
 	u8 *replacement;
-	u8  cpuid;		/* cpuid bit set for replacement */
+	u16 cpuid;		/* cpuid bit set for replacement */
 	u8  instrlen;		/* length of original instruction */
 	u8  replacementlen;	/* length of new instruction, <= instrlen */
-	u8  pad1;
 #ifdef CONFIG_X86_64
 	u32 pad2;
 #endif
@@ -86,9 +85,11 @@ static inline int alternatives_text_reserved(void *start, void *end)
       _ASM_ALIGN "\n"						\
       _ASM_PTR "661b\n"			/* label */		\
       _ASM_PTR "663f\n"			/* new instruction */	\
-      " .byte " __stringify(feature) "\n"	/* feature bit */ \
+      " .word " __stringify(feature) "\n"	/* feature bit */ \
      " .byte 662b-661b\n"			/* sourcelen */	\
      " .byte 664f-663f\n"			/* replacementlen */ \
+      ".previous\n"						\
+      ".section .discard,\"aw\",@progbits\n"			\
      " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
      ".previous\n"						\
      ".section .altinstr_replacement, \"ax\"\n"		\
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 46814591438..e8b88967de3 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -291,7 +291,7 @@ extern const char * const x86_power_flags[32];
  * patch the target code for additional performance.
  *
  */
-static __always_inline __pure bool __static_cpu_has(u8 bit)
+static __always_inline __pure bool __static_cpu_has(u16 bit)
 {
 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
 		asm goto("1: jmp %l[t_no]\n"
@@ -300,11 +300,11 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
 			 _ASM_ALIGN "\n"
 			 _ASM_PTR "1b\n"
 			 _ASM_PTR "0\n"		/* no replacement */
-			 " .byte %P0\n"		/* feature bit */
+			 " .word %P0\n"		/* feature bit */
 			 " .byte 2b - 1b\n"	/* source len */
 			 " .byte 0\n"		/* replacement len */
-			 " .byte 0xff + 0 - (2b-1b)\n"	/* padding */
 			 ".previous\n"
+			 /* skipping size check since replacement size = 0 */
 			 : : "i" (bit) : : t_no);
 		return true;
 	t_no:
@@ -318,10 +318,12 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
 			     _ASM_ALIGN "\n"
 			     _ASM_PTR "1b\n"
 			     _ASM_PTR "3f\n"
-			     " .byte %P1\n"		/* feature bit */
+			     " .word %P1\n"		/* feature bit */
 			     " .byte 2b - 1b\n"		/* source len */
 			     " .byte 4f - 3f\n"		/* replacement len */
-			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
+			     ".previous\n"
+			     ".section .discard,\"aw\",@progbits\n"
+			     " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
 			     ".previous\n"
 			     ".section .altinstr_replacement,\"ax\"\n"
 			     "3: movb $1,%0\n"
@@ -337,7 +339,7 @@ static __always_inline __pure bool __static_cpu_has(u8 bit)
 (								\
 	__builtin_constant_p(boot_cpu_has(bit)) ?		\
 		boot_cpu_has(bit) :				\
-	(__builtin_constant_p(bit) && !((bit) & ~0xff)) ?	\
+	__builtin_constant_p(bit) ?				\
 		__static_cpu_has(bit) :				\
 		boot_cpu_has(bit)				\
 )
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index cd49141cf15..7862cf510ea 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -914,7 +914,7 @@ ENTRY(simd_coprocessor_error)
 	.balign 4
 	.long 661b
 	.long 663f
-	.byte X86_FEATURE_XMM
+	.word X86_FEATURE_XMM
 	.byte 662b-661b
 	.byte 664f-663f
 .previous
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index ebeafcce04a..aa4326bfb24 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -52,7 +52,7 @@ ENDPROC(clear_page)
 	.align 8
 	.quad clear_page
 	.quad 1b
-	.byte X86_FEATURE_REP_GOOD
+	.word X86_FEATURE_REP_GOOD
 	.byte .Lclear_page_end - clear_page
 	.byte 2b - 1b
 	.previous
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 727a5d46d2f..6fec2d1cebe 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -113,7 +113,7 @@ ENDPROC(copy_page)
 	.align 8
 	.quad copy_page
 	.quad 1b
-	.byte X86_FEATURE_REP_GOOD
+	.word X86_FEATURE_REP_GOOD
 	.byte .Lcopy_page_end - copy_page
 	.byte 2b - 1b
 	.previous
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index f82e884928a..bcbcd1e0f7d 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -131,7 +131,7 @@ ENDPROC(__memcpy)
 	.align 8
 	.quad memcpy
 	.quad .Lmemcpy_c
-	.byte X86_FEATURE_REP_GOOD
+	.word X86_FEATURE_REP_GOOD

 	/*
 	 * Replace only beginning, memcpy is used to apply alternatives,
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index e88d3b81644..09d34426965 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -121,7 +121,7 @@ ENDPROC(__memset)
 	.align 8
 	.quad memset
 	.quad .Lmemset_c
-	.byte X86_FEATURE_REP_GOOD
+	.word X86_FEATURE_REP_GOOD
 	.byte .Lfinal - memset
 	.byte .Lmemset_e - .Lmemset_c
 	.previous
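[ Note on the mechanism: GNU as rejects a .byte whose value does not fit
  in 8 bits, so "0xff + (replacement len) - (source len)" assembles only
  while the replacement is no longer than the original.  Emitting that
  byte into .discard, which the kernel linker script throws away, keeps
  the assembly-time check without storing the byte in .altinstructions.
  Below is a minimal standalone sketch of the trick; the file name,
  labels, and nop bytes are illustrative, not taken from the patch. ]

/* sketch.c -- build with "gcc -c sketch.c".  The "original" below is
 * 3 bytes and the "replacement" 1 byte, so the .discard byte evaluates
 * to 0xff + 1 - 3 = 0xfd and assembly succeeds.  Grow the replacement
 * past 3 bytes and the expression exceeds 0xff: the assembler errors
 * out, which is exactly the overflow check the patch preserves. */
asm("661:	.byte 0x90, 0x90, 0x90\n"	/* "original": 3 nops */
    "662:\n"
    "663:	.byte 0x90\n"			/* "replacement": 1 nop */
    "664:\n"
    ".section .discard,\"aw\",@progbits\n"
    "	.byte 0xff + (664b-663b) - (662b-661b)\n" /* rlen <= slen check */
    ".previous\n");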
From df378ccfc4dd04e263426ad805516915874774aa Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Tue, 13 Jul 2010 14:55:11 -0700
Subject: x86, alternatives: Fix one more open-coded 8-bit alternative number

Fix a missing case of an 8-bit alternative number, buried inside an
assembly macro.

Signed-off-by: H. Peter Anvin
Reported-by: Yinghai Lu
Cc: Suresh Siddha
LKML-Reference: <4C3BDDA3.2060900@kernel.org>
---
 arch/x86/lib/copy_user_64.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 71100c98e33..a460158b5ac 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -29,7 +29,7 @@
 	.align 8
 	.quad 0b
 	.quad 2b
-	.byte \feature		/* when feature is set */
+	.word \feature		/* when feature is set */
 	.byte 5
 	.byte 5
 	.previous
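[ Why the open-coded records here and in the .S files of the previous
  patch must be converted in lockstep: apply_alternatives() walks
  .altinstructions as an array of struct alt_instr, so one record still
  emitting a one-byte feature number would shift every field behind it
  and corrupt the whole table.  The "doesn't add any size" claim from
  the first patch also checks out, since the u16 swallows the old pad1
  byte.  A sketch below: field order is copied from the patched header
  (64-bit case), but the struct names, the stdint stand-ins for the
  kernel's u8/u16/u32, and the C11 _Static_assert are ours. ]

/* layout_check.c -- stand-in model of struct alt_instr, 64-bit layout */
#include <stdint.h>

struct alt_instr_old {			/* before the patch */
	uint8_t	*instr;			/* original instruction */
	uint8_t	*replacement;
	uint8_t	 cpuid;			/* 8-bit feature number */
	uint8_t	 instrlen;
	uint8_t	 replacementlen;
	uint8_t	 pad1;
	uint32_t pad2;
};

struct alt_instr_new {			/* after the patch */
	uint8_t	*instr;
	uint8_t	*replacement;
	uint16_t cpuid;			/* 16-bit feature number */
	uint8_t	 instrlen;
	uint8_t	 replacementlen;
	uint32_t pad2;
};

/* the u16 consumes pad1, so the record stays 24 bytes either way */
_Static_assert(sizeof(struct alt_instr_old) == sizeof(struct alt_instr_new),
	       "alt_instr record size must not change");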
Peter Anvin" Date: Tue, 13 Jul 2010 14:55:11 -0700 Subject: x86, alternatives: Fix one more open-coded 8-bit alternative number Fix a missing case of an 8-bit alternative number, buried inside an assembly macro. Signed-off-by: H. Peter Anvin Reported-by: Yinghai Lu Cc: Suresh Siddha LKML-Reference: <4C3BDDA3.2060900@kernel.org> --- arch/x86/lib/copy_user_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 71100c98e33..a460158b5ac 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -29,7 +29,7 @@ .align 8 .quad 0b .quad 2b - .byte \feature /* when feature is set */ + .word \feature /* when feature is set */ .byte 5 .byte 5 .previous -- cgit v1.2.3-70-g09d2 From 3b770a2128423a687e6e9c57184a584fb4ba4c77 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 13 Jul 2010 14:57:50 -0700 Subject: x86, alternatives: BUG on encountering an invalid CPU feature number Make the alternatives-patching code BUG on encountering an invalid CPU feature number. Should have done this a long time ago. Signed-off-by: H. Peter Anvin Cc: Yinghai Lu Cc: Suresh Siddha LKML-Reference: --- arch/x86/kernel/alternative.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 70237732a6c..f65ab8b014c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -214,6 +214,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start, u8 *instr = a->instr; BUG_ON(a->replacementlen > a->instrlen); BUG_ON(a->instrlen > sizeof(insnbuf)); + BUG_ON(a->cpuid >= NCAPINTS*32); if (!boot_cpu_has(a->cpuid)) continue; #ifdef CONFIG_X86_64 -- cgit v1.2.3-70-g09d2