From 51c52e86694f19e84600a40f6156889feafd8ae9 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:36 +1000
Subject: powerpc: Split out do_feature_fixups() from cputable.c

The logic to patch CPU feature sections lives in cputable.c, but these
days it's used for CPU features as well as firmware features. Move it
into its own file for neatness and as preparation for some additions.

While we're moving the code, we pull the loop body logic into a
separate routine, and remove a comment which doesn't apply anymore.

Signed-off-by: Michael Ellerman
Acked-by: Kumar Gala
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/feature-fixups.c | 56 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 arch/powerpc/lib/feature-fixups.c

(limited to 'arch/powerpc/lib/feature-fixups.c')

diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
new file mode 100644
index 00000000000..f6fd5d2ff10
--- /dev/null
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  Modifications for ppc64:
+ *      Copyright (C) 2003 Dave Engebretsen
+ *
+ *  Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <asm/cputable.h>
+#include <asm/code-patching.h>
+
+
+struct fixup_entry {
+	unsigned long	mask;
+	unsigned long	value;
+	long		start_off;
+	long		end_off;
+};
+
+static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+	unsigned int *pstart, *pend, *p;
+
+	if ((value & fcur->mask) == fcur->value)
+		return;
+
+	pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+	pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+	for (p = pstart; p < pend; p++) {
+		*p = PPC_NOP_INSTR;
+		asm volatile ("dcbst 0, %0" : : "r" (p));
+	}
+	asm volatile ("sync" : : : "memory");
+	for (p = pstart; p < pend; p++)
+		asm volatile ("icbi 0,%0" : : "r" (p));
+	asm volatile ("sync; isync" : : : "memory");
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+	struct fixup_entry *fcur, *fend;
+
+	fcur = fixup_start;
+	fend = fixup_end;
+
+	for (; fcur < fend; fcur++)
+		patch_feature_section(value, fcur);
+}
--
cgit v1.2.3-70-g09d2

From fac23fe4be23259a8eaa9bad822f5b14dd07d15c Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:32:54 +1000
Subject: powerpc: Introduce infrastructure for feature sections with alternatives

The current feature section logic only supports nop'ing out code; this
means that if you want to choose at runtime between instruction
sequences, one or both cases will have to execute the nop'ed out
contents of the other section, e.g.:

BEGIN_FTR_SECTION
	or	1,1,1
END_FTR_SECTION_IFSET(FOO)
BEGIN_FTR_SECTION
	or	2,2,2
END_FTR_SECTION_IFCLR(FOO)

and the resulting code will be either,

	or	1,1,1
	nop

or,

	nop
	or	2,2,2

For small code segments this is fine, but for larger code blocks and in
performance critical code segments, it would be nice to avoid the nops.
This commit starts to implement logic to allow the following:

BEGIN_FTR_SECTION
	or	1,1,1
FTR_SECTION_ELSE
	or	2,2,2
ALT_FTR_SECTION_END_IFSET(FOO)

and the resulting code will be:

	or	1,1,1

or,

	or	2,2,2

We achieve this by extending the existing FTR macros.
The current feature section semantic just becomes a special case, i.e.
if the else case is empty we nop out the default case.

The key limitation is that the size of the else case must be less than
or equal to the size of the default case. If the else case is smaller,
the remainder of the section is nop'ed.

We let the linker put the else case code in with the rest of the text,
so that relative branches from the else case are more likely to link;
this has the disadvantage that we can't free the unused else cases.

This commit introduces the required macro and linker script changes,
but does not enable the patching of the alternative sections.

We also need to update two hand-made section entries in reg.h and
timex.h.

Signed-off-by: Michael Ellerman
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/vdso32/vdso32.lds.S |  2 +-
 arch/powerpc/kernel/vdso64/vdso64.lds.S |  2 +-
 arch/powerpc/kernel/vmlinux.lds.S       |  2 +-
 arch/powerpc/lib/feature-fixups.c       |  2 +
 include/asm-powerpc/feature-fixups.h    | 68 ++++++++++++++++++++++++++-------
 include/asm-powerpc/reg.h               |  2 +
 include/asm-powerpc/timex.h             |  2 +
 7 files changed, 64 insertions(+), 16 deletions(-)

(limited to 'arch/powerpc/lib/feature-fixups.c')

diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 9352ab5200e..271793577cd 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -24,7 +24,7 @@ SECTIONS
 	. = ALIGN(16);
 	.text : {
-		*(.text .stub .text.* .gnu.linkonce.t.*)
+		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
 	}
 	PROVIDE(__etext = .);
 	PROVIDE(_etext = .);
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 7d6585f9027..e608d1bd3bf 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -24,7 +24,7 @@ SECTIONS
 	. = ALIGN(16);
 	.text : {
-		*(.text .stub .text.* .gnu.linkonce.t.*)
+		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
 		*(.sfpr .glink)
 	} :text
 	PROVIDE(__etext = .);
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0c3000bf8d7..3c07811989f 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -35,7 +35,7 @@ SECTIONS
 		ALIGN_FUNCTION();
 		*(.text.head)
 		_text = .;
-		*(.text .fixup .text.init.refok .exit.text.refok)
+		*(.text .fixup .text.init.refok .exit.text.refok __ftr_alt_*)
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index f6fd5d2ff10..973d547ef01 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -22,6 +22,8 @@ struct fixup_entry {
 	unsigned long	value;
 	long		start_off;
 	long		end_off;
+	long		alt_start_off;
+	long		alt_end_off;
 };
 
 static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h
index 35f927888fe..ab30129dced 100644
--- a/include/asm-powerpc/feature-fixups.h
+++ b/include/asm-powerpc/feature-fixups.h
@@ -29,24 +29,35 @@
 #define FTR_ENTRY_OFFSET	PPC_LONG
 #endif
 
+#define START_FTR_SECTION(label)	label##1:
+
+#define FTR_SECTION_ELSE_NESTED(label)			\
+label##2:						\
+	.pushsection __ftr_alt_##label,"a";		\
+	.align 2;					\
+label##3:
+
 #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
-99:							\
-	.section sect,"a";				\
+label##4:						\
+	.popsection;					\
+	.pushsection sect,"a";				\
 	.align 3;					\
-98:							\
+label##5:						\
 	FTR_ENTRY_LONG msk;				\
 	FTR_ENTRY_LONG val;				\
-	FTR_ENTRY_OFFSET label##b-98b;			\
-	FTR_ENTRY_OFFSET 99b-98b;			\
-	.previous
-
+	FTR_ENTRY_OFFSET label##1b-label##5b;		\
+	FTR_ENTRY_OFFSET label##2b-label##5b;		\
+	FTR_ENTRY_OFFSET label##3b-label##5b;		\
+	FTR_ENTRY_OFFSET label##4b-label##5b;		\
+	.popsection;
 
 /* CPU feature dependent sections */
-#define BEGIN_FTR_SECTION_NESTED(label)	label:
-#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(97)
+#define BEGIN_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_FTR_SECTION		START_FTR_SECTION(97)
 
-#define END_FTR_SECTION_NESTED(msk, val, label) \
+#define END_FTR_SECTION_NESTED(msk, val, label)		\
+	FTR_SECTION_ELSE_NESTED(label)			\
 	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
 
 #define END_FTR_SECTION(msk, val)		\
@@ -55,12 +66,27 @@
 #define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
 #define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
 
+/* CPU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define FTR_SECTION_ELSE	FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
+#define ALT_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FTR_SECTION_END(msk, val)	\
+	ALT_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FTR_SECTION_END_IFSET(msk)	\
+	ALT_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
 
 /* Firmware feature dependent sections */
-#define BEGIN_FW_FTR_SECTION_NESTED(label)	label:
-#define BEGIN_FW_FTR_SECTION		BEGIN_FW_FTR_SECTION_NESTED(97)
+#define BEGIN_FW_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_FW_FTR_SECTION		START_FTR_SECTION(97)
 
-#define END_FW_FTR_SECTION_NESTED(msk, val, label) \
+#define END_FW_FTR_SECTION_NESTED(msk, val, label)	\
+	FTR_SECTION_ELSE_NESTED(label)			\
 	MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
 
 #define END_FW_FTR_SECTION(msk, val)		\
@@ -69,6 +95,22 @@
 #define END_FW_FTR_SECTION_IFSET(msk)	END_FW_FTR_SECTION((msk), (msk))
 #define END_FW_FTR_SECTION_IFCLR(msk)	END_FW_FTR_SECTION((msk), 0)
 
+/* Firmware feature sections with alternatives */
+#define FW_FTR_SECTION_ELSE_NESTED(label)	FTR_SECTION_ELSE_NESTED(label)
+#define FW_FTR_SECTION_ELSE	FTR_SECTION_ELSE_NESTED(97)
+#define ALT_FW_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_FW_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_FW_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_FW_FTR_SECTION_END(msk, val)	\
+	ALT_FW_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_FW_FTR_SECTION_END_IFSET(msk)	\
+	ALT_FW_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_FW_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_FW_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 079999b032a..7256efb5c14 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -732,6 +732,8 @@
 			"	.llong %1\n"		\
 			"	.llong 97b-98b\n"	\
 			"	.llong 99b-98b\n"	\
+			"	.llong 0\n"		\
+			"	.llong 0\n"		\
 			".previous"			\
 		: "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
 #else
diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h
index 92dedde761d..c55e14f7ef4 100644
--- a/include/asm-powerpc/timex.h
+++ b/include/asm-powerpc/timex.h
@@ -38,6 +38,8 @@ static inline cycles_t get_cycles(void)
 		"	.long 0\n"
 		"	.long 97b-98b\n"
 		"	.long 99b-98b\n"
+		"	.long 0\n"
+		"	.long 0\n"
 		".previous"
 		: "=r" (ret) : "i" (CPU_FTR_601));
 	return ret;
--
cgit v1.2.3-70-g09d2

From 9b1a735de64cc975c31a1642ec55e082ddbdfeaf Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:33:02 +1000
Subject: powerpc: Add logic to patch alternative feature sections

This commit adds the logic to patch alternative sections. This is
fairly straightforward, except for branches. Relative branches that
jump from inside the else section to outside of it need to be
translated as they're moved; otherwise they will jump to the wrong
location.

Signed-off-by: Michael Ellerman
Signed-off-by: Paul Mackerras
---
 arch/powerpc/lib/feature-fixups.c | 79 ++++++++++++++++++++++++++++++++-------
 1 file changed, 65 insertions(+), 14 deletions(-)

(limited to 'arch/powerpc/lib/feature-fixups.c')

diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 973d547ef01..174c1eba9ac 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -26,24 +26,66 @@ struct fixup_entry {
 	long		alt_end_off;
 };
 
-static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
 {
-	unsigned int *pstart, *pend, *p;
+	/*
+	 * We store the offset to the code as a negative offset from
+	 * the start of the alt_entry, to support the VDSO. This
+	 * routine converts that back into an actual address.
+	 */
+	return (unsigned int *)((unsigned long)fcur + offset);
+}
+
+static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
+				 unsigned int *alt_start, unsigned int *alt_end)
+{
+	unsigned int instr;
+
+	instr = *src;
+
+	if (instr_is_relative_branch(*src)) {
+		unsigned int *target = (unsigned int *)branch_target(src);
+
+		/* Branch within the section doesn't need translating */
+		if (target < alt_start || target >= alt_end) {
+			instr = translate_branch(dest, src);
+			if (!instr)
+				return 1;
+		}
+	}
+
+	patch_instruction(dest, instr);
+
+	return 0;
+}
+
+static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+	unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
+
+	start = calc_addr(fcur, fcur->start_off);
+	end = calc_addr(fcur, fcur->end_off);
+	alt_start = calc_addr(fcur, fcur->alt_start_off);
+	alt_end = calc_addr(fcur, fcur->alt_end_off);
+
+	if ((alt_end - alt_start) > (end - start))
+		return 1;
 
 	if ((value & fcur->mask) == fcur->value)
-		return;
+		return 0;
 
-	pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
-	pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+	src = alt_start;
+	dest = start;
 
-	for (p = pstart; p < pend; p++) {
-		*p = PPC_NOP_INSTR;
-		asm volatile ("dcbst 0, %0" : : "r" (p));
+	for (; src < alt_end; src++, dest++) {
+		if (patch_alt_instruction(src, dest, alt_start, alt_end))
+			return 1;
 	}
-	asm volatile ("sync" : : : "memory");
-	for (p = pstart; p < pend; p++)
-		asm volatile ("icbi 0,%0" : : "r" (p));
-	asm volatile ("sync; isync" : : : "memory");
+
+	for (; dest < end; dest++)
+		patch_instruction(dest, PPC_NOP_INSTR);
+
+	return 0;
 }
 
 void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
@@ -53,6 +95,15 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	fcur = fixup_start;
 	fend = fixup_end;
 
-	for (; fcur < fend; fcur++)
-		patch_feature_section(value, fcur);
+	for (; fcur < fend; fcur++) {
+		if (patch_feature_section(value, fcur)) {
+			__WARN();
+			printk("Unable to patch feature section at %p - %p" \
+				" with %p - %p\n",
+				calc_addr(fcur, fcur->start_off),
+				calc_addr(fcur, fcur->end_off),
+				calc_addr(fcur, fcur->alt_start_off),
+				calc_addr(fcur, fcur->alt_end_off));
+		}
+	}
 }
--
cgit v1.2.3-70-g09d2

From 362e7701fd183d990e0863883461edef61392710 Mon Sep 17 00:00:00 2001
From: Michael Ellerman
Date: Tue, 24 Jun 2008 11:33:03 +1000
Subject: powerpc: Add self-tests of the feature fixup code

This commit adds tests of the feature fixup code; they are run during
boot if CONFIG_FTR_FIXUP_SELFTEST=y. Some of the tests manually invoke
the patching routines to check their behaviour, and others use the
macros and so are patched during the normal patching done during boot.

Because we have two sets of macros with different names, we use a macro
to generate the test of the macros, very niiiice.
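For readers who haven't seen the trick, the same token-pasting pattern
looks roughly like this in plain C (a stand-alone sketch with invented
names, not part of the patch itself):

#include <stdio.h>

/* One macro body stamps out a test function per feature type, the same
 * token-pasting trick MAKE_MACRO_TEST(FTR) / MAKE_MACRO_TEST(FW_FTR)
 * uses below: TYPE is spliced into every identifier with ##. */
#define MAKE_TEST(TYPE)						\
	static void test_##TYPE##_macros(void)			\
	{							\
		printf("checking " #TYPE " macro expansion\n");	\
	}

MAKE_TEST(FTR)
MAKE_TEST(FW_FTR)

int main(void)
{
	test_FTR_macros();
	test_FW_FTR_macros();
	return 0;
}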
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/Kconfig.debug | 5 + arch/powerpc/lib/Makefile | 1 + arch/powerpc/lib/feature-fixups-test.S | 727 +++++++++++++++++++++++++++++++++ arch/powerpc/lib/feature-fixups.c | 206 ++++++++++ 4 files changed, 939 insertions(+) create mode 100644 arch/powerpc/lib/feature-fixups-test.S (limited to 'arch/powerpc/lib/feature-fixups.c') diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index dc589390336..2840ab69ef4 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -62,6 +62,11 @@ config CODE_PATCHING_SELFTEST depends on DEBUG_KERNEL default n +config FTR_FIXUP_SELFTEST + bool "Run self-tests of the feature-fixup code." + depends on DEBUG_KERNEL + default n + choice prompt "Serial Port" depends on KGDB diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index fc52771f0cd..2a88e8b9a3c 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -27,3 +27,4 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o obj-y += code-patching.o obj-y += feature-fixups.o +obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S new file mode 100644 index 00000000000..10d038b501a --- /dev/null +++ b/arch/powerpc/lib/feature-fixups-test.S @@ -0,0 +1,727 @@ +/* + * Copyright 2008 Michael Ellerman, IBM Corporation. + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include + + .text + +#define globl(x) \ + .globl x; \ +x: + +globl(ftr_fixup_test1) + or 1,1,1 + or 2,2,2 /* fixup will nop out this instruction */ + or 3,3,3 + +globl(end_ftr_fixup_test1) + +globl(ftr_fixup_test1_orig) + or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test1_expected) + or 1,1,1 + nop + or 3,3,3 + +globl(ftr_fixup_test2) + or 1,1,1 + or 2,2,2 /* fixup will replace this with ftr_fixup_test2_alt */ + or 3,3,3 + +globl(end_ftr_fixup_test2) + +globl(ftr_fixup_test2_orig) + or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test2_alt) + or 31,31,31 + +globl(ftr_fixup_test2_expected) + or 1,1,1 + or 31,31,31 + or 3,3,3 + +globl(ftr_fixup_test3) + or 1,1,1 + or 2,2,2 /* fixup will fail to replace this */ + or 3,3,3 + +globl(end_ftr_fixup_test3) + +globl(ftr_fixup_test3_orig) + or 1,1,1 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test3_alt) + or 31,31,31 + or 31,31,31 + +globl(ftr_fixup_test4) + or 1,1,1 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 3,3,3 + +globl(end_ftr_fixup_test4) + +globl(ftr_fixup_test4_expected) + or 1,1,1 + or 31,31,31 + or 31,31,31 + nop + nop + or 3,3,3 + +globl(ftr_fixup_test4_orig) + or 1,1,1 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 3,3,3 + +globl(ftr_fixup_test4_alt) + or 31,31,31 + or 31,31,31 + + +globl(ftr_fixup_test5) + or 1,1,1 +BEGIN_FTR_SECTION + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 + or 2,2,2 +FTR_SECTION_ELSE +2: b 3f +3: or 5,5,5 + beq 3b + b 1f + or 6,6,6 + b 2b +1: bdnz 3b +ALT_FTR_SECTION_END(0, 1) + or 1,1,1 + +globl(end_ftr_fixup_test5) + +globl(ftr_fixup_test5_expected) + or 1,1,1 +2: b 3f +3: or 5,5,5 + beq 3b + b 1f + or 6,6,6 + b 2b +1: bdnz 3b + or 1,1,1 + +globl(ftr_fixup_test6) +1: or 1,1,1 +BEGIN_FTR_SECTION + or 5,5,5 +2: cmpdi r3,0 + beq 4f + blt 2b + b 1b + b 4f +FTR_SECTION_ELSE +2: or 2,2,2 + cmpdi 
r3,1 + beq 3f + blt 2b + b 3f + b 1b +ALT_FTR_SECTION_END(0, 1) +3: or 1,1,1 + or 2,2,2 +4: or 3,3,3 + +globl(end_ftr_fixup_test6) + +globl(ftr_fixup_test6_expected) +1: or 1,1,1 +2: or 2,2,2 + cmpdi r3,1 + beq 3f + blt 2b + b 3f + b 1b +2: or 1,1,1 + or 2,2,2 +3: or 3,3,3 + + +#define MAKE_MACRO_TEST(TYPE) \ +globl(ftr_fixup_test_ ##TYPE##_macros) \ + or 1,1,1; \ + /* Basic test, this section should all be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic test, this section should NOT be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, inner section should be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(80) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 80) \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, whole section should be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(80) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 0, 80) \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, none should be nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(80) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 0, 80) \ + or 2,2,2; \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, default case should be taken */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 5,5,5; \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, else case should be taken */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ + or 31,31,31; \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt with smaller else case, should be padded with nops */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in default case */ \ + /* Default case should be taken, with nop'ed inner section */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 3,3,3; \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 95) \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 2,2,2; \ + or 2,2,2; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 5,5,5; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, else taken & nop */ \ +BEGIN_##TYPE##_SECTION \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +##TYPE##_SECTION_ELSE \ + or 5,5,5; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 3,3,3; \ +END_##TYPE##_SECTION_NESTED(0, 1, 95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 
2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, else taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, all nop'ed */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) \ + or 2,2,2; \ +END_##TYPE##_SECTION(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner else taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 0) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner default taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner else taken */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ +BEGIN_##TYPE##_SECTION_NESTED(95) \ + or 1,1,1; \ +##TYPE##_SECTION_ELSE_NESTED(95) \ + or 5,5,5; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ + or 31,31,31; \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) \ + or 31,31,31; \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else can have large else case */ \ +BEGIN_##TYPE##_SECTION \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +##TYPE##_SECTION_ELSE \ +BEGIN_##TYPE##_SECTION_NESTED(94) \ + or 5,5,5; \ + or 5,5,5; \ + or 5,5,5; \ + or 5,5,5; \ +##TYPE##_SECTION_ELSE_NESTED(94) \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ +ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) \ +ALT_##TYPE##_SECTION_END(0, 1) \ + or 1,1,1; \ + or 1,1,1; + +#define MAKE_MACRO_TEST_EXPECTED(TYPE) \ +globl(ftr_fixup_test_ ##TYPE##_macros_expected) \ + or 1,1,1; \ + /* 
Basic test, this section should all be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + nop; \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic test, this section should NOT be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ + or 2,2,2; \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, inner section should be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(80) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 80) */ \ + or 2,2,2; \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, whole section should be nop'ed */ \ + /* NB. inner section is not nop'ed, but then entire outer is */ \ +/* BEGIN_##TYPE##_SECTION */ \ + nop; \ + nop; \ +/* BEGIN_##TYPE##_SECTION_NESTED(80) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 0, 80) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nesting test, none should be nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(80) */ \ + or 3,3,3; \ + or 3,3,3; \ +/* END_##TYPE##_SECTION_NESTED(0, 0, 80) */ \ + or 2,2,2; \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, default case should be taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Basic alt section test, else case should be taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ + or 31,31,31; \ + or 31,31,31; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt with smaller else case, should be padded with nops */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ + nop; \ + nop; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in default case */ \ + /* Default case should be taken, with nop'ed inner section */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 3,3,3; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + nop; \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */ \ + or 3,3,3; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 3,3,3; \ + or 3,3,3; \ + or 3,3,3; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 5,5,5; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 3,3,3; */ \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Alt section with nested section in else, else taken & nop */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ + /* or 3,3,3; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 5,5,5; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + nop; \ +/* END_##TYPE##_SECTION_NESTED(0, 1, 95) */ \ + or 5,5,5; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* 
BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + or 1,1,1; \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */ \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, else taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + or 5,5,5; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + or 2,2,2; \ +/* END_##TYPE##_SECTION(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Feature section with nested alt section, all nop'ed */ \ +/* BEGIN_##TYPE##_SECTION */ \ + nop; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + nop; \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */ \ + nop; \ +/* END_##TYPE##_SECTION(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + or 1,1,1; \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 95) */ \ + or 2,2,2; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 31,31,31; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + /* or 1,1,1; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */ \ + /* or 31,31,31; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, default with inner else taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + or 2,2,2; \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + or 5,5,5; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + or 2,2,2; \ +/* ##TYPE##_SECTION_ELSE */ \ + /* or 31,31,31; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + /* or 1,1,1; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */ \ + /* or 31,31,31; */ \ +/* ALT_##TYPE##_SECTION_END(0, 0) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner default taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 2,2,2; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + /* or 2,2,2; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + or 5,5,5; \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + /* or 1,1,1; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 0, 94) */ \ + or 31,31,31; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else with inner else taken */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 2,2,2; */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(95) */ \ + /* or 1,1,1; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(95) */ \ + /* or 5,5,5; */ \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 95) */ \ + /* or 2,2,2; */ \ +/* ##TYPE##_SECTION_ELSE */ \ + or 31,31,31; \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + or 1,1,1; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) */ \ + or 31,31,31; \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; \ + /* Nested alt sections, else can have large else case */ \ +/* BEGIN_##TYPE##_SECTION */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ + /* or 2,2,2; */ \ + /* or 
2,2,2; */ \ +/* ##TYPE##_SECTION_ELSE */ \ +/* BEGIN_##TYPE##_SECTION_NESTED(94) */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ + /* or 5,5,5; */ \ +/* ##TYPE##_SECTION_ELSE_NESTED(94) */ \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ + or 1,1,1; \ +/* ALT_##TYPE##_SECTION_END_NESTED(0, 1, 94) */ \ +/* ALT_##TYPE##_SECTION_END(0, 1) */ \ + or 1,1,1; \ + or 1,1,1; + +MAKE_MACRO_TEST(FTR); +MAKE_MACRO_TEST_EXPECTED(FTR); + +#ifdef CONFIG_PPC64 +MAKE_MACRO_TEST(FW_FTR); +MAKE_MACRO_TEST_EXPECTED(FW_FTR); +#endif diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 174c1eba9ac..48e1ed89052 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -13,6 +13,8 @@ */ #include +#include +#include #include #include @@ -107,3 +109,207 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } } + +#ifdef CONFIG_FTR_FIXUP_SELFTEST + +#define check(x) \ + if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__); + +/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */ +static struct fixup_entry fixup; + +static long calc_offset(struct fixup_entry *entry, unsigned int *p) +{ + return (unsigned long)p - (unsigned long)entry; +} + +void test_basic_patching(void) +{ + extern unsigned int ftr_fixup_test1; + extern unsigned int end_ftr_fixup_test1; + extern unsigned int ftr_fixup_test1_orig; + extern unsigned int ftr_fixup_test1_expected; + int size = &end_ftr_fixup_test1 - &ftr_fixup_test1; + + fixup.value = fixup.mask = 8; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2); + fixup.alt_start_off = fixup.alt_end_off = 0; + + /* Sanity check */ + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + + /* Check we don't patch if the value matches */ + patch_feature_section(8, &fixup); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + + /* Check we do patch if the value doesn't match */ + patch_feature_section(0, &fixup); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); + + /* Check we do patch if the mask doesn't match */ + memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + patch_feature_section(~8, &fixup); + check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); +} + +static void test_alternative_patching(void) +{ + extern unsigned int ftr_fixup_test2; + extern unsigned int end_ftr_fixup_test2; + extern unsigned int ftr_fixup_test2_orig; + extern unsigned int ftr_fixup_test2_alt; + extern unsigned int ftr_fixup_test2_expected; + int size = &end_ftr_fixup_test2 - &ftr_fixup_test2; + + fixup.value = fixup.mask = 0xF; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2); + fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt); + fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1); + + /* Sanity check */ + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + + /* Check we don't patch if the value matches */ + patch_feature_section(0xF, &fixup); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + + /* Check we do patch if the value doesn't match */ + patch_feature_section(0, &fixup); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); + + /* Check we do patch if the mask doesn't match */ + 
memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + patch_feature_section(~0xF, &fixup); + check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); +} + +static void test_alternative_case_too_big(void) +{ + extern unsigned int ftr_fixup_test3; + extern unsigned int end_ftr_fixup_test3; + extern unsigned int ftr_fixup_test3_orig; + extern unsigned int ftr_fixup_test3_alt; + int size = &end_ftr_fixup_test3 - &ftr_fixup_test3; + + fixup.value = fixup.mask = 0xC; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2); + fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt); + fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2); + + /* Sanity check */ + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + + /* Expect nothing to be patched, and the error returned to us */ + check(patch_feature_section(0xF, &fixup) == 1); + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(patch_feature_section(0, &fixup) == 1); + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(patch_feature_section(~0xF, &fixup) == 1); + check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); +} + +static void test_alternative_case_too_small(void) +{ + extern unsigned int ftr_fixup_test4; + extern unsigned int end_ftr_fixup_test4; + extern unsigned int ftr_fixup_test4_orig; + extern unsigned int ftr_fixup_test4_alt; + extern unsigned int ftr_fixup_test4_expected; + int size = &end_ftr_fixup_test4 - &ftr_fixup_test4; + unsigned long flag; + + /* Check a high-bit flag */ + flag = 1UL << ((sizeof(unsigned long) - 1) * 8); + fixup.value = fixup.mask = flag; + fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1); + fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5); + fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt); + fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2); + + /* Sanity check */ + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + + /* Check we don't patch if the value matches */ + patch_feature_section(flag, &fixup); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + + /* Check we do patch if the value doesn't match */ + patch_feature_section(0, &fixup); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); + + /* Check we do patch if the mask doesn't match */ + memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + patch_feature_section(~flag, &fixup); + check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); +} + +static void test_alternative_case_with_branch(void) +{ + extern unsigned int ftr_fixup_test5; + extern unsigned int end_ftr_fixup_test5; + extern unsigned int ftr_fixup_test5_expected; + int size = &end_ftr_fixup_test5 - &ftr_fixup_test5; + + check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0); +} + +static void test_alternative_case_with_external_branch(void) +{ + extern unsigned int ftr_fixup_test6; + extern unsigned int end_ftr_fixup_test6; + extern unsigned int ftr_fixup_test6_expected; + int size = &end_ftr_fixup_test6 - &ftr_fixup_test6; + + check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0); +} + +static void test_cpu_macros(void) +{ + extern void ftr_fixup_test_FTR_macros; + extern void ftr_fixup_test_FTR_macros_expected; + 
unsigned long size = &ftr_fixup_test_FTR_macros_expected -
+			     &ftr_fixup_test_FTR_macros;
+
+	/* The fixups have already been done for us during boot */
+	check(memcmp(&ftr_fixup_test_FTR_macros,
+		     &ftr_fixup_test_FTR_macros_expected, size) == 0);
+}
+
+static void test_fw_macros(void)
+{
+#ifdef CONFIG_PPC64
+	extern void ftr_fixup_test_FW_FTR_macros;
+	extern void ftr_fixup_test_FW_FTR_macros_expected;
+	unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected -
+			     &ftr_fixup_test_FW_FTR_macros;
+
+	/* The fixups have already been done for us during boot */
+	check(memcmp(&ftr_fixup_test_FW_FTR_macros,
+		     &ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
+#endif
+}
+
+static int __init test_feature_fixups(void)
+{
+	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
+
+	test_basic_patching();
+	test_alternative_patching();
+	test_alternative_case_too_big();
+	test_alternative_case_too_small();
+	test_alternative_case_with_branch();
+	test_alternative_case_with_external_branch();
+	test_cpu_macros();
+	test_fw_macros();
+
+	return 0;
+}
+late_initcall(test_feature_fixups);
+
+#endif /* CONFIG_FTR_FIXUP_SELFTEST */
--
cgit v1.2.3-70-g09d2

From 2d1b2027626d5151fff8ef7c06ca8e7876a1a510 Mon Sep 17 00:00:00 2001
From: Kumar Gala
Date: Wed, 2 Jul 2008 01:16:40 +1000
Subject: powerpc: Fixup lwsync at runtime

To allow for a single kernel image on e500 v1/v2/mc we need to fix up
lwsync at runtime. On e500v1/v2 lwsync causes an illop, so we need to
patch up the code. We default to 'sync' since that is always safe, and
if the cpu is capable we will replace 'sync' with 'lwsync'.

We introduce CPU_FTR_LWSYNC as a way to determine at runtime if this is
needed. This flag could be moved elsewhere since we don't really use it
for the normal CPU_FTR purpose.

Finally we only store the relative offset in the fixup section to keep
it as small as possible rather than using a full fixup_entry.
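To make the offset-only scheme concrete, here is a minimal user-space
sketch of the table walk (invented names; the real code is
do_lwsync_fixups() below, which uses patch_instruction() and flushes
the icache; this sketch also assumes the table and the code sit within
2GB of each other so the offset fits in 32 bits, as kernel text and its
fixup tables do):

#include <stdint.h>
#include <stdio.h>

#define PPC_LWSYNC_INSTR	0x7c2004ac

static uint32_t code[2] = { 0x60000000, 0x7c0004ac };	/* nop; sync */
static int32_t fixups[1];

/* Each fixup entry is just the offset from the entry itself to the
 * instruction to patch - no mask/value pair, no section bounds. */
static void apply_lwsync_fixups(int32_t *start, int32_t *end, int has_lwsync)
{
	int32_t *entry;

	if (!has_lwsync)
		return;	/* leave the always-safe 'sync' in place */

	for (entry = start; entry < end; entry++) {
		uint32_t *insn = (uint32_t *)((char *)entry + *entry);
		*insn = PPC_LWSYNC_INSTR;
	}
}

int main(void)
{
	/* build one entry pointing at the 'sync' instruction */
	fixups[0] = (int32_t)((char *)&code[1] - (char *)&fixups[0]);

	apply_lwsync_fixups(fixups, fixups + 1, 1);
	printf("patched: 0x%08x\n", code[1]);	/* 0x7c2004ac, i.e. lwsync */
	return 0;
}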
Signed-off-by: Kumar Gala Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/module.c | 6 ++++++ arch/powerpc/kernel/setup_32.c | 4 ++++ arch/powerpc/kernel/setup_64.c | 2 ++ arch/powerpc/kernel/vdso.c | 10 +++++++++ arch/powerpc/kernel/vdso32/vdso32.lds.S | 3 +++ arch/powerpc/kernel/vdso64/vdso64.lds.S | 3 +++ arch/powerpc/kernel/vmlinux.lds.S | 6 ++++++ arch/powerpc/lib/feature-fixups-test.S | 15 +++++++++++++ arch/powerpc/lib/feature-fixups.c | 36 +++++++++++++++++++++++++++++++ include/asm-powerpc/code-patching.h | 3 ++- include/asm-powerpc/cputable.h | 21 +++++++++--------- include/asm-powerpc/feature-fixups.h | 10 +++++++++ include/asm-powerpc/synch.h | 38 ++++++++++++++++++++------------- 13 files changed, 131 insertions(+), 26 deletions(-) (limited to 'arch/powerpc/lib/feature-fixups.c') diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 40dd52d81c1..af07003573c 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -86,6 +86,12 @@ int module_finalize(const Elf_Ehdr *hdr, (void *)sect->sh_addr + sect->sh_size); #endif + sect = find_section(hdr, sechdrs, "__lwsync_fixup"); + if (sect != NULL) + do_lwsync_fixups(cur_cpu_spec->cpu_features, + (void *)sect->sh_addr, + (void *)sect->sh_addr + sect->sh_size); + return 0; } diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 9e83add5429..0109e7f0ccf 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -101,6 +101,10 @@ unsigned long __init early_init(unsigned long dt_ptr) PTRRELOC(&__start___ftr_fixup), PTRRELOC(&__stop___ftr_fixup)); + do_lwsync_fixups(spec->cpu_features, + PTRRELOC(&__start___lwsync_fixup), + PTRRELOC(&__stop___lwsync_fixup)); + return KERNELBASE + offset; } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 098fd96a394..04d8de9f0fc 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -363,6 +363,8 @@ void __init setup_system(void) &__start___ftr_fixup, &__stop___ftr_fixup); do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); + do_lwsync_fixups(cur_cpu_spec->cpu_features, + &__start___lwsync_fixup, &__stop___lwsync_fixup); /* * Unflatten the device-tree passed by prom_init or kexec diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index ce245a850db..f177c60ea76 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -571,6 +571,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, if (start64) do_feature_fixups(powerpc_firmware_features, start64, start64 + size64); + + start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64); + if (start64) + do_lwsync_fixups(cur_cpu_spec->cpu_features, + start64, start64 + size64); #endif /* CONFIG_PPC64 */ start32 = find_section32(v32->hdr, "__ftr_fixup", &size32); @@ -585,6 +590,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32, start32, start32 + size32); #endif /* CONFIG_PPC64 */ + start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32); + if (start32) + do_lwsync_fixups(cur_cpu_spec->cpu_features, + start32, start32 + size32); + return 0; } diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S index 271793577cd..be3b6a41dc0 100644 --- a/arch/powerpc/kernel/vdso32/vdso32.lds.S +++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S @@ -33,6 +33,9 @@ SECTIONS . = ALIGN(8); __ftr_fixup : { *(__ftr_fixup) } + . 
= ALIGN(8); + __lwsync_fixup : { *(__lwsync_fixup) } + #ifdef CONFIG_PPC64 . = ALIGN(8); __fw_ftr_fixup : { *(__fw_ftr_fixup) } diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S index e608d1bd3bf..d0b2526dd38 100644 --- a/arch/powerpc/kernel/vdso64/vdso64.lds.S +++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S @@ -34,6 +34,9 @@ SECTIONS . = ALIGN(8); __ftr_fixup : { *(__ftr_fixup) } + . = ALIGN(8); + __lwsync_fixup : { *(__lwsync_fixup) } + . = ALIGN(8); __fw_ftr_fixup : { *(__fw_ftr_fixup) } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 3c07811989f..6856f6c1572 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -127,6 +127,12 @@ SECTIONS *(__ftr_fixup) __stop___ftr_fixup = .; } + . = ALIGN(8); + __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { + __start___lwsync_fixup = .; + *(__lwsync_fixup) + __stop___lwsync_fixup = .; + } #ifdef CONFIG_PPC64 . = ALIGN(8); __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) { diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S index 0549be04399..cb737484c5a 100644 --- a/arch/powerpc/lib/feature-fixups-test.S +++ b/arch/powerpc/lib/feature-fixups-test.S @@ -10,6 +10,7 @@ #include #include +#include .text @@ -725,3 +726,17 @@ MAKE_MACRO_TEST_EXPECTED(FTR); MAKE_MACRO_TEST(FW_FTR); MAKE_MACRO_TEST_EXPECTED(FW_FTR); #endif + +globl(lwsync_fixup_test) +1: or 1,1,1 + LWSYNC +globl(end_lwsync_fixup_test) + +globl(lwsync_fixup_test_expected_LWSYNC) +1: or 1,1,1 + lwsync + +globl(lwsync_fixup_test_expected_SYNC) +1: or 1,1,1 + sync + diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 48e1ed89052..4e43702b981 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -110,6 +110,22 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) } } +void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) +{ + unsigned int *start, *end, *dest; + + if (!(value & CPU_FTR_LWSYNC)) + return ; + + start = fixup_start; + end = fixup_end; + + for (; start < end; start++) { + dest = (void *)start + *start; + patch_instruction(dest, PPC_LWSYNC_INSTR); + } +} + #ifdef CONFIG_FTR_FIXUP_SELFTEST #define check(x) \ @@ -295,6 +311,25 @@ static void test_fw_macros(void) #endif } +static void test_lwsync_macros(void) +{ + extern void lwsync_fixup_test; + extern void end_lwsync_fixup_test; + extern void lwsync_fixup_test_expected_LWSYNC; + extern void lwsync_fixup_test_expected_SYNC; + unsigned long size = &end_lwsync_fixup_test - + &lwsync_fixup_test; + + /* The fixups have already been done for us during boot */ + if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) { + check(memcmp(&lwsync_fixup_test, + &lwsync_fixup_test_expected_LWSYNC, size) == 0); + } else { + check(memcmp(&lwsync_fixup_test, + &lwsync_fixup_test_expected_SYNC, size) == 0); + } +} + static int __init test_feature_fixups(void) { printk(KERN_DEBUG "Running feature fixup self-tests ...\n"); @@ -307,6 +342,7 @@ static int __init test_feature_fixups(void) test_alternative_case_with_external_branch(); test_cpu_macros(); test_fw_macros(); + test_lwsync_macros(); return 0; } diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h index ef3a5d156db..107d9b915e3 100644 --- a/include/asm-powerpc/code-patching.h +++ b/include/asm-powerpc/code-patching.h @@ -12,7 +12,8 @@ #include -#define PPC_NOP_INSTR 
0x60000000 +#define PPC_NOP_INSTR 0x60000000 +#define PPC_LWSYNC_INSTR 0x7c2004ac /* Flags for create_branch: * "b" == create_branch(addr, target, 0); diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 4e4491cb9d3..3171ac904b9 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -156,6 +156,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, #define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000) #define CPU_FTR_SPE ASM_CONST(0x0000000002000000) #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) +#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) /* * Add the 64-bit processor unique features in the top half of the word; @@ -369,43 +370,43 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start, CPU_FTR_NODSISRALIGN) #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \ - CPU_FTR_L2CSR) + CPU_FTR_L2CSR | CPU_FTR_LWSYNC) #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) /* 64-bit CPUs */ -#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) -#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ CPU_FTR_MMCRA | CPU_FTR_CTRL) -#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA) -#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA) -#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR) -#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR) -#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | \ +#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ CPU_FTR_DSCR) -#define CPU_FTRS_CELL (CPU_FTR_USE_TB | \ +#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG) -#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \ +#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h index ab30129dced..a1029967620 100644 --- a/include/asm-powerpc/feature-fixups.h +++ b/include/asm-powerpc/feature-fixups.h @@ -113,4 +113,14 @@ label##5: \ 
 #endif /* __ASSEMBLY__ */
 
+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label)	label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect)		\
+label##2:						\
+	.pushsection sect,"a";				\
+	.align 2;					\
+label##3:						\
+	.long label##1b-label##3b;			\
+	.popsection;
+
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 42a1ef59069..45963e80f55 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -3,34 +3,42 @@
 #ifdef __KERNEL__
 
 #include <linux/stringify.h>
+#include <asm/feature-fixups.h>
 
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+			     void *fixup_end);
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */
 
-#ifdef __SUBARCH_HAS_LWSYNC
+#if defined(__powerpc64__)
 # define LWSYNC	lwsync
+#elif defined(CONFIG_E500)
+# define LWSYNC					\
+	START_LWSYNC_SECTION(96);		\
+	sync;					\
+	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
 #else
 # define LWSYNC	sync
 #endif
 
 #ifdef CONFIG_SMP
 #define ISYNC_ON_SMP	"\n\tisync\n"
-#define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP	stringify_in_c(LWSYNC) "\n"
 #else
 #define ISYNC_ON_SMP
 #define LWSYNC_ON_SMP
 #endif
 
-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYNCH_H */
--
cgit v1.2.3-70-g09d2
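A closing illustration of the branch translation that the
alternative-sections patch above relies on: translate_branch()
re-encodes a relative branch so it still reaches its original target
from its new address. A simplified user-space sketch handling only
I-form (opcode 18) branches, with invented names and none of the range
checking that the real create_branch() helpers perform:

#include <stdint.h>
#include <stdio.h>

/* Sign-extended displacement of an I-form branch (opcode 18): the LI
 * field is insn & 0x03fffffc, with sign bit 0x02000000. */
static int32_t branch_offset(uint32_t insn)
{
	int32_t off = insn & 0x03fffffc;

	if (off & 0x02000000)
		off -= 0x04000000;
	return off;
}

/* Re-encode 'insn', originally at 'src', so that when placed at 'dest'
 * it still branches to the same absolute target (AA/LK bits kept). */
static uint32_t translate_b(uint32_t insn, uintptr_t src, uintptr_t dest)
{
	uintptr_t target = src + branch_offset(insn);
	int32_t new_off = (int32_t)(target - dest);

	return (insn & 0xfc000003) | (new_off & 0x03fffffc);
}

int main(void)
{
	/* 'b +8' assembled at 0x1000, moved to 0x2000: becomes 'b -0xff8' */
	uint32_t insn = 0x48000008;

	printf("0x%08x\n", translate_b(insn, 0x1000, 0x2000));	/* 0x4bfff008 */
	return 0;
}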