| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-05 08:16:29 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-05 08:16:29 -0700 |
| commit | 046f153343e33dcad1be7f6249ea6ff1c6fd9b58 (patch) | |
| tree | 46da03ea84e1a4fe8eff53891332e715cbf31f3e /arch/x86/include | |
| parent | a0abcf2e8f8017051830f738ac1bf5ef42703243 (diff) | |
| parent | e33655a386ed3b26ad36fb97a47ebb1c2ca1e928 (diff) | |
Merge branch 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 EFI updates from Peter Anvin:
"A collection of EFI changes. The perhaps most important one is to
fully save and restore the FPU state around each invocation of EFI
runtime, and to not choke on non-ASCII characters in the boot stub"
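
The core of the FPU change shows up in the diff below: every EFI runtime-service invocation is now bracketed by kernel_fpu_begin()/kernel_fpu_end(), so the firmware may clobber x87/SSE registers without corrupting the interrupted task's state. A minimal sketch of that pattern follows; the wrapper name example_get_time is hypothetical, and the sketch ignores the x86-64 calling-convention thunk (efi_call) and page-table synchronization that the real macros in the diff also handle:

```c
#include <linux/efi.h>
#include <asm/i387.h>	/* kernel_fpu_begin()/kernel_fpu_end() in this era */

/*
 * Sketch only, not the kernel's literal code: bracket the firmware
 * call with an FPU save/restore. kernel_fpu_begin() saves the current
 * task's FPU state and disables preemption; kernel_fpu_end() restores
 * it, so the firmware is free to use vector registers in between.
 */
static efi_status_t example_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t status;

	kernel_fpu_begin();
	status = efi.systab->runtime->get_time(tm, tc);
	kernel_fpu_end();

	return status;
}
```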
* 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
efivars: Add compatibility code for compat tasks
efivars: Refactor sanity checking code into separate function
efivars: Stop passing a struct argument to efivar_validate()
efivars: Check size of user object
efivars: Use local variables instead of a pointer dereference
x86/efi: Save and restore FPU context around efi_calls (i386)
x86/efi: Save and restore FPU context around efi_calls (x86_64)
x86/efi: Implement a __efi_call_virt macro
x86, fpu: Extend the use of static_cpu_has_safe
x86/efi: Delete most of the efi_call* macros
efi: x86: Handle arbitrary Unicode characters
efi: Add get_dram_base() helper function
efi: Add shared printk wrapper for consistent prefixing
efi: create memory map iteration helper
efi: efi-stub-helper cleanup
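
Beyond the FPU work, the stub cleanups above add small helpers. As a sketch of the shape of the helper from "efi: create memory map iteration helper" (the map/map_end/desc_size fields of struct efi_memory_map are assumed from this era's include/linux/efi.h; the exact macro in the commit may differ):

```c
#include <linux/efi.h>

/*
 * Sketch of the iteration helper named in the shortlog. EFI memory
 * descriptors are strided by desc_size bytes, which firmware may make
 * larger than sizeof(efi_memory_desc_t), so the walk must advance by
 * desc_size rather than by array indexing.
 */
#define for_each_efi_memory_desc(m, md)					   \
	for ((md) = (m)->map;						   \
	     (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
	     (md) = (void *)(md) + (m)->desc_size)

/* Hypothetical caller: sum the conventional memory in the map. */
static u64 example_conventional_bytes(struct efi_memory_map *m)
{
	efi_memory_desc_t *md;
	u64 total = 0;

	for_each_efi_memory_desc(m, md)
		if (md->type == EFI_CONVENTIONAL_MEMORY)
			total += md->num_pages << EFI_PAGE_SHIFT;

	return total;
}
```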
Diffstat (limited to 'arch/x86/include')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/include/asm/efi.h | 100 |
| -rw-r--r-- | arch/x86/include/asm/fpu-internal.h | 10 |

2 files changed, 39 insertions, 71 deletions
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0869434eaf7..1eb5f6433ad 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_EFI_H
 #define _ASM_X86_EFI_H
 
+#include <asm/i387.h>
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
  * with preserved alignment on virtual addresses starting from -4G down
@@ -27,91 +28,58 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
-#define efi_call_phys0(f)		efi_call_phys(f)
-#define efi_call_phys1(f, a1)		efi_call_phys(f, a1)
-#define efi_call_phys2(f, a1, a2)	efi_call_phys(f, a1, a2)
-#define efi_call_phys3(f, a1, a2, a3)	efi_call_phys(f, a1, a2, a3)
-#define efi_call_phys4(f, a1, a2, a3, a4)	\
-	efi_call_phys(f, a1, a2, a3, a4)
-#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
-	efi_call_phys(f, a1, a2, a3, a4, a5)
-#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call_phys(f, a1, a2, a3, a4, a5, a6)
 
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
  */
+
+/* Use this macro if your virtual returns a non-void value */
 #define efi_call_virt(f, args...) \
-	((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
-
-#define efi_call_virt0(f)		efi_call_virt(f)
-#define efi_call_virt1(f, a1)		efi_call_virt(f, a1)
-#define efi_call_virt2(f, a1, a2)	efi_call_virt(f, a1, a2)
-#define efi_call_virt3(f, a1, a2, a3)	efi_call_virt(f, a1, a2, a3)
-#define efi_call_virt4(f, a1, a2, a3, a4)	\
-	efi_call_virt(f, a1, a2, a3, a4)
-#define efi_call_virt5(f, a1, a2, a3, a4, a5)	\
-	efi_call_virt(f, a1, a2, a3, a4, a5)
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
+({									\
+	efi_status_t __s;						\
+	kernel_fpu_begin();						\
+	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
+		efi.systab->runtime->f)(args);				\
+	kernel_fpu_end();						\
+	__s;								\
+})
+
+/* Use this macro if your virtual call does not return any value */
+#define __efi_call_virt(f, args...) \
+({									\
+	kernel_fpu_begin();						\
+	((efi_##f##_t __attribute__((regparm(0)))*)			\
+		efi.systab->runtime->f)(args);				\
+	kernel_fpu_end();						\
+})
 
 #define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
-extern u64 efi_call0(void *fp);
-extern u64 efi_call1(void *fp, u64 arg1);
-extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
-extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
-extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
-extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
-		     u64 arg4, u64 arg5);
-extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
-		     u64 arg4, u64 arg5, u64 arg6);
-
-#define efi_call_phys0(f)			\
-	efi_call0((f))
-#define efi_call_phys1(f, a1)			\
-	efi_call1((f), (u64)(a1))
-#define efi_call_phys2(f, a1, a2)		\
-	efi_call2((f), (u64)(a1), (u64)(a2))
-#define efi_call_phys3(f, a1, a2, a3)		\
-	efi_call3((f), (u64)(a1), (u64)(a2), (u64)(a3))
-#define efi_call_phys4(f, a1, a2, a3, a4)	\
-	efi_call4((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
-		  (u64)(a4))
-#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
-	efi_call5((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
-		  (u64)(a4), (u64)(a5))
-#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
-		  (u64)(a4), (u64)(a5), (u64)(a6))
-
-#define _efi_call_virtX(x, f, ...)					\
+#define EFI_LOADER_SIGNATURE	"EL64"
+
+extern u64 asmlinkage efi_call(void *fp, ...);
+
+#define efi_call_phys(f, args...)		efi_call((f), args)
+
+#define efi_call_virt(f, ...)						\
 ({									\
 	efi_status_t __s;						\
 									\
 	efi_sync_low_kernel_mappings();					\
 	preempt_disable();						\
-	__s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__);	\
+	__kernel_fpu_begin();						\
+	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
+	__kernel_fpu_end();						\
 	preempt_enable();						\
 	__s;								\
 })
 
-#define efi_call_virt0(f)				\
-	_efi_call_virtX(0, f)
-#define efi_call_virt1(f, a1)				\
-	_efi_call_virtX(1, f, (u64)(a1))
-#define efi_call_virt2(f, a1, a2)			\
-	_efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
-#define efi_call_virt3(f, a1, a2, a3)			\
-	_efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
-#define efi_call_virt4(f, a1, a2, a3, a4)		\
-	_efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
-#define efi_call_virt5(f, a1, a2, a3, a4, a5)		\
-	_efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
-	_efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
+/*
+ * All X86_64 virt calls return non-void values. Thus, use non-void call for
+ * virt calls that would be void on X86_32.
+ */
+#define __efi_call_virt(f, args...) efi_call_virt(f, args)
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 				 u32 type, u64 attribute);
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index cea1c76d49b..115e3689cd5 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -87,22 +87,22 @@ static inline int is_x32_frame(void)
 
 static __always_inline __pure bool use_eager_fpu(void)
 {
-	return static_cpu_has(X86_FEATURE_EAGER_FPU);
+	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-	return static_cpu_has(X86_FEATURE_XSAVEOPT);
+	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
 {
-	return static_cpu_has(X86_FEATURE_XSAVE);
+	return static_cpu_has_safe(X86_FEATURE_XSAVE);
 }
 
 static __always_inline __pure bool use_fxsr(void)
 {
-	return static_cpu_has(X86_FEATURE_FXSR);
+	return static_cpu_has_safe(X86_FEATURE_FXSR);
 }
 
 static inline void fx_finit(struct i387_fxsave_struct *fx)
@@ -293,7 +293,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. "m" is a random variable that should be in L1 */
-	if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+	if (unlikely(static_cpu_has_safe(X86_FEATURE_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
 			"emms\n\t"
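
For context on how the reworked macros are consumed (not part of this arch/x86/include diff): callers use efi_call_virt() for services that return an efi_status_t and __efi_call_virt() for void services. A sketch modeled on the kernel's runtime wrappers:

```c
/*
 * Sketch of callers of the two macros, modeled on the kernel's runtime
 * wrappers elsewhere in the tree. efi_call_virt(f, ...) token-pastes
 * "f" into both the efi_##f##_t function type and the
 * efi.systab->runtime->f member, so only the bare member name is passed.
 */
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
	return efi_call_virt(get_time, tm, tc);	/* returns efi_status_t */
}

static void virt_efi_reset_system(int reset_type, efi_status_t status,
				  unsigned long data_size, efi_char16_t *data)
{
	/* void service: the non-returning variant */
	__efi_call_virt(reset_system, reset_type, status, data_size, data);
}
```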