author     Thomas Gleixner <tglx@linutronix.de>    2007-10-23 22:37:24 +0200
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-23 22:37:24 +0200
commit     be7baf80a699644850ff27c2105c171177ece4ea (patch)
tree       7dcdee42a6752eae6039cfca7f8c84c87136275e /include/asm-x86
parent     297a99e1a377f68e5c5bfef8eeafbd115f9fc2fa (diff)
x86: merge msr_32/64.h
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
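
The merged header keeps two self-contained implementations, selected by __i386__: the 32-bit side funnels every access through the native_* helpers so that CONFIG_PARAVIRT can hook them, while the 64-bit side keeps its direct asm macros. Callers see the same rdmsr/wrmsr family either way. Below is a minimal usage sketch, illustrative rather than part of the commit: MSR_EFER comes from <asm/msr-index.h>, and msr_demo is a made-up name.

/* Illustrative sketch, not part of the commit. */
#include <asm/msr.h>

static void msr_demo(void)
{
	u32 lo, hi;
	u64 val;

	rdmsr(MSR_EFER, lo, hi);	/* 64-bit MSR split into high/low halves */
	rdmsrl(MSR_EFER, val);		/* the same MSR read as one 64-bit value */

	/* the _safe variants return non-zero instead of faulting on
	   a non-existent MSR */
	if (!rdmsr_safe(MSR_EFER, &lo, &hi))
		wrmsr(MSR_EFER, lo, hi);	/* write it back unchanged */
}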
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/Kbuild   |   2
-rw-r--r--  include/asm-x86/msr.h    | 357
-rw-r--r--  include/asm-x86/msr_32.h | 161
-rw-r--r--  include/asm-x86/msr_64.h | 187
4 files changed, 347 insertions, 360 deletions
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index f8592607bed..5e4def7007e 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -14,8 +14,6 @@ header-y += vsyscall32.h
 unifdef-y += e820.h
 unifdef-y += ist.h
 unifdef-y += mce.h
-unifdef-y += msr_32.h
-unifdef-y += msr_64.h
 unifdef-y += msr.h
 unifdef-y += mtrr.h
 unifdef-y += page_32.h
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 2f87ce00700..ba4b3143212 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -1,13 +1,350 @@
+#ifndef __ASM_X86_MSR_H_
+#define __ASM_X86_MSR_H_
+
+#include <asm/msr-index.h>
+
+#ifdef __i386__
+
 #ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-#  include "msr_32.h"
-# else
-#  include "msr_64.h"
-# endif
+#ifndef __ASSEMBLY__
+
+#include <asm/errno.h>
+
+static inline unsigned long long native_read_msr(unsigned int msr)
+{
+	unsigned long long val;
+
+	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
+	return val;
+}
+
+static inline unsigned long long native_read_msr_safe(unsigned int msr,
+						      int *err)
+{
+	unsigned long long val;
+
+	asm volatile("2: rdmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=r" (*err), "=A" (val)
+		     : "c" (msr), "i" (-EFAULT));
+
+	return val;
+}
+
+static inline void native_write_msr(unsigned int msr, unsigned long long val)
+{
+	asm volatile("wrmsr" : : "c" (msr), "A"(val));
+}
+
+static inline int native_write_msr_safe(unsigned int msr,
+					unsigned long long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3: movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     " .align 4\n\t"
+		     " .long 2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+static inline unsigned long long native_read_tsc(void)
+{
+	unsigned long long val;
+	asm volatile("rdtsc" : "=A" (val));
+	return val;
+}
+
+static inline unsigned long long native_read_pmc(void)
+{
+	unsigned long long val;
+	asm volatile("rdpmc" : "=A" (val));
+	return val;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
 #else
-# ifdef __i386__
-#  include "msr_32.h"
-# else
-#  include "msr_64.h"
-# endif
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2)				\
+	do {						\
+		u64 __val = native_read_msr(msr);	\
+		(val1) = (u32)__val;			\
+		(val2) = (u32)(__val >> 32);		\
+	} while(0)
+
+static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
+{
+	native_write_msr(__msr, ((u64)__high << 32) | __low);
+}
+
+#define rdmsrl(msr,val)				\
+	((val) = native_read_msr(msr))
+
+#define wrmsrl(msr,val) native_write_msr(msr, val)
+
+/* wrmsr with exception handling */
+static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
+{
+	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
+}
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,p1,p2)					\
+	({							\
+		int __err;					\
+		u64 __val = native_read_msr_safe(msr, &__err);	\
+		(*p1) = (u32)__val;				\
+		(*p2) = (u32)(__val >> 32);			\
+		__err;						\
+	})
+
+#define rdtscl(low)				\
+	((low) = (u32)native_read_tsc())
+
+#define rdtscll(val)				\
+	((val) = native_read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high)			\
+	do {					\
+		u64 _l = native_read_pmc();	\
+		(low) = (u32)_l;		\
+		(high) = (u32)(_l >> 32);	\
+	} while(0)
+#endif /* !CONFIG_PARAVIRT */
+
+#ifdef CONFIG_SMP
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+#else /* CONFIG_SMP */
+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	rdmsr(msr_no, *l, *h);
+}
+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	wrmsr(msr_no, l, h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return wrmsr_safe(msr_no, l, h);
+}
+#endif /* CONFIG_SMP */
+#endif /* ! __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#else /* __i386__ */
+
+#ifndef __ASSEMBLY__
+#include <linux/errno.h>
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection), this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+	__asm__ __volatile__("rdmsr" \
+			     : "=a" (val1), "=d" (val2) \
+			     : "c" (msr))
+
+
+#define rdmsrl(msr,val) do { unsigned long a__,b__; \
+	__asm__ __volatile__("rdmsr" \
+			     : "=a" (a__), "=d" (b__) \
+			     : "c" (msr)); \
+	val = a__ | (b__<<32); \
+} while(0)
+
+#define wrmsr(msr,val1,val2) \
+	__asm__ __volatile__("wrmsr" \
+			     : /* no outputs */ \
+			     : "c" (msr), "a" (val1), "d" (val2))
+
+#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
+
+/* wrmsr with exception handling */
+#define wrmsr_safe(msr,a,b) ({ int ret__;			\
+	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
+		     "1:\n\t"					\
+		     ".section .fixup,\"ax\"\n\t"		\
+		     "3: movl %4,%0 ; jmp 1b\n\t"		\
+		     ".previous\n\t"				\
+		     ".section __ex_table,\"a\"\n"		\
+		     " .align 8\n\t"				\
+		     " .quad 2b,3b\n\t"				\
+		     ".previous"				\
+		     : "=a" (ret__)				\
+		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
+	ret__; })
+
+#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
+
+#define rdmsr_safe(msr,a,b) \
+	({ int ret__;						\
+	   asm volatile ("1: rdmsr\n"				\
+			 "2:\n"					\
+			 ".section .fixup,\"ax\"\n"		\
+			 "3: movl %4,%0\n"			\
+			 " jmp 2b\n"				\
+			 ".previous\n"				\
+			 ".section __ex_table,\"a\"\n"		\
+			 " .align 8\n"				\
+			 " .quad 1b,3b\n"			\
+			 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
+			 :"c"(msr), "i"(-EIO), "0"(0));		\
+	   ret__; })
+
+#define rdtsc(low,high) \
+	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscp(low,high,aux) \
+	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
+
+#define rdtscll(val) do { \
+	unsigned int __a,__d; \
+	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
+} while(0)
+
+#define rdtscpll(val, aux) do { \
+	unsigned long __a, __d; \
+	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
+	(val) = (__d << 32) | __a; \
+} while (0)
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
+
+#define rdpmc(counter,low,high) \
+	__asm__ __volatile__("rdpmc" \
+			     : "=a" (low), "=d" (high) \
+			     : "c" (counter))
+
+static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
+			 unsigned int *ecx, unsigned int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+			       int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op), "c" (count));
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+	unsigned int eax;
+
+	__asm__("cpuid"
+		: "=a" (eax)
+		: "0" (op)
+		: "bx", "cx", "dx");
+	return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+	unsigned int eax, ebx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=b" (ebx)
+		: "0" (op)
+		: "cx", "dx" );
+	return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+	unsigned int eax, ecx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=c" (ecx)
+		: "0" (op)
+		: "bx", "dx" );
+	return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+	unsigned int eax, edx;
+
+	__asm__("cpuid"
+		: "=a" (eax), "=d" (edx)
+		: "0" (op)
+		: "bx", "cx");
+	return edx;
+}
+
+#ifdef CONFIG_SMP
+void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+#else /* CONFIG_SMP */
+static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	rdmsr(msr_no, *l, *h);
+}
+static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	wrmsr(msr_no, l, h);
+}
+static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+{
+	return rdmsr_safe(msr_no, l, h);
+}
+static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+{
+	return wrmsr_safe(msr_no, l, h);
+}
+#endif /* CONFIG_SMP */
+#endif /* __ASSEMBLY__ */
+
+#endif /* !__i386__ */
+
 #endif
diff --git a/include/asm-x86/msr_32.h b/include/asm-x86/msr_32.h
deleted file mode 100644
index df21ea04936..00000000000
--- a/include/asm-x86/msr_32.h
+++ /dev/null
@@ -1,161 +0,0 @@
-#ifndef __ASM_MSR_H
-#define __ASM_MSR_H
-
-#include <asm/msr-index.h>
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <asm/errno.h>
-
-static inline unsigned long long native_read_msr(unsigned int msr)
-{
-	unsigned long long val;
-
-	asm volatile("rdmsr" : "=A" (val) : "c" (msr));
-	return val;
-}
-
-static inline unsigned long long native_read_msr_safe(unsigned int msr,
-						      int *err)
-{
-	unsigned long long val;
-
-	asm volatile("2: rdmsr ; xorl %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: movl %3,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     ".section __ex_table,\"a\"\n"
-		     " .align 4\n\t"
-		     " .long 2b,3b\n\t"
-		     ".previous"
-		     : "=r" (*err), "=A" (val)
-		     : "c" (msr), "i" (-EFAULT));
-
-	return val;
-}
-
-static inline void native_write_msr(unsigned int msr, unsigned long long val)
-{
-	asm volatile("wrmsr" : : "c" (msr), "A"(val));
-}
-
-static inline int native_write_msr_safe(unsigned int msr,
-					unsigned long long val)
-{
-	int err;
-	asm volatile("2: wrmsr ; xorl %0,%0\n"
-		     "1:\n\t"
-		     ".section .fixup,\"ax\"\n\t"
-		     "3: movl %4,%0 ; jmp 1b\n\t"
-		     ".previous\n\t"
-		     ".section __ex_table,\"a\"\n"
-		     " .align 4\n\t"
-		     " .long 2b,3b\n\t"
-		     ".previous"
-		     : "=a" (err)
-		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
-		       "i" (-EFAULT));
-	return err;
-}
-
-static inline unsigned long long native_read_tsc(void)
-{
-	unsigned long long val;
-	asm volatile("rdtsc" : "=A" (val));
-	return val;
-}
-
-static inline unsigned long long native_read_pmc(void)
-{
-	unsigned long long val;
-	asm volatile("rdpmc" : "=A" (val));
-	return val;
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2)				\
-	do {						\
-		u64 __val = native_read_msr(msr);	\
-		(val1) = (u32)__val;			\
-		(val2) = (u32)(__val >> 32);		\
-	} while(0)
-
-static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
-{
-	native_write_msr(__msr, ((u64)__high << 32) | __low);
-}
-
-#define rdmsrl(msr,val)				\
-	((val) = native_read_msr(msr))
-
-#define wrmsrl(msr,val) native_write_msr(msr, val)
-
-/* wrmsr with exception handling */
-static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
-{
-	return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
-}
-
-/* rdmsr with exception handling */
-#define rdmsr_safe(msr,p1,p2)					\
-	({							\
-		int __err;					\
-		u64 __val = native_read_msr_safe(msr, &__err);	\
-		(*p1) = (u32)__val;				\
-		(*p2) = (u32)(__val >> 32);			\
-		__err;						\
-	})
-
-#define rdtscl(low)				\
-	((low) = (u32)native_read_tsc())
-
-#define rdtscll(val)				\
-	((val) = native_read_tsc())
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define rdpmc(counter,low,high)			\
-	do {					\
-		u64 _l = native_read_pmc();	\
-		(low) = (u32)_l;		\
-		(high) = (u32)(_l >> 32);	\
-	} while(0)
-#endif /* !CONFIG_PARAVIRT */
-
-#ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-#else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	rdmsr(msr_no, *l, *h);
-}
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	wrmsr(msr_no, l, h);
-}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	return rdmsr_safe(msr_no, l, h);
-}
-static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	return wrmsr_safe(msr_no, l, h);
-}
-#endif /* CONFIG_SMP */
-#endif
-#endif
-#endif /* __ASM_MSR_H */
diff --git a/include/asm-x86/msr_64.h b/include/asm-x86/msr_64.h
deleted file mode 100644
index d5c55b80da5..00000000000
--- a/include/asm-x86/msr_64.h
+++ /dev/null
@@ -1,187 +0,0 @@
-#ifndef X86_64_MSR_H
-#define X86_64_MSR_H 1
-
-#include <asm/msr-index.h>
-
-#ifndef __ASSEMBLY__
-#include <linux/errno.h>
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-
-#define rdmsr(msr,val1,val2) \
-	__asm__ __volatile__("rdmsr" \
-			     : "=a" (val1), "=d" (val2) \
-			     : "c" (msr))
-
-
-#define rdmsrl(msr,val) do { unsigned long a__,b__; \
-	__asm__ __volatile__("rdmsr" \
-			     : "=a" (a__), "=d" (b__) \
-			     : "c" (msr)); \
-	val = a__ | (b__<<32); \
-} while(0)
-
-#define wrmsr(msr,val1,val2) \
-	__asm__ __volatile__("wrmsr" \
-			     : /* no outputs */ \
-			     : "c" (msr), "a" (val1), "d" (val2))
-
-#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)
-
-/* wrmsr with exception handling */
-#define wrmsr_safe(msr,a,b) ({ int ret__;			\
-	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
-		     "1:\n\t"					\
-		     ".section .fixup,\"ax\"\n\t"		\
-		     "3: movl %4,%0 ; jmp 1b\n\t"		\
-		     ".previous\n\t"				\
-		     ".section __ex_table,\"a\"\n"		\
-		     " .align 8\n\t"				\
-		     " .quad 2b,3b\n\t"				\
-		     ".previous"				\
-		     : "=a" (ret__)				\
-		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
-	ret__; })
-
-#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))
-
-#define rdmsr_safe(msr,a,b) \
-	({ int ret__;						\
-	   asm volatile ("1: rdmsr\n"				\
-			 "2:\n"					\
-			 ".section .fixup,\"ax\"\n"		\
-			 "3: movl %4,%0\n"			\
-			 " jmp 2b\n"				\
-			 ".previous\n"				\
-			 ".section __ex_table,\"a\"\n"		\
-			 " .align 8\n"				\
-			 " .quad 1b,3b\n"			\
-			 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b)) \
-			 :"c"(msr), "i"(-EIO), "0"(0));		\
-	   ret__; })
-
-#define rdtsc(low,high) \
-	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdtscl(low) \
-	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
-
-#define rdtscp(low,high,aux) \
-	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))
-
-#define rdtscll(val) do { \
-	unsigned int __a,__d; \
-	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
-	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
-} while(0)
-
-#define rdtscpll(val, aux) do { \
-	unsigned long __a, __d; \
-	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
-	(val) = (__d << 32) | __a; \
-} while (0)
-
-#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
-
-#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)
-
-#define rdpmc(counter,low,high) \
-	__asm__ __volatile__("rdpmc" \
-			     : "=a" (low), "=d" (high) \
-			     : "c" (counter))
-
-static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
-			 unsigned int *ecx, unsigned int *edx)
-{
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op));
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
-			       int *edx)
-{
-	__asm__("cpuid"
-		: "=a" (*eax),
-		  "=b" (*ebx),
-		  "=c" (*ecx),
-		  "=d" (*edx)
-		: "0" (op), "c" (count));
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
-	unsigned int eax;
-
-	__asm__("cpuid"
-		: "=a" (eax)
-		: "0" (op)
-		: "bx", "cx", "dx");
-	return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
-	unsigned int eax, ebx;
-
-	__asm__("cpuid"
-		: "=a" (eax), "=b" (ebx)
-		: "0" (op)
-		: "cx", "dx" );
-	return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
-	unsigned int eax, ecx;
-
-	__asm__("cpuid"
-		: "=a" (eax), "=c" (ecx)
-		: "0" (op)
-		: "bx", "dx" );
-	return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
-	unsigned int eax, edx;
-
-	__asm__("cpuid"
-		: "=a" (eax), "=d" (edx)
-		: "0" (op)
-		: "bx", "cx");
-	return edx;
-}
-
-#ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
-#else /* CONFIG_SMP */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	rdmsr(msr_no, *l, *h);
-}
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	wrmsr(msr_no, l, h);
-}
-static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
-{
-	return rdmsr_safe(msr_no, l, h);
-}
-static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
-{
-	return wrmsr_safe(msr_no, l, h);
-}
-#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
-#endif /* X86_64_MSR_H */
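
Both halves also declare the same cross-CPU wrappers. Under CONFIG_SMP they are implemented out of line; on uniprocessor builds they collapse to the local accessors, as the inline stubs in the diff show. A short caller sketch, again illustrative only, with bump_msr_lo a hypothetical name:

/* Illustrative sketch, not part of the commit. */
static int bump_msr_lo(unsigned int cpu, u32 msr_no)
{
	u32 lo, hi;
	int err;

	/* read the MSR on the target CPU; non-zero means the read faulted */
	err = rdmsr_safe_on_cpu(cpu, msr_no, &lo, &hi);
	if (err)
		return err;

	/* write the incremented low half back on that CPU */
	return wrmsr_safe_on_cpu(cpu, msr_no, lo + 1, hi);
}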