Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/desc.h            |  4
-rw-r--r--  include/asm-x86/geode.h           |  4
-rw-r--r--  include/asm-x86/i387.h            |  2
-rw-r--r--  include/asm-x86/kvm_host.h        |  4
-rw-r--r--  include/asm-x86/kvm_para.h        | 33
-rw-r--r--  include/asm-x86/kvm_x86_emulate.h |  1
-rw-r--r--  include/asm-x86/msr.h             |  2
-rw-r--r--  include/asm-x86/page.h            | 15
-rw-r--r--  include/asm-x86/page_32.h         |  3
-rw-r--r--  include/asm-x86/pgtable-3level.h  |  6
-rw-r--r--  include/asm-x86/pgtable.h         |  3
-rw-r--r--  include/asm-x86/pgtable_32.h      |  4
-rw-r--r--  include/asm-x86/pgtable_64.h      | 12
-rw-r--r--  include/asm-x86/pvclock-abi.h     | 42
-rw-r--r--  include/asm-x86/pvclock.h         | 13
-rw-r--r--  include/asm-x86/tlbflush.h        | 13
-rw-r--r--  include/asm-x86/xen/page.h        |  6
17 files changed, 114 insertions, 53 deletions
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index 268a012bcd7..28bddbcb38b 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -192,8 +192,8 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
                unsigned cpu = smp_processor_id();
                ldt_desc ldt;

-               set_tssldt_descriptor(&ldt, (unsigned long)addr,
-                                     DESC_LDT, entries * sizeof(ldt) - 1);
+               set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
+                                     entries * LDT_ENTRY_SIZE - 1);
                write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
                                &ldt, DESC_LDT);
                asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index 6e6458853a3..bb06027fc83 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -112,8 +112,8 @@ extern int geode_get_dev_base(unsigned int dev);
 #define VSA_VR_UNLOCK          0xFC53          /* unlock virtual register */
 #define VSA_VR_SIGNATURE       0x0003
 #define VSA_VR_MEM_SIZE        0x0200
-#define VSA_SIG                0x4132          /* signature is ascii 'VSA2' */
-
+#define AMD_VSA_SIG            0x4132          /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG            0x534d          /* General Software signature */

 /* GPIO */
 #define GPIO_OUTPUT_VAL        0x00
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 6b722d31593..37672f79dcc 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -193,6 +193,8 @@ static inline int restore_i387(struct _fpstate __user *buf)

 #else  /* CONFIG_X86_32 */

+extern void finit(void);
+
 static inline void tolerant_fwait(void)
 {
        asm volatile("fnclex ; fwait");
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 1d8cd01fa51..844f2a89afb 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -18,6 +18,7 @@
 #include <linux/kvm_para.h>
 #include <linux/kvm_types.h>

+#include <asm/pvclock-abi.h>
 #include <asm/desc.h>

 #define KVM_MAX_VCPUS 16
@@ -282,7 +283,8 @@ struct kvm_vcpu_arch {
        struct x86_emulate_ctxt emulate_ctxt;

        gpa_t time;
-       struct kvm_vcpu_time_info hv_clock;
+       struct pvclock_vcpu_time_info hv_clock;
+       unsigned int hv_clock_tsc_khz;
        unsigned int time_offset;
        struct page *time_page;
 };
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index 50984594207..76f392146da 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -48,24 +48,6 @@ struct kvm_mmu_op_release_pt {
 #ifdef __KERNEL__
 #include <asm/processor.h>

-/* xen binary-compatible interface. See xen headers for details */
-struct kvm_vcpu_time_info {
-       uint32_t version;
-       uint32_t pad0;
-       uint64_t tsc_timestamp;
-       uint64_t system_time;
-       uint32_t tsc_to_system_mul;
-       int8_t   tsc_shift;
-       int8_t   pad[3];
-} __attribute__((__packed__)); /* 32 bytes */
-
-struct kvm_wall_clock {
-       uint32_t wc_version;
-       uint32_t wc_sec;
-       uint32_t wc_nsec;
-} __attribute__((__packed__));
-
-
 extern void kvmclock_init(void);


@@ -89,7 +71,8 @@ static inline long kvm_hypercall0(unsigned int nr)
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
-                    : "a"(nr));
+                    : "a"(nr)
+                    : "memory");
        return ret;
 }

@@ -98,7 +81,8 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
-                    : "a"(nr), "b"(p1));
+                    : "a"(nr), "b"(p1)
+                    : "memory");
        return ret;
 }

@@ -108,7 +92,8 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
-                    : "a"(nr), "b"(p1), "c"(p2));
+                    : "a"(nr), "b"(p1), "c"(p2)
+                    : "memory");
        return ret;
 }

@@ -118,7 +103,8 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
-                    : "a"(nr), "b"(p1), "c"(p2), "d"(p3));
+                    : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+                    : "memory");
        return ret;
 }

@@ -129,7 +115,8 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
        long ret;
        asm volatile(KVM_HYPERCALL
                     : "=a"(ret)
-                    : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4));
+                    : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
+                    : "memory");
        return ret;
 }
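
The kvm_para.h hunks above add a "memory" clobber to every kvm_hypercall*() wrapper: the hypervisor may read or write guest memory on behalf of the call, so the compiler has to flush pending stores before the instruction and discard cached memory values afterwards. A minimal stand-alone sketch of the same pattern (the wrapper and macro names are illustrative; the opcode is the VMCALL encoding that KVM_HYPERCALL expands to):

/* Sketch only: shows why the "memory" clobber matters for a hypercall
 * wrapper.  Executing VMCALL outside a KVM guest simply faults.
 */
#define DEMO_HYPERCALL ".byte 0x0f,0x01,0xc1"   /* vmcall */

static inline long demo_hypercall1(unsigned int nr, unsigned long p1)
{
        long ret;

        /* Without "memory", the compiler may keep an earlier store
         * (e.g. a request buffer the hypervisor will read) in a
         * register, or reorder loads and stores across the hypercall.
         */
        asm volatile(DEMO_HYPERCALL
                     : "=a"(ret)
                     : "a"(nr), "b"(p1)
                     : "memory");
        return ret;
}
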
diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h
index d6337f941c9..b877bbd2d3a 100644
--- a/include/asm-x86/kvm_x86_emulate.h
+++ b/include/asm-x86/kvm_x86_emulate.h
@@ -135,6 +135,7 @@ struct decode_cache {
        u8 modrm_rm;
        u8 use_modrm_ea;
        unsigned long modrm_ea;
+       void *modrm_ptr;
        unsigned long modrm_val;
        struct fetch_cache fetch;
 };
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 3707650a169..2b5f2c91db2 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -18,7 +18,7 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
        unsigned long low, high;
        asm volatile(".byte 0x0f,0x01,0xf9"
                     : "=a" (low), "=d" (high), "=c" (*aux));
-       return low | ((u64)high >> 32);
+       return low | ((u64)high << 32);
 }

 /*
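
The msr.h hunk is a plain operator fix: RDTSCP returns the low half of the TSC in EAX and the high half in EDX, so the halves must be joined with a left shift; `(u64)high >> 32` always evaluates to zero and silently truncated the result to 32 bits. A hypothetical user-space illustration of the corrected combining step (names are not from the kernel):

#include <stdint.h>

/* Join the EDX:EAX halves of a 64-bit counter, as the fixed
 * native_read_tscp() now does with "low | ((u64)high << 32)".
 */
static inline uint64_t demo_combine_tsc(uint32_t low, uint32_t high)
{
        /* With ">> 32" the high word is shifted away and the result
         * collapses to just the low 32 bits.
         */
        return (uint64_t)low | ((uint64_t)high << 32);
}
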
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index b381f4a5a0b..dc936dddf16 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -10,8 +10,16 @@

 #ifdef __KERNEL__

-#define PHYSICAL_PAGE_MASK     (PAGE_MASK & __PHYSICAL_MASK)
-#define PTE_MASK               (_AT(long, PHYSICAL_PAGE_MASK))
+#define __PHYSICAL_MASK        ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK         ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+   virtual addresses are 32-bits but physical addresses are larger
+   (ie, 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK     (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+
+/* PTE_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+#define PTE_MASK               ((pteval_t)PHYSICAL_PAGE_MASK)

 #define PMD_PAGE_SIZE          (_AC(1, UL) << PMD_SHIFT)
 #define PMD_PAGE_MASK          (~(PMD_PAGE_SIZE-1))
@@ -24,9 +32,6 @@
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)       (((addr)+PAGE_SIZE-1)&PAGE_MASK)

-#define __PHYSICAL_MASK        _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK         ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
-
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #endif
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index 424e82f8ae2..ccf0ba3c3ab 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -14,7 +14,8 @@
 #define __PAGE_OFFSET          _AC(CONFIG_PAGE_OFFSET, UL)

 #ifdef CONFIG_X86_PAE
-#define __PHYSICAL_MASK_SHIFT  36
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT  44
 #define __VIRTUAL_MASK_SHIFT   32
 #define PAGETABLE_LEVELS       3

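
The new PHYSICAL_PAGE_MASK comment is easiest to see with numbers. On a 32-bit PAE kernel, PAGE_MASK is a 32-bit value (0xfffff000) while physical addresses now reach __PHYSICAL_MASK_SHIFT = 44 bits; widening the mask as an unsigned quantity would zero the upper physical bits, whereas going through a signed type sign-extends it first. A small stand-alone illustration (plain C, variable names invented for the example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t page_mask = 0xfffff000u;        /* PAGE_MASK on a 32-bit build */
        uint64_t phys_mask = (1ULL << 44) - 1;   /* __PHYSICAL_MASK with shift == 44 */

        /* Unsigned widening zero-extends the mask... */
        uint64_t zero_ext = (uint64_t)page_mask & phys_mask;
        /* ...while a signed intermediate sign-extends it, which is what
         * (((signed long)PAGE_MASK) & __PHYSICAL_MASK) relies on.
         */
        uint64_t sign_ext = (uint64_t)(int64_t)(int32_t)page_mask & phys_mask;

        printf("zero-extended: %#llx\n", (unsigned long long)zero_ext); /* 0xfffff000 */
        printf("sign-extended: %#llx\n", (unsigned long long)sign_ext); /* 0xffffffff000 */
        return 0;
}
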
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 8b4a9d44b7f..c93dbb6c262 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -120,9 +120,9 @@ static inline void pud_clear(pud_t *pudp)
                write_cr3(pgd);
 }

-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_MASK))

-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_MASK))


 /* Find an entry in the second-level page table.. */
@@ -160,7 +160,7 @@ static inline int pte_none(pte_t pte)

 static inline unsigned long pte_pfn(pte_t pte)
 {
-       return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
+       return (pte_val(pte) & PTE_MASK) >> PAGE_SHIFT;
 }

 /*
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 55c3a0e3a8c..97c271b2910 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -57,6 +57,7 @@
 #define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
                         _PAGE_DIRTY)

+/* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_PCD | _PAGE_PWT |             \
                         _PAGE_ACCESSED | _PAGE_DIRTY)

@@ -304,7 +305,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
        return __pgprot(preservebits | addbits);
 }

-#define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
+#define pte_pgprot(x) __pgprot(pte_val(x) & ~PTE_MASK)

 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index d7f0403bbec..32ca03109a4 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -88,7 +88,7 @@ extern unsigned long pg0[];
 /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
 #define pmd_none(x)    (!(unsigned long)pmd_val((x)))
 #define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pmd_bad(x)     ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define pmd_bad(x)     ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

@@ -159,7 +159,7 @@ static inline int pud_large(pud_t pud) { return 0; }
 #define pmd_page(pmd)  (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))

 #define pmd_page_vaddr(pmd)                                    \
-       ((unsigned long)__va(pmd_val((pmd)) & PAGE_MASK))
+       ((unsigned long)__va(pmd_val((pmd)) & PTE_MASK))

 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)                           \
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index efe83dcbd41..1cc50d22d73 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -151,19 +151,19 @@ static inline void native_pgd_clear(pgd_t *pgd)

 #ifndef __ASSEMBLY__

-static inline unsigned long pgd_bad(pgd_t pgd)
+static inline int pgd_bad(pgd_t pgd)
 {
-       return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+       return (pgd_val(pgd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
 }

-static inline unsigned long pud_bad(pud_t pud)
+static inline int pud_bad(pud_t pud)
 {
-       return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+       return (pud_val(pud) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
 }

-static inline unsigned long pmd_bad(pmd_t pmd)
+static inline int pmd_bad(pmd_t pmd)
 {
-       return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+       return (pmd_val(pmd) & ~(PTE_MASK | _PAGE_USER)) != _KERNPG_TABLE;
 }

 #define pte_none(x)    (!pte_val((x)))
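
Several hunks above switch PFN extraction and page-table sanity checks from PAGE_MASK or ~_PAGE_NX over to PTE_MASK. Under PAE a pte/pmd/pud value is 64 bits wide, so the 32-bit PAGE_MASK would drop the upper physical-address bits, while PTE_MASK (as redefined in page.h above) covers exactly the frame-number bits. A hypothetical worked example with stand-in constants (the DEMO_* names and the pte value are invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PAGE_MASK_32 ((uint32_t)0xfffff000u)               /* 32-bit PAGE_MASK */
#define DEMO_PTE_MASK     (((1ULL << 44) - 1) & ~0xfffULL)      /* PFN bits for a 44-bit mask */
#define DEMO_PAGE_NX      (1ULL << 63)

int main(void)
{
        /* A PAE pte: frame at physical 0x12345678000, present+rw+NX set. */
        uint64_t pte = 0x12345678000ULL | 0x3ULL | DEMO_PAGE_NX;

        printf("& PAGE_MASK : %#llx\n",         /* 0x45678 - high physical bits lost */
               (unsigned long long)((pte & DEMO_PAGE_MASK_32) >> DEMO_PAGE_SHIFT));
        printf("& ~_PAGE_NX : %#llx\n",         /* 0x12345678, but only the NX bit is stripped */
               (unsigned long long)((pte & ~DEMO_PAGE_NX) >> DEMO_PAGE_SHIFT));
        printf("& PTE_MASK  : %#llx\n",         /* 0x12345678 - exactly the pfn */
               (unsigned long long)((pte & DEMO_PTE_MASK) >> DEMO_PAGE_SHIFT));
        return 0;
}
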
diff --git a/include/asm-x86/pvclock-abi.h b/include/asm-x86/pvclock-abi.h
new file mode 100644
index 00000000000..6857f840b24
--- /dev/null
+++ b/include/asm-x86/pvclock-abi.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_X86_PVCLOCK_ABI_H_
+#define _ASM_X86_PVCLOCK_ABI_H_
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time.  There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done.  Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+	u32   version;
+	u32   pad0;
+	u64   tsc_timestamp;
+	u64   system_time;
+	u32   tsc_to_system_mul;
+	s8    tsc_shift;
+	u8    pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+	u32   version;
+	u32   sec;
+	u32   nsec;
+} __attribute__((__packed__));
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PVCLOCK_ABI_H_ */
diff --git a/include/asm-x86/pvclock.h b/include/asm-x86/pvclock.h
new file mode 100644
index 00000000000..85b1bba8e0a
--- /dev/null
+++ b/include/asm-x86/pvclock.h
@@ -0,0 +1,13 @@
+#ifndef _ASM_X86_PVCLOCK_H_
+#define _ASM_X86_PVCLOCK_H_
+
+#include <linux/clocksource.h>
+#include <asm/pvclock-abi.h>
+
+/* some helper functions for xen and kvm pv clock sources */
+cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
+			    struct pvclock_vcpu_time_info *vcpu,
+			    struct timespec *ts);
+
+#endif /* _ASM_X86_PVCLOCK_H_ */
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index 0c0674d9425..35c76ceb9f4 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -22,12 +22,23 @@ static inline void __native_flush_tlb(void)

 static inline void __native_flush_tlb_global(void)
 {
-       unsigned long cr4 = read_cr4();
+       unsigned long flags;
+       unsigned long cr4;

+       /*
+        * Read-modify-write to CR4 - protect it from preemption and
+        * from interrupts. (Use the raw variant because this code can
+        * be called from deep inside debugging code.)
+        */
+       raw_local_irq_save(flags);
+
+       cr4 = read_cr4();
        /* clear PGE */
        write_cr4(cr4 & ~X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        write_cr4(cr4);
+
+       raw_local_irq_restore(flags);
 }

 static inline void __native_flush_tlb_single(unsigned long addr)
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
index 01799305f02..e11f24038b1 100644
--- a/include/asm-x86/xen/page.h
+++ b/include/asm-x86/xen/page.h
@@ -127,7 +127,7 @@ static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)

 static inline unsigned long pte_mfn(pte_t pte)
 {
-       return (pte.pte & ~_PAGE_NX) >> PAGE_SHIFT;
+       return (pte.pte & PTE_MASK) >> PAGE_SHIFT;
 }

 static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
@@ -150,13 +150,9 @@ static inline pte_t __pte_ma(pteval_t x)
        return (pte_t) { .pte = x };
 }

-#ifdef CONFIG_X86_PAE
 #define pmd_val_ma(v) ((v).pmd)
 #define pud_val_ma(v) ((v).pgd.pgd)
 #define __pmd_ma(x)    ((pmd_t) { (x) } )
-#else  /* !X86_PAE */
-#define pmd_val_ma(v) ((v).pud.pgd.pgd)
-#endif /* CONFIG_X86_PAE */

 #define pgd_val_ma(x)  ((x).pgd)

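
The comment block in pvclock-abi.h describes a seqlock-style protocol: the hypervisor makes the version field odd while it rewrites the record and even again when it is done, so a guest reader retries until it sees the same even version before and after copying the fields. The helpers declared in pvclock.h do this inside the kernel; the sketch below only illustrates the retry loop, assuming the struct layout added above plus the kernel's u32 type and rmb() read barrier:

/* Illustrative reader for struct pvclock_vcpu_time_info; not the
 * kernel's implementation behind pvclock_clocksource_read().
 */
static inline void
pvclock_demo_snapshot(const struct pvclock_vcpu_time_info *src,
                      struct pvclock_vcpu_time_info *dst)
{
        u32 version;

        do {
                version = src->version;
                rmb();          /* read the version before the payload */
                *dst = *src;
                rmb();          /* read the payload before re-checking */
        } while ((version & 1) ||               /* odd: update in progress */
                 version != src->version);      /* changed: torn read */

        dst->version = version;
}

A clocksource read built on such a snapshot then scales the TSC delta since tsc_timestamp by tsc_to_system_mul and tsc_shift and adds system_time to obtain nanoseconds.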