author | H. Peter Anvin <hpa@zytor.com> | 2009-05-23 16:42:19 -0700
---|---|---
committer | H. Peter Anvin <hpa@zytor.com> | 2009-05-23 16:42:19 -0700
commit | ee0736627d3347be0be2769fa7b26431f9726c9d (patch) |
tree | 203e2204daaec4cf005463fdf2c7bf380d6eef36 /arch/x86/include |
parent | cf9972a921470b0a2da7906104bcd540b20e33bf (diff) |
parent | 0af48f42df15b97080b450d24219dd95db7b929a (diff) |
Merge branch 'x86/urgent' into x86/setup
Resolved conflicts:
arch/x86/boot/memory.c
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/include')
-rw-r--r-- | arch/x86/include/asm/desc.h | 2
-rw-r--r-- | arch/x86/include/asm/fixmap.h | 4
-rw-r--r-- | arch/x86/include/asm/hardirq.h | 2
-rw-r--r-- | arch/x86/include/asm/io.h | 6
-rw-r--r-- | arch/x86/include/asm/lguest_hcall.h | 2
-rw-r--r-- | arch/x86/include/asm/mce.h | 1
-rw-r--r-- | arch/x86/include/asm/paravirt.h | 6
-rw-r--r-- | arch/x86/include/asm/pat.h | 4
-rw-r--r-- | arch/x86/include/asm/percpu.h | 10
-rw-r--r-- | arch/x86/include/asm/processor.h | 12
-rw-r--r-- | arch/x86/include/asm/ptrace.h | 7
-rw-r--r-- | arch/x86/include/asm/required-features.h | 2
-rw-r--r-- | arch/x86/include/asm/sigcontext.h | 6
-rw-r--r-- | arch/x86/include/asm/spinlock.h | 4
-rw-r--r-- | arch/x86/include/asm/tlbflush.h | 2
-rw-r--r-- | arch/x86/include/asm/topology.h | 2
-rw-r--r-- | arch/x86/include/asm/uv/uv_mmrs.h | 5
-rw-r--r-- | arch/x86/include/asm/xen/page.h | 3
-rw-r--r-- | arch/x86/include/asm/xsave.h | 3
19 files changed, 51 insertions, 32 deletions
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b..c45f415ce31 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
 	struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 81937a5dc77..2d81af3974a 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -151,11 +151,11 @@ extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
-		       unsigned long phys, pgprot_t flags);
+		       phys_addr_t phys, pgprot_t flags);
 
 #ifndef CONFIG_PARAVIRT
 static inline void __set_fixmap(enum fixed_addresses idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	native_set_fixmap(idx, phys, flags);
 }
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 039db6aa8e0..37555e52f98 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index e5383e3d2f8..73739322b6d 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -193,8 +193,10 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  */
 extern void early_ioremap_init(void);
 extern void early_ioremap_reset(void);
-extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
-extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+				   unsigned long size);
+extern void __iomem *early_memremap(resource_size_t phys_addr,
+				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 
 #define IO_SPACE_LIMIT 0xffff
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index 0f4ee7148af..faae1996487 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -5,7 +5,6 @@
 #define LHCALL_FLUSH_ASYNC	0
 #define LHCALL_LGUEST_INIT	1
 #define LHCALL_SHUTDOWN		2
-#define LHCALL_LOAD_GDT		3
 #define LHCALL_NEW_PGTABLE	4
 #define LHCALL_FLUSH_TLB	5
 #define LHCALL_LOAD_IDT_ENTRY	6
@@ -17,6 +16,7 @@
 #define LHCALL_SET_PMD		15
 #define LHCALL_LOAD_TLS		16
 #define LHCALL_NOTIFY		17
+#define LHCALL_LOAD_GDT_ENTRY	18
 
 #define LGUEST_TRAP_ENTRY	0x1F
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 563933e06a3..4f8c199584e 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -137,6 +137,7 @@ DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
 enum mcp_flags {
 	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
 	MCP_UC = (1 << 1),		/* log uncorrected errors */
+	MCP_DONTLOG = (1 << 2),		/* only clear, don't log */
 };
 extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 7727aa8b7dd..a53da004e08 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -347,7 +347,7 @@ struct pv_mmu_ops {
 	/* Sometimes the physical address is a pfn, and sometimes its
 	   an mfn.  We can tell which is which from the index. */
 	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   unsigned long phys, pgprot_t flags);
+			   phys_addr_t phys, pgprot_t flags);
 };
 
 struct raw_spinlock;
@@ -1432,7 +1432,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
-				unsigned long phys, pgprot_t flags)
+				phys_addr_t phys, pgprot_t flags)
 {
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
@@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 2cd07b9422f..7af14e512f9 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
 
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-			struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-			struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d0..02ecb30982a 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -82,22 +82,22 @@ do {						\
 	case 1:					\
 		asm(op "b %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)val));		\
+		    : "qi" ((T__)(val)));	\
 		break;				\
 	case 2:					\
 		asm(op "w %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)val));		\
+		    : "ri" ((T__)(val)));	\
 		break;				\
 	case 4:					\
 		asm(op "l %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "ri" ((T__)val));		\
+		    : "ri" ((T__)(val)));	\
 		break;				\
 	case 8:					\
 		asm(op "q %1,"__percpu_arg(0)	\
 		    : "+m" (var)		\
-		    : "re" ((T__)val));		\
+		    : "re" ((T__)(val)));	\
 		break;				\
 	default: __bad_percpu_size();		\
 	}					\
@@ -109,7 +109,7 @@ do {						\
 	switch (sizeof(var)) {			\
 	case 1:					\
 		asm(op "b "__percpu_arg(1)",%0"	\
-		    : "=r" (ret__)		\
+		    : "=q" (ret__)		\
 		    : "m" (var));		\
 		break;				\
 	case 2:					\
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 34c52370f2f..c2cceae709c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
 #define current_cpu_data	__get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -352,6 +352,11 @@ struct i387_soft_struct {
 	u32			entry_eip;
 };
 
+struct ymmh_struct {
+	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
+	u32 ymmh_space[64];
+};
+
 struct xsave_hdr_struct {
 	u64 xstate_bv;
 	u64 reserved1[2];
@@ -361,6 +366,7 @@ struct xsave_hdr_struct {
 struct xsave_struct {
 	struct i387_fxsave_struct i387;
 	struct xsave_hdr_struct xsave_hdr;
+	struct ymmh_struct ymmh;
 	/* new processor state extensions will go here */
 } __attribute__ ((packed, aligned (64)));
 
@@ -387,7 +393,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index e304b66abee..624f133943e 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
 
 /*
  * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  So regs will be the current sp.
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved.  Thus the '&regs->sp'.
  *
  * This is valid only for kernel mode traps.
  */
-static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-	return (unsigned long)regs;
+	return (unsigned long)(&regs->sp);
 #else
 	return regs->sp;
 #endif
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d5cd6c58688..a4737dddfd5 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -50,7 +50,7 @@
 #ifdef CONFIG_X86_64
 #define NEED_PSE	0
 #define NEED_MSR	(1<<(X86_FEATURE_MSR & 31))
-#define NEED_PGE	(1<<(X86_FEATURE_PGE & 31))
+#define NEED_PGE	0
 #define NEED_FXSR	(1<<(X86_FEATURE_FXSR & 31))
 #define NEED_XMM	(1<<(X86_FEATURE_XMM & 31))
 #define NEED_XMM2	(1<<(X86_FEATURE_XMM2 & 31))
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index ec666491aaa..72e5a449166 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -269,6 +269,11 @@ struct _xsave_hdr {
 	__u64 reserved2[5];
 };
 
+struct _ymmh_state {
+	/* 16 * 16 bytes for each YMMH-reg */
+	__u32 ymmh_space[64];
+};
+
 /*
  * Extended state pointed by the fpstate pointer in the sigcontext.
  * In addition to the fpstate, information encoded in the xstate_hdr
@@ -278,6 +283,7 @@ struct _xsave_hdr {
 struct _xstate {
 	struct _fpstate fpstate;
 	struct _xsave_hdr xstate_hdr;
+	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
 
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index e5e6caffec8..b7e5db87639 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifndef CONFIG_PARAVIRT
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
@@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
 	__raw_spin_lock(lock);
 }
 
-#endif
+#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f8..16a5c84b032 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 892b119dba6..f44b49abca4 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -200,7 +200,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
 #define mc_capable()	(cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index db68ac8a5ac..2cae46c7c8a 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -17,6 +17,11 @@
 /* ========================================================================= */
 /*                          UVH_BAU_DATA_CONFIG                              */
 /* ========================================================================= */
+#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
+#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x000000000bUL
+/* 1011 timebase 7 (168millisec) * 3 ticks -> 500ms */
 #define UVH_BAU_DATA_CONFIG 0x61680UL
 #define UVH_BAU_DATA_CONFIG_32 0x0438
 
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a918dde46b..018a0a40079 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -124,7 +124,8 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 
 /* VIRT <-> MACHINE conversion */
 #define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define virt_to_pfn(v)		(PFN_DOWN(__pa(v)))
+#define virt_to_mfn(v)		(pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
 static inline unsigned long pte_mfn(pte_t pte)
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 08e9a1ac07a..727acc15234 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -7,6 +7,7 @@
 
 #define XSTATE_FP	0x1
 #define XSTATE_SSE	0x2
+#define XSTATE_YMM	0x4
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
 
@@ -15,7 +16,7 @@
 /*
  * These are the features that the OS can handle currently.
  */
-#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE)
+#define XCNTXT_MASK	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX	"0x48, "
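One of the subtler hunks above is the percpu.h change of the 1-byte operand constraint from "ri" to "qi". As a rough stand-alone illustration (not kernel code; the helper name `store_byte` is made up for the example), GCC's "q" constraint restricts the register allocator to the byte-addressable registers (%eax–%edx on 32-bit x86), whereas "r" may hand the asm a register such as %esi that has no byte form there, so the generated `movb` would not assemble:

```c
/* Sketch: why a byte-sized "movb" operand wants the "q" constraint on x86-32.
 * "q" limits GCC to registers with 8-bit sub-registers (a, b, c, d); "r"
 * could pick %esi/%edi, which have no byte form on 32-bit x86, and the
 * assembler would then reject the emitted "movb".  Hypothetical example.
 */
static inline void store_byte(unsigned char *dst, unsigned char val)
{
	asm("movb %1, %0"
	    : "=m" (*dst)	/* memory output */
	    : "qi" (val));	/* byte register or immediate input */
}
```

The wider sizes keep "ri"/"re" because every general-purpose register has 16-, 32- and 64-bit forms; only the byte case needs the narrower class.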