author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-19 15:06:00 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-19 15:06:00 -0700
commit     60812a4a99b796d894d2522dc63cb0fafc3be25e (patch)
tree       bbf3a441b71e3b9b670d91652094114852272db8 /include
parent     b04cde34cf1d006dfaf8523640f3a18bbb15ebaa (diff)
parent     92cb7612aee39642d109b8d935ad265e602c0563 (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (33 commits)
  x86: convert cpuinfo_x86 array to a per_cpu array
  x86: introduce frame_pointer() and stack_pointer()
  x86 & generic: change to __builtin_prefetch()
  i386: do not BUG_ON() when MSR is unknown
  x86: acpi use cpu_physical_id
  x86: convert cpu_llc_id to be a per cpu variable
  x86: convert cpu_to_apicid to be a per cpu variable
  i386: introduce "used_vectors" bitmap which can be used to reserve vectors.
  x86: use raw locks during oopses
  x86: honor _PAGE_PSE bit on page walks
  i386: do cpuid_device_create() in CPU_UP_PREPARE instead of CPU_ONLINE.
  x86: implement missing x86_64 function smp_call_function_mask()
  x86: use descriptor's functions instead of inline assembly
  i386: consolidate show_regs and show_registers for i386
  i386: make callgraph use dump_trace() on i386/x86_64
  x86: enable iommu_merge by default
  i386: i386 add AMD64 Barcelona PMU MSR definitions to msr.h
  x86: Unify i386 and x86-64 early quirks
  x86: enable HPET on ICH3 and ICH4
  x86: force enable HPET on VT8235/8237 chipsets
  ...

Manually fix trivial conflict with task pid container helper changes in
arch/x86/kernel/process_32.c
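The recurring pattern in this series is converting NR_CPUS-sized arrays (cpu_data[], x86_cpu_to_apicid[], cpu_llc_id[]) into per_cpu variables. A minimal sketch of the resulting access pattern, assuming the accessors declared in the headers below; the report_cpu() helper itself is hypothetical:

/* Sketch only: reading per-CPU topology data through the new accessors.
 * The old open-coded indexing (cpu_data[cpu], x86_cpu_to_apicid[cpu]) is
 * replaced by cpu_data(cpu) and per_cpu(x86_cpu_to_apicid, cpu). */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/smp.h>

static void report_cpu(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);		/* was &cpu_data[cpu] */
	u8 apicid = per_cpu(x86_cpu_to_apicid, cpu);	/* was x86_cpu_to_apicid[cpu] */

	printk(KERN_INFO "CPU%d: apicid %d, llc id %d\n",
	       cpu, apicid, per_cpu(cpu_llc_id, cpu));
}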
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/acpi_32.h      |  6
-rw-r--r--  include/asm-x86/compat.h       |  6
-rw-r--r--  include/asm-x86/desc_64.h      | 30
-rw-r--r--  include/asm-x86/geode.h        |  2
-rw-r--r--  include/asm-x86/hpet.h         |  1
-rw-r--r--  include/asm-x86/io_apic_64.h   |  2
-rw-r--r--  include/asm-x86/ipi.h          |  2
-rw-r--r--  include/asm-x86/irq_32.h       |  3
-rw-r--r--  include/asm-x86/msr-index.h    | 36
-rw-r--r--  include/asm-x86/processor_32.h | 16
-rw-r--r--  include/asm-x86/processor_64.h | 16
-rw-r--r--  include/asm-x86/proto.h        |  2
-rw-r--r--  include/asm-x86/ptrace_32.h    |  2
-rw-r--r--  include/asm-x86/ptrace_64.h    |  2
-rw-r--r--  include/asm-x86/smp_32.h       |  6
-rw-r--r--  include/asm-x86/smp_64.h       | 11
-rw-r--r--  include/asm-x86/system_32.h    |  1
-rw-r--r--  include/asm-x86/topology_32.h  |  4
-rw-r--r--  include/asm-x86/topology_64.h  |  4
-rw-r--r--  include/linux/prefetch.h       |  9
20 files changed, 111 insertions(+), 50 deletions(-)
diff --git a/include/asm-x86/acpi_32.h b/include/asm-x86/acpi_32.h
index 125179adf04..723493e6c85 100644
--- a/include/asm-x86/acpi_32.h
+++ b/include/asm-x86/acpi_32.h
@@ -81,11 +81,7 @@ int __acpi_release_global_lock(unsigned int *lock);
:"=r"(n_hi), "=r"(n_lo) \
:"0"(n_hi), "1"(n_lo))
-#ifdef CONFIG_X86_IO_APIC
-extern void check_acpi_pci(void);
-#else
-static inline void check_acpi_pci(void) { }
-#endif
+extern void early_quirks(void);
#ifdef CONFIG_ACPI
extern int acpi_lapic;
diff --git a/include/asm-x86/compat.h b/include/asm-x86/compat.h
index 53cb96b68a6..66ba7987184 100644
--- a/include/asm-x86/compat.h
+++ b/include/asm-x86/compat.h
@@ -6,6 +6,7 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
+#include <asm/user32.h>
#define COMPAT_USER_HZ 100
@@ -181,6 +182,11 @@ struct compat_shmid64_ds {
};
/*
+ * The type of struct elf_prstatus.pr_reg in compatible core dumps.
+ */
+typedef struct user_regs_struct32 compat_elf_gregset_t;
+
+/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters, just declare them
* as pointers because the syscall entry code will have
diff --git a/include/asm-x86/desc_64.h b/include/asm-x86/desc_64.h
index ac991b5ca0f..7d9c938e69f 100644
--- a/include/asm-x86/desc_64.h
+++ b/include/asm-x86/desc_64.h
@@ -20,6 +20,16 @@ extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
+static inline unsigned long __store_tr(void)
+{
+ unsigned long tr;
+
+ asm volatile ("str %w0":"=r" (tr));
+ return tr;
+}
+
+#define store_tr(tr) (tr) = __store_tr()
+
/*
* This is the ldt that every process will get unless we need
* something other than this.
@@ -31,6 +41,16 @@ extern struct desc_ptr cpu_gdt_descr[];
/* the cpu gdt accessor */
#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
+static inline void load_gdt(const struct desc_ptr *ptr)
+{
+ asm volatile("lgdt %w0"::"m" (*ptr));
+}
+
+static inline void store_gdt(struct desc_ptr *ptr)
+{
+ asm("sgdt %w0":"=m" (*ptr));
+}
+
static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
{
struct gate_struct s;
@@ -71,6 +91,16 @@ static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
}
+static inline void load_idt(const struct desc_ptr *ptr)
+{
+ asm volatile("lidt %w0"::"m" (*ptr));
+}
+
+static inline void store_idt(struct desc_ptr *dtr)
+{
+ asm("sidt %w0":"=m" (*dtr));
+}
+
static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
unsigned size)
{
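The new wrappers above back the "use descriptor's functions instead of inline assembly" commit: callers go through load_gdt()/store_gdt()/load_idt()/store_idt() instead of open-coding lgdt/sgdt/lidt/sidt. A hypothetical call site, for illustration only:

/* Illustrative only: saving and restoring the descriptor tables through
 * the new helpers rather than inline asm at every call site. */
#include <asm/desc.h>

static struct desc_ptr saved_gdt, saved_idt;

static void save_descriptor_tables(void)
{
	store_gdt(&saved_gdt);
	store_idt(&saved_idt);
}

static void restore_descriptor_tables(void)
{
	load_gdt(&saved_gdt);
	load_idt(&saved_idt);
}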
diff --git a/include/asm-x86/geode.h b/include/asm-x86/geode.h
index d94898831ba..771af336734 100644
--- a/include/asm-x86/geode.h
+++ b/include/asm-x86/geode.h
@@ -38,6 +38,8 @@ extern int geode_get_dev_base(unsigned int dev);
#define MSR_LBAR_ACPI 0x5140000E
#define MSR_LBAR_PMS 0x5140000F
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
#define MSR_PIC_YSEL_LOW 0x51400020
#define MSR_PIC_YSEL_HIGH 0x51400021
#define MSR_PIC_ZSEL_LOW 0x51400022
diff --git a/include/asm-x86/hpet.h b/include/asm-x86/hpet.h
index d4ab6db050b..4f51519fc19 100644
--- a/include/asm-x86/hpet.h
+++ b/include/asm-x86/hpet.h
@@ -64,6 +64,7 @@
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
+extern int hpet_force_user;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern unsigned long hpet_readl(unsigned long a);
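hpet_force_user is set when the user boots with "hpet=force"; the chipset quirks in this series (ICH3/ICH4, VT8235/VT8237) consult it before force-enabling a disabled HPET. A rough sketch of that pattern; the function and its address parameter are illustrative, not the actual quirk code:

/* Sketch: a quirk only force-enables HPET if the user asked for it. */
#include <linux/kernel.h>
#include <asm/hpet.h>

static void example_force_hpet(unsigned long phys)
{
	if (!hpet_force_user)
		return;			/* no "hpet=force" on the command line */

	force_hpet_address = phys;
	printk(KERN_INFO "HPET force-enabled at 0x%lx\n", force_hpet_address);
}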
diff --git a/include/asm-x86/io_apic_64.h b/include/asm-x86/io_apic_64.h
index d9f2e54324d..e2c13675ee4 100644
--- a/include/asm-x86/io_apic_64.h
+++ b/include/asm-x86/io_apic_64.h
@@ -133,4 +133,6 @@ void enable_NMI_through_LVT0 (void * dummy);
extern spinlock_t i8259A_lock;
+extern int timer_over_8254;
+
#endif
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index a7c75ea408a..6d011bd6067 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -119,7 +119,7 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
*/
local_irq_save(flags);
for_each_cpu_mask(query_cpu, mask) {
- __send_IPI_dest_field(x86_cpu_to_apicid[query_cpu],
+ __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
vector, APIC_DEST_PHYSICAL);
}
local_irq_restore(flags);
diff --git a/include/asm-x86/irq_32.h b/include/asm-x86/irq_32.h
index 36f310632c4..aca9c96e8e6 100644
--- a/include/asm-x86/irq_32.h
+++ b/include/asm-x86/irq_32.h
@@ -45,4 +45,7 @@ unsigned int do_IRQ(struct pt_regs *regs);
void init_IRQ(void);
void __init native_init_IRQ(void);
+/* Interrupt vector management */
+extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
+
#endif /* _ASM_IRQ_H */
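used_vectors lets i386 code mark interrupt vectors as reserved so they are skipped when vectors are assigned dynamically. A minimal sketch of a reservation, assuming a hypothetical EXAMPLE_VECTOR constant:

/* Sketch: reserving a fixed vector in the used_vectors bitmap. */
#include <linux/bitops.h>
#include <asm/irq.h>

#define EXAMPLE_VECTOR	0xf9	/* hypothetical fixed vector number */

static void reserve_example_vector(void)
{
	if (!test_bit(EXAMPLE_VECTOR, used_vectors))
		set_bit(EXAMPLE_VECTOR, used_vectors);
}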
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h
index a02eb299134..a4944732be0 100644
--- a/include/asm-x86/msr-index.h
+++ b/include/asm-x86/msr-index.h
@@ -73,8 +73,32 @@
#define MSR_P6_EVNTSEL0 0x00000186
#define MSR_P6_EVNTSEL1 0x00000187
-/* K7/K8 MSRs. Not complete. See the architecture manual for a more
+/* AMD64 MSRs. Not complete. See the architecture manual for a more
complete list. */
+
+#define MSR_AMD64_IBSFETCHCTL 0xc0011030
+#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
+#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
+#define MSR_AMD64_IBSOPCTL 0xc0011033
+#define MSR_AMD64_IBSOPRIP 0xc0011034
+#define MSR_AMD64_IBSOPDATA 0xc0011035
+#define MSR_AMD64_IBSOPDATA2 0xc0011036
+#define MSR_AMD64_IBSOPDATA3 0xc0011037
+#define MSR_AMD64_IBSDCLINAD 0xc0011038
+#define MSR_AMD64_IBSDCPHYSAD 0xc0011039
+#define MSR_AMD64_IBSCTL 0xc001103a
+
+/* K8 MSRs */
+#define MSR_K8_TOP_MEM1 0xc001001a
+#define MSR_K8_TOP_MEM2 0xc001001d
+#define MSR_K8_SYSCFG 0xc0010010
+#define MSR_K8_HWCR 0xc0010015
+#define MSR_K8_ENABLE_C1E 0xc0010055
+#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
+
+/* K7 MSRs */
#define MSR_K7_EVNTSEL0 0xc0010000
#define MSR_K7_PERFCTR0 0xc0010004
#define MSR_K7_EVNTSEL1 0xc0010001
@@ -83,20 +107,10 @@
#define MSR_K7_PERFCTR2 0xc0010006
#define MSR_K7_EVNTSEL3 0xc0010003
#define MSR_K7_PERFCTR3 0xc0010007
-#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K7_CLK_CTL 0xc001001b
-#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-
-#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
-#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
-#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
-
#define MSR_K7_HWCR 0xc0010015
-#define MSR_K8_HWCR 0xc0010015
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
-#define MSR_K8_ENABLE_C1E 0xc0010055
/* K6 MSRs */
#define MSR_K6_EFER 0xc0000080
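The MSR_AMD64_IBS* definitions are the Barcelona (Family 10h) Instruction-Based Sampling registers that PMU/profiling code can now reference by name. A hedged sketch of reading one of them; checking for IBS support via CPUID is the caller's job and is omitted here:

/* Sketch: reading an IBS control MSR by its new symbolic name. */
#include <asm/msr.h>

static u64 read_ibs_fetch_ctl(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSFETCHCTL, val);	/* caller must know IBS exists */
	return val;
}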
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 83800e7496e..13976b08683 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -79,6 +79,7 @@ struct cpuinfo_x86 {
unsigned char booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical processor id. */
__u8 cpu_core_id; /* Core id */
+ __u8 cpu_index; /* index into per_cpu list */
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -103,14 +104,19 @@ extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+#define cpu_data(cpu) per_cpu(cpu_info, cpu)
+#define current_cpu_data cpu_data(smp_processor_id())
#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
+#define cpu_data(cpu) boot_cpu_data
+#define current_cpu_data boot_cpu_data
#endif
-extern int cpu_llc_id[NR_CPUS];
+/*
+ * the following now lives in the per cpu area:
+ * extern int cpu_llc_id[NR_CPUS];
+ */
+DECLARE_PER_CPU(u8, cpu_llc_id);
extern char ignore_fpu_irq;
void __init cpu_detect(struct cpuinfo_x86 *c);
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index f422becbddd..e4f19970a82 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -74,6 +74,7 @@ struct cpuinfo_x86 {
__u8 booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical Processor id. */
__u8 cpu_core_id; /* Core id. */
+ __u8 cpu_index; /* index into per_cpu list */
#endif
} ____cacheline_aligned;
@@ -88,11 +89,12 @@ struct cpuinfo_x86 {
#define X86_VENDOR_UNKNOWN 0xff
#ifdef CONFIG_SMP
-extern struct cpuinfo_x86 cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+#define cpu_data(cpu) per_cpu(cpu_info, cpu)
+#define current_cpu_data cpu_data(smp_processor_id())
#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
+#define cpu_data(cpu) boot_cpu_data
+#define current_cpu_data boot_cpu_data
#endif
extern char ignore_irq13;
@@ -390,12 +392,6 @@ static inline void sync_core(void)
asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
-#define ARCH_HAS_PREFETCH
-static inline void prefetch(void *x)
-{
- asm volatile("prefetcht0 (%0)" :: "r" (x));
-}
-
#define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x)
{
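Dropping ARCH_HAS_PREFETCH here makes x86-64 fall back to the generic prefetch() in linux/prefetch.h, which this series redefines as __builtin_prefetch() (see the last hunk of this diff). A rough usage sketch; the list-walking function is illustrative:

/* Sketch: callers keep using prefetch(); with no ARCH_HAS_PREFETCH the
 * hint now comes from the compiler builtin rather than hand-written
 * "prefetcht0" inline asm. */
#include <linux/list.h>
#include <linux/prefetch.h>

static void walk(struct list_head *head)
{
	struct list_head *p;

	for (p = head->next; p != head; p = p->next)
		prefetch(p->next);	/* hint that the next node will be read soon */
}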
diff --git a/include/asm-x86/proto.h b/include/asm-x86/proto.h
index c44a3a93b5a..dabba55f7ed 100644
--- a/include/asm-x86/proto.h
+++ b/include/asm-x86/proto.h
@@ -83,8 +83,6 @@ extern unsigned tsc_khz;
extern int reboot_force;
extern int notsc_setup(char *);
-extern int timer_over_8254;
-
extern int gsi_irq_sharing(int gsi);
extern int force_mwait;
diff --git a/include/asm-x86/ptrace_32.h b/include/asm-x86/ptrace_32.h
index 6002597b9e1..78d063dabe0 100644
--- a/include/asm-x86/ptrace_32.h
+++ b/include/asm-x86/ptrace_32.h
@@ -55,6 +55,8 @@ static inline int v8086_mode(struct pt_regs *regs)
}
#define instruction_pointer(regs) ((regs)->eip)
+#define frame_pointer(regs) ((regs)->ebp)
+#define stack_pointer(regs) ((regs)->esp)
#define regs_return_value(regs) ((regs)->eax)
extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/include/asm-x86/ptrace_64.h b/include/asm-x86/ptrace_64.h
index 7f166ccb060..7bfe61e1b70 100644
--- a/include/asm-x86/ptrace_64.h
+++ b/include/asm-x86/ptrace_64.h
@@ -40,6 +40,8 @@ struct pt_regs {
#define user_mode(regs) (!!((regs)->cs & 3))
#define user_mode_vm(regs) user_mode(regs)
#define instruction_pointer(regs) ((regs)->rip)
+#define frame_pointer(regs) ((regs)->rbp)
+#define stack_pointer(regs) ((regs)->rsp)
#define regs_return_value(regs) ((regs)->rax)
extern unsigned long profile_pc(struct pt_regs *regs);
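frame_pointer() and stack_pointer() give backtrace and profiling code (the dump_trace()-based callgraph commit in this series) a register-name-independent way to read ebp/esp or rbp/rsp from a struct pt_regs. A hypothetical consumer:

/* Illustrative only: arch-neutral access to frame/stack pointers. */
#include <linux/kernel.h>
#include <asm/ptrace.h>

static void note_frame(struct pt_regs *regs)
{
	unsigned long bp = frame_pointer(regs);
	unsigned long sp = stack_pointer(regs);

	printk(KERN_DEBUG "ip %lx bp %lx sp %lx\n",
	       instruction_pointer(regs), bp, sp);
}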
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 1f576a93368..7056d868452 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -39,9 +39,11 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
#define MAX_APICID 256
-extern u8 x86_cpu_to_apicid[];
+extern u8 __initdata x86_cpu_to_apicid_init[];
+extern void *x86_cpu_to_apicid_ptr;
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
extern void set_cpu_sibling_map(int cpu);
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index d30e9b684fd..6f0e0273b64 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -37,6 +37,8 @@ extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+ void *info, int wait);
/*
* cpu_sibling_map and cpu_core_map now live
@@ -47,7 +49,7 @@ extern void smp_send_reschedule(int cpu);
*/
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
-extern u8 cpu_llc_id[NR_CPUS];
+DECLARE_PER_CPU(u8, cpu_llc_id);
#define SMP_TRAMPOLINE_BASE 0x6000
@@ -84,7 +86,9 @@ static inline int hard_smp_processor_id(void)
* Some lowlevel functions might want to know about
* the real APIC ID <-> CPU # mapping.
*/
-extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
+extern u8 __initdata x86_cpu_to_apicid_init[];
+extern void *x86_cpu_to_apicid_ptr;
+DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
extern u8 bios_cpu_apicid[];
static inline int cpu_present_to_apicid(int mps_cpu)
@@ -115,8 +119,9 @@ static __inline int logical_smp_processor_id(void)
}
#ifdef CONFIG_SMP
-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
#else
+extern unsigned int boot_cpu_id;
#define cpu_physical_id(cpu) boot_cpu_id
#endif /* !CONFIG_SMP */
#endif
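smp_call_function_mask() is the newly exported x86-64 primitive for running a function on an arbitrary set of CPUs. A hedged sketch of a caller; the do_work() callback and the sibling-mask choice are illustrative:

/* Sketch: invoke do_work() on the current CPU's siblings and wait for
 * completion.  The calling CPU must not be included in the mask. */
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/smp.h>

static void do_work(void *info)
{
	/* runs on each target CPU in interrupt context */
}

static void run_on_siblings(void)
{
	cpumask_t mask;

	preempt_disable();
	mask = per_cpu(cpu_sibling_map, smp_processor_id());
	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		smp_call_function_mask(mask, do_work, NULL, 1);
	preempt_enable();
}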
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index db6283eb5e4..ef8468883ba 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -315,5 +315,6 @@ extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
+void __show_registers(struct pt_regs *, int all);
#endif
diff --git a/include/asm-x86/topology_32.h b/include/asm-x86/topology_32.h
index ae1074603c4..9040f5a6127 100644
--- a/include/asm-x86/topology_32.h
+++ b/include/asm-x86/topology_32.h
@@ -28,8 +28,8 @@
#define _ASM_I386_TOPOLOGY_H
#ifdef CONFIG_X86_HT
-#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
-#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
+#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
+#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h
index c0c93d74467..a718dda037e 100644
--- a/include/asm-x86/topology_64.h
+++ b/include/asm-x86/topology_64.h
@@ -56,8 +56,8 @@ extern int __node_distance(int, int);
#endif
#ifdef CONFIG_SMP
-#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
-#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
+#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
+#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index 1adfe668d03..af7c36a5a52 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -34,17 +34,12 @@
*/
-/*
- * These cannot be do{}while(0) macros. See the mental gymnastics in
- * the loop macro.
- */
-
#ifndef ARCH_HAS_PREFETCH
-static inline void prefetch(const void *x) {;}
+#define prefetch(x) __builtin_prefetch(x)
#endif
#ifndef ARCH_HAS_PREFETCHW
-static inline void prefetchw(const void *x) {;}
+#define prefetchw(x) __builtin_prefetch(x,1)
#endif
#ifndef ARCH_HAS_SPINLOCK_PREFETCH