author     Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 13:07:55 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 13:07:55 -0700
commit     b278240839e20fa9384ea430df463b367b90e04e (patch)
tree       f99f0c8cdd4cc7f177cd75440e6bd181cded7fb3 /include/asm-i386
parent     dd77a4ee0f3981693d4229aa1d57cea9e526ff47 (diff)
parent     3f75f42d7733e73aca5c78326489efd4189e0111 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (225 commits)
  [PATCH] Don't set calgary iommu as default y
  [PATCH] i386/x86-64: New Intel feature flags
  [PATCH] x86: Add a cumulative thermal throttle event counter.
  [PATCH] i386: Make the jiffies compares use the 64bit safe macros.
  [PATCH] x86: Refactor thermal throttle processing
  [PATCH] Add 64bit jiffies compares (for use with get_jiffies_64)
  [PATCH] Fix unwinder warning in traps.c
  [PATCH] x86: Allow disabling early pci scans with pci=noearly or disallowing conf1
  [PATCH] x86: Move direct PCI scanning functions out of line
  [PATCH] i386/x86-64: Make all early PCI scans dependent on CONFIG_PCI
  [PATCH] Don't leak NT bit into next task
  [PATCH] i386/x86-64: Work around gcc bug with noreturn functions in unwinder
  [PATCH] Fix some broken white space in ia32_signal.c
  [PATCH] Initialize argument registers for 32bit signal handlers.
  [PATCH] Remove all traces of signal number conversion
  [PATCH] Don't synchronize time reading on single core AMD systems
  [PATCH] Remove outdated comment in x86-64 mmconfig code
  [PATCH] Use string instructions for Core2 copy/clear
  [PATCH] x86: - restore i8259A eoi status on resume
  [PATCH] i386: Split multi-line printk in oops output.
  ...
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/acpi.h                   |  14
-rw-r--r--  include/asm-i386/alternative-asm.i        |  14
-rw-r--r--  include/asm-i386/apic.h                   |  16
-rw-r--r--  include/asm-i386/desc.h                   | 121
-rw-r--r--  include/asm-i386/dwarf2.h                 |  11
-rw-r--r--  include/asm-i386/e820.h                   |   2
-rw-r--r--  include/asm-i386/frame.i                  |  24
-rw-r--r--  include/asm-i386/genapic.h                |  69
-rw-r--r--  include/asm-i386/intel_arch_perfmon.h     |  14
-rw-r--r--  include/asm-i386/io_apic.h                |  11
-rw-r--r--  include/asm-i386/kexec.h                  |  27
-rw-r--r--  include/asm-i386/mach-es7000/mach_apic.h  |   4
-rw-r--r--  include/asm-i386/mach-summit/mach_apic.h  |  11
-rw-r--r--  include/asm-i386/mutex.h                  |  16
-rw-r--r--  include/asm-i386/nmi.h                    |  37
-rw-r--r--  include/asm-i386/pgtable.h                |   2
-rw-r--r--  include/asm-i386/ptrace.h                 |   9
-rw-r--r--  include/asm-i386/rwlock.h                 |  48
-rw-r--r--  include/asm-i386/rwsem.h                  |  62
-rw-r--r--  include/asm-i386/segment.h                |  17
-rw-r--r--  include/asm-i386/semaphore.h              |  49
-rw-r--r--  include/asm-i386/smp.h                    |  20
-rw-r--r--  include/asm-i386/spinlock.h               | 134
-rw-r--r--  include/asm-i386/stacktrace.h             |   1
-rw-r--r--  include/asm-i386/therm_throt.h            |   9
-rw-r--r--  include/asm-i386/tlbflush.h               |   4
-rw-r--r--  include/asm-i386/tsc.h                    |   1
-rw-r--r--  include/asm-i386/unistd.h                 |   3
-rw-r--r--  include/asm-i386/unwind.h                 |   8
29 files changed, 406 insertions, 352 deletions
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 20f52395421..6016632d032 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -131,21 +131,7 @@ static inline void disable_acpi(void)
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
#ifdef CONFIG_X86_IO_APIC
-extern int skip_ioapic_setup;
extern int acpi_skip_timer_override;
-
-static inline void disable_ioapic_setup(void)
-{
- skip_ioapic_setup = 1;
-}
-
-static inline int ioapic_setup_disabled(void)
-{
- return skip_ioapic_setup;
-}
-
-#else
-static inline void disable_ioapic_setup(void) { }
#endif
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i
new file mode 100644
index 00000000000..6c47e3b9484
--- /dev/null
+++ b/include/asm-i386/alternative-asm.i
@@ -0,0 +1,14 @@
+#include <linux/config.h>
+
+#ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+1: lock
+ .section .smp_locks,"a"
+ .align 4
+ .long 1b
+ .previous
+ .endm
+#else
+ .macro LOCK_PREFIX
+ .endm
+#endif
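Each use of LOCK_PREFIX above records the address of its `lock` byte in the .smp_locks section, so a kernel booted on a single CPU can patch the prefixes out. A rough C sketch of that patching walk (the linker symbols and the surrounding context are assumptions here, not the kernel's exact API):

    #include <stdint.h>

    extern uint8_t *__smp_locks_start[];   /* assumed linker-script symbols */
    extern uint8_t *__smp_locks_end[];

    /* Overwrite each recorded 0xf0 (lock) prefix with 0x90 (nop)
     * so a uniprocessor boot skips the bus-lock cost. */
    static void smp_unlock_all(void)
    {
        uint8_t **p;

        for (p = __smp_locks_start; p < __smp_locks_end; p++)
            **p = 0x90;
    }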
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 2c1e371cebb..3a42b7d6fc9 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -16,20 +16,8 @@
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
-extern int enable_local_apic;
extern int apic_verbosity;
-static inline void lapic_disable(void)
-{
- enable_local_apic = -1;
- clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
-}
-
-static inline void lapic_enable(void)
-{
- enable_local_apic = 1;
-}
-
/*
* Define the default level of output to be very little
* This can be turned up by using apic=verbose for more
@@ -42,6 +30,8 @@ static inline void lapic_enable(void)
} while (0)
+extern void generic_apic_probe(void);
+
#ifdef CONFIG_X86_LOCAL_APIC
/*
@@ -117,8 +107,6 @@ extern void enable_APIC_timer(void);
extern void enable_NMI_through_LVT0 (void * dummy);
-extern int disable_timer_pin_1;
-
void smp_send_timer_broadcast_ipi(struct pt_regs *regs);
void switch_APIC_timer_to_ipi(void *cpumask);
void switch_ipi_to_APIC_timer(void *cpumask);
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 89b8b82c82b..5874ef119ff 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -33,50 +33,99 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
}
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt[];
+extern struct desc_struct idt_table[];
+extern void set_intr_gate(unsigned int irq, void * addr);
+
+static inline void pack_descriptor(__u32 *a, __u32 *b,
+ unsigned long base, unsigned long limit, unsigned char type, unsigned char flags)
+{
+ *a = ((base & 0xffff) << 16) | (limit & 0xffff);
+ *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
+ (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20);
+}
+
+static inline void pack_gate(__u32 *a, __u32 *b,
+ unsigned long base, unsigned short seg, unsigned char type, unsigned char flags)
+{
+ *a = (seg << 16) | (base & 0xffff);
+ *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff);
+}
+
+#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */
+#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */
+#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */
+#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */
+#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */
+#define DESCTYPE_DPL3 0x60 /* DPL-3 */
+#define DESCTYPE_S 0x10 /* !system */
+
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
-#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
-#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr))
+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt))
#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
-#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
-#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+#define store_tr(tr) __asm__ ("str %0":"=m" (tr))
+#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt))
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
-extern void set_intr_gate(unsigned int irq, void * addr);
+#if TLS_SIZE != 24
+# error update this code.
+#endif
-#define _set_tssldt_desc(n,addr,limit,type) \
-__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
- "movw %w1,2(%2)\n\t" \
- "rorl $16,%1\n\t" \
- "movb %b1,4(%2)\n\t" \
- "movb %4,5(%2)\n\t" \
- "movb $0,6(%2)\n\t" \
- "movb %h1,7(%2)\n\t" \
- "rorl $16,%1" \
- : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
-
-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
{
- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
- offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+ C(0); C(1); C(2);
+#undef C
}
-#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
+{
+ __u32 *lp = (__u32 *)((char *)dt + entry*8);
+ *lp = entry_a;
+ *(lp+1) = entry_b;
+}
+
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
+static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
+{
+ __u32 a, b;
+ pack_gate(&a, &b, (unsigned long)addr, seg, type, 0);
+ write_idt_entry(idt_table, gate, a, b);
+}
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
{
- _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+ __u32 a, b;
+ pack_descriptor(&a, &b, (unsigned long)addr,
+ offsetof(struct tss_struct, __cacheline_filler) - 1,
+ DESCTYPE_TSS, 0);
+ write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
}
+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
+{
+ __u32 a, b;
+ pack_descriptor(&a, &b, (unsigned long)addr,
+ entries * sizeof(struct desc_struct) - 1,
+ DESCTYPE_LDT, 0);
+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+}
+
+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
#define LDT_entry_a(info) \
((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
@@ -102,24 +151,6 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
(info)->seg_not_present == 1 && \
(info)->useable == 0 )
-static inline void write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
-{
- __u32 *lp = (__u32 *)((char *)ldt + entry*8);
- *lp = entry_a;
- *(lp+1) = entry_b;
-}
-
-#if TLS_SIZE != 24
-# error update this code.
-#endif
-
-static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
-{
-#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
- C(0); C(1); C(2);
-#undef C
-}
-
static inline void clear_LDT(void)
{
int cpu = get_cpu();
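The descriptor packing above is easy to sanity-check outside the kernel. A minimal stand-alone sketch that mirrors pack_descriptor() and prints the two words for a 32-bit TSS descriptor (values chosen for illustration; the bit layout follows the IA-32 segment descriptor format):

    #include <stdio.h>

    /* Mirror of the kernel's pack_descriptor(), compiled stand-alone
     * so the bit layout can be eyeballed. */
    static void pack_descriptor(unsigned *a, unsigned *b,
                                unsigned long base, unsigned long limit,
                                unsigned char type, unsigned char flags)
    {
        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
        *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
             (limit & 0x000f0000) | ((type & 0xff) << 8) |
             ((flags & 0xf) << 20);
    }

    int main(void)
    {
        unsigned a, b;

        /* A 32-bit TSS (DESCTYPE_TSS = 0x89) at base 0x12345678, limit 0x207. */
        pack_descriptor(&a, &b, 0x12345678, 0x207, 0x89, 0);
        printf("%08x %08x\n", a, b);   /* prints: 56780207 12008934 */
        return 0;
    }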
diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h
index 2280f6272f8..6d66398a307 100644
--- a/include/asm-i386/dwarf2.h
+++ b/include/asm-i386/dwarf2.h
@@ -1,8 +1,6 @@
#ifndef _DWARF2_H
#define _DWARF2_H
-#include <linux/config.h>
-
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
@@ -28,6 +26,13 @@
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
+#define CFI_UNDEFINED .cfi_undefined
+
+#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
+#define CFI_SIGNAL_FRAME .cfi_signal_frame
+#else
+#define CFI_SIGNAL_FRAME
+#endif
#else
@@ -48,6 +53,8 @@
#define CFI_RESTORE ignore
#define CFI_REMEMBER_STATE ignore
#define CFI_RESTORE_STATE ignore
+#define CFI_UNDEFINED ignore
+#define CFI_SIGNAL_FRAME ignore
#endif
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index ca82acb8cb1..f7514fb6e8e 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -18,7 +18,7 @@
#define E820_RAM 1
#define E820_RESERVED 2
-#define E820_ACPI 3 /* usable as RAM once ACPI tables have been read */
+#define E820_ACPI 3
#define E820_NVS 4
#define HIGH_MEMORY (1024*1024)
diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i
new file mode 100644
index 00000000000..4d68ddce18b
--- /dev/null
+++ b/include/asm-i386/frame.i
@@ -0,0 +1,24 @@
+#include <linux/config.h>
+#include <asm/dwarf2.h>
+
+/* The annotation hides the frame from the unwinder and makes it look
+   like an ordinary ebp save/restore. This avoids some special cases
+   for the frame pointer later. */
+#ifdef CONFIG_FRAME_POINTER
+ .macro FRAME
+ pushl %ebp
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebp,0
+ movl %esp,%ebp
+ .endm
+ .macro ENDFRAME
+ popl %ebp
+ CFI_ADJUST_CFA_OFFSET -4
+ CFI_RESTORE ebp
+ .endm
+#else
+ .macro FRAME
+ .endm
+ .macro ENDFRAME
+ .endm
+#endif
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index b3783a32abe..8ffbb0f0745 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -1,6 +1,8 @@
#ifndef _ASM_GENAPIC_H
#define _ASM_GENAPIC_H 1
+#include <asm/mpspec.h>
+
/*
* Generic APIC driver interface.
*
@@ -63,14 +65,25 @@ struct genapic {
unsigned (*get_apic_id)(unsigned long x);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-
+
+#ifdef CONFIG_SMP
/* ipi */
void (*send_IPI_mask)(cpumask_t mask, int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
+#endif
};
-#define APICFUNC(x) .x = x
+#define APICFUNC(x) .x = x,
+
+/* More functions could probably be marked IPIFUNC and save some space
+   in UP GENERICARCH kernels, but I don't have the nerve right now
+   to untangle this mess. -AK */
+#ifdef CONFIG_SMP
+#define IPIFUNC(x) APICFUNC(x)
+#else
+#define IPIFUNC(x)
+#endif
#define APIC_INIT(aname, aprobe) { \
.name = aname, \
@@ -80,33 +93,33 @@ struct genapic {
.no_balance_irq = NO_BALANCE_IRQ, \
.ESR_DISABLE = esr_disable, \
.apic_destination_logical = APIC_DEST_LOGICAL, \
- APICFUNC(apic_id_registered), \
- APICFUNC(target_cpus), \
- APICFUNC(check_apicid_used), \
- APICFUNC(check_apicid_present), \
- APICFUNC(init_apic_ldr), \
- APICFUNC(ioapic_phys_id_map), \
- APICFUNC(clustered_apic_check), \
- APICFUNC(multi_timer_check), \
- APICFUNC(apicid_to_node), \
- APICFUNC(cpu_to_logical_apicid), \
- APICFUNC(cpu_present_to_apicid), \
- APICFUNC(apicid_to_cpu_present), \
- APICFUNC(mpc_apic_id), \
- APICFUNC(setup_portio_remap), \
- APICFUNC(check_phys_apicid_present), \
- APICFUNC(mpc_oem_bus_info), \
- APICFUNC(mpc_oem_pci_bus), \
- APICFUNC(mps_oem_check), \
- APICFUNC(get_apic_id), \
+ APICFUNC(apic_id_registered) \
+ APICFUNC(target_cpus) \
+ APICFUNC(check_apicid_used) \
+ APICFUNC(check_apicid_present) \
+ APICFUNC(init_apic_ldr) \
+ APICFUNC(ioapic_phys_id_map) \
+ APICFUNC(clustered_apic_check) \
+ APICFUNC(multi_timer_check) \
+ APICFUNC(apicid_to_node) \
+ APICFUNC(cpu_to_logical_apicid) \
+ APICFUNC(cpu_present_to_apicid) \
+ APICFUNC(apicid_to_cpu_present) \
+ APICFUNC(mpc_apic_id) \
+ APICFUNC(setup_portio_remap) \
+ APICFUNC(check_phys_apicid_present) \
+ APICFUNC(mpc_oem_bus_info) \
+ APICFUNC(mpc_oem_pci_bus) \
+ APICFUNC(mps_oem_check) \
+ APICFUNC(get_apic_id) \
.apic_id_mask = APIC_ID_MASK, \
- APICFUNC(cpu_mask_to_apicid), \
- APICFUNC(acpi_madt_oem_check), \
- APICFUNC(send_IPI_mask), \
- APICFUNC(send_IPI_allbutself), \
- APICFUNC(send_IPI_all), \
- APICFUNC(enable_apic_mode), \
- APICFUNC(phys_pkg_id), \
+ APICFUNC(cpu_mask_to_apicid) \
+ APICFUNC(acpi_madt_oem_check) \
+ IPIFUNC(send_IPI_mask) \
+ IPIFUNC(send_IPI_allbutself) \
+ IPIFUNC(send_IPI_all) \
+ APICFUNC(enable_apic_mode) \
+ APICFUNC(phys_pkg_id) \
}
extern struct genapic *genapic;
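Note that the trailing comma moved from the APIC_INIT() body into APICFUNC() itself: that way IPIFUNC() can expand to nothing on UP kernels without leaving a stray comma in the initializer list. A minimal stand-alone illustration of the idiom:

    /* Why the comma lives inside the macro: with the comma at the use
     * site, an empty expansion would leave a bare ',' behind. */
    struct ops { void (*f)(void); void (*g)(void); };

    static void f(void) { }

    #define FUNC(x)   .x = x,
    #define NOFUNC(x)          /* "UP build": expands to nothing */

    static struct ops ops = {
        FUNC(f)
        NOFUNC(g)              /* no dangling comma; g stays NULL */
    };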
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
index 134ea9cc528..b52cd60a075 100644
--- a/include/asm-i386/intel_arch_perfmon.h
+++ b/include/asm-i386/intel_arch_perfmon.h
@@ -14,6 +14,18 @@
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+ (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+union cpuid10_eax {
+ struct {
+ unsigned int version_id:8;
+ unsigned int num_counters:8;
+ unsigned int bit_width:8;
+ unsigned int mask_length:8;
+ } split;
+ unsigned int full;
+};
#endif /* X86_INTEL_ARCH_PERFMON_H */
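The new union maps onto EAX as returned by CPUID leaf 0xa (architectural performance monitoring). A user-space sketch of decoding it, assuming a CPU that implements that leaf:

    #include <stdio.h>

    union cpuid10_eax {
        struct {
            unsigned int version_id:8;
            unsigned int num_counters:8;
            unsigned int bit_width:8;
            unsigned int mask_length:8;
        } split;
        unsigned int full;
    };

    int main(void)
    {
        union cpuid10_eax eax;
        unsigned int ebx, ecx, edx;

        /* CPUID leaf 0xa reports architectural perfmon capabilities. */
        __asm__("cpuid"
                : "=a" (eax.full), "=b" (ebx), "=c" (ecx), "=d" (edx)
                : "a" (0xa));

        printf("perfmon v%u, %u counters, %u bits wide\n",
               eax.split.version_id, eax.split.num_counters,
               eax.split.bit_width);
        return 0;
    }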
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 5092e819b8a..5d309275a1d 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -188,6 +188,16 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
/* 1 if "noapic" boot option passed */
extern int skip_ioapic_setup;
+static inline void disable_ioapic_setup(void)
+{
+ skip_ioapic_setup = 1;
+}
+
+static inline int ioapic_setup_disabled(void)
+{
+ return skip_ioapic_setup;
+}
+
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
@@ -206,6 +216,7 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
+static inline void disable_ioapic_setup(void) { }
#endif
extern int assign_irq_vector(int irq);
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index 53f0e06672d..4dfc9f5ed03 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -1,6 +1,26 @@
#ifndef _I386_KEXEC_H
#define _I386_KEXEC_H
+#define PA_CONTROL_PAGE 0
+#define VA_CONTROL_PAGE 1
+#define PA_PGD 2
+#define VA_PGD 3
+#define PA_PTE_0 4
+#define VA_PTE_0 5
+#define PA_PTE_1 6
+#define VA_PTE_1 7
+#ifdef CONFIG_X86_PAE
+#define PA_PMD_0 8
+#define VA_PMD_0 9
+#define PA_PMD_1 10
+#define VA_PMD_1 11
+#define PAGES_NR 12
+#else
+#define PAGES_NR 8
+#endif
+
+#ifndef __ASSEMBLY__
+
#include <asm/fixmap.h>
#include <asm/ptrace.h>
#include <asm/string.h>
@@ -72,5 +92,12 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
newregs->eip = (unsigned long)current_text_addr();
}
}
+asmlinkage NORET_TYPE void
+relocate_kernel(unsigned long indirection_page,
+ unsigned long control_page,
+ unsigned long start_address,
+ unsigned int has_pae) ATTRIB_NORET;
+
+#endif /* __ASSEMBLY__ */
#endif /* _I386_KEXEC_H */
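The PA_/VA_ constants index a scratch array of physical/virtual page pairs that the kexec path hands to relocate_kernel(). A fragment of how such an array is filled, loosely modeled on the i386 machine_kexec() of this era (helper names and the exact call are illustrative):

    /* Pair each physical address with its virtual mapping so
     * relocate_kernel() can run with paging in a known state.
     * Illustrative fragment only. */
    unsigned long page_list[PAGES_NR];

    page_list[PA_CONTROL_PAGE] = __pa(control_page);
    page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
    page_list[PA_PGD]          = __pa(kexec_pgd);
    page_list[VA_PGD]          = (unsigned long)kexec_pgd;
    /* ... PTE (and, under PAE, PMD) pairs follow the same pattern ... */

    relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
                    image->start, cpu_has_pae);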
diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h
index b5f3f0d0b2b..26333685a7f 100644
--- a/include/asm-i386/mach-es7000/mach_apic.h
+++ b/include/asm-i386/mach-es7000/mach_apic.h
@@ -123,9 +123,13 @@ extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
+#ifdef CONFIG_SMP
if (cpu >= NR_CPUS)
return BAD_APICID;
return (int)cpu_2_logical_apicid[cpu];
+#else
+ return logical_smp_processor_id();
+#endif
}
static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused)
diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h
index 9fd07328628..a81b0596159 100644
--- a/include/asm-i386/mach-summit/mach_apic.h
+++ b/include/asm-i386/mach-summit/mach_apic.h
@@ -46,10 +46,12 @@ extern u8 cpu_2_logical_apicid[];
static inline void init_apic_ldr(void)
{
unsigned long val, id;
- int i, count;
- u8 lid;
+ int count = 0;
u8 my_id = (u8)hard_smp_processor_id();
u8 my_cluster = (u8)apicid_cluster(my_id);
+#ifdef CONFIG_SMP
+ u8 lid;
+ int i;
/* Create logical APIC IDs by counting CPUs already in cluster. */
for (count = 0, i = NR_CPUS; --i >= 0; ) {
@@ -57,6 +59,7 @@ static inline void init_apic_ldr(void)
if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
++count;
}
+#endif
/* We only have a 4 wide bitmap in cluster mode. If a deranged
* BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
@@ -91,9 +94,13 @@ static inline int apicid_to_node(int logical_apicid)
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
+#ifdef CONFIG_SMP
if (cpu >= NR_CPUS)
return BAD_APICID;
return (int)cpu_2_logical_apicid[cpu];
+#else
+ return logical_smp_processor_id();
+#endif
}
static inline int cpu_present_to_apicid(int mps_cpu)
diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h
index 05a53853122..7a17d9e58ad 100644
--- a/include/asm-i386/mutex.h
+++ b/include/asm-i386/mutex.h
@@ -30,14 +30,10 @@ do { \
\
__asm__ __volatile__( \
LOCK_PREFIX " decl (%%eax) \n" \
- " js 2f \n" \
+ " jns 1f \n" \
+ " call "#fail_fn" \n" \
"1: \n" \
\
- LOCK_SECTION_START("") \
- "2: call "#fail_fn" \n" \
- " jmp 1b \n" \
- LOCK_SECTION_END \
- \
:"=a" (dummy) \
: "a" (count) \
: "memory", "ecx", "edx"); \
@@ -86,14 +82,10 @@ do { \
\
__asm__ __volatile__( \
LOCK_PREFIX " incl (%%eax) \n" \
- " jle 2f \n" \
+ " jg 1f \n" \
+ " call "#fail_fn" \n" \
"1: \n" \
\
- LOCK_SECTION_START("") \
- "2: call "#fail_fn" \n" \
- " jmp 1b \n" \
- LOCK_SECTION_END \
- \
:"=a" (dummy) \
: "a" (count) \
: "memory", "ecx", "edx"); \
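The rewrite replaces the old out-of-line LOCK_SECTION trampoline with a forward branch over a direct call, so the uncontended path falls straight through. In C terms the lock-side fastpath is roughly the following (an illustrative equivalent using a GCC atomic builtin, not the kernel implementation):

    /* Roughly what the asm fastpath does: LOCK decl; jns 1f; call fail_fn; 1: */
    static void mutex_lock_sketch(int *count, void (*fail_fn)(int *))
    {
        if (__sync_sub_and_fetch(count, 1) < 0)   /* went negative: contended */
            fail_fn(count);                       /* slow path: go to sleep   */
    }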
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 67d99479999..303bcd4592b 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -6,32 +6,29 @@
#include <linux/pm.h>
-struct pt_regs;
-
-typedef int (*nmi_callback_t)(struct pt_regs * regs, int cpu);
-
/**
- * set_nmi_callback
+ * do_nmi_callback
*
- * Set a handler for an NMI. Only one handler may be
- * set. Return 1 if the NMI was handled.
+ * Check to see if a callback exists and execute it. Return 1
+ * if the handler exists and was handled successfully.
*/
-void set_nmi_callback(nmi_callback_t callback);
-
-/**
- * unset_nmi_callback
- *
- * Remove the handler previously set.
- */
-void unset_nmi_callback(void);
-
-extern void setup_apic_nmi_watchdog (void);
-extern int reserve_lapic_nmi(void);
-extern void release_lapic_nmi(void);
+int do_nmi_callback(struct pt_regs *regs, int cpu);
+
+extern int nmi_watchdog_enabled;
+extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
+extern int avail_to_resrv_perfctr_nmi(unsigned int);
+extern int reserve_perfctr_nmi(unsigned int);
+extern void release_perfctr_nmi(unsigned int);
+extern int reserve_evntsel_nmi(unsigned int);
+extern void release_evntsel_nmi(unsigned int);
+
+extern void setup_apic_nmi_watchdog (void *);
+extern void stop_apic_nmi_watchdog (void *);
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
-extern void nmi_watchdog_tick (struct pt_regs * regs);
+extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
+extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
#define NMI_DEFAULT -1
#define NMI_NONE 0
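The reserve/release pairs let the NMI watchdog share the performance-counter MSRs with other users such as oprofile. A hedged usage fragment (the specific MSR names are illustrative):

    /* Fragment: claim a counter/event-select pair before programming
     * the watchdog, releasing the counter again on failure. */
    if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0))
        return -EBUSY;
    if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0)) {
        release_perfctr_nmi(MSR_P6_PERFCTR0);
        return -EBUSY;
    }
    /* ... program the counter and enable the watchdog ... */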
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 0dc051a8078..541b3e23433 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -411,8 +411,6 @@ extern pte_t *lookup_address(unsigned long address);
static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif
-extern void noexec_setup(const char *str);
-
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index 1910880fcd4..a4a0e5207db 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -27,6 +27,7 @@ struct pt_regs {
#ifdef __KERNEL__
#include <asm/vm86.h>
+#include <asm/segment.h>
struct task_struct;
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
@@ -40,18 +41,14 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int erro
*/
static inline int user_mode(struct pt_regs *regs)
{
- return (regs->xcs & 3) != 0;
+ return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
}
static inline int user_mode_vm(struct pt_regs *regs)
{
- return ((regs->xcs & 3) | (regs->eflags & VM_MASK)) != 0;
+ return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
}
#define instruction_pointer(regs) ((regs)->eip)
-#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
#endif /* __KERNEL__ */
#endif
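Testing the RPL field instead of `!= 0` means only ring 3 counts as user mode, which matters once kernel code may run outside ring 0. A small host-side check of the new user_mode_vm() logic on sample register values (a sketch; constants copied from the i386 headers):

    #include <assert.h>

    #define SEGMENT_RPL_MASK 0x3
    #define USER_RPL         0x3
    #define VM_MASK          0x00020000   /* EFLAGS.VM, bit 17 */

    /* Mirrors the new user_mode_vm() test. */
    static int user_mode_vm_sketch(unsigned xcs, unsigned eflags)
    {
        return ((xcs & SEGMENT_RPL_MASK) | (eflags & VM_MASK)) >= USER_RPL;
    }

    int main(void)
    {
        assert(!user_mode_vm_sketch(0x60, 0));        /* kernel CS, ring 0 */
        assert(user_mode_vm_sketch(0x73, 0));         /* user CS, ring 3   */
        assert(user_mode_vm_sketch(0x00, VM_MASK));   /* vm86: VM flag set */
        return 0;
    }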
diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h
index 87c069ccba0..c3e5db32fa4 100644
--- a/include/asm-i386/rwlock.h
+++ b/include/asm-i386/rwlock.h
@@ -20,52 +20,6 @@
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
-#define __build_read_lock_ptr(rw, helper) \
- asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" \
- "jns 1f\n" \
- "call " helper "\n\t" \
- "1:\n" \
- ::"a" (rw) : "memory")
-
-#define __build_read_lock_const(rw, helper) \
- asm volatile(LOCK_PREFIX " subl $1,%0\n\t" \
- "jns 1f\n" \
- "pushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
- "call " helper "\n\t" \
- "popl %%eax\n\t" \
- "1:\n" \
- :"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_read_lock(rw, helper) do { \
- if (__builtin_constant_p(rw)) \
- __build_read_lock_const(rw, helper); \
- else \
- __build_read_lock_ptr(rw, helper); \
- } while (0)
-
-#define __build_write_lock_ptr(rw, helper) \
- asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
- "jz 1f\n" \
- "call " helper "\n\t" \
- "1:\n" \
- ::"a" (rw) : "memory")
-
-#define __build_write_lock_const(rw, helper) \
- asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",%0\n\t" \
- "jz 1f\n" \
- "pushl %%eax\n\t" \
- "leal %0,%%eax\n\t" \
- "call " helper "\n\t" \
- "popl %%eax\n\t" \
- "1:\n" \
- :"+m" (*(volatile int *)rw) : : "memory")
-
-#define __build_write_lock(rw, helper) do { \
- if (__builtin_constant_p(rw)) \
- __build_write_lock_const(rw, helper); \
- else \
- __build_write_lock_ptr(rw, helper); \
- } while (0)
+/* Code is in asm-i386/spinlock.h */
#endif
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index 43113f5608e..bc598d6388e 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -99,17 +99,9 @@ static inline void __down_read(struct rw_semaphore *sem)
__asm__ __volatile__(
"# beginning down_read\n\t"
LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */
- " js 2f\n\t" /* jump if we weren't granted the lock */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
"1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " pushl %%ecx\n\t"
- " pushl %%edx\n\t"
- " call rwsem_down_read_failed\n\t"
- " popl %%edx\n\t"
- " popl %%ecx\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
"# ending down_read\n\t"
: "+m" (sem->count)
: "a" (sem)
@@ -151,15 +143,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
"# beginning down_write\n\t"
LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */
" testl %%edx,%%edx\n\t" /* was the count 0 before? */
- " jnz 2f\n\t" /* jump if we weren't granted the lock */
- "1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " pushl %%ecx\n\t"
- " call rwsem_down_write_failed\n\t"
- " popl %%ecx\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
+ " jz 1f\n"
+ " call call_rwsem_down_write_failed\n"
+ "1:\n"
"# ending down_write"
: "+m" (sem->count), "=d" (tmp)
: "a" (sem), "1" (tmp)
@@ -193,17 +179,9 @@ static inline void __up_read(struct rw_semaphore *sem)
__asm__ __volatile__(
"# beginning __up_read\n\t"
LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */
- " js 2f\n\t" /* jump if the lock is being waited upon */
- "1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " decw %%dx\n\t" /* do nothing if still outstanding active readers */
- " jnz 1b\n\t"
- " pushl %%ecx\n\t"
- " call rwsem_wake\n\t"
- " popl %%ecx\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n"
+ "1:\n"
"# ending __up_read\n"
: "+m" (sem->count), "=d" (tmp)
: "a" (sem), "1" (tmp)
@@ -219,17 +197,9 @@ static inline void __up_write(struct rw_semaphore *sem)
"# beginning __up_write\n\t"
" movl %2,%%edx\n\t"
LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
- " jnz 2f\n\t" /* jump if the lock is being waited upon */
+ " jz 1f\n"
+ " call call_rwsem_wake\n"
"1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " decw %%dx\n\t" /* did the active count reduce to 0? */
- " jnz 1b\n\t" /* jump back if not */
- " pushl %%ecx\n\t"
- " call rwsem_wake\n\t"
- " popl %%ecx\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
"# ending __up_write\n"
: "+m" (sem->count)
: "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
@@ -244,17 +214,9 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
__asm__ __volatile__(
"# beginning __downgrade_write\n\t"
LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
- " js 2f\n\t" /* jump if the lock is being waited upon */
+ " jns 1f\n\t"
+ " call call_rwsem_downgrade_wake\n"
"1:\n\t"
- LOCK_SECTION_START("")
- "2:\n\t"
- " pushl %%ecx\n\t"
- " pushl %%edx\n\t"
- " call rwsem_downgrade_wake\n\t"
- " popl %%edx\n\t"
- " popl %%ecx\n\t"
- " jmp 1b\n"
- LOCK_SECTION_END
"# ending __downgrade_write\n"
: "+m" (sem->count)
: "a" (sem), "i" (-RWSEM_WAITING_BIAS)
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index faf995307b9..b7ab59685ba 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -83,6 +83,11 @@
#define GDT_SIZE (GDT_ENTRIES * 8)
+/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
+#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
+/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
+
/* Simple and small GDT entries for booting only */
#define GDT_ENTRY_BOOT_CS 2
@@ -112,4 +117,16 @@
*/
#define IDT_ENTRIES 256
+/* Bottom two bits of selector give the ring privilege level */
+#define SEGMENT_RPL_MASK 0x3
+/* Bit 2 is table indicator (LDT/GDT) */
+#define SEGMENT_TI_MASK 0x4
+
+/* User mode is privilege level 3 */
+#define USER_RPL 0x3
+/* LDT segment has TI set, GDT has it cleared */
+#define SEGMENT_LDT 0x4
+#define SEGMENT_GDT 0x0
+
+#define get_kernel_rpl() 0
#endif
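SEGMENT_IS_FLAT_CODE() works because __KERNEL_CS (entry 12, selector 0x60) and __USER_CS (entry 14, selector 0x73) differ only in the RPL bits (0x3) and bit 4 (0x10, the "2 entries apart"), all of which the 0xec mask clears. A quick stand-alone check:

    #include <assert.h>

    #define GDT_ENTRY_KERNEL_CS 12
    #define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)

    int main(void)
    {
        assert(SEGMENT_IS_FLAT_CODE(0x60));    /* __KERNEL_CS: 12*8     */
        assert(SEGMENT_IS_FLAT_CODE(0x73));    /* __USER_CS: 14*8 + 3   */
        assert(!SEGMENT_IS_FLAT_CODE(0x7b));   /* __USER_DS, entry 15   */
        return 0;
    }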
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index d51e800acf2..e63b6a68f04 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -100,13 +100,10 @@ static inline void down(struct semaphore * sem)
__asm__ __volatile__(
"# atomic down operation\n\t"
LOCK_PREFIX "decl %0\n\t" /* --sem->count */
- "js 2f\n"
- "1:\n"
- LOCK_SECTION_START("")
- "2:\tlea %0,%%eax\n\t"
- "call __down_failed\n\t"
- "jmp 1b\n"
- LOCK_SECTION_END
+ "jns 2f\n"
+ "\tlea %0,%%eax\n\t"
+ "call __down_failed\n"
+ "2:"
:"+m" (sem->count)
:
:"memory","ax");
@@ -123,15 +120,12 @@ static inline int down_interruptible(struct semaphore * sem)
might_sleep();
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
+ "xorl %0,%0\n\t"
LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "js 2f\n\t"
- "xorl %0,%0\n"
- "1:\n"
- LOCK_SECTION_START("")
- "2:\tlea %1,%%eax\n\t"
- "call __down_failed_interruptible\n\t"
- "jmp 1b\n"
- LOCK_SECTION_END
+ "jns 2f\n\t"
+ "lea %1,%%eax\n\t"
+ "call __down_failed_interruptible\n"
+ "2:"
:"=a" (result), "+m" (sem->count)
:
:"memory");
@@ -148,15 +142,12 @@ static inline int down_trylock(struct semaphore * sem)
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
+ "xorl %0,%0\n\t"
LOCK_PREFIX "decl %1\n\t" /* --sem->count */
- "js 2f\n\t"
- "xorl %0,%0\n"
- "1:\n"
- LOCK_SECTION_START("")
- "2:\tlea %1,%%eax\n\t"
+ "jns 2f\n\t"
+ "lea %1,%%eax\n\t"
"call __down_failed_trylock\n\t"
- "jmp 1b\n"
- LOCK_SECTION_END
+ "2:\n"
:"=a" (result), "+m" (sem->count)
:
:"memory");
@@ -166,22 +157,16 @@ static inline int down_trylock(struct semaphore * sem)
/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
*/
static inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
- "jle 2f\n"
- "1:\n"
- LOCK_SECTION_START("")
- "2:\tlea %0,%%eax\n\t"
- "call __up_wakeup\n\t"
- "jmp 1b\n"
- LOCK_SECTION_END
- ".subsection 0\n"
+ "jg 1f\n\t"
+ "lea %0,%%eax\n\t"
+ "call __up_wakeup\n"
+ "1:"
:"+m" (sem->count)
:
:"memory","ax");
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 142d10e34ad..32ac8c91d5c 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -80,17 +80,12 @@ static inline int hard_smp_processor_id(void)
return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
}
#endif
-
-static __inline int logical_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
-}
-
#endif
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
+extern unsigned int num_processors;
+
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_SMP */
@@ -100,4 +95,15 @@ extern void __cpu_die(unsigned int cpu);
#define NO_PROC_ID 0xFF /* No processor magic marker */
#endif
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_LOCAL_APIC
+static __inline int logical_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
+}
+#endif
+#endif
+
#endif
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index d1020363c41..b0b3043f05e 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -4,8 +4,12 @@
#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
+#include <asm/processor.h>
#include <linux/compiler.h>
+#define CLI_STRING "cli"
+#define STI_STRING "sti"
+
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
@@ -17,67 +21,64 @@
* (the type definitions are in asm/spinlock_types.h)
*/
-#define __raw_spin_is_locked(x) \
- (*(volatile signed char *)(&(x)->slock) <= 0)
-
-#define __raw_spin_lock_string \
- "\n1:\t" \
- LOCK_PREFIX " ; decb %0\n\t" \
- "jns 3f\n" \
- "2:\t" \
- "rep;nop\n\t" \
- "cmpb $0,%0\n\t" \
- "jle 2b\n\t" \
- "jmp 1b\n" \
- "3:\n\t"
-
-/*
- * NOTE: there's an irqs-on section here, which normally would have to be
- * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
- * __raw_spin_lock_string_flags().
- */
-#define __raw_spin_lock_string_flags \
- "\n1:\t" \
- LOCK_PREFIX " ; decb %0\n\t" \
- "jns 5f\n" \
- "2:\t" \
- "testl $0x200, %1\n\t" \
- "jz 4f\n\t" \
- "sti\n" \
- "3:\t" \
- "rep;nop\n\t" \
- "cmpb $0, %0\n\t" \
- "jle 3b\n\t" \
- "cli\n\t" \
- "jmp 1b\n" \
- "4:\t" \
- "rep;nop\n\t" \
- "cmpb $0, %0\n\t" \
- "jg 1b\n\t" \
- "jmp 4b\n" \
- "5:\n\t"
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+{
+ return *(volatile signed char *)(&(x)->slock) <= 0;
+}
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- asm(__raw_spin_lock_string : "+m" (lock->slock) : : "memory");
+ asm volatile("\n1:\t"
+ LOCK_PREFIX " ; decb %0\n\t"
+ "jns 3f\n"
+ "2:\t"
+ "rep;nop\n\t"
+ "cmpb $0,%0\n\t"
+ "jle 2b\n\t"
+ "jmp 1b\n"
+ "3:\n\t"
+ : "+m" (lock->slock) : : "memory");
}
/*
* It is easier for the lock validator if interrupts are not re-enabled
* in the middle of a lock-acquire. This is a performance feature anyway
* so we turn it off:
+ *
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
*/
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
- asm(__raw_spin_lock_string_flags : "+m" (lock->slock) : "r" (flags) : "memory");
+ asm volatile(
+ "\n1:\t"
+ LOCK_PREFIX " ; decb %0\n\t"
+ "jns 5f\n"
+ "2:\t"
+ "testl $0x200, %1\n\t"
+ "jz 4f\n\t"
+ STI_STRING "\n"
+ "3:\t"
+ "rep;nop\n\t"
+ "cmpb $0, %0\n\t"
+ "jle 3b\n\t"
+ CLI_STRING "\n\t"
+ "jmp 1b\n"
+ "4:\t"
+ "rep;nop\n\t"
+ "cmpb $0, %0\n\t"
+ "jg 1b\n\t"
+ "jmp 4b\n"
+ "5:\n\t"
+ : "+m" (lock->slock) : "r" (flags) : "memory");
}
#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
char oldval;
- __asm__ __volatile__(
+ asm volatile(
"xchgb %b0,%1"
:"=q" (oldval), "+m" (lock->slock)
:"0" (0) : "memory");
@@ -93,38 +94,29 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
-#define __raw_spin_unlock_string \
- "movb $1,%0" \
- :"+m" (lock->slock) : : "memory"
-
-
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(
- __raw_spin_unlock_string
- );
+ asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
}
#else
-#define __raw_spin_unlock_string \
- "xchgb %b0, %1" \
- :"=q" (oldval), "+m" (lock->slock) \
- :"0" (oldval) : "memory"
-
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
char oldval = 1;
- __asm__ __volatile__(
- __raw_spin_unlock_string
- );
+ asm volatile("xchgb %b0, %1"
+ : "=q" (oldval), "+m" (lock->slock)
+ : "0" (oldval) : "memory");
}
#endif
-#define __raw_spin_unlock_wait(lock) \
- do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+{
+ while (__raw_spin_is_locked(lock))
+ cpu_relax();
+}
/*
* Read-write spinlocks, allowing multiple readers
@@ -151,22 +143,36 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_read_can_lock(x) ((int)(x)->lock > 0)
+static inline int __raw_read_can_lock(raw_rwlock_t *x)
+{
+ return (int)(x)->lock > 0;
+}
/**
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+static inline int __raw_write_can_lock(raw_rwlock_t *x)
+{
+ return (x)->lock == RW_LOCK_BIAS;
+}
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
- __build_read_lock(rw, "__read_lock_failed");
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+ ::"a" (rw) : "memory");
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
- __build_write_lock(rw, "__write_lock_failed");
+ asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+ ::"a" (rw) : "memory");
}
static inline int __raw_read_trylock(raw_rwlock_t *lock)
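The trylock above swaps a 0 into the lock byte with xchgb and succeeds iff the previous value was positive (the byte is 1 when the lock is free). A rough C equivalent (illustrative only, using a GCC builtin that compiles to xchg on x86):

    /* Atomically take the byte to 0; the lock was free iff the
     * old value was positive. */
    static int spin_trylock_sketch(signed char *slock)
    {
        return __sync_lock_test_and_set(slock, 0) > 0;   /* xchgb */
    }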
diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h
new file mode 100644
index 00000000000..7d1f6a5cbfc
--- /dev/null
+++ b/include/asm-i386/stacktrace.h
@@ -0,0 +1 @@
+#include <asm-x86_64/stacktrace.h>
diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h
new file mode 100644
index 00000000000..399bf6026b1
--- /dev/null
+++ b/include/asm-i386/therm_throt.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_I386_THERM_THROT_H__
+#define __ASM_I386_THERM_THROT_H__ 1
+
+#include <asm/atomic.h>
+
+extern atomic_t therm_throt_en;
+int therm_throt_process(int curr);
+
+#endif /* __ASM_I386_THERM_THROT_H__ */
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index d57ca5c540b..360648b0f2b 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -36,8 +36,6 @@
: "memory"); \
} while (0)
-extern unsigned long pgkern_mask;
-
# define __flush_tlb_all() \
do { \
if (cpu_has_pge) \
@@ -49,7 +47,7 @@ extern unsigned long pgkern_mask;
#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
#define __flush_tlb_single(addr) \
- __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+ __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
#ifdef CONFIG_X86_INVLPG
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 97b828ce31e..c13933185c1 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -6,7 +6,6 @@
#ifndef _ASM_i386_TSC_H
#define _ASM_i386_TSC_H
-#include <linux/config.h>
#include <asm/processor.h>
/*
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index fc1c8ddae14..565d0897b20 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -323,10 +323,11 @@
#define __NR_tee 315
#define __NR_vmsplice 316
#define __NR_move_pages 317
+#define __NR_getcpu 318
#ifdef __KERNEL__
-#define NR_syscalls 318
+#define NR_syscalls 319
/*
* user-visible error numbers are in the range -1 - -128: see
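The new getcpu syscall predates any libc wrapper, so it can be exercised by number directly. A small sketch (argument order follows the getcpu interface: cpu pointer, node pointer, and an unused cache pointer; 318 is the i386 number from the hunk above):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned cpu, node;

        /* __NR_getcpu = 318 on i386; no glibc wrapper exists yet. */
        if (syscall(318, &cpu, &node, NULL) == 0)
            printf("running on cpu %u, node %u\n", cpu, node);
        return 0;
    }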
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 4c1a0b96856..5031d693b89 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -18,6 +18,7 @@ struct unwind_frame_info
{
struct pt_regs regs;
struct task_struct *task;
+ unsigned call_frame:1;
};
#define UNW_PC(frame) (frame)->regs.eip
@@ -28,6 +29,8 @@ struct unwind_frame_info
#define FRAME_LINK_OFFSET 0
#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.esp0)
#define STACK_TOP(tsk) ((tsk)->thread.esp0)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
#endif
#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
@@ -42,6 +45,10 @@ struct unwind_frame_info
PTREGS_INFO(edi), \
PTREGS_INFO(eip)
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && \
+ !((raItem).value * (dataAlign) + 4))
+
static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
/*const*/ struct pt_regs *regs)
{
@@ -88,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
#define UNW_PC(frame) ((void)(frame), 0)
#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
static inline int arch_unw_user_mode(const void *info)
{