Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/Kbuild      |   1
-rw-r--r--  include/asm-x86_64/atomic.h    |  65
-rw-r--r--  include/asm-x86_64/cmpxchg.h   | 134
-rw-r--r--  include/asm-x86_64/const.h     |  20
-rw-r--r--  include/asm-x86_64/kdebug.h    |  25
-rw-r--r--  include/asm-x86_64/kexec.h     |   2
-rw-r--r--  include/asm-x86_64/local.h     | 196
-rw-r--r--  include/asm-x86_64/page.h      |   9
-rw-r--r--  include/asm-x86_64/pgtable.h   |  17
-rw-r--r--  include/asm-x86_64/serial.h    |  16
-rw-r--r--  include/asm-x86_64/system.h    |  96
-rw-r--r--  include/asm-x86_64/termbits.h  |   2
-rw-r--r--  include/asm-x86_64/unistd.h    |   2
13 files changed, 363 insertions, 222 deletions
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 89ad1fc27c8..75a2deffca6 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -19,4 +19,3 @@ unifdef-y += mce.h
unifdef-y += msr.h
unifdef-y += mtrr.h
unifdef-y += vsyscall.h
-unifdef-y += const.h
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 706ca4b6000..f2e64634fa4 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
#define __ARCH_X86_64_ATOMIC__
#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
/* atomic_t should be 32 bit signed type */
@@ -375,8 +376,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
long __i = i;
__asm__ __volatile__(
LOCK_PREFIX "xaddq %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
+ :"+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}
@@ -388,7 +389,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
#define atomic64_inc_return(v) (atomic64_add_return(1,v))
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
@@ -400,22 +404,49 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
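
The new atomic64_cmpxchg()/atomic64_xchg() wrappers and the add_unless helpers above are typically used for counters that must not be resurrected once they reach a sentinel value. A minimal usage sketch (hypothetical structure and field names, kernel context assumed):

    #include <asm/atomic.h>

    struct obj {
            atomic_t refcnt;        /* hypothetical reference count */
            atomic64_t bytes;       /* hypothetical 64-bit statistic */
    };

    /* Take a reference only if the object has not already dropped to zero. */
    static int obj_get(struct obj *o)
    {
            return atomic_inc_not_zero(&o->refcnt);
    }

    /* Account bytes unless the counter already holds the sentinel value -1. */
    static void obj_account(struct obj *o, long n)
    {
            atomic64_add_unless(&o->bytes, n, -1);
    }
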
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
new file mode 100644
index 00000000000..09a6b6b6b74
--- /dev/null
+++ b/include/asm-x86_64/cmpxchg.h
@@ -0,0 +1,134 @@
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define __xg(x) ((volatile long *)(x))
+
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+ *ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__("cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+#endif
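
cmpxchg() (and cmpxchg_local() where cross-CPU atomicity is not required) is normally wrapped in a read-modify-write retry loop. A minimal sketch, assuming kernel context and a hypothetical shared variable:

    #include <asm/cmpxchg.h>

    static unsigned long observed_max;      /* hypothetical shared maximum */

    /* Publish a new maximum with a compare-and-swap retry loop. */
    static void record_max(unsigned long val)
    {
            unsigned long old, prev;

            do {
                    old = observed_max;
                    if (val <= old)
                            return;         /* current value is already larger */
                    prev = cmpxchg(&observed_max, old, val);
            } while (prev != old);          /* another CPU updated it first; retry */
    }
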
diff --git a/include/asm-x86_64/const.h b/include/asm-x86_64/const.h
deleted file mode 100644
index 54fb08f3db9..00000000000
--- a/include/asm-x86_64/const.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* const.h: Macros for dealing with constants. */
-
-#ifndef _X86_64_CONST_H
-#define _X86_64_CONST_H
-
-/* Some constant macros are used in both assembler and
- * C code. Therefore we cannot annotate them always with
- * 'UL' and other type specificers unilaterally. We
- * use the following macros to deal with this.
- */
-
-#ifdef __ASSEMBLY__
-#define _AC(X,Y) X
-#else
-#define __AC(X,Y) (X##Y)
-#define _AC(X,Y) __AC(X,Y)
-#endif
-
-
-#endif /* !(_X86_64_CONST_H) */
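
The _AC() macro itself is not lost; page.h and pgtable.h below switch to the identical definition in <linux/const.h>. Its only job is to append the type suffix when compiling C while leaving the bare constant for the assembler, e.g. (expansion shown for illustration; EXAMPLE_BASE is a hypothetical name):

    #include <linux/const.h>

    /* In C, _AC(0xffff810000000000, UL) expands to 0xffff810000000000UL;
     * with __ASSEMBLY__ defined it expands to plain 0xffff810000000000,
     * which the assembler can parse. */
    #define EXAMPLE_BASE    _AC(0xffff810000000000, UL)
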
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index 2b0c088e295..74feae945a2 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,19 +5,8 @@
struct pt_regs;
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
extern int register_page_fault_notifier(struct notifier_block *);
extern int unregister_page_fault_notifier(struct notifier_block *);
-extern struct atomic_notifier_head die_chain;
/* Grossly misnamed. */
enum die_val {
@@ -33,22 +22,10 @@ enum die_val {
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
+ DIE_NMI_POST,
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
- return atomic_notifier_call_chain(&die_chain, val, &args);
-}
-
extern void printk_address(unsigned long address);
extern void die(const char *,struct pt_regs *,long);
extern void __die(const char *,struct pt_regs *,long);
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 5fab957e109..738e581b67f 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -48,8 +48,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_X86_64
-#define MAX_NOTE_BYTES 1024
-
/*
 * Saving the registers of the cpu on which panic occurred in
* crash_kexec to save a valid sp. The registers of other cpus
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index e769e620022..e87492bb069 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -2,49 +2,183 @@
#define _ARCH_X8664_LOCAL_H
#include <linux/percpu.h>
+#include <asm/atomic.h>
typedef struct
{
- volatile long counter;
+ atomic_long_t a;
} local_t;
-#define LOCAL_INIT(i) { (i) }
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define local_read(v) ((v)->counter)
-#define local_set(v,i) (((v)->counter) = (i))
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
-static inline void local_inc(local_t *v)
+static inline void local_inc(local_t *l)
{
__asm__ __volatile__(
"incq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"=m" (l->a.counter)
+ :"m" (l->a.counter));
}
-static inline void local_dec(local_t *v)
+static inline void local_dec(local_t *l)
{
__asm__ __volatile__(
"decq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"=m" (l->a.counter)
+ :"m" (l->a.counter));
}
-static inline void local_add(long i, local_t *v)
+static inline void local_add(long i, local_t *l)
{
__asm__ __volatile__(
"addq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"=m" (l->a.counter)
+ :"ir" (i), "m" (l->a.counter));
}
-static inline void local_sub(long i, local_t *v)
+static inline void local_sub(long i, local_t *l)
{
__asm__ __volatile__(
"subq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"=m" (l->a.counter)
+ :"ir" (i), "m" (l->a.counter));
}
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer to type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_sub_and_test(long i, local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "subq %2,%0; sete %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"ir" (i), "m" (l->a.counter) : "memory");
+ return c;
+}
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer to type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int local_dec_and_test(local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "decq %0; sete %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"m" (l->a.counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer to type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_inc_and_test(local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "incq %0; sete %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"m" (l->a.counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * local_add_negative - add and test if negative
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int local_add_negative(long i, local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "addq %2,%0; sets %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"ir" (i), "m" (l->a.counter) : "memory");
+ return c;
+}
+
+/**
+ * local_add_return - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static __inline__ long local_add_return(long i, local_t *l)
+{
+ long __i = i;
+ __asm__ __volatile__(
+ "xaddq %0, %1;"
+ :"+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+}
+
+static __inline__ long local_sub_return(long i, local_t *l)
+{
+ return local_add_return(-i,l);
+}
+
+#define local_inc_return(l) (local_add_return(1,l))
+#define local_dec_return(l) (local_sub_return(1,l))
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+/* Always has a lock prefix */
+#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = local_cmpxchg((l), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
/* On x86-64 these are better than the atomic variants on SMP kernels
 because they don't use a lock prefix. */
#define __local_inc(l) local_inc(l)
@@ -62,27 +196,27 @@ static inline void local_sub(long i, local_t *v)
/* Need to disable preemption for the cpu local counters otherwise we could
still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(v) \
+#define cpu_local_wrap_v(l) \
({ local_t res__; \
preempt_disable(); \
- res__ = (v); \
+ res__ = (l); \
preempt_enable(); \
res__; })
-#define cpu_local_wrap(v) \
+#define cpu_local_wrap(l) \
({ preempt_disable(); \
- v; \
+ l; \
preempt_enable(); }) \
-#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
-#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
-#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
-#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
-#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
-#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-#define __cpu_local_inc(v) cpu_local_inc(v)
-#define __cpu_local_dec(v) cpu_local_dec(v)
-#define __cpu_local_add(i, v) cpu_local_add((i), (v))
-#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-#endif /* _ARCH_I386_LOCAL_H */
+#endif /* _ARCH_X8664_LOCAL_H */
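
local_t is meant for per-CPU statistics that are updated without a lock prefix and read (possibly inexactly) from other CPUs. A minimal sketch of a per-CPU event counter built on the interface above (hypothetical names, kernel context assumed):

    #include <linux/percpu.h>
    #include <asm/local.h>

    static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);  /* hypothetical counter */

    /* cpu_local_inc() disables preemption around the update, so the
     * increment always lands on the counter of the CPU we are running on. */
    static void count_hit(void)
    {
            cpu_local_inc(hits);
    }

    /* Readers on other CPUs only get an approximate, racy snapshot. */
    static long read_hits(int cpu)
    {
            return local_read(&per_cpu(hits, cpu));
    }
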
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 4d04e247956..dee632fa457 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,7 +1,7 @@
#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H
-#include <asm/const.h>
+#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
@@ -79,9 +79,10 @@ extern unsigned long phys_base;
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
+
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map 0xffffffff80000000
-#define __PAGE_OFFSET 0xffff810000000000
+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
@@ -93,7 +94,7 @@ extern unsigned long phys_base;
#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
#define KERNEL_TEXT_SIZE (40*1024*1024)
-#define KERNEL_TEXT_START 0xffffffff80000000
+#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
#define PAGE_OFFSET __PAGE_OFFSET
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index da3390faaea..08b9831f2e1 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -1,7 +1,7 @@
#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H
-#include <asm/const.h>
+#include <linux/const.h>
#ifndef __ASSEMBLY__
/*
@@ -134,11 +134,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS 0
-#define MAXMEM 0x3fffffffffff
-#define VMALLOC_START 0xffffc20000000000
-#define VMALLOC_END 0xffffe1ffffffffff
-#define MODULES_VADDR 0xffffffff88000000
-#define MODULES_END 0xfffffffffff00000
+#define MAXMEM _AC(0x3fffffffffff, UL)
+#define VMALLOC_START _AC(0xffffc20000000000, UL)
+#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
+#define MODULES_VADDR _AC(0xffffffff88000000, UL)
+#define MODULES_END _AC(0xfffffffffff00000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#define _PAGE_BIT_PRESENT 0
@@ -411,17 +411,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
-void vmalloc_sync_all(void);
extern int kern_addr_valid(unsigned long addr);
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#define HAVE_ARCH_UNMAPPED_AREA
#define pgtable_cache_init() do { } while (0)
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
index b0496e0d72a..8ebd765c674 100644
--- a/include/asm-x86_64/serial.h
+++ b/include/asm-x86_64/serial.h
@@ -11,19 +11,3 @@
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
-
-/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
-#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
-#endif
-
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 213b7fe5d99..b7b8021e8c4 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <asm/segment.h>
-#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
#ifdef __KERNEL__
@@ -124,100 +124,6 @@ static inline void sched_cacheflush(void)
#define nop() __asm__ __volatile__ ("nop")
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
- *ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
index 6cfc3bb10c1..7405756dd41 100644
--- a/include/asm-x86_64/termbits.h
+++ b/include/asm-x86_64/termbits.h
@@ -160,7 +160,7 @@ struct ktermios {
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
-#define IBSHIFT 8 /* Shift from CBAUD to CIBAUD */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */
#define ISIG 0000001
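
IBSHIFT is the distance from the output-speed bits (CBAUD) to the input-speed bits (CIBAUD) within c_cflag, which is why 16 is the correct value; the input rate field is extracted as in this sketch (standard termios bit layout assumed):

    #include <asm/termbits.h>

    /* Return the separate input baud code, if one was set via CIBAUD. */
    static unsigned int input_baud_bits(struct ktermios *t)
    {
            return (t->c_cflag >> IBSHIFT) & CBAUD;
    }
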
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 26e23e01c54..595703949df 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -619,6 +619,8 @@ __SYSCALL(__NR_sync_file_range, sys_sync_file_range)
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_move_pages 279
__SYSCALL(__NR_move_pages, sys_move_pages)
+#define __NR_utimensat 280
+__SYSCALL(__NR_utimensat, sys_utimensat)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
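
Until libc grows a wrapper, the freshly assigned __NR_utimensat can be exercised from user space through syscall(2); a minimal sketch, assuming headers that already define SYS_utimensat and UTIME_NOW:

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            /* Set both atime and mtime of "example.txt" to the current time. */
            struct timespec times[2] = {
                    { .tv_nsec = UTIME_NOW },
                    { .tv_nsec = UTIME_NOW },
            };

            if (syscall(SYS_utimensat, AT_FDCWD, "example.txt", times, 0) < 0)
                    perror("utimensat");
            return 0;
    }
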