Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/apicdef.h      |   1
-rw-r--r--  include/asm-x86_64/atomic.h       |   8
-rw-r--r--  include/asm-x86_64/bitops.h       |  42
-rw-r--r--  include/asm-x86_64/cache.h        |   2
-rw-r--r--  include/asm-x86_64/dmi.h          |  27
-rw-r--r--  include/asm-x86_64/elf.h          |   4
-rw-r--r--  include/asm-x86_64/floppy.h       |   2
-rw-r--r--  include/asm-x86_64/futex.h        |  27
-rw-r--r--  include/asm-x86_64/io.h           |  38
-rw-r--r--  include/asm-x86_64/kdebug.h       |  23
-rw-r--r--  include/asm-x86_64/local.h        |  18
-rw-r--r--  include/asm-x86_64/mmu_context.h  |   6
-rw-r--r--  include/asm-x86_64/mmzone.h       |  13
-rw-r--r--  include/asm-x86_64/numa.h         |   4
-rw-r--r--  include/asm-x86_64/page.h         |   3
-rw-r--r--  include/asm-x86_64/pda.h          |   2
-rw-r--r--  include/asm-x86_64/percpu.h       |   7
-rw-r--r--  include/asm-x86_64/pgalloc.h      |  28
-rw-r--r--  include/asm-x86_64/pgtable.h      |  14
-rw-r--r--  include/asm-x86_64/poll.h         |   1
-rw-r--r--  include/asm-x86_64/processor.h    |   7
-rw-r--r--  include/asm-x86_64/proto.h        |   3
-rw-r--r--  include/asm-x86_64/smp.h          |   1
-rw-r--r--  include/asm-x86_64/string.h       |  17
-rw-r--r--  include/asm-x86_64/suspend.h      |   4
-rw-r--r--  include/asm-x86_64/system.h       |   6
-rw-r--r--  include/asm-x86_64/topology.h     |   2
-rw-r--r--  include/asm-x86_64/unistd.h       |  10
28 files changed, 172 insertions(+), 148 deletions(-)
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index decaa2d540e..5a48e9bcf21 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -39,6 +39,7 @@
#define APIC_SPIV_FOCUS_DISABLED (1<<9)
#define APIC_SPIV_APIC_ENABLED (1<<8)
#define APIC_ISR 0x100
+#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e77..cecbf7baa6a 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
({ \
int c, old; \
c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
c = old; \
+ } \
c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
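
The rewritten atomic_add_unless above expands to the loop below: atomic_cmpxchg() returns the value it found in *v, so equality with the local snapshot c proves the addition landed, while any other value becomes the fresh snapshot for a retry. A function-form sketch (the function name is illustrative, not kernel API):

    /* Sketch of what the atomic_add_unless macro above expands to.
     * Returns nonzero iff the add was performed, i.e. v never held the
     * forbidden value u while we were trying. */
    static inline int add_unless_sketch(atomic_t *v, int a, int u)
    {
        int c = atomic_read(v), old;
        for (;;) {
            if (c == u)                     /* forbidden value: give up */
                break;
            old = atomic_cmpxchg(v, c, c + a);
            if (old == c)                   /* snapshot held: add landed */
                break;
            c = old;                        /* raced: retry with fresh value */
        }
        return c != u;
    }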
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index eb4df23e1e4..79212128d0f 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word)
#ifdef __KERNEL__
-static inline int sched_find_first_bit(const unsigned long *b)
-{
- if (b[0])
- return __ffs(b[0]);
- if (b[1])
- return __ffs(b[1]) + 64;
- return __ffs(b[2]) + 128;
-}
+#include <asm-generic/bitops/sched.h>
/**
* ffs - find first bit set
@@ -412,43 +405,20 @@ static __inline__ int fls(int x)
return r+1;
}
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#ifdef __KERNEL__
-#define ext2_set_bit(nr,addr) \
- __test_and_set_bit((nr),(unsigned long*)addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
#define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
- __test_and_clear_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr),(unsigned long*)addr)
-#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
- find_first_zero_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
- find_next_zero_bit((unsigned long*)addr, size, off)
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
-#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
-#define minix_find_first_zero_bit(addr,size) \
- find_first_zero_bit((void*)addr,size)
+
+#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 263f0a211ed..c8043a16152 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -20,6 +20,8 @@
__attribute__((__section__(".data.page_aligned")))
#endif
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
#endif
#endif
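
__read_mostly groups rarely-written variables into their own section so they do not share cache lines with write-hot data, avoiding cross-CPU cache-line bouncing on SMP. A hypothetical use:

    /* Hypothetical example: set once at boot, read on every timer tick;
     * .data.read_mostly keeps it off write-hot cache lines. */
    static int report_lost_ticks __read_mostly;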
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
new file mode 100644
index 00000000000..93b2b15d432
--- /dev/null
+++ b/include/asm-x86_64/dmi.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+extern void *dmi_ioremap(unsigned long addr, unsigned long size);
+extern void dmi_iounmap(void *addr, unsigned long size);
+
+#define DMI_MAX_DATA 2048
+
+extern int dmi_alloc_index;
+extern char dmi_alloc_data[DMI_MAX_DATA];
+
+/* This is so early that there is no good way to allocate dynamic memory.
+   Allocate data in a BSS array. */
+static inline void *dmi_alloc(unsigned len)
+{
+ int idx = dmi_alloc_index;
+ if ((dmi_alloc_index += len) > DMI_MAX_DATA)
+ return NULL;
+ return dmi_alloc_data + idx;
+}
+
+#define dmi_ioremap early_ioremap
+#define dmi_iounmap early_iounmap
+
+#endif
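
dmi_alloc() is a bump allocator: it hands out consecutive chunks of the static dmi_alloc_data[] arena by advancing dmi_alloc_index, returns NULL once the 2048 bytes run out, and never frees. That is adequate for DMI scanning, which runs before the page and slab allocators are up. An illustrative caller (the struct and helper are hypothetical):

    /* Illustrative caller: allocations must cope with NULL once the
     * fixed 2048-byte arena is exhausted; nothing is ever freed. */
    struct dmi_entry { u8 type; char name[32]; };

    static struct dmi_entry *save_entry(u8 type, const char *name)
    {
        struct dmi_entry *e = dmi_alloc(sizeof(*e));
        if (!e)
            return NULL;    /* arena full: drop this entry */
        e->type = type;
        strlcpy(e->name, name, sizeof(e->name));
        return e;
    }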
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index 43862cd6a56..c98633af07d 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -8,6 +8,7 @@
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/processor.h>
+#include <asm/compat.h>
/* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
@@ -157,6 +158,9 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (is_compat_task() ? 0x7ff : 0x3fffff)
+
#endif
#endif
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
index af7ded63b51..52825ce689f 100644
--- a/include/asm-x86_64/floppy.h
+++ b/include/asm-x86_64/floppy.h
@@ -155,7 +155,7 @@ static int fd_request_irq(void)
static unsigned long dma_mem_alloc(unsigned long size)
{
- return __get_dma_pages(GFP_KERNEL,get_order(size));
+ return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
}
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 8602c09bf89..9804bf07b09 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
return ret;
}
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ __asm__ __volatile__(
+ "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
+
+ "2: .section .fixup, \"ax\" \n"
+ "3: mov %2, %0 \n"
+ " jmp 2b \n"
+ " .previous \n"
+
+ " .section __ex_table, \"a\" \n"
+ " .align 8 \n"
+ " .quad 1b,3b \n"
+ " .previous \n"
+
+ : "=a" (oldval), "=m" (*uaddr)
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
+ : "memory"
+ );
+
+ return oldval;
+}
+
#endif
#endif
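
The new futex_atomic_cmpxchg_inatomic() issues a locked cmpxchgl on a user address and returns whatever value the instruction found there; the __ex_table entry redirects a user-page fault to the fixup at 3:, which substitutes -EFAULT. A caller therefore distinguishes three outcomes, as in this sketch (the wrapper name is hypothetical):

    /* Hypothetical wrapper showing the three outcomes. Note that -EFAULT
     * is folded into the returned value space; futex callers of this era
     * simply compared against it. */
    static int user_cmpxchg_sketch(int __user *uaddr, int expected, int desired)
    {
        int cur = futex_atomic_cmpxchg_inatomic(uaddr, expected, desired);
        if (cur == -EFAULT)
            return -EFAULT;        /* fault on the user page */
        return cur == expected;    /* 1: swap landed, 0: lost the race */
    }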
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 9dac18db829..cafdfb37f0d 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -135,6 +135,9 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
return __ioremap(offset, size, 0);
}
+extern void *early_ioremap(unsigned long addr, unsigned long size);
+extern void early_iounmap(void *addr, unsigned long size);
+
/*
* This one maps high address device memory and turns off caching for that area.
* it's useful if some control registers are in such an area and write combining
@@ -143,11 +146,6 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
-/* Use normal IO mappings for DMI */
-#define dmi_ioremap ioremap
-#define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
-
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
@@ -202,23 +200,6 @@ static inline __u64 __readq(const volatile void __iomem *addr)
#define mmiowb()
-#ifdef CONFIG_UNORDERED_IO
-static inline void __writel(__u32 val, volatile void __iomem *addr)
-{
- volatile __u32 __iomem *target = addr;
- asm volatile("movnti %1,%0"
- : "=m" (*target)
- : "r" (val) : "memory");
-}
-
-static inline void __writeq(__u64 val, volatile void __iomem *addr)
-{
- volatile __u64 __iomem *target = addr;
- asm volatile("movnti %1,%0"
- : "=m" (*target)
- : "r" (val) : "memory");
-}
-#else
static inline void __writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)addr = b;
@@ -227,7 +208,6 @@ static inline void __writeq(__u64 b, volatile void __iomem *addr)
{
*(__force volatile __u64 *)addr = b;
}
-#endif
static inline void __writeb(__u8 b, volatile void __iomem *addr)
{
*(__force volatile __u8 *)addr = b;
@@ -269,23 +249,11 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
/*
* Again, x86-64 does not require mem IO specific function.
*/
#define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))
/**
* check_signature - find BIOS signatures
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b9ed4c0c878..cf795631d9b 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,21 +5,20 @@
struct pt_regs;
-struct die_args {
+struct die_args {
struct pt_regs *regs;
const char *str;
- long err;
+ long err;
int trapnr;
int signr;
-};
+};
+
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head die_chain;
-/* Note - you should never unregister because that can race with NMIs.
- If you really want to do it first unregister - then synchronize_sched - then free.
- */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *die_chain;
/* Grossly misnamed. */
-enum die_val {
+enum die_val {
DIE_OOPS = 1,
DIE_INT3,
DIE_DEBUG,
@@ -33,8 +32,8 @@ enum die_val {
DIE_CALL,
DIE_NMI_IPI,
DIE_PAGE_FAULT,
-};
-
+};
+
static inline int notify_die(enum die_val val, const char *str,
struct pt_regs *regs, long err, int trap, int sig)
{
@@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str,
.trapnr = trap,
.signr = sig
};
- return notifier_call_chain(&die_chain, val, &args);
+ return atomic_notifier_call_chain(&die_chain, val, &args);
}
extern int printk_address(unsigned long address);
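
With the chain now an atomic_notifier_head, registration and notification are safe against concurrent notify_die() calls, including from NMI context, and unregister_die_notifier() is exported alongside register_die_notifier(). A minimal consumer sketch, assuming the standard notifier_block from <linux/notifier.h> (handler and block names are illustrative):

    static int sketch_die_handler(struct notifier_block *nb,
                                  unsigned long val, void *data)
    {
        struct die_args *args = data;

        if (val == DIE_OOPS)
            printk(KERN_ERR "oops (%s), trap %d\n", args->str, args->trapnr);
        return NOTIFY_DONE;
    }

    static struct notifier_block sketch_die_nb = {
        .notifier_call = sketch_die_handler,
    };

    /* register_die_notifier(&sketch_die_nb);
     * ... later: unregister_die_notifier(&sketch_die_nb); */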
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index 3e72c41727c..cd17945bf21 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@
typedef struct
{
- volatile unsigned int counter;
+ volatile long counter;
} local_t;
#define LOCAL_INIT(i) { (i) }
@@ -13,34 +13,34 @@ typedef struct
#define local_read(v) ((v)->counter)
#define local_set(v,i) (((v)->counter) = (i))
-static __inline__ void local_inc(local_t *v)
+static inline void local_inc(local_t *v)
{
__asm__ __volatile__(
- "incl %0"
+ "incq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
-static __inline__ void local_dec(local_t *v)
+static inline void local_dec(local_t *v)
{
__asm__ __volatile__(
- "decl %0"
+ "decq %0"
:"=m" (v->counter)
:"m" (v->counter));
}
-static __inline__ void local_add(unsigned int i, local_t *v)
+static inline void local_add(long i, local_t *v)
{
__asm__ __volatile__(
- "addl %1,%0"
+ "addq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
-static __inline__ void local_sub(unsigned int i, local_t *v)
+static inline void local_sub(long i, local_t *v)
{
__asm__ __volatile__(
- "subl %1,%0"
+ "subq %1,%0"
:"=m" (v->counter)
:"ir" (i), "m" (v->counter));
}
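
Widening local_t from unsigned int to long (and incl/addl to incq/addq) makes it a full 64-bit per-CPU counter. Each operation is a single memory read-modify-write instruction, so it is safe against interrupts on the owning CPU without a LOCK prefix, but it is not atomic across CPUs; typical use pairs it with per-CPU data, as in this sketch:

    /* Illustrative per-CPU event counter: single-instruction RMW, so
     * interrupt-safe on the local CPU; sum over all CPUs to read. */
    static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);

    static inline void count_event(void)
    {
        local_inc(&__get_cpu_var(nr_events));
    }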
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index 16e4be4de0c..19f0c83d079 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -34,12 +34,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
/* stop flush ipis for the previous mm */
- clear_bit(cpu, &prev->cpu_vm_mask);
+ cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
write_pda(mmu_state, TLBSTATE_OK);
write_pda(active_mm, next);
#endif
- set_bit(cpu, &next->cpu_vm_mask);
+ cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);
if (unlikely(next->context.ldt != prev->context.ldt))
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
write_pda(mmu_state, TLBSTATE_OK);
if (read_pda(active_mm) != next)
out_of_line_bug();
- if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 972c9359f7d..6b18cd8f293 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -15,8 +15,13 @@
#define NODEMAPSIZE 0xfff
/* Simple perfect hash to map physical addresses to node numbers */
-extern int memnode_shift;
-extern u8 memnodemap[NODEMAPSIZE];
+struct memnode {
+ int shift;
+ u8 map[NODEMAPSIZE];
+} ____cacheline_aligned;
+extern struct memnode memnode;
+#define memnode_shift memnode.shift
+#define memnodemap memnode.map
extern struct pglist_data *node_data[];
@@ -39,12 +44,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
#define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
#define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
extern int pfn_valid(unsigned long pfn);
#endif
-#define local_mapnr(kvaddr) \
- ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
#endif
#endif
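
Folding memnode_shift and memnodemap[] into one ____cacheline_aligned struct keeps the shift and the first map bytes on the same cache line, since every lookup touches both. The lookup itself stays a shift plus one byte load, mirroring the phys_to_nid() referenced above:

    /* Sketch of the lookup served by struct memnode (mirrors the
     * kernel's phys_to_nid; shown here for illustration): */
    static inline int phys_to_nid_sketch(unsigned long addr)
    {
        return memnode.map[addr >> memnode.shift];
    }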
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index dffe276ca2d..f6cbb4cbb5a 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -4,11 +4,11 @@
#include <linux/nodemask.h>
#include <asm/numnodes.h>
-struct node {
+struct bootnode {
u64 start,end;
};
-extern int compute_hash_shift(struct node *nodes, int numnodes);
+extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
extern int pxm_to_node(int nid);
#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 615e3e49492..408185bac35 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define __boot_va(x) __va(x)
#define __boot_pa(x) __pa(x)
#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < end_pfn)
#endif
@@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#endif /* __KERNEL__ */
+#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
#endif /* _X86_64_PAGE_H */
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index c7ab38a601a..b47c3df9ed1 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -22,8 +22,8 @@ struct x8664_pda {
int nodenumber; /* number of current node */
unsigned int __softirq_pending;
unsigned int __nmi_count; /* number of NMI on this CPUs */
- struct mm_struct *active_mm;
int mmu_state;
+ struct mm_struct *active_mm;
unsigned apic_timer_irqs;
} ____cacheline_aligned_in_smp;
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f7..4405b4adeab 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
#define percpu_modcopy(pcpudst, src, size) \
do { \
unsigned int __i; \
- for (__i = 0; __i < NR_CPUS; __i++) \
- if (cpu_possible(__i)) \
- memcpy((pcpudst)+__per_cpu_offset(__i), \
- (src), (size)); \
+ for_each_cpu(__i) \
+ memcpy((pcpudst)+__per_cpu_offset(__i), \
+ (src), (size)); \
} while (0)
extern void setup_per_cpu_areas(void);
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 08cad2482bc..43d4c333a8b 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -45,12 +45,39 @@ static inline void pud_free (pud_t *pud)
free_page((unsigned long)pud);
}
+static inline void pgd_list_add(pgd_t *pgd)
+{
+ struct page *page = virt_to_page(pgd);
+
+ spin_lock(&pgd_lock);
+ page->index = (pgoff_t)pgd_list;
+ if (pgd_list)
+ pgd_list->private = (unsigned long)&page->index;
+ pgd_list = page;
+ page->private = (unsigned long)&pgd_list;
+ spin_unlock(&pgd_lock);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+ struct page *next, **pprev, *page = virt_to_page(pgd);
+
+ spin_lock(&pgd_lock);
+ next = (struct page *)page->index;
+ pprev = (struct page **)page->private;
+ *pprev = next;
+ if (next)
+ next->private = (unsigned long)pprev;
+ spin_unlock(&pgd_lock);
+}
+
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
unsigned boundary;
pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
if (!pgd)
return NULL;
+ pgd_list_add(pgd);
/*
* Copy kernel pointers in from init.
* Could keep a freelist or slab cache of those because the kernel
@@ -67,6 +94,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(pgd_t *pgd)
{
BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ pgd_list_del(pgd);
free_page((unsigned long)pgd);
}
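
pgd_list_add()/pgd_list_del() thread every live pgd page onto a list rooted at pgd_list, reusing struct page fields: page->index holds the next page, and page->private points back at the previous element's next-slot, so unlink is O(1). vmalloc_sync_all() (declared in pgtable.h below) can then walk all page tables under pgd_lock, roughly as in this sketch:

    /* Illustrative walk of the list maintained above; the real walker
     * lives in the x86_64 fault-handling code. */
    struct page *page;

    spin_lock(&pgd_lock);
    for (page = pgd_list; page; page = (struct page *)page->index) {
        pgd_t *pgd = (pgd_t *)page_address(page);
        /* propagate kernel-space entries into this pgd */
    }
    spin_unlock(&pgd_lock);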
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 715fd94cf57..31e83c3bd02 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }
static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
@@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
struct vm_area_struct;
@@ -293,19 +293,19 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
{
if (!pte_dirty(*ptep))
return 0;
- return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
+ return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
if (!pte_young(*ptep))
return 0;
- return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+ return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- clear_bit(_PAGE_BIT_RW, ptep);
+ clear_bit(_PAGE_BIT_RW, &ptep->pte);
}
/*
@@ -420,6 +420,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+void vmalloc_sync_all(void);
+
#endif /* !__ASSEMBLY__ */
extern int kern_addr_valid(unsigned long addr);
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
index c43cbba3191..c0475a9d8bb 100644
--- a/include/asm-x86_64/poll.h
+++ b/include/asm-x86_64/poll.h
@@ -16,6 +16,7 @@
#define POLLWRBAND 0x0200
#define POLLMSG 0x0400
#define POLLREMOVE 0x1000
+#define POLLRDHUP 0x2000
struct pollfd {
int fd;
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 8c8d88c036e..37a3ec433ee 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
#include <asm/mmsegment.h>
#include <asm/percpu.h>
#include <linux/personality.h>
+#include <linux/cpumask.h>
#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
@@ -65,6 +66,9 @@ struct cpuinfo_x86 {
__u32 x86_power;
__u32 extended_cpuid_level; /* Max extended CPUID function supported */
unsigned long loops_per_jiffy;
+#ifdef CONFIG_SMP
+ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
+#endif
__u8 apicid;
__u8 booted_cores; /* number of cores as seen by OS */
} ____cacheline_aligned;
@@ -354,9 +358,6 @@ struct extended_sigtable {
struct extended_signature sigs[0];
};
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index 3ba8fd45fcb..8abf2a43c94 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -53,8 +53,6 @@ extern int sysctl_vsyscall;
extern int nohpet;
extern unsigned long vxtime_hz;
-extern void do_softirq_thunk(void);
-
extern int numa_setup(char *opt);
extern int setup_early_printk(char *);
@@ -129,7 +127,6 @@ extern int fix_aperture;
#define iommu_aperture 0
#define iommu_aperture_allowed 0
#endif
-extern int force_iommu;
extern int reboot_force;
extern int notsc_setup(char *);
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 9ccbb2cfd5c..a4fdaeb5c39 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 phys_proc_id[NR_CPUS];
extern u8 cpu_core_id[NR_CPUS];
+extern u8 cpu_llc_id[NR_CPUS];
#define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index a3493ee282b..ee6bf275349 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -40,26 +40,15 @@ extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
-#define memset __builtin_memset
+void *memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMMOVE
void * memmove(void * dest,const void *src,size_t count);
-/* Use C out of line version for memcmp */
-#define memcmp __builtin_memcmp
int memcmp(const void * cs,const void * ct,size_t count);
-
-/* out of line string functions use always C versions */
-#define strlen __builtin_strlen
size_t strlen(const char * s);
-
-#define strcpy __builtin_strcpy
-char * strcpy(char * dest,const char *src);
-
-#define strcat __builtin_strcat
-char * strcat(char * dest, const char * src);
-
-#define strcmp __builtin_strcmp
+char *strcpy(char * dest,const char *src);
+char *strcat(char * dest, const char * src);
int strcmp(const char * cs,const char * ct);
#endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
index bb9f40597d0..bc7f81715e5 100644
--- a/include/asm-x86_64/suspend.h
+++ b/include/asm-x86_64/suspend.h
@@ -39,9 +39,7 @@ extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, sa
extern unsigned long saved_context_eflags;
#define loaddebug(thread,register) \
- __asm__("movq %0,%%db" #register \
- : /* no output */ \
- :"r" ((thread)->debugreg##register))
+ set_debugreg((thread)->debugreg##register, register)
extern void fix_processor_context(void);
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index b7f66034ae7..39759898022 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -70,12 +70,6 @@ extern void load_gs_index(unsigned);
".previous" \
: :"r" (value), "r" (0))
-#define set_debug(value,register) \
- __asm__("movq %0,%%db" #register \
- : /* no output */ \
- :"r" ((unsigned long) value))
-
-
#ifdef __KERNEL__
struct alt_instr {
__u8 *instr; /* original instruction */
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c642f5d9882..9db54e9d17b 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -68,4 +68,6 @@ extern int __node_distance(int, int);
#include <asm-generic/topology.h>
+extern cpumask_t cpu_coregroup_map(int cpu);
+
#endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index da0341c5794..f21ff2c1e96 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -605,8 +605,14 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
__SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */
#define __NR_unshare 272
__SYSCALL(__NR_unshare, sys_unshare)
-
-#define __NR_syscall_max __NR_unshare
+#define __NR_set_robust_list 273
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list 274
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
+#define __NR_splice 275
+__SYSCALL(__NR_splice, sys_splice)
+
+#define __NR_syscall_max __NR_splice
#ifndef __NO_STUBS
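
The three new slots (set_robust_list, get_robust_list, splice) bump __NR_syscall_max to __NR_splice. Until libc grows wrappers, userspace built against these headers can invoke them through syscall(2); a sketch for splice (file descriptors are the caller's, one side must be a pipe):

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Userspace sketch: move up to len bytes between two fds via the
     * new slot 275. */
    static long do_splice(int fd_in, int fd_out, size_t len)
    {
        return syscall(__NR_splice, fd_in, NULL, fd_out, NULL, len, 0);
    }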