Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/dma.h                 |   4
-rw-r--r--  arch/arm/include/asm/elf.h                 |   1
-rw-r--r--  arch/arm/include/asm/futex.h               | 137
-rw-r--r--  arch/arm/include/asm/hardware/timer-sp.h   |   4
-rw-r--r--  arch/arm/include/asm/memory.h              |  10
-rw-r--r--  arch/arm/include/asm/ptrace.h              |   6
-rw-r--r--  arch/arm/include/asm/sizes.h               |  42
-rw-r--r--  arch/arm/include/asm/smp.h                 |  12
-rw-r--r--  arch/arm/include/asm/spinlock.h            |   2
-rw-r--r--  arch/arm/include/asm/tlb.h                 |  53
10 files changed, 148 insertions(+), 123 deletions(-)
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index ca51143f97f..42005542932 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -6,8 +6,10 @@
/*
* This is the maximum virtual address which can be DMA'd from.
*/
-#ifndef MAX_DMA_ADDRESS
+#ifndef ARM_DMA_ZONE_SIZE
#define MAX_DMA_ADDRESS 0xffffffff
+#else
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
#endif
#ifdef CONFIG_ISA_DMA_API
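Note on the dma.h hunk above: when a platform supplies ARM_DMA_ZONE_SIZE, the DMA limit is now derived from it rather than from a per-platform MAX_DMA_ADDRESS, and since the limit is a virtual address it is based at PAGE_OFFSET. A minimal userspace sketch of the arithmetic, with PAGE_OFFSET and ARM_DMA_ZONE_SIZE set to assumed example values rather than any real platform's:

#include <assert.h>
#include <stdio.h>

/* Assumed example values, purely illustrative:
 * the usual 3G/1G split and a 256MB DMA zone. */
#define PAGE_OFFSET        0xC0000000UL
#define ARM_DMA_ZONE_SIZE  0x10000000UL

#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE)

int main(void)
{
	/* DMA-able virtual addresses end 256MB above the kernel mapping base. */
	printf("MAX_DMA_ADDRESS = 0x%lx\n", MAX_DMA_ADDRESS);
	assert(MAX_DMA_ADDRESS == 0xD0000000UL);
	return 0;
}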
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index c3cd8755e64..0e9ce8d9686 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -108,6 +108,7 @@ struct task_struct;
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 199a6b6de7f..8c73900da9e 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,16 +3,74 @@
#ifdef __KERNEL__
+#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
+/* ARM doesn't provide unprivileged exclusive memory accessors */
+#include <asm-generic/futex.h>
+#else
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_ex_table(err_reg) \
+ "3:\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f, 2b, 4f\n" \
+ " .popsection\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ "4: mov %0, " err_reg "\n" \
+ " b 3b\n" \
+ " .popsection"
+
#ifdef CONFIG_SMP
-#include <asm-generic/futex.h>
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ smp_mb(); \
+ __asm__ __volatile__( \
+ "1: ldrex %1, [%2]\n" \
+ " " insn "\n" \
+ "2: strex %1, %0, [%2]\n" \
+ " teq %1, #0\n" \
+ " bne 1b\n" \
+ " mov %0, #0\n" \
+ __futex_atomic_ex_table("%4") \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret;
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ smp_mb();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldrex %1, [%4]\n"
+ " teq %1, %2\n"
+ " ite eq @ explicit IT needed for the 2b label\n"
+ "2: strexeq %0, %3, [%4]\n"
+ " movne %0, #0\n"
+ " teq %0, #0\n"
+ " bne 1b\n"
+ __futex_atomic_ex_table("%5")
+ : "=&r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+ smp_mb();
+
+ *uval = val;
+ return ret;
+}
#else /* !SMP, we can work around lack of atomic ops by disabling preemption */
-#include <linux/futex.h>
#include <linux/preempt.h>
-#include <linux/uaccess.h>
-#include <asm/errno.h>
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
@@ -21,20 +79,38 @@
" " insn "\n" \
"2: " T(str) " %0, [%2]\n" \
" mov %0, #0\n" \
- "3:\n" \
- " .pushsection __ex_table,\"a\"\n" \
- " .align 3\n" \
- " .long 1b, 4f, 2b, 4f\n" \
- " .popsection\n" \
- " .pushsection .fixup,\"ax\"\n" \
- "4: mov %0, %4\n" \
- " b 3b\n" \
- " .popsection" \
+ __futex_atomic_ex_table("%4") \
: "=&r" (ret), "=&r" (oldval) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
: "cc", "memory")
static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " T(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+ " it eq @ explicit IT needed for the 2b label\n"
+ "2: " T(streq) " %3, [%4]\n"
+ __futex_atomic_ex_table("%5")
+ : "+r" (ret), "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* !SMP */
+
+static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
@@ -87,39 +163,6 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
return ret;
}
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- int ret = 0;
- u32 val;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: " T(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
- " it eq @ explicit IT needed for the 2b label\n"
- "2: " T(streq) " %3, [%4]\n"
- "3:\n"
- " .pushsection __ex_table,\"a\"\n"
- " .align 3\n"
- " .long 1b, 4f, 2b, 4f\n"
- " .popsection\n"
- " .pushsection .fixup,\"ax\"\n"
- "4: mov %0, %5\n"
- " b 3b\n"
- " .popsection"
- : "+r" (ret), "=&r" (val)
- : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
- : "cc", "memory");
-
- *uval = val;
- return ret;
-}
-
-#endif /* !SMP */
-
+#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
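Note on the futex.h hunks above: the SMP path now open-codes the operations with ldrex/strex, so a contended update simply retries until the exclusive store succeeds. The sketch below models only the semantics of futex_atomic_cmpxchg_inatomic in portable userspace C via the GCC/Clang __atomic builtins; it illustrates the compare-and-swap behaviour, not the kernel's uaccess-checked implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the ldrex/strex sequence: if *uaddr equals
 * oldval, atomically replace it with newval; always report the value seen. */
static int cmpxchg_user_word(uint32_t *uval, uint32_t *uaddr,
			     uint32_t oldval, uint32_t newval)
{
	uint32_t expected = oldval;

	/* Strong CAS, mirroring the retry-until-strex-succeeds loop. */
	__atomic_compare_exchange_n(uaddr, &expected, newval, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	*uval = expected;	/* value observed at uaddr, like *uval = val */
	return 0;		/* the real helper returns -EFAULT on a faulting access */
}

int main(void)
{
	uint32_t futex_word = 1, seen;

	cmpxchg_user_word(&seen, &futex_word, 1, 2);
	printf("seen=%u now=%u\n", seen, futex_word);	/* seen=1 now=2 */
	return 0;
}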
diff --git a/arch/arm/include/asm/hardware/timer-sp.h b/arch/arm/include/asm/hardware/timer-sp.h
index 21e75e30d49..4384d81eee7 100644
--- a/arch/arm/include/asm/hardware/timer-sp.h
+++ b/arch/arm/include/asm/hardware/timer-sp.h
@@ -1,2 +1,2 @@
-void sp804_clocksource_init(void __iomem *);
-void sp804_clockevents_init(void __iomem *, unsigned int);
+void sp804_clocksource_init(void __iomem *, const char *);
+void sp804_clockevents_init(void __iomem *, unsigned int, const char *);
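Note on the timer-sp.h hunk above: both init functions gain a const char * so each SP804 instance can be registered under its own name. A self-contained mock of a call site follows; the stub bodies, base addresses and IRQ number are invented for illustration only.

#include <stdio.h>

/* Stubbed stand-ins mirroring the new prototypes, so the extra name
 * argument is visible; these are not the kernel implementations. */
static void sp804_clocksource_init(void *base, const char *name)
{
	printf("clocksource %s at %p\n", name, base);
}

static void sp804_clockevents_init(void *base, unsigned int irq, const char *name)
{
	printf("clockevent %s at %p, irq %u\n", name, base, irq);
}

int main(void)
{
	/* Hypothetical base addresses and IRQ number, placeholders only. */
	void *timer0_base = (void *)0x10011000, *timer1_base = (void *)0x10011020;

	sp804_clocksource_init(timer0_base, "timer0");
	sp804_clockevents_init(timer1_base, 34, "timer1");
	return 0;
}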
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 431077c5a86..af44a8fb348 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -209,14 +209,10 @@ static inline unsigned long __phys_to_virt(unsigned long x)
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
-#ifndef ISA_DMA_THRESHOLD
+#ifndef ARM_DMA_ZONE_SIZE
#define ISA_DMA_THRESHOLD (0xffffffffULL)
-#endif
-
-#ifndef arch_adjust_zones
-#define arch_adjust_zones(size,holes) do { } while (0)
-#elif !defined(CONFIG_ZONE_DMA)
-#error "custom arch_adjust_zones() requires CONFIG_ZONE_DMA"
+#else
+#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
#endif
/*
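Note on the memory.h hunk above: ISA_DMA_THRESHOLD is the physical counterpart of MAX_DMA_ADDRESS and is an inclusive upper bound, hence the trailing "- 1". A small sketch of the arithmetic with assumed example values for PHYS_OFFSET and ARM_DMA_ZONE_SIZE:

#include <assert.h>
#include <stdio.h>

/* Assumed example values, purely illustrative. */
#define PHYS_OFFSET        0x80000000ULL
#define ARM_DMA_ZONE_SIZE  0x10000000ULL	/* 256MB DMA zone */

#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)

int main(void)
{
	/* Highest physical address a GFP_DMA allocation may return. */
	printf("ISA_DMA_THRESHOLD = 0x%llx\n", ISA_DMA_THRESHOLD);
	assert(ISA_DMA_THRESHOLD == 0x8FFFFFFFULL);
	return 0;
}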
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index a8ff22b2a39..312d10877bd 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -128,6 +128,12 @@ struct pt_regs {
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
+/*
+ * The size of the user-visible VFP state as seen by PTRACE_GET/SETVFPREGS
+ * and core dumps.
+ */
+#define ARM_VFPREGS_SIZE ( 32 * 8 /*fpregs*/ + 4 /*fpscr*/ )
+
#ifdef __KERNEL__
#define user_mode(regs) \
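Note on the ptrace.h hunk above: ARM_VFPREGS_SIZE covers 32 double-precision VFP registers plus the 32-bit FPSCR, i.e. 32 * 8 + 4 = 260 bytes. A compile-time check of that layout; the struct is only an illustrative model of the user-visible blob, not the kernel's definition.

#include <stdint.h>

#define ARM_VFPREGS_SIZE ( 32 * 8 /*fpregs*/ + 4 /*fpscr*/ )

/* Illustrative layout of the regset blob: 32 x 64-bit registers + FPSCR. */
struct vfp_user_state {
	uint64_t fpregs[32];
	uint32_t fpscr;
} __attribute__((packed));

_Static_assert(sizeof(struct vfp_user_state) == ARM_VFPREGS_SIZE,
	       "VFP regset is 260 bytes");

int main(void) { return 0; }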
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index 316bb2b2be3..154b89b81d3 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -16,44 +16,6 @@
/* Size definitions
* Copyright (C) ARM Limited 1998. All rights reserved.
*/
+#include <asm-generic/sizes.h>
-#ifndef __sizes_h
-#define __sizes_h 1
-
-/* handy sizes */
-#define SZ_16 0x00000010
-#define SZ_32 0x00000020
-#define SZ_64 0x00000040
-#define SZ_128 0x00000080
-#define SZ_256 0x00000100
-#define SZ_512 0x00000200
-
-#define SZ_1K 0x00000400
-#define SZ_2K 0x00000800
-#define SZ_4K 0x00001000
-#define SZ_8K 0x00002000
-#define SZ_16K 0x00004000
-#define SZ_32K 0x00008000
-#define SZ_64K 0x00010000
-#define SZ_128K 0x00020000
-#define SZ_256K 0x00040000
-#define SZ_512K 0x00080000
-
-#define SZ_1M 0x00100000
-#define SZ_2M 0x00200000
-#define SZ_4M 0x00400000
-#define SZ_8M 0x00800000
-#define SZ_16M 0x01000000
-#define SZ_32M 0x02000000
-#define SZ_48M 0x03000000
-#define SZ_64M 0x04000000
-#define SZ_128M 0x08000000
-#define SZ_256M 0x10000000
-#define SZ_512M 0x20000000
-
-#define SZ_1G 0x40000000
-#define SZ_2G 0x80000000
-
-#endif
-
-/* END */
+#define SZ_48M (SZ_32M + SZ_16M)
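Note on the sizes.h hunk above: the common SZ_* constants now come from asm-generic/sizes.h, leaving only the ARM-specific SZ_48M, which expands to the value the old header hard-coded (0x03000000). A trivial self-contained check, with the two generic constants repeated locally so it builds anywhere:

/* Values as in asm-generic/sizes.h, repeated here only for self-containment. */
#define SZ_16M 0x01000000
#define SZ_32M 0x02000000

#define SZ_48M (SZ_32M + SZ_16M)

_Static_assert(SZ_48M == 0x03000000, "SZ_48M matches the old ARM definition");

int main(void) { return 0; }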
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 96ed521f240..d2b514fd76f 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -14,20 +14,12 @@
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#include <mach/smp.h>
-
#ifndef CONFIG_SMP
# error "<asm/smp.h> included in non-SMP build"
#endif
#define raw_smp_processor_id() (current_thread_info()->cpu)
-/*
- * at the moment, there's not a big penalty for changing CPUs
- * (the >big< penalty is running SMP in the first place)
- */
-#define PROC_CHANGE_PENALTY 15
-
struct seq_file;
/*
@@ -47,9 +39,9 @@ extern void smp_init_cpus(void);
/*
- * Raise an IPI cross call on CPUs in callmap.
+ * Provide a function to raise an IPI cross call on CPUs in callmap.
*/
-extern void smp_cross_call(const struct cpumask *mask, int ipi);
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
/*
* Boot a secondary CPU, and assign it the specified idle task.
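Note on the smp.h hunk above: platforms no longer export smp_cross_call() through <mach/smp.h>; instead they register their IPI-raising function at runtime via set_smp_cross_call(). A minimal userspace mock of that registration pattern; the cpumask stand-in and the GIC-flavoured callback name are placeholders, not kernel code.

#include <stdio.h>

/* Placeholder for struct cpumask. */
struct cpumask { unsigned long bits; };

/* Core code keeps a pointer to whatever the platform registers. */
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

static void set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

/* Hypothetical platform hook that would poke the interrupt controller. */
static void gic_raise_softirq(const struct cpumask *mask, unsigned int ipi)
{
	printf("raise IPI %u on CPUs 0x%lx\n", ipi, mask->bits);
}

int main(void)
{
	struct cpumask online = { .bits = 0x3 };	/* CPUs 0 and 1 */

	set_smp_cross_call(gic_raise_softirq);		/* done once at SMP init */
	__smp_cross_call(&online, 1);			/* later: raise IPI 1 */
	return 0;
}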
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index fdd3820edff..65fa3c88095 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,6 +5,8 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
+#include <asm/processor.h>
+
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 82dfe5d0c41..265f908c4a6 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -41,12 +41,12 @@
*/
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb) 0
-#define FREE_PTE_NR 500
#else
#define tlb_fast_mode(tlb) 1
-#define FREE_PTE_NR 0
#endif
+#define MMU_GATHER_BUNDLE 8
+
/*
* TLB handling. This allows us to remove pages from the page
* tables, and efficiently handle the TLB issues.
@@ -58,7 +58,9 @@ struct mmu_gather {
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
- struct page *pages[FREE_PTE_NR];
+ unsigned int max;
+ struct page **pages;
+ struct page *local[MMU_GATHER_BUNDLE];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -97,26 +99,37 @@ static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
}
}
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
+{
+ unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+
+ if (addr) {
+ tlb->pages = (void *)addr;
+ tlb->max = PAGE_SIZE / sizeof(struct page *);
+ }
+}
+
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
tlb_flush(tlb);
if (!tlb_fast_mode(tlb)) {
free_pages_and_swap_cache(tlb->pages, tlb->nr);
tlb->nr = 0;
+ if (tlb->pages == tlb->local)
+ __tlb_alloc_page(tlb);
}
}
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
- struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->fullmm = fullmm;
tlb->vma = NULL;
+ tlb->max = ARRAY_SIZE(tlb->local);
+ tlb->pages = tlb->local;
tlb->nr = 0;
-
- return tlb;
+ __tlb_alloc_page(tlb);
}
static inline void
@@ -127,7 +140,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
/* keep the page table cache within bounds */
check_pgt_cache();
- put_cpu_var(mmu_gathers);
+ if (tlb->pages != tlb->local)
+ free_pages((unsigned long)tlb->pages, 0);
}
/*
@@ -162,15 +176,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
tlb_flush(tlb);
}
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
- } else {
- tlb->pages[tlb->nr++] = page;
- if (tlb->nr >= FREE_PTE_NR)
- tlb_flush_mmu(tlb);
+ return 1; /* avoid calling tlb_flush_mmu */
}
+
+ tlb->pages[tlb->nr++] = page;
+ VM_BUG_ON(tlb->nr > tlb->max);
+ return tlb->max - tlb->nr;
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ if (!__tlb_remove_page(tlb, page))
+ tlb_flush_mmu(tlb);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
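Note on the tlb.h hunks above: the fixed FREE_PTE_NR array becomes a small MMU_GATHER_BUNDLE-sized buffer embedded in struct mmu_gather, opportunistically upgraded to a whole page of pointers, and __tlb_remove_page() reports the remaining capacity so callers flush only when the buffer is full. A userspace sketch of that gather-and-flush pattern; every name and size below is invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define BUNDLE 8	/* mirrors MMU_GATHER_BUNDLE */

struct gather {
	void **pages;		/* current buffer (local or heap) */
	unsigned int nr, max;
	void *local[BUNDLE];	/* always-available fallback, like tlb->local */
};

static void gather_alloc(struct gather *g)
{
	void **p = malloc(4096);	/* stand-in for __get_free_pages() */
	if (p) {
		g->pages = p;
		g->max = 4096 / sizeof(void *);
	}
}

static void gather_init(struct gather *g)
{
	g->pages = g->local;
	g->max = BUNDLE;
	g->nr = 0;
	gather_alloc(g);	/* opportunistic upgrade, never required */
}

static void gather_flush(struct gather *g)
{
	printf("flushing %u pages\n", g->nr);	/* free_pages_and_swap_cache() */
	g->nr = 0;
	if (g->pages == g->local)
		gather_alloc(g);	/* retry the upgrade after a flush */
}

/* Returns remaining capacity; 0 tells the caller it must flush. */
static int gather_add(struct gather *g, void *page)
{
	g->pages[g->nr++] = page;
	return g->max - g->nr;
}

int main(void)
{
	struct gather g;

	gather_init(&g);
	for (int i = 0; i < 1000; i++)
		if (!gather_add(&g, (void *)(long)(i + 1)))
			gather_flush(&g);
	gather_flush(&g);	/* final flush, as tlb_finish_mmu would */
	if (g.pages != g.local)
		free(g.pages);
	return 0;
}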