Diffstat (limited to 'arch/arm/include')
79 files changed, 902 insertions, 704 deletions
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 6550db3aa5c..960abceb8e1 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -1,3 +1,20 @@ include include/asm-generic/Kbuild.asm header-y += hwcap.h + +generic-y += auxvec.h +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += ioctl.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += local.h +generic-y += local64.h +generic-y += percpu.h +generic-y += poll.h +generic-y += resource.h +generic-y += sections.h +generic-y += siginfo.h +generic-y += sizes.h diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 65c3f2474f5..29035e86a59 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -293,4 +293,13 @@ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort .endm + +/* Utility macro for declaring string literals */ + .macro string name:req, string + .type \name , #object +\name: + .asciz "\string" + .size \name , . - \name + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 7e79503ab89..86976d03438 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -208,16 +208,15 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic_add_unless(atomic_t *v, int a, int u) +static inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) c = old; - return c != u; + return c; } -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #define atomic_inc(v) atomic_add(1, v) #define atomic_dec(v) atomic_sub(1, v) @@ -460,9 +459,6 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) -#else /* !CONFIG_GENERIC_ATOMIC64 */ -#include <asm-generic/atomic64.h> -#endif -#include <asm-generic/atomic-long.h> +#endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif #endif diff --git a/arch/arm/include/asm/auxvec.h b/arch/arm/include/asm/auxvec.h deleted file mode 100644 index c0536f6b29a..00000000000 --- a/arch/arm/include/asm/auxvec.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __ASMARM_AUXVEC_H -#define __ASMARM_AUXVEC_H - -#endif diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index b4892a06442..f7419ef9c8f 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h @@ -26,8 +26,8 @@ #include <linux/compiler.h> #include <asm/system.h> -#define smp_mb__before_clear_bit() mb() -#define smp_mb__after_clear_bit() mb() +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() /* * These functions are the basis of our bit ops. @@ -310,10 +310,7 @@ static inline int find_next_bit_le(const void *p, int size, int offset) /* * Ext2 is defined to use little-endian byte ordering. 
*/ -#define ext2_set_bit_atomic(lock, nr, p) \ - test_and_set_bit_le(nr, p) -#define ext2_clear_bit_atomic(lock, nr, p) \ - test_and_clear_bit_le(nr, p) +#include <asm-generic/bitops/ext2-atomic-setbit.h> #endif /* __KERNEL__ */ diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b..00000000000 --- a/arch/arm/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/bitsperlong.h> diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 4d88425a416..9abe7a07d5a 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -3,21 +3,58 @@ #ifdef CONFIG_BUG -#ifdef CONFIG_DEBUG_BUGVERBOSE -extern void __bug(const char *file, int line) __attribute__((noreturn)); - -/* give file/line information */ -#define BUG() __bug(__FILE__, __LINE__) +/* + * Use a suitable undefined instruction to use for ARM/Thumb2 bug handling. + * We need to be careful not to conflict with those used by other modules and + * the register_undef_hook() system. + */ +#ifdef CONFIG_THUMB2_KERNEL +#define BUG_INSTR_VALUE 0xde02 +#define BUG_INSTR_TYPE ".hword " #else +#define BUG_INSTR_VALUE 0xe7f001f2 +#define BUG_INSTR_TYPE ".word " +#endif -/* this just causes an oops */ -#define BUG() do { *(int *)0 = 0; } while (1) -#endif +#define BUG() _BUG(__FILE__, __LINE__, BUG_INSTR_VALUE) +#define _BUG(file, line, value) __BUG(file, line, value) + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +/* + * The extra indirection is to ensure that the __FILE__ string comes through + * OK. Many version of gcc do not support the asm %c parameter which would be + * preferable to this unpleasantness. We use mergeable string sections to + * avoid multiple copies of the string appearing in the kernel image. 
+ */ + +#define __BUG(__file, __line, __value) \ +do { \ + BUILD_BUG_ON(sizeof(struct bug_entry) != 12); \ + asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \ + ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ + "2:\t.asciz " #__file "\n" \ + ".popsection\n" \ + ".pushsection __bug_table,\"a\"\n" \ + "3:\t.word 1b, 2b\n" \ + "\t.hword " #__line ", 0\n" \ + ".popsection"); \ + unreachable(); \ +} while (0) + +#else /* not CONFIG_DEBUG_BUGVERBOSE */ + +#define __BUG(__file, __line, __value) \ +do { \ + asm volatile(BUG_INSTR_TYPE #__value); \ + unreachable(); \ +} while (0) +#endif /* CONFIG_DEBUG_BUGVERBOSE */ #define HAVE_ARCH_BUG -#endif +#endif /* CONFIG_BUG */ #include <asm-generic/bug.h> diff --git a/arch/arm/include/asm/cachetype.h b/arch/arm/include/asm/cachetype.h index c023db09fcc..7ea78144ae2 100644 --- a/arch/arm/include/asm/cachetype.h +++ b/arch/arm/include/asm/cachetype.h @@ -7,6 +7,7 @@ #define CACHEID_VIPT (CACHEID_VIPT_ALIASING|CACHEID_VIPT_NONALIASING) #define CACHEID_ASID_TAGGED (1 << 3) #define CACHEID_VIPT_I_ALIASING (1 << 4) +#define CACHEID_PIPT (1 << 5) extern unsigned int cacheid; @@ -16,6 +17,7 @@ extern unsigned int cacheid; #define cache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_ALIASING) #define icache_is_vivt_asid_tagged() cacheid_is(CACHEID_ASID_TAGGED) #define icache_is_vipt_aliasing() cacheid_is(CACHEID_VIPT_I_ALIASING) +#define icache_is_pipt() cacheid_is(CACHEID_PIPT) /* * __LINUX_ARM_ARCH__ is the minimum supported CPU architecture @@ -26,7 +28,8 @@ extern unsigned int cacheid; #if __LINUX_ARM_ARCH__ >= 7 #define __CACHEID_ARCH_MIN (CACHEID_VIPT_NONALIASING |\ CACHEID_ASID_TAGGED |\ - CACHEID_VIPT_I_ALIASING) + CACHEID_VIPT_I_ALIASING |\ + CACHEID_PIPT) #elif __LINUX_ARM_ARCH__ >= 6 #define __CACHEID_ARCH_MIN (~CACHEID_VIVT) #else diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h index 765d3322236..80751c15c30 100644 --- a/arch/arm/include/asm/clkdev.h +++ b/arch/arm/include/asm/clkdev.h @@ -14,7 +14,12 @@ #include <linux/slab.h> +#ifdef CONFIG_HAVE_MACH_CLKDEV #include <mach/clkdev.h> +#else +#define __clk_get(clk) ({ 1; }) +#define __clk_put(clk) do { } while (0) +#endif static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size) { diff --git a/arch/arm/include/asm/cputime.h b/arch/arm/include/asm/cputime.h deleted file mode 100644 index 3a8002a5fec..00000000000 --- a/arch/arm/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ARM_CPUTIME_H -#define __ARM_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __ARM_CPUTIME_H */ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index cd4458f6417..cb47d28cbe1 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -8,6 +8,7 @@ #define CPUID_CACHETYPE 1 #define CPUID_TCM 2 #define CPUID_TLBTYPE 3 +#define CPUID_MPIDR 5 #define CPUID_EXT_PFR0 "c1, 0" #define CPUID_EXT_PFR1 "c1, 1" @@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void) return read_cpuid(CPUID_TCM); } +static inline unsigned int __attribute_const__ read_cpuid_mpidr(void) +{ + return read_cpuid(CPUID_MPIDR); +} + /* * Intel's XScale3 core supports some v6 features (supersections, L2) * but advertises itself as v5 as it does not support the v6 ISA. 
For diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index 9f390ce335c..7aa368003b0 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -10,9 +10,17 @@ struct dev_archdata { #ifdef CONFIG_DMABOUNCE struct dmabounce_device_info *dmabounce; #endif +#ifdef CONFIG_IOMMU_API + void *iommu; /* private IOMMU data */ +#endif }; +struct omap_device; + struct pdev_archdata { +#ifdef CONFIG_ARCH_OMAP + struct omap_device *od; +#endif }; #endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4fff837363e..cb3b7c981c4 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -32,7 +32,7 @@ static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) static inline void *dma_to_virt(struct device *dev, dma_addr_t addr) { - return (void *)__bus_to_virt(addr); + return (void *)__bus_to_virt((unsigned long)addr); } static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) @@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, ___dma_page_dev_to_cpu(page, off, size, dir); } -/* - * Return whether the given device DMA address mask can be supported - * properly. For example, if your device can only drive the low 24-bits - * during bus mastering, then you would pass 0x00ffffff as the mask - * to this function. - * - * FIXME: This should really be a platform specific issue - we should - * return false if GFP_DMA allocations may not satisfy the supplied 'mask'. - */ -static inline int dma_supported(struct device *dev, u64 mask) -{ - if (mask < ISA_DMA_THRESHOLD) - return 0; - return 1; -} - -static inline int dma_set_mask(struct device *dev, u64 dma_mask) -{ -#ifdef CONFIG_DMABOUNCE - if (dev->archdata.dmabounce) { - if (dma_mask >= ISA_DMA_THRESHOLD) - return 0; - else - return -EIO; - } -#endif - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - - return 0; -} +extern int dma_supported(struct device *, u64); +extern int dma_set_mask(struct device *, u64); /* * DMA errors are defined by all-bits-set in the DMA address. @@ -236,6 +205,13 @@ extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, int dma_mmap_writecombine(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t); +/* + * This can be called during boot to increase the size of the consistent + * DMA region above it's default value of 2MB. It must be called before the + * memory allocator is initialised, i.e. before any core_initcall. + */ +extern void __init init_consistent_dma_size(unsigned long size); + #ifdef CONFIG_DMABOUNCE /* @@ -256,14 +232,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *, * @dev: valid struct device pointer * @small_buf_size: size of buffers to use with small buffer pool * @large_buf_size: size of buffers to use with large buffer pool (can be 0) + * @needs_bounce_fn: called to determine whether buffer needs bouncing * * This function should be called by low-level platform code to register * a device as requireing DMA buffer bouncing. The function will allocate * appropriate DMA pools for the device. 
- * */ extern int dmabounce_register_dev(struct device *, unsigned long, - unsigned long); + unsigned long, int (*)(struct device *, dma_addr_t, size_t)); /** * dmabounce_unregister_dev @@ -277,31 +253,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long, */ extern void dmabounce_unregister_dev(struct device *); -/** - * dma_needs_bounce - * - * @dev: valid struct device pointer - * @dma_handle: dma_handle of unbounced buffer - * @size: size of region being mapped - * - * Platforms that utilize the dmabounce mechanism must implement - * this function. - * - * The dmabounce routines call this function whenever a dma-mapping - * is requested to determine whether a given buffer needs to be bounced - * or not. The function must return 0 if the buffer is OK for - * DMA access and 1 if the buffer needs to be bounced. - * - */ -extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); - /* * The DMA API, implemented by dmabounce.c. See below for descriptions. */ -extern dma_addr_t __dma_map_single(struct device *, void *, size_t, - enum dma_data_direction); -extern void __dma_unmap_single(struct device *, dma_addr_t, size_t, - enum dma_data_direction); extern dma_addr_t __dma_map_page(struct device *, struct page *, unsigned long, size_t, enum dma_data_direction); extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, @@ -328,13 +282,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, } -static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr, - size_t size, enum dma_data_direction dir) -{ - __dma_single_cpu_to_dev(cpu_addr, size, dir); - return virt_to_dma(dev, cpu_addr); -} - static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir) { @@ -342,12 +289,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, return pfn_to_dma(dev, page_to_pfn(page)) + offset; } -static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir) -{ - __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); -} - static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { @@ -373,14 +314,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir) { + unsigned long offset; + struct page *page; dma_addr_t addr; + BUG_ON(!virt_addr_valid(cpu_addr)); + BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); BUG_ON(!valid_dma_direction(dir)); - addr = __dma_map_single(dev, cpu_addr, size, dir); - debug_dma_map_page(dev, virt_to_page(cpu_addr), - (unsigned long)cpu_addr & ~PAGE_MASK, size, - dir, addr, true); + page = virt_to_page(cpu_addr); + offset = (unsigned long)cpu_addr & ~PAGE_MASK; + addr = __dma_map_page(dev, page, offset, size, dir); + debug_dma_map_page(dev, page, offset, size, dir, addr, true); return addr; } @@ -430,7 +375,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { debug_dma_unmap_page(dev, handle, size, dir, true); - __dma_unmap_single(dev, handle, size, dir); + __dma_unmap_page(dev, handle, size, dir); } /** diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 42005542932..69a5b0b6455 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -1,15 
+1,16 @@ #ifndef __ASM_ARM_DMA_H #define __ASM_ARM_DMA_H -#include <asm/memory.h> - /* * This is the maximum virtual address which can be DMA'd from. */ -#ifndef ARM_DMA_ZONE_SIZE -#define MAX_DMA_ADDRESS 0xffffffff +#ifndef CONFIG_ZONE_DMA +#define MAX_DMA_ADDRESS 0xffffffffUL #else -#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE) +#define MAX_DMA_ADDRESS ({ \ + extern unsigned long arm_dma_zone_size; \ + arm_dma_zone_size ? \ + (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) #endif #ifdef CONFIG_ISA_DMA_API @@ -33,18 +34,18 @@ #define DMA_MODE_CASCADE 0xc0 #define DMA_AUTOINIT 0x10 -extern spinlock_t dma_spin_lock; +extern raw_spinlock_t dma_spin_lock; static inline unsigned long claim_dma_lock(void) { unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); + raw_spin_lock_irqsave(&dma_spin_lock, flags); return flags; } static inline void release_dma_lock(unsigned long flags) { - spin_unlock_irqrestore(&dma_spin_lock, flags); + raw_spin_unlock_irqrestore(&dma_spin_lock, flags); } /* Clear the 'DMA Pointer Flip Flop'. diff --git a/arch/arm/include/asm/ecard.h b/arch/arm/include/asm/ecard.h index 29f2610efc7..eaea14676d5 100644 --- a/arch/arm/include/asm/ecard.h +++ b/arch/arm/include/asm/ecard.h @@ -161,7 +161,6 @@ struct expansion_card { /* Private internal data */ const char *card_desc; /* Card description */ - CONST unsigned int podaddr; /* Base Linux address for card */ CONST loader_t loader; /* loader program */ u64 dma_mask; }; diff --git a/arch/arm/include/asm/emergency-restart.h b/arch/arm/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42..00000000000 --- a/arch/arm/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include <asm-generic/emergency-restart.h> - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index 2da8547de6d..88d61815f0c 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S @@ -4,8 +4,8 @@ * Interrupt handling. 
Preserves r7, r8, r9 */ .macro arch_irq_handler_default - get_irqnr_preamble r5, lr -1: get_irqnr_and_base r0, r6, r5, lr + get_irqnr_preamble r6, lr +1: get_irqnr_and_base r0, r2, r6, lr movne r1, sp @ @ routine called with r0 = irq number, r1 = struct pt_regs * @@ -17,21 +17,14 @@ /* * XXX * - * this macro assumes that irqstat (r6) and base (r5) are + * this macro assumes that irqstat (r2) and base (r6) are * preserved from get_irqnr_and_base above */ - ALT_SMP(test_for_ipi r0, r6, r5, lr) + ALT_SMP(test_for_ipi r0, r2, r6, lr) ALT_UP_B(9997f) movne r1, sp adrne lr, BSYM(1b) bne do_IPI - -#ifdef CONFIG_LOCAL_TIMERS - test_for_ltirq r0, r6, r5, lr - movne r0, sp - adrne lr, BSYM(1b) - bne do_local_timer -#endif #endif 9997: .endm @@ -40,7 +33,7 @@ .align 5 .global \symbol_name \symbol_name: - mov r4, lr + mov r8, lr arch_irq_handler_default - mov pc, r4 + mov pc, r8 .endm diff --git a/arch/arm/include/asm/errno.h b/arch/arm/include/asm/errno.h deleted file mode 100644 index 6e60f0612bb..00000000000 --- a/arch/arm/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ARM_ERRNO_H -#define _ARM_ERRNO_H - -#include <asm-generic/errno.h> - -#endif diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h new file mode 100644 index 00000000000..5abaf5bbd98 --- /dev/null +++ b/arch/arm/include/asm/exception.h @@ -0,0 +1,19 @@ +/* + * Annotations for marking C functions as exception handlers. + * + * These should only be used for C functions that are called from the low + * level exception entry code and not any intervening C code. + */ +#ifndef __ASM_ARM_EXCEPTION_H +#define __ASM_ARM_EXCEPTION_H + +#include <linux/ftrace.h> + +#define __exception __attribute__((section(".exception.text"))) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#define __exception_irq_entry __irq_entry +#else +#define __exception_irq_entry __exception +#endif + +#endif /* __ASM_ARM_EXCEPTION_H */ diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 8c73900da9e..253cc86318b 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -25,17 +25,17 @@ #ifdef CONFIG_SMP -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ smp_mb(); \ __asm__ __volatile__( \ - "1: ldrex %1, [%2]\n" \ + "1: ldrex %1, [%3]\n" \ " " insn "\n" \ - "2: strex %1, %0, [%2]\n" \ - " teq %1, #0\n" \ + "2: strex %2, %0, [%3]\n" \ + " teq %2, #0\n" \ " bne 1b\n" \ " mov %0, #0\n" \ - __futex_atomic_ex_table("%4") \ - : "=&r" (ret), "=&r" (oldval) \ + __futex_atomic_ex_table("%5") \ + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") @@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, #include <linux/preempt.h> #include <asm/domain.h> -#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ __asm__ __volatile__( \ - "1: " T(ldr) " %1, [%2]\n" \ + "1: " T(ldr) " %1, [%3]\n" \ " " insn "\n" \ - "2: " T(str) " %0, [%2]\n" \ + "2: " T(str) " %0, [%3]\n" \ " mov %0, #0\n" \ - __futex_atomic_ex_table("%4") \ - : "=&r" (ret), "=&r" (oldval) \ + __futex_atomic_ex_table("%5") \ + : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") @@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 
20; - int oldval = 0, ret; + int oldval = 0, ret, tmp; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; @@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) switch (op) { case FUTEX_OP_SET: - __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg); break; case FUTEX_OP_ADD: - __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg); break; case FUTEX_OP_OR: - __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg); break; case FUTEX_OP_ANDN: - __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg); + __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg); break; case FUTEX_OP_XOR: - __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg); + __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg); break; default: ret = -ENOSYS; diff --git a/arch/arm/include/asm/gpio.h b/arch/arm/include/asm/gpio.h index 166a7a3e284..11ad0bfbb0a 100644 --- a/arch/arm/include/asm/gpio.h +++ b/arch/arm/include/asm/gpio.h @@ -4,4 +4,23 @@ /* not all ARM platforms necessarily support this API ... */ #include <mach/gpio.h> +#ifndef __ARM_GPIOLIB_COMPLEX +/* Note: this may rely upon the value of ARCH_NR_GPIOS set in mach/gpio.h */ +#include <asm-generic/gpio.h> + +/* The trivial gpiolib dispatchers */ +#define gpio_get_value __gpio_get_value +#define gpio_set_value __gpio_set_value +#define gpio_cansleep __gpio_cansleep +#endif + +/* + * Provide a default gpio_to_irq() which should satisfy every case. + * However, some platforms want to do this differently, so allow them + * to override it. + */ +#ifndef gpio_to_irq +#define gpio_to_irq __gpio_to_irq +#endif + #endif /* _ARCH_ARM_GPIO_H */ diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 89ad1805e57..ddf07a92a6c 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -9,9 +9,6 @@ typedef struct { unsigned int __softirq_pending; -#ifdef CONFIG_LOCAL_TIMERS - unsigned int local_timer_irqs; -#endif #ifdef CONFIG_SMP unsigned int ipi_irqs[NR_IPI]; #endif diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 16bd4803158..1db1143a948 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -45,8 +45,15 @@ #define L2X0_CLEAN_INV_LINE_PA 0x7F0 #define L2X0_CLEAN_INV_LINE_IDX 0x7F8 #define L2X0_CLEAN_INV_WAY 0x7FC -#define L2X0_LOCKDOWN_WAY_D 0x900 -#define L2X0_LOCKDOWN_WAY_I 0x904 +/* + * The lockdown registers repeat 8 times for L310, the L210 has only one + * D and one I lockdown register at 0x0900 and 0x0904. 
+ */ +#define L2X0_LOCKDOWN_WAY_D_BASE 0x900 +#define L2X0_LOCKDOWN_WAY_I_BASE 0x904 +#define L2X0_LOCKDOWN_STRIDE 0x08 +#define L2X0_ADDR_FILTER_START 0xC00 +#define L2X0_ADDR_FILTER_END 0xC04 #define L2X0_TEST_OPERATION 0xF00 #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 @@ -60,11 +67,26 @@ #define L2X0_CACHE_ID_PART_MASK (0xf << 6) #define L2X0_CACHE_ID_PART_L210 (1 << 6) #define L2X0_CACHE_ID_PART_L310 (3 << 6) +#define L2X0_CACHE_ID_RTL_MASK 0x3f +#define L2X0_CACHE_ID_RTL_R0P0 0x0 +#define L2X0_CACHE_ID_RTL_R1P0 0x2 +#define L2X0_CACHE_ID_RTL_R2P0 0x4 +#define L2X0_CACHE_ID_RTL_R3P0 0x5 +#define L2X0_CACHE_ID_RTL_R3P1 0x6 +#define L2X0_CACHE_ID_RTL_R3P2 0x8 #define L2X0_AUX_CTRL_MASK 0xc0000fff +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3 +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3) +#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6 +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6) +#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9 +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9) #define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 #define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 -#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17) +#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17) #define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 #define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 #define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 @@ -72,8 +94,40 @@ #define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 #define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 +#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0 +#define L2X0_LATENCY_CTRL_RD_SHIFT 4 +#define L2X0_LATENCY_CTRL_WR_SHIFT 8 + +#define L2X0_ADDR_FILTER_EN 1 + #ifndef __ASSEMBLY__ extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask); +#if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF) +extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask); +#else +static inline int l2x0_of_init(__u32 aux_val, __u32 aux_mask) +{ + return -ENODEV; +} #endif +struct l2x0_regs { + unsigned long phy_base; + unsigned long aux_ctrl; + /* + * Whether the following registers need to be saved/restored + * depends on platform + */ + unsigned long tag_latency; + unsigned long data_latency; + unsigned long filter_start; + unsigned long filter_end; + unsigned long prefetch_ctrl; + unsigned long pwr_ctrl; +}; + +extern struct l2x0_regs l2x0_saved_regs; + +#endif /* __ASSEMBLY__ */ + #endif diff --git a/arch/arm/include/asm/hardware/entry-macro-gic.S b/arch/arm/include/asm/hardware/entry-macro-gic.S index c115b82fe80..74ebc803904 100644 --- a/arch/arm/include/asm/hardware/entry-macro-gic.S +++ b/arch/arm/include/asm/hardware/entry-macro-gic.S @@ -22,15 +22,11 @@ * interrupt controller spec. To wit: * * Interrupts 0-15 are IPI - * 16-28 are reserved - * 29-31 are local. We allow 30 to be used for the watchdog. + * 16-31 are local. We allow 30 to be used for the watchdog. * 32-1020 are global * 1021-1022 are reserved * 1023 is "spurious" (no interrupt) * - * For now, we ignore all local interrupts so only return an interrupt if it's - * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs. - * * A simple read from the controller will tell us the number of the highest * priority enabled interrupt. We then just need to check whether it is in the * valid range for an IRQ (30-1020 inclusive). 
@@ -43,7 +39,7 @@ ldr \tmp, =1021 bic \irqnr, \irqstat, #0x1c00 - cmp \irqnr, #29 + cmp \irqnr, #15 cmpcc \irqnr, \irqnr cmpne \irqnr, \tmp cmpcs \irqnr, \irqnr @@ -62,14 +58,3 @@ strcc \irqstat, [\base, #GIC_CPU_EOI] cmpcs \irqnr, \irqnr .endm - -/* As above, this assumes that irqstat and base are preserved.. */ - - .macro test_for_ltirq, irqnr, irqstat, base, tmp - bic \irqnr, \irqstat, #0x1c00 - mov \tmp, #0 - cmp \irqnr, #29 - moveq \tmp, #1 - streq \irqstat, [\base, #GIC_CPU_EOI] - cmp \tmp, #0 - .endm diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h index 0691f9dcc50..3e91f22046f 100644 --- a/arch/arm/include/asm/hardware/gic.h +++ b/arch/arm/include/asm/hardware/gic.h @@ -33,14 +33,33 @@ #define GIC_DIST_SOFTINT 0xf00 #ifndef __ASSEMBLY__ +#include <linux/irqdomain.h> +struct device_node; + extern void __iomem *gic_cpu_base_addr; extern struct irq_chip gic_arch_extn; -void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *); +void gic_init(unsigned int, int, void __iomem *, void __iomem *); +int gic_of_init(struct device_node *node, struct device_node *parent); void gic_secondary_init(unsigned int); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); -void gic_enable_ppi(unsigned int); + +struct gic_chip_data { + void __iomem *dist_base; + void __iomem *cpu_base; +#ifdef CONFIG_CPU_PM + u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; + u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; + u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; + u32 __percpu *saved_ppi_enable; + u32 __percpu *saved_ppi_conf; +#endif +#ifdef CONFIG_IRQ_DOMAIN + struct irq_domain domain; +#endif + unsigned int gic_irqs; +}; #endif #endif diff --git a/arch/arm/include/asm/hardware/iop3xx-gpio.h b/arch/arm/include/asm/hardware/iop3xx-gpio.h index b69d972b1f7..9eda7dc92ad 100644 --- a/arch/arm/include/asm/hardware/iop3xx-gpio.h +++ b/arch/arm/include/asm/hardware/iop3xx-gpio.h @@ -28,6 +28,8 @@ #include <mach/hardware.h> #include <asm-generic/gpio.h> +#define __ARM_GPIOLIB_COMPLEX + #define IOP3XX_N_GPIOS 8 static inline int gpio_get_value(unsigned gpio) diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index b2f95c72287..43cab498bc2 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -9,7 +9,7 @@ #ifndef __ASM_HARDWARE_IT8152_H #define __ASM_HARDWARE_IT8152_H -extern unsigned long it8152_base_address; +extern void __iomem *it8152_base_address; #define IT8152_IO_BASE (it8152_base_address + 0x03e00000) #define IT8152_CFGREG_BASE (it8152_base_address + 0x03f00000) @@ -105,7 +105,7 @@ struct pci_sys_data; extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc); extern void it8152_init_irq(void); -extern int it8152_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin); +extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); extern int it8152_pci_setup(int nr, struct pci_sys_data *sys); extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys); diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h index e4a04e4e562..33c78d7af2e 100644 --- a/arch/arm/include/asm/hardware/pl080.h +++ b/arch/arm/include/asm/hardware/pl080.h @@ -21,6 +21,9 @@ * OneNAND features. 
*/ +#ifndef ASM_PL080_H +#define ASM_PL080_H + #define PL080_INT_STATUS (0x00) #define PL080_TC_STATUS (0x04) #define PL080_TC_CLEAR (0x08) @@ -138,3 +141,4 @@ struct pl080s_lli { u32 control1; }; +#endif /* ASM_PL080_H */ diff --git a/arch/arm/include/asm/hardware/scoop.h b/arch/arm/include/asm/hardware/scoop.h index ebb3ceaa8fa..58cdf5d8412 100644 --- a/arch/arm/include/asm/hardware/scoop.h +++ b/arch/arm/include/asm/hardware/scoop.h @@ -61,7 +61,6 @@ struct scoop_pcmcia_dev { struct scoop_pcmcia_config { struct scoop_pcmcia_dev *devs; int num_devs; - void (*pcmcia_init)(void); void (*power_ctrl)(struct device *scoop, unsigned short cpr, int nr); }; diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index f389b2704d8..c190bc992f0 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -50,6 +50,7 @@ static inline void decode_ctrl_reg(u32 reg, #define ARM_DEBUG_ARCH_V6_1 2 #define ARM_DEBUG_ARCH_V7_ECP14 3 #define ARM_DEBUG_ARCH_V7_MM 4 +#define ARM_DEBUG_ARCH_V7_1 5 /* Breakpoint */ #define ARM_BREAKPOINT_EXECUTE 0 @@ -57,6 +58,7 @@ static inline void decode_ctrl_reg(u32 reg, /* Watchpoints */ #define ARM_BREAKPOINT_LOAD 1 #define ARM_BREAKPOINT_STORE 2 +#define ARM_FSR_ACCESS_MASK (1 << 11) /* Privilege Levels */ #define ARM_BREAKPOINT_PRIV 1 diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c1062c31710..c93a22a8b92 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -4,22 +4,26 @@ /* * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP */ -#define HWCAP_SWP 1 -#define HWCAP_HALF 2 -#define HWCAP_THUMB 4 -#define HWCAP_26BIT 8 /* Play it safe */ -#define HWCAP_FAST_MULT 16 -#define HWCAP_FPA 32 -#define HWCAP_VFP 64 -#define HWCAP_EDSP 128 -#define HWCAP_JAVA 256 -#define HWCAP_IWMMXT 512 -#define HWCAP_CRUNCH 1024 -#define HWCAP_THUMBEE 2048 -#define HWCAP_NEON 4096 -#define HWCAP_VFPv3 8192 -#define HWCAP_VFPv3D16 16384 -#define HWCAP_TLS 32768 +#define HWCAP_SWP (1 << 0) +#define HWCAP_HALF (1 << 1) +#define HWCAP_THUMB (1 << 2) +#define HWCAP_26BIT (1 << 3) /* Play it safe */ +#define HWCAP_FAST_MULT (1 << 4) +#define HWCAP_FPA (1 << 5) +#define HWCAP_VFP (1 << 6) +#define HWCAP_EDSP (1 << 7) +#define HWCAP_JAVA (1 << 8) +#define HWCAP_IWMMXT (1 << 9) +#define HWCAP_CRUNCH (1 << 10) +#define HWCAP_THUMBEE (1 << 11) +#define HWCAP_NEON (1 << 12) +#define HWCAP_VFPv3 (1 << 13) +#define HWCAP_VFPv3D16 (1 << 14) +#define HWCAP_TLS (1 << 15) +#define HWCAP_VFPv4 (1 << 16) +#define HWCAP_IDIVA (1 << 17) +#define HWCAP_IDIVT (1 << 18) +#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #if defined(__KERNEL__) && !defined(__ASSEMBLY__) /* diff --git a/arch/arm/include/asm/i8253.h b/arch/arm/include/asm/i8253.h deleted file mode 100644 index 70656b69d5c..00000000000 --- a/arch/arm/include/asm/i8253.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASMARM_I8253_H -#define __ASMARM_I8253_H - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 - -#define PIT_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) - -extern raw_spinlock_t i8253_lock; - -#define outb_pit outb_p -#define inb_pit inb_p - -#endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d66605dea55..065d100fa63 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -80,6 +80,7 @@ extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); extern 
void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached); extern void __iounmap(volatile void __iomem *addr); /* @@ -110,6 +111,27 @@ static inline void __iomem *__typesafe_io(unsigned long addr) #include <mach/io.h> /* + * This is the limit of PC card/PCI/ISA IO space, which is by default + * 64K if we have PC card, PCI or ISA support. Otherwise, default to + * zero to prevent ISA/PCI drivers claiming IO space (and potentially + * oopsing.) + * + * Only set this larger if you really need inb() et.al. to operate over + * a larger address space. Note that SOC_COMMON ioremaps each sockets + * IO space area, and so inb() et.al. must be defined to operate as per + * readb() et.al. on such platforms. + */ +#ifndef IO_SPACE_LIMIT +#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE) +#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff) +#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD) +#define IO_SPACE_LIMIT ((resource_size_t)0xffff) +#else +#define IO_SPACE_LIMIT ((resource_size_t)0) +#endif +#endif + +/* * IO port access primitives * ------------------------- * @@ -189,11 +211,11 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * IO port primitives for more information. */ #ifdef __mem_pci -#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; }) -#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \ - __raw_readw(__mem_pci(c))); __v; }) -#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \ - __raw_readl(__mem_pci(c))); __v; }) +#define readb_relaxed(c) ({ u8 __r = __raw_readb(__mem_pci(c)); __r; }) +#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \ + __raw_readw(__mem_pci(c))); __r; }) +#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ + __raw_readl(__mem_pci(c))); __r; }) #define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c))) #define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ @@ -238,7 +260,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * ioremap and friends. * * ioremap takes a PCI memory address, as specified in - * Documentation/IO-mapping.txt. + * Documentation/io-mapping.txt. 
* */ #ifndef __arch_ioremap @@ -260,10 +282,16 @@ extern void _memset_io(volatile void __iomem *, int, size_t); #define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; }) #define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; }) +#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) + #define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); }) #define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); }) #define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); }) +#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); }) +#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); }) + #define ioread8_rep(p,d,c) __raw_readsb(p,d,c) #define ioread16_rep(p,d,c) __raw_readsw(p,d,c) #define ioread32_rep(p,d,c) __raw_readsl(p,d,c) diff --git a/arch/arm/include/asm/ioctl.h b/arch/arm/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe..00000000000 --- a/arch/arm/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 2721a5814cb..5a526afb5f1 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -23,6 +23,7 @@ struct pt_regs; extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); +void handle_IRQ(unsigned int, struct pt_regs *); void init_IRQ(void); #endif diff --git a/arch/arm/include/asm/irq_regs.h b/arch/arm/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b7027..00000000000 --- a/arch/arm/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/irq_regs.h> diff --git a/arch/arm/include/asm/kdebug.h b/arch/arm/include/asm/kdebug.h deleted file mode 100644 index 6ece1b03766..00000000000 --- a/arch/arm/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/kdebug.h> diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index e46bdd0097e..feec86768f9 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -24,12 +24,6 @@ #define MAX_INSN_SIZE 2 #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ -/* - * This undefined instruction must be unique and - * reserved solely for kprobes' use. - */ -#define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8 - #define regs_return_value(regs) ((regs)->ARM_r0) #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 @@ -38,14 +32,17 @@ typedef u32 kprobe_opcode_t; struct kprobe; typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); - typedef unsigned long (kprobe_check_cc)(unsigned long); +typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *); +typedef void (kprobe_insn_fn_t)(void); /* Architecture specific copy of original instruction. 
*/ struct arch_specific_insn { - kprobe_opcode_t *insn; - kprobe_insn_handler_t *insn_handler; - kprobe_check_cc *insn_check_cc; + kprobe_opcode_t *insn; + kprobe_insn_handler_t *insn_handler; + kprobe_check_cc *insn_check_cc; + kprobe_insn_singlestep_t *insn_singlestep; + kprobe_insn_fn_t *insn_fn; }; struct prev_kprobe { @@ -62,20 +59,9 @@ struct kprobe_ctlblk { }; void arch_remove_kprobe(struct kprobe *); -void kretprobe_trampoline(void); - int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); -enum kprobe_insn { - INSN_REJECTED, - INSN_GOOD, - INSN_GOOD_NO_SLOT -}; - -enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, - struct arch_specific_insn *); -void __init arm_kprobe_decode_init(void); #endif /* _ARM_KPROBES_H */ diff --git a/arch/arm/include/asm/local.h b/arch/arm/include/asm/local.h deleted file mode 100644 index c11c530f74d..00000000000 --- a/arch/arm/include/asm/local.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local.h> diff --git a/arch/arm/include/asm/local64.h b/arch/arm/include/asm/local64.h deleted file mode 100644 index 36c93b5cc23..00000000000 --- a/arch/arm/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/local64.h> diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h index 080d74f8128..c6a18424888 100644 --- a/arch/arm/include/asm/localtimer.h +++ b/arch/arm/include/asm/localtimer.h @@ -10,6 +10,9 @@ #ifndef __ASM_ARM_LOCALTIMER_H #define __ASM_ARM_LOCALTIMER_H +#include <linux/errno.h> +#include <linux/interrupt.h> + struct clock_event_device; /* @@ -17,27 +20,20 @@ struct clock_event_device; */ void percpu_timer_setup(void); -/* - * Called from assembly, this is the local timer IRQ handler - */ -asmlinkage void do_local_timer(struct pt_regs *); - - #ifdef CONFIG_LOCAL_TIMERS #ifdef CONFIG_HAVE_ARM_TWD #include "smp_twd.h" -#define local_timer_ack() twd_timer_ack() +#define local_timer_stop(c) twd_timer_stop((c)) #else /* - * Platform provides this to acknowledge a local timer IRQ. - * Returns true if the local timer IRQ is to be processed. 
+ * Stop the local timer */ -int local_timer_ack(void); +void local_timer_stop(struct clock_event_device *); #endif @@ -52,6 +48,10 @@ static inline int local_timer_setup(struct clock_event_device *evt) { return -ENXIO; } + +static inline void local_timer_stop(struct clock_event_device *evt) +{ +} #endif #endif diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 946f4d778f7..7d19425dd49 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -17,12 +17,16 @@ struct sys_timer; struct machine_desc { unsigned int nr; /* architecture number */ const char *name; /* architecture name */ - unsigned long boot_params; /* tagged list */ + unsigned long atag_offset; /* tagged list (relative) */ const char **dt_compat; /* array of device tree * 'compatible' strings */ unsigned int nr_irqs; /* number of IRQs */ +#ifdef CONFIG_ZONE_DMA + unsigned long dma_zone_size; /* size of DMA-able area */ +#endif + unsigned int video_start; /* start of video RAM */ unsigned int video_end; /* end of video RAM */ @@ -30,8 +34,7 @@ struct machine_desc { unsigned int reserve_lp1 :1; /* never has lp1 */ unsigned int reserve_lp2 :1; /* never has lp2 */ unsigned int soft_reboot :1; /* soft reboot */ - void (*fixup)(struct machine_desc *, - struct tag *, char **, + void (*fixup)(struct tag *, char **, struct meminfo *); void (*reserve)(void);/* reserve mem blocks */ void (*map_io)(void);/* IO mapping function */ @@ -70,4 +73,11 @@ static const struct machine_desc __mach_desc_##_type \ #define MACHINE_END \ }; +#define DT_MACHINE_START(_name, _namestr) \ +static const struct machine_desc __mach_desc_##_name \ + __used \ + __attribute__((__section__(".arch.info.init"))) = { \ + .nr = ~0, \ + .name = _namestr, + #endif diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index d2fedb5aeb1..b36f3654bf5 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -29,6 +29,7 @@ struct map_desc { #define MT_MEMORY_NONCACHED 11 #define MT_MEMORY_DTCM 12 #define MT_MEMORY_ITCM 13 +#define MT_MEMORY_SO 14 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index 16330bd0657..186efd4e05c 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -25,7 +25,7 @@ struct hw_pci { void (*preinit)(void); void (*postinit)(void); u8 (*swizzle)(struct pci_dev *dev, u8 *pin); - int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin); + int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); }; /* @@ -44,7 +44,7 @@ struct pci_sys_data { /* Bridge swizzling */ u8 (*swizzle)(struct pci_dev *, u8 *); /* IRQ mapping */ - int (*map_irq)(struct pci_dev *, u8, u8); + int (*map_irq)(const struct pci_dev *, u8, u8); struct hw_pci *hw; void *private_data; /* platform controller private data */ }; diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index af44a8fb348..a8997d71084 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -16,9 +16,12 @@ #include <linux/compiler.h> #include <linux/const.h> #include <linux/types.h> -#include <mach/memory.h> #include <asm/sizes.h> +#ifdef CONFIG_NEED_MACH_MEMORY_H +#include <mach/memory.h> +#endif + /* * Allow for constants defined here to be used from assembly code * by prepending the UL suffix only with actual C code compilation. @@ -77,16 +80,7 @@ */ #define IOREMAP_MAX_ORDER 24 -/* - * Size of DMA-consistent memory region. 
Must be multiple of 2M, - * between 2MB and 14MB inclusive. - */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE SZ_2M -#endif - #define CONSISTENT_END (0xffe00000UL) -#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) #else /* CONFIG_MMU */ @@ -160,7 +154,6 @@ * so that all we need to do is modify the 8-bit constant field. */ #define __PV_BITS_31_24 0x81000000 -#define __PV_BITS_23_16 0x00810000 extern unsigned long __pv_phys_offset; #define PHYS_OFFSET __pv_phys_offset @@ -178,9 +171,6 @@ static inline unsigned long __virt_to_phys(unsigned long x) { unsigned long t; __pv_stub(x, t, "add", __PV_BITS_31_24); -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT - __pv_stub(t, t, "add", __PV_BITS_23_16); -#endif return t; } @@ -188,9 +178,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) { unsigned long t; __pv_stub(x, t, "sub", __PV_BITS_31_24); -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT - __pv_stub(t, t, "sub", __PV_BITS_23_16); -#endif return t; } #else @@ -200,19 +187,11 @@ static inline unsigned long __phys_to_virt(unsigned long x) #endif #ifndef PHYS_OFFSET +#ifdef PLAT_PHYS_OFFSET #define PHYS_OFFSET PLAT_PHYS_OFFSET -#endif - -/* - * The DMA mask corresponding to the maximum bus address allocatable - * using GFP_DMA. The default here places no restriction on DMA - * allocations. This must be the smallest DMA mask in the system, - * so a successful GFP_DMA allocation will always satisfy this. - */ -#ifndef ARM_DMA_ZONE_SIZE -#define ISA_DMA_THRESHOLD (0xffffffffULL) #else -#define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1) +#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) +#endif #endif /* diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b4ffe9d5b52..14965658a92 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,7 +6,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID unsigned int id; - spinlock_t id_lock; + raw_spinlock_t id_lock; #endif unsigned int kvm_seq; } mm_context_t; @@ -16,7 +16,7 @@ typedef struct { /* init_mm.context.id_lock should be initialized. 
*/ #define INIT_MM_CONTEXT(name) \ - .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock), + .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), #else #define ASID(mm) (0) #endif diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h index 543b44916d2..6c6809f982f 100644 --- a/arch/arm/include/asm/module.h +++ b/arch/arm/include/asm/module.h @@ -31,11 +31,7 @@ struct mod_arch_specific { /* Add __virt_to_phys patching state as well */ #ifdef CONFIG_ARM_PATCH_PHYS_VIRT -#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT -#define MODULE_ARCH_VERMAGIC_P2V "p2v16 " -#else #define MODULE_ARCH_VERMAGIC_P2V "p2v8 " -#endif #else #define MODULE_ARCH_VERMAGIC_P2V "" #endif diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index d8387437ec5..53426c66352 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -34,6 +34,7 @@ struct outer_cache_fns { void (*sync)(void); #endif void (*set_debug)(unsigned long); + void (*resume)(void); }; #ifdef CONFIG_OUTER_CACHE @@ -74,6 +75,12 @@ static inline void outer_disable(void) outer_cache.disable(); } +static inline void outer_resume(void) +{ + if (outer_cache.resume) + outer_cache.resume(); +} + #else static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index ac75d084888..ca94653f1ec 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -151,47 +151,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); -typedef unsigned long pteval_t; - -#undef STRICT_MM_TYPECHECKS - -#ifdef STRICT_MM_TYPECHECKS -/* - * These are used to make use of C type-checking.. - */ -typedef struct { pteval_t pte; } pte_t; -typedef struct { unsigned long pmd; } pmd_t; -typedef struct { unsigned long pgd[2]; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; - -#define pte_val(x) ((x).pte) -#define pmd_val(x) ((x).pmd) -#define pgd_val(x) ((x).pgd[0]) -#define pgprot_val(x) ((x).pgprot) - -#define __pte(x) ((pte_t) { (x) } ) -#define __pmd(x) ((pmd_t) { (x) } ) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#else -/* - * .. 
while these make it easier on the compiler - */ -typedef pteval_t pte_t; -typedef unsigned long pmd_t; -typedef unsigned long pgd_t[2]; -typedef unsigned long pgprot_t; - -#define pte_val(x) (x) -#define pmd_val(x) (x) -#define pgd_val(x) ((x)[0]) -#define pgprot_val(x) (x) - -#define __pte(x) (x) -#define __pmd(x) (x) -#define __pgprot(x) (x) - -#endif /* STRICT_MM_TYPECHECKS */ +#include <asm/pgtable-2level-types.h> #endif /* CONFIG_MMU */ diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 92e2a833693..2b1f245db0c 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -3,9 +3,19 @@ #ifdef __KERNEL__ #include <asm-generic/pci-dma-compat.h> +#include <asm-generic/pci-bridge.h> #include <asm/mach/pci.h> /* for pci_sys_data */ -#include <mach/hardware.h> /* for PCIBIOS_MIN_* */ + +extern unsigned long pcibios_min_io; +#define PCIBIOS_MIN_IO pcibios_min_io +extern unsigned long pcibios_min_mem; +#define PCIBIOS_MIN_MEM pcibios_min_mem + +static inline int pcibios_assign_all_busses(void) +{ + return pci_has_flag(PCI_REASSIGN_ALL_RSRC); +} #ifdef CONFIG_PCI_DOMAINS static inline int pci_domain_nr(struct pci_bus *bus) diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h deleted file mode 100644 index b4e32d8ec07..00000000000 --- a/arch/arm/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ARM_PERCPU -#define __ARM_PERCPU - -#include <asm-generic/percpu.h> - -#endif diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index c4aa4e8c6af..0f8e3827a89 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -24,6 +24,8 @@ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_V6MP, ARM_PERF_PMU_ID_CA8, ARM_PERF_PMU_ID_CA9, + ARM_PERF_PMU_ID_CA5, + ARM_PERF_PMU_ID_CA15, ARM_NUM_PMU_IDS, }; diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 22de005f159..3e08fd3fbb6 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -105,9 +105,9 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) } static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, - unsigned long prot) + pmdval_t prot) { - unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot; + pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot; pmdp[0] = __pmd(pmdval); pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); flush_pmd_entry(pmdp); diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h new file mode 100644 index 00000000000..5cfba15cb40 --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1,93 @@ +/* + * arch/arm/include/asm/pgtable-2level-hwdef.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_HWDEF_H +#define _ASM_PGTABLE_2LEVEL_HWDEF_H + +/* + * Hardware page table definitions. 
+ * + * + Level 1 descriptor (PMD) + * - common + */ +#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) +#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) +#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) +#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) +#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) +#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ +/* + * - section + */ +#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) +#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) +#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ +#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10) +#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11) +#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */ +#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */ +#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */ +#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ +#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ +#define PMD_SECT_AF (_AT(pmdval_t, 0)) + +#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) +#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) +#define PMD_SECT_WT (PMD_SECT_CACHEABLE) +#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) +#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) +#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) + +/* + * - coarse table (not used) + */ + +/* + * + Level 2 descriptor (PTE) + * - common + */ +#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) +#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) +#define PTE_TYPE_LARGE (_AT(pteval_t, 1) << 0) +#define PTE_TYPE_SMALL (_AT(pteval_t, 2) << 0) +#define PTE_TYPE_EXT (_AT(pteval_t, 3) << 0) /* v5 */ +#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) +#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) + +/* + * - extended small page/tiny page + */ +#define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ +#define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) +#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) +#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) +#define PTE_EXT_AP_UNO_SRO (_AT(pteval_t, 0) << 4) +#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0) +#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1) +#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) +#define PTE_EXT_TEX(x) (_AT(pteval_t, (x)) << 6) /* v5 */ +#define PTE_EXT_APX (_AT(pteval_t, 1) << 9) /* v6 */ +#define PTE_EXT_COHERENT (_AT(pteval_t, 1) << 9) /* XScale3 */ +#define PTE_EXT_SHARED (_AT(pteval_t, 1) << 10) /* v6 */ +#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* v6 */ + +/* + * - small page + */ +#define PTE_SMALL_AP_MASK (_AT(pteval_t, 0xff) << 4) +#define PTE_SMALL_AP_UNO_SRO (_AT(pteval_t, 0x00) << 4) +#define PTE_SMALL_AP_UNO_SRW (_AT(pteval_t, 0x55) << 4) +#define PTE_SMALL_AP_URO_SRW (_AT(pteval_t, 0xaa) << 4) +#define PTE_SMALL_AP_URW_SRW (_AT(pteval_t, 0xff) << 4) + +#define PHYS_MASK (~0UL) + +#endif diff --git a/arch/arm/include/asm/pgtable-2level-types.h b/arch/arm/include/asm/pgtable-2level-types.h new file mode 100644 index 00000000000..66cb5b0e89c --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level-types.h @@ -0,0 +1,67 @@ +/* + * arch/arm/include/asm/pgtable-2level-types.h + * + * Copyright (C) 1995-2003 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#ifndef _ASM_PGTABLE_2LEVEL_TYPES_H +#define _ASM_PGTABLE_2LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u32 pteval_t; +typedef u32 pmdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS +/* + * These are used to make use of C type-checking.. + */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pmdval_t pgd[2]; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd[0]) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else +/* + * .. while these make it easier on the compiler + */ +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pmdval_t pgd_t[2]; +typedef pteval_t pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) ((x)[0]) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#endif /* _ASM_PGTABLE_2LEVEL_TYPES_H */ diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h new file mode 100644 index 00000000000..470457e1cfc --- /dev/null +++ b/arch/arm/include/asm/pgtable-2level.h @@ -0,0 +1,143 @@ +/* + * arch/arm/include/asm/pgtable-2level.h + * + * Copyright (C) 1995-2002 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_PGTABLE_2LEVEL_H +#define _ASM_PGTABLE_2LEVEL_H + +/* + * Hardware-wise, we have a two level page table structure, where the first + * level has 4096 entries, and the second level has 256 entries. Each entry + * is one 32-bit word. Most of the bits in the second level entry are used + * by hardware, and there aren't any "accessed" and "dirty" bits. + * + * Linux on the other hand has a three level page table structure, which can + * be wrapped to fit a two level page table structure easily - using the PGD + * and PTE only. However, Linux also expects one "PTE" table per page, and + * at least a "dirty" bit. + * + * Therefore, we tweak the implementation slightly - we tell Linux that we + * have 2048 entries in the first level, each of which is 8 bytes (iow, two + * hardware pointers to the second level.) The second level contains two + * hardware PTE tables arranged contiguously, preceded by Linux versions + * which contain the state information Linux needs. We, therefore, end up + * with 512 entries in the "PTE" level. 
+ * + * This leads to the page tables having the following layout: + * + * pgd pte + * | | + * +--------+ + * | | +------------+ +0 + * +- - - - + | Linux pt 0 | + * | | +------------+ +1024 + * +--------+ +0 | Linux pt 1 | + * | |-----> +------------+ +2048 + * +- - - - + +4 | h/w pt 0 | + * | |-----> +------------+ +3072 + * +--------+ +8 | h/w pt 1 | + * | | +------------+ +4096 + * + * See L_PTE_xxx below for definitions of bits in the "Linux pt", and + * PTE_xxx for definitions of bits appearing in the "h/w pt". + * + * PMD_xxx definitions refer to bits in the first level page table. + * + * The "dirty" bit is emulated by only granting hardware write permission + * iff the page is marked "writable" and "dirty" in the Linux PTE. This + * means that a write to a clean page will cause a permission fault, and + * the Linux MM layer will mark the page dirty via handle_pte_fault(). + * For the hardware to notice the permission change, the TLB entry must + * be flushed, and ptep_set_access_flags() does that for us. + * + * The "accessed" or "young" bit is emulated by a similar method; we only + * allow accesses to the page if the "young" bit is set. Accesses to the + * page will cause a fault, and handle_pte_fault() will set the young bit + * for us as long as the page is marked present in the corresponding Linux + * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is + * up to date. + * + * However, when the "young" bit is cleared, we deny access to the page + * by clearing the hardware PTE. Currently Linux does not flush the TLB + * for us in this case, which means the TLB will retain the transation + * until either the TLB entry is evicted under pressure, or a context + * switch which changes the user space mapping occurs. + */ +#define PTRS_PER_PTE 512 +#define PTRS_PER_PMD 1 +#define PTRS_PER_PGD 2048 + +#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) +#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) +#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) + +/* + * PMD_SHIFT determines the size of the area a second-level page table can map + * PGDIR_SHIFT determines what a third-level page table entry can map + */ +#define PMD_SHIFT 21 +#define PGDIR_SHIFT 21 + +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * section address mask and size definitions. + */ +#define SECTION_SHIFT 20 +#define SECTION_SIZE (1UL << SECTION_SHIFT) +#define SECTION_MASK (~(SECTION_SIZE-1)) + +/* + * ARMv6 supersection address mask and size definitions. + */ +#define SUPERSECTION_SHIFT 24 +#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT) +#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) + +/* + * "Linux" PTE definitions. + * + * We keep two sets of PTEs - the hardware and the linux version. + * This allows greater flexibility in the way we map the Linux bits + * onto the hardware tables, and allows us to have YOUNG and DIRTY + * bits. + * + * The PTE table pointer refers to the hardware entries; the "Linux" + * entries are stored 1024 bytes below. 
+ */ +#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) +#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) +#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ +#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) +#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) +#define L_PTE_USER (_AT(pteval_t, 1) << 8) +#define L_PTE_XN (_AT(pteval_t, 1) << 9) +#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ + +/* + * These are the memory types, defined to be compatible with + * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB + */ +#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ +#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ +#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ +#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ +#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ +#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ +#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ +#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ +#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ +#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ +#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) + +#endif /* _ASM_PGTABLE_2LEVEL_H */ diff --git a/arch/arm/include/asm/pgtable-hwdef.h b/arch/arm/include/asm/pgtable-hwdef.h index fd1521d5cb9..183111164ce 100644 --- a/arch/arm/include/asm/pgtable-hwdef.h +++ b/arch/arm/include/asm/pgtable-hwdef.h @@ -10,81 +10,6 @@ #ifndef _ASMARM_PGTABLE_HWDEF_H #define _ASMARM_PGTABLE_HWDEF_H -/* - * Hardware page table definitions. - * - * + Level 1 descriptor (PMD) - * - common - */ -#define PMD_TYPE_MASK (3 << 0) -#define PMD_TYPE_FAULT (0 << 0) -#define PMD_TYPE_TABLE (1 << 0) -#define PMD_TYPE_SECT (2 << 0) -#define PMD_BIT4 (1 << 4) -#define PMD_DOMAIN(x) ((x) << 5) -#define PMD_PROTECTION (1 << 9) /* v5 */ -/* - * - section - */ -#define PMD_SECT_BUFFERABLE (1 << 2) -#define PMD_SECT_CACHEABLE (1 << 3) -#define PMD_SECT_XN (1 << 4) /* v6 */ -#define PMD_SECT_AP_WRITE (1 << 10) -#define PMD_SECT_AP_READ (1 << 11) -#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */ -#define PMD_SECT_APX (1 << 15) /* v6 */ -#define PMD_SECT_S (1 << 16) /* v6 */ -#define PMD_SECT_nG (1 << 17) /* v6 */ -#define PMD_SECT_SUPER (1 << 18) /* v6 */ - -#define PMD_SECT_UNCACHED (0) -#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) -#define PMD_SECT_WT (PMD_SECT_CACHEABLE) -#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE) -#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE) -#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2)) - -/* - * - coarse table (not used) - */ - -/* - * + Level 2 descriptor (PTE) - * - common - */ -#define PTE_TYPE_MASK (3 << 0) -#define PTE_TYPE_FAULT (0 << 0) -#define PTE_TYPE_LARGE (1 << 0) -#define PTE_TYPE_SMALL (2 << 0) -#define PTE_TYPE_EXT (3 << 0) /* v5 */ -#define PTE_BUFFERABLE (1 << 2) -#define PTE_CACHEABLE (1 << 3) - -/* - * - extended small page/tiny page - */ -#define PTE_EXT_XN (1 << 0) /* v6 */ -#define PTE_EXT_AP_MASK (3 << 4) -#define PTE_EXT_AP0 (1 << 4) -#define PTE_EXT_AP1 (2 << 4) -#define PTE_EXT_AP_UNO_SRO (0 << 4) -#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0) -#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1) -#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) -#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ -#define PTE_EXT_APX (1 << 9) /* v6 */ -#define PTE_EXT_COHERENT (1 << 
9) /* XScale3 */ -#define PTE_EXT_SHARED (1 << 10) /* v6 */ -#define PTE_EXT_NG (1 << 11) /* v6 */ - -/* - * - small page - */ -#define PTE_SMALL_AP_MASK (0xff << 4) -#define PTE_SMALL_AP_UNO_SRO (0x00 << 4) -#define PTE_SMALL_AP_UNO_SRW (0x55 << 4) -#define PTE_SMALL_AP_URO_SRW (0xaa << 4) -#define PTE_SMALL_AP_URW_SRW (0xff << 4) +#include <asm/pgtable-2level-hwdef.h> #endif diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 5750704e027..9451dce3a55 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -24,6 +24,8 @@ #include <mach/vmalloc.h> #include <asm/pgtable-hwdef.h> +#include <asm/pgtable-2level.h> + /* * Just any arbitrary offset to the start of the vmalloc VM area: the * current 8MB value just means that there will be a 8MB "hole" after the @@ -41,79 +43,6 @@ #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) #endif -/* - * Hardware-wise, we have a two level page table structure, where the first - * level has 4096 entries, and the second level has 256 entries. Each entry - * is one 32-bit word. Most of the bits in the second level entry are used - * by hardware, and there aren't any "accessed" and "dirty" bits. - * - * Linux on the other hand has a three level page table structure, which can - * be wrapped to fit a two level page table structure easily - using the PGD - * and PTE only. However, Linux also expects one "PTE" table per page, and - * at least a "dirty" bit. - * - * Therefore, we tweak the implementation slightly - we tell Linux that we - * have 2048 entries in the first level, each of which is 8 bytes (iow, two - * hardware pointers to the second level.) The second level contains two - * hardware PTE tables arranged contiguously, preceded by Linux versions - * which contain the state information Linux needs. We, therefore, end up - * with 512 entries in the "PTE" level. - * - * This leads to the page tables having the following layout: - * - * pgd pte - * | | - * +--------+ - * | | +------------+ +0 - * +- - - - + | Linux pt 0 | - * | | +------------+ +1024 - * +--------+ +0 | Linux pt 1 | - * | |-----> +------------+ +2048 - * +- - - - + +4 | h/w pt 0 | - * | |-----> +------------+ +3072 - * +--------+ +8 | h/w pt 1 | - * | | +------------+ +4096 - * - * See L_PTE_xxx below for definitions of bits in the "Linux pt", and - * PTE_xxx for definitions of bits appearing in the "h/w pt". - * - * PMD_xxx definitions refer to bits in the first level page table. - * - * The "dirty" bit is emulated by only granting hardware write permission - * iff the page is marked "writable" and "dirty" in the Linux PTE. This - * means that a write to a clean page will cause a permission fault, and - * the Linux MM layer will mark the page dirty via handle_pte_fault(). - * For the hardware to notice the permission change, the TLB entry must - * be flushed, and ptep_set_access_flags() does that for us. - * - * The "accessed" or "young" bit is emulated by a similar method; we only - * allow accesses to the page if the "young" bit is set. Accesses to the - * page will cause a fault, and handle_pte_fault() will set the young bit - * for us as long as the page is marked present in the corresponding Linux - * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is - * up to date. - * - * However, when the "young" bit is cleared, we deny access to the page - * by clearing the hardware PTE. 
Currently Linux does not flush the TLB - * for us in this case, which means the TLB will retain the transation - * until either the TLB entry is evicted under pressure, or a context - * switch which changes the user space mapping occurs. - */ -#define PTRS_PER_PTE 512 -#define PTRS_PER_PMD 1 -#define PTRS_PER_PGD 2048 - -#define PTE_HWTABLE_PTRS (PTRS_PER_PTE) -#define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) -#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) - -/* - * PMD_SHIFT determines the size of the area a second-level page table can map - * PGDIR_SHIFT determines what a third-level page table entry can map - */ -#define PMD_SHIFT 21 -#define PGDIR_SHIFT 21 - #define LIBRARY_TEXT_START 0x0c000000 #ifndef __ASSEMBLY__ @@ -124,12 +53,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) -#endif /* !__ASSEMBLY__ */ - -#define PMD_SIZE (1UL << PMD_SHIFT) -#define PMD_MASK (~(PMD_SIZE-1)) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) /* * This is the lowest virtual address we can permit any user space @@ -138,60 +61,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); */ #define FIRST_USER_ADDRESS PAGE_SIZE -#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) - -/* - * section address mask and size definitions. - */ -#define SECTION_SHIFT 20 -#define SECTION_SIZE (1UL << SECTION_SHIFT) -#define SECTION_MASK (~(SECTION_SIZE-1)) - -/* - * ARMv6 supersection address mask and size definitions. - */ -#define SUPERSECTION_SHIFT 24 -#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT) -#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1)) - -/* - * "Linux" PTE definitions. - * - * We keep two sets of PTEs - the hardware and the linux version. - * This allows greater flexibility in the way we map the Linux bits - * onto the hardware tables, and allows us to have YOUNG and DIRTY - * bits. - * - * The PTE table pointer refers to the hardware entries; the "Linux" - * entries are stored 1024 bytes below. 
- */ -#define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) -#define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) -#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ -#define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) -#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) -#define L_PTE_USER (_AT(pteval_t, 1) << 8) -#define L_PTE_XN (_AT(pteval_t, 1) << 9) -#define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ - -/* - * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB - */ -#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ -#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ -#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ -#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ -#define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ -#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ -#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ -#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ -#define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ -#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ -#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) - -#ifndef __ASSEMBLY__ - /* * The pgprot_* and protection_map entries will be fixed up in runtime * to include the cachable and bufferable bits based on memory policy, @@ -232,6 +101,9 @@ extern pgprot_t pgprot_kernel; #define pgprot_writecombine(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) +#define pgprot_stronglyordered(prot) \ + __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) + #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) @@ -327,10 +199,10 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; static inline pte_t *pmd_page_vaddr(pmd_t pmd) { - return __va(pmd_val(pmd) & PAGE_MASK); + return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); } -#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) +#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) /* we don't need complex calculations here as the pmd is folded into the pgd */ #define pmd_addr_end(addr,end) (end) @@ -351,7 +223,7 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) #define pte_unmap(pte) __pte_unmap(pte) -#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) +#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) #define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) #define pte_page(pte) pfn_to_page(pte_pfn(pte)) diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 7544ce6b481..71d99b83cdb 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h @@ -13,7 +13,12 @@ #define __ARM_PMU_H__ #include <linux/interrupt.h> +#include <linux/perf_event.h> +/* + * Types of PMUs that can be accessed directly and require mutual + * exclusion between profiling tools. + */ enum arm_pmu_type { ARM_PMU_DEVICE_CPU = 0, ARM_NUM_PMU_DEVICES, @@ -37,22 +42,18 @@ struct arm_pmu_platdata { * reserve_pmu() - reserve the hardware performance counters * * Reserve the hardware performance counters in the system for exclusive use. - * The platform_device for the system is returned on success, ERR_PTR() - * encoded error on failure. + * Returns 0 on success or -EBUSY if the lock is already held. 
*/ -extern struct platform_device * -reserve_pmu(enum arm_pmu_type device); +extern int +reserve_pmu(enum arm_pmu_type type); /** * release_pmu() - Relinquish control of the performance counters * * Release the performance counters and allow someone else to use them. - * Callers must have disabled the counters and released IRQs before calling - * this. The platform_device returned from reserve_pmu() must be passed as - * a cookie. */ -extern int -release_pmu(struct platform_device *pdev); +extern void +release_pmu(enum arm_pmu_type type); /** * init_pmu() - Initialise the PMU. @@ -62,30 +63,84 @@ release_pmu(struct platform_device *pdev); * the actual hardware initialisation. */ extern int -init_pmu(enum arm_pmu_type device); +init_pmu(enum arm_pmu_type type); #else /* CONFIG_CPU_HAS_PMU */ #include <linux/err.h> -static inline struct platform_device * -reserve_pmu(enum arm_pmu_type device) -{ - return ERR_PTR(-ENODEV); -} - static inline int -release_pmu(struct platform_device *pdev) +reserve_pmu(enum arm_pmu_type type) { return -ENODEV; } -static inline int -init_pmu(enum arm_pmu_type device) -{ - return -ENODEV; -} +static inline void +release_pmu(enum arm_pmu_type type) { } #endif /* CONFIG_CPU_HAS_PMU */ +#ifdef CONFIG_HW_PERF_EVENTS + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event **events; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long *used_mask; + + /* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ + raw_spinlock_t pmu_lock; +}; + +struct arm_pmu { + struct pmu pmu; + enum arm_perf_pmu_ids id; + enum arm_pmu_type type; + cpumask_t active_irqs; + const char *name; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct hw_perf_event *evt, int idx); + void (*disable)(struct hw_perf_event *evt, int idx); + int (*get_event_idx)(struct pmu_hw_events *hw_events, + struct hw_perf_event *hwc); + int (*set_event_filter)(struct hw_perf_event *evt, + struct perf_event_attr *attr); + u32 (*read_counter)(int idx); + void (*write_counter)(int idx, u32 val); + void (*start)(void); + void (*stop)(void); + void (*reset)(void *); + int (*map_event)(struct perf_event *event); + int num_events; + atomic_t active_events; + struct mutex reserve_mutex; + u64 max_period; + struct platform_device *plat_device; + struct pmu_hw_events *(*get_hw_events)(void); +}; + +#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) + +int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); + +u64 armpmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx, int overflow); + +int armpmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx); + +#endif /* CONFIG_HW_PERF_EVENTS */ + #endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/include/asm/poll.h b/arch/arm/include/asm/poll.h deleted file mode 100644 index c98509d3149..00000000000 --- a/arch/arm/include/asm/poll.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/poll.h> diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8ec535e11fd..9e92cb205e6 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -81,14 +81,22 @@ extern void cpu_dcache_clean_area(void *, int); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct 
mm_struct *mm); extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); + +/* These three are private to arch/arm/kernel/suspend.c */ +extern void cpu_do_suspend(void *); +extern void cpu_do_resume(void *); #else -#define cpu_proc_init() processor._proc_init() -#define cpu_proc_fin() processor._proc_fin() -#define cpu_reset(addr) processor.reset(addr) -#define cpu_do_idle() processor._do_idle() -#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) -#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) -#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) +#define cpu_proc_init processor._proc_init +#define cpu_proc_fin processor._proc_fin +#define cpu_reset processor.reset +#define cpu_do_idle processor._do_idle +#define cpu_dcache_clean_area processor.dcache_clean_area +#define cpu_set_pte_ext processor.set_pte_ext +#define cpu_do_switch_mm processor.switch_mm + +/* These three are private to arch/arm/kernel/suspend.c */ +#define cpu_do_suspend processor.do_suspend +#define cpu_do_resume processor.do_resume #endif extern void cpu_resume(void); diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index 11b8708fc4d..6f65ca86a5e 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h @@ -16,11 +16,6 @@ #include <asm/setup.h> #include <asm/irq.h> -static inline void irq_dispose_mapping(unsigned int virq) -{ - return; -} - extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); extern void arm_dt_memblock_reserve(void); diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 312d10877bd..96187ff58c2 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -69,8 +69,9 @@ #define PSR_c 0x000000ff /* Control */ /* - * ARMv7 groups of APSR bits + * ARMv7 groups of PSR bits */ +#define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ #define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ @@ -200,6 +201,14 @@ extern unsigned long profile_pc(struct pt_regs *regs); #define PREDICATE_ALWAYS 0xe0000000 /* + * True if instr is a 32-bit thumb instruction. This works if instr + * is the first or only half-word of a thumb instruction. 
It also works + * when instr holds all 32-bits of a wide thumb instruction if stored + * in the form (first_half<<16)|(second_half) + */ +#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800) + +/* * kprobe-based event tracer support */ #include <linux/stddef.h> diff --git a/arch/arm/include/asm/resource.h b/arch/arm/include/asm/resource.h deleted file mode 100644 index 734b581b5b6..00000000000 --- a/arch/arm/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ARM_RESOURCE_H -#define _ARM_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index 2f87870d934..cefdb8f898a 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h @@ -1,6 +1,10 @@ #ifndef _ASMARM_SCATTERLIST_H #define _ASMARM_SCATTERLIST_H +#ifdef CONFIG_ARM_HAS_SG_CHAIN +#define ARCH_HAS_SG_CHAIN +#endif + #include <asm/memory.h> #include <asm/types.h> #include <asm-generic/scatterlist.h> diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h deleted file mode 100644 index 2b8c5160388..00000000000 --- a/arch/arm/include/asm/sections.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/sections.h> diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee2ad8ae07a..915696dd9c7 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -187,12 +187,16 @@ struct tagtable { #define __tag __used __attribute__((__section__(".taglist.init"))) #define __tagtable(tag, fn) \ -static struct tagtable __tagtable_##fn __tag = { tag, fn } +static const struct tagtable __tagtable_##fn __tag = { tag, fn } /* * Memory map description */ -#define NR_BANKS 8 +#ifdef CONFIG_ARCH_EP93XX +# define NR_BANKS 16 +#else +# define NR_BANKS 8 +#endif struct membank { phys_addr_t start; diff --git a/arch/arm/include/asm/siginfo.h b/arch/arm/include/asm/siginfo.h deleted file mode 100644 index 5e21852e603..00000000000 --- a/arch/arm/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASMARM_SIGINFO_H -#define _ASMARM_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h deleted file mode 100644 index 154b89b81d3..00000000000 --- a/arch/arm/include/asm/sizes.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -/* Size definitions - * Copyright (C) ARM Limited 1998. All rights reserved. 
- */ -#include <asm-generic/sizes.h> - -#define SZ_48M (SZ_32M + SZ_16M) diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index e42d96a45d3..1e5717afc4a 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -33,6 +33,11 @@ extern void show_ipi_list(struct seq_file *, int); asmlinkage void do_IPI(int ipinr, struct pt_regs *regs); /* + * Called from C code, this handles an IPI. + */ +void handle_IPI(int ipinr, struct pt_regs *regs); + +/* * Setup the set of possible CPUs (via set_cpu_possible) */ extern void smp_init_cpus(void); @@ -66,6 +71,12 @@ extern void platform_secondary_init(unsigned int cpu); extern void platform_smp_prepare_cpus(unsigned int); /* + * Logical CPU mapping. + */ +extern int __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +/* * Initial data for bringing up a secondary CPU. */ struct secondary_data { @@ -88,9 +99,4 @@ extern void platform_cpu_enable(unsigned int cpu); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -/* - * show local interrupt info - */ -extern void show_local_irqs(struct seq_file *, int); - #endif /* ifndef __ASM_ARM_SMP_H */ diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h index fed9981fba0..ef9ffba97ad 100644 --- a/arch/arm/include/asm/smp_twd.h +++ b/arch/arm/include/asm/smp_twd.h @@ -22,7 +22,7 @@ struct clock_event_device; extern void __iomem *twd_base; -int twd_timer_ack(void); void twd_timer_setup(struct clock_event_device *); +void twd_timer_stop(struct clock_event_device *); #endif diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h new file mode 100644 index 00000000000..1c0a551ae37 --- /dev/null +++ b/arch/arm/include/asm/suspend.h @@ -0,0 +1,7 @@ +#ifndef __ASM_ARM_SUSPEND_H +#define __ASM_ARM_SUSPEND_H + +extern void cpu_resume(void); +extern int cpu_suspend(unsigned long, int (*)(unsigned long)); + +#endif diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 832888d0c20..984014b9264 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -57,18 +57,12 @@ #ifndef __ASSEMBLY__ +#include <linux/compiler.h> #include <linux/linkage.h> #include <linux/irqflags.h> #include <asm/outercache.h> -#define __exception __attribute__((section(".exception.text"))) -#ifdef CONFIG_FUNCTION_GRAPH_TRACER -#define __exception_irq_entry __irq_entry -#else -#define __exception_irq_entry __exception -#endif - struct thread_info; struct task_struct; @@ -97,14 +91,13 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -extern asmlinkage void __backtrace(void); extern asmlinkage void c_backtrace(unsigned long fp, int pmode); struct mm_struct; extern void show_pte(struct mm_struct *mm, unsigned long addr); extern void __show_regs(struct pt_regs *); -extern int cpu_architecture(void); +extern int __pure cpu_architecture(void); extern void cpu_init(void); void arm_machine_restart(char mode, const char *cmd); diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h index 5929ef5d927..8578d726ad7 100644 --- a/arch/arm/include/asm/tcm.h +++ b/arch/arm/include/asm/tcm.h @@ -27,5 +27,7 @@ void *tcm_alloc(size_t len); void tcm_free(void *addr, size_t len); +bool tcm_dtcm_present(void); +bool tcm_itcm_present(void); #endif diff --git a/arch/arm/include/asm/tlbflush.h 
b/arch/arm/include/asm/tlbflush.h index d2005de383b..02b2f820398 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -34,16 +34,12 @@ #define TLB_V6_D_ASID (1 << 17) #define TLB_V6_I_ASID (1 << 18) -#define TLB_BTB (1 << 28) - /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ #define TLB_V7_UIS_PAGE (1 << 19) #define TLB_V7_UIS_FULL (1 << 20) #define TLB_V7_UIS_ASID (1 << 21) -/* Inner Shareable BTB operation (ARMv7 MP extensions) */ -#define TLB_V7_IS_BTB (1 << 22) - +#define TLB_BARRIER (1 << 28) #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ #define TLB_DCLEAN (1 << 30) #define TLB_WB (1 << 31) @@ -58,7 +54,7 @@ * v4wb - ARMv4 with write buffer without I TLB flush entry instruction * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction * fr - Feroceon (v4wbi with non-outer-cacheable page table walks) - * fa - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB)) + * fa - Faraday (v4 with write buffer with UTLB) * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction * v7wbi - identical to v6wbi */ @@ -99,7 +95,7 @@ # define v4_always_flags (-1UL) #endif -#define fa_tlb_flags (TLB_WB | TLB_BTB | TLB_DCLEAN | \ +#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V4_U_FULL | TLB_V4_U_PAGE) #ifdef CONFIG_CPU_TLB_FA @@ -166,7 +162,7 @@ # define v4wb_always_flags (-1UL) #endif -#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V6_I_FULL | TLB_V6_D_FULL | \ TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ TLB_V6_I_ASID | TLB_V6_D_ASID) @@ -184,9 +180,9 @@ # define v6wbi_always_flags (-1UL) #endif -#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) -#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) #ifdef CONFIG_CPU_TLB_V7 @@ -341,15 +337,7 @@ static inline void local_flush_tlb_all(void) if (tlb_flag(TLB_V7_UIS_FULL)) asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); - dsb(); - isb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) { dsb(); isb(); } @@ -389,17 +377,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); #endif - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); - dsb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) dsb(); - isb(); - } } static inline void @@ -439,17 +418,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); #endif - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); - dsb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) dsb(); - isb(); - } } static inline void 
local_flush_tlb_kernel_page(unsigned long kaddr) @@ -482,15 +452,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) if (tlb_flag(TLB_V7_UIS_PAGE)) asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); - if (tlb_flag(TLB_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); - dsb(); - isb(); - } - if (tlb_flag(TLB_V7_IS_BTB)) { - /* flush the branch target cache */ - asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + if (tlb_flag(TLB_BARRIER)) { dsb(); isb(); } @@ -509,7 +471,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) * these operations. This is typically used when we are removing * PMD entries. */ -static inline void flush_pmd_entry(pmd_t *pmd) +static inline void flush_pmd_entry(void *pmd) { const unsigned int __tlb_flag = __cpu_tlb_flags; @@ -525,7 +487,7 @@ static inline void flush_pmd_entry(pmd_t *pmd) dsb(); } -static inline void clean_pmd_entry(pmd_t *pmd) +static inline void clean_pmd_entry(void *pmd) { const unsigned int __tlb_flag = __cpu_tlb_flags; diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index accbd7cad9b..a7e457ed27c 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -1,6 +1,39 @@ #ifndef _ASM_ARM_TOPOLOGY_H #define _ASM_ARM_TOPOLOGY_H +#ifdef CONFIG_ARM_CPU_TOPOLOGY + +#include <linux/cpumask.h> + +struct cputopo_arm { + int thread_id; + int core_id; + int socket_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; +}; + +extern struct cputopo_arm cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) + +#define mc_capable() (cpu_topology[0].socket_id != -1) +#define smt_capable() (cpu_topology[0].thread_id != -1) + +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(unsigned int cpu); + +#else + +static inline void init_cpu_topology(void) { } +static inline void store_cpu_topology(unsigned int cpuid) { } + +#endif + #include <asm-generic/topology.h> #endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index f90756dc16d..5b29a667362 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h @@ -3,6 +3,9 @@ #include <linux/list.h> +struct pt_regs; +struct task_struct; + struct undef_hook { struct list_head node; u32 instr_mask; diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 2c04ed5efeb..c60a2944f95 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -478,8 +478,8 @@ /* * Unimplemented (or alternatively implemented) syscalls */ -#define __IGNORE_fadvise64_64 1 -#define __IGNORE_migrate_pages 1 +#define __IGNORE_fadvise64_64 +#define __IGNORE_migrate_pages #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ diff --git a/arch/arm/include/asm/vga.h b/arch/arm/include/asm/vga.h index 250a4dd0063..91f40217bfa 100644 --- a/arch/arm/include/asm/vga.h +++ b/arch/arm/include/asm/vga.h @@ -2,9 +2,10 @@ #define ASMARM_VGA_H #include <linux/io.h> -#include <mach/hardware.h> -#define VGA_MAP_MEM(x,s) (PCIMEM_BASE + (x)) +extern unsigned long vga_base; + +#define VGA_MAP_MEM(x,s) (vga_base + (x)) #define vga_readb(x) (*((volatile unsigned char *)x)) #define 
vga_writeb(x,y) (*((volatile unsigned char *)y) = (x))
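
To make the two-level layout documented in the new pgtable-2level.h concrete, the sketch below is plain userspace C (not kernel code) that mirrors the constants from that hunk: 4 KB pages, PGDIR_SHIFT of 21, 512 Linux PTEs per table, and the hardware tables stored PTE_HWTABLE_OFF (2048) bytes beyond the Linux ones. The example address and helper names are illustrative assumptions, not part of the patch.

```c
/* Illustrative sketch of the 2-level layout described in pgtable-2level.h. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* assumes 4 KB pages */
#define PGDIR_SHIFT	21			/* each Linux pgd entry covers 2 MB */
#define PTRS_PER_PTE	512
#define PTE_HWTABLE_OFF	(PTRS_PER_PTE * sizeof(uint32_t))	/* 2048 bytes */

int main(void)
{
	uint32_t addr = 0x40123456;	/* arbitrary example virtual address */
	unsigned pgd_index = addr >> PGDIR_SHIFT;
	unsigned pte_index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	/*
	 * Within the page holding the tables, the "Linux pt" entry lives in
	 * the first 2 KB; the "h/w pt" entry the MMU actually walks lives
	 * PTE_HWTABLE_OFF bytes further on, matching the layout diagram.
	 */
	unsigned long linux_off = pte_index * sizeof(uint32_t);
	unsigned long hw_off = PTE_HWTABLE_OFF + linux_off;

	printf("0x%08x -> pgd[%u], pte[%u]\n", (unsigned)addr, pgd_index, pte_index);
	printf("Linux pte at +%lu bytes, h/w pte at +%lu bytes\n",
	       linux_off, hw_off);
	return 0;
}
```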
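
The pgprot_stronglyordered() helper added to pgtable.h follows the same pattern as the existing pgprot_* macros: clear the four memory-type bits (L_PTE_MT_MASK, bits 5..2) and OR in the desired type. Below is a hedged userspace restatement of that pattern; pgprot_modify() here is a simplified stand-in for the kernel's __pgprot_modify(), which additionally wraps the value back into a pgprot_t.

```c
/* Sketch of the clear-then-set pattern used by the pgprot_* helpers. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t pteval_t;

#define L_PTE_MT_UNCACHED	((pteval_t)0x00 << 2)	/* strongly ordered */
#define L_PTE_MT_WRITEBACK	((pteval_t)0x03 << 2)
#define L_PTE_MT_MASK		((pteval_t)0x0f << 2)

/* Userspace stand-in for __pgprot_modify(prot, mask, set). */
static pteval_t pgprot_modify(pteval_t prot, pteval_t mask, pteval_t set)
{
	return (prot & ~mask) | set;
}

int main(void)
{
	pteval_t prot = L_PTE_MT_WRITEBACK | (1u << 0);	/* type + an unrelated bit */

	/* Equivalent of pgprot_stronglyordered(): force the uncached type. */
	pteval_t so = pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED);

	printf("before 0x%08x after 0x%08x\n", (unsigned)prot, (unsigned)so);
	return 0;
}
```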
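
The reworked reserve_pmu()/release_pmu() documentation in pmu.h describes a simple mutual-exclusion contract: 0 on success, -EBUSY if the counters are already claimed, and an unconditional release. The sketch below models only that contract in userspace C11; it deliberately ignores the enum arm_pmu_type argument and is not the kernel implementation.

```c
/* Userspace model of the reserve/release contract documented in pmu.h. */
#include <stdio.h>
#include <errno.h>
#include <stdatomic.h>

static atomic_flag pmu_lock = ATOMIC_FLAG_INIT;

static int reserve_pmu(void)
{
	/* Already set means someone else holds the counters. */
	return atomic_flag_test_and_set(&pmu_lock) ? -EBUSY : 0;
}

static void release_pmu(void)
{
	atomic_flag_clear(&pmu_lock);
}

int main(void)
{
	if (reserve_pmu() == 0) {
		printf("got the PMU\n");
		printf("second claim -> %d (expect -EBUSY)\n", reserve_pmu());
		release_pmu();
	}
	return 0;
}
```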
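
Finally, the is_wide_instruction() macro added to ptrace.h encodes the Thumb-2 rule that any first halfword of value 0xe800 or above begins a 32-bit instruction, and that the same test still holds when the full (first_half << 16) | second_half word is passed. A minimal check follows; the specific opcodes are example encodings chosen for illustration.

```c
/* Minimal restatement of the is_wide_instruction() test added to ptrace.h. */
#include <stdio.h>

#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)

int main(void)
{
	unsigned narrow = 0x4668;	/* 16-bit Thumb "mov r0, sp" */
	unsigned first_half = 0xf000;	/* first halfword of a 32-bit Thumb-2 BL */
	unsigned wide = 0xf000d000;	/* same BL in (first << 16) | second form */

	printf("0x%04x wide? %d\n", narrow, is_wide_instruction(narrow));		/* 0 */
	printf("0x%04x wide? %d\n", first_half, is_wide_instruction(first_half));	/* 1 */
	printf("0x%08x wide? %d\n", wide, is_wide_instruction(wide));			/* 1 */
	return 0;
}
```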