Diffstat (limited to 'arch/arm/include')
53 files changed, 1285 insertions, 389 deletions
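The first hunks below (assembler.h, and later futex.h) switch the exception-table and fixup emitters from .section/.previous to .pushsection/.popsection, so the directives restore whatever section the assembler was previously emitting into. As a rough, self-contained illustration of the pattern being changed (the macro name, labels and operands here are hypothetical, loosely modelled on the futex.h hunk; kernel context with EFAULT defined is assumed):

/* Sketch: a faulting user-space load with its fixup and __ex_table entry. */
#define read_user_word(err, val, uaddr)				\
	__asm__ __volatile__(					\
	"1:	ldrt	%1, [%2]\n"	/* may fault */		\
	"2:\n"							\
	"	.pushsection .fixup, \"ax\"\n"			\
	"3:	mov	%0, %3\n"	/* err = -EFAULT */	\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table, \"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"	/* fault at 1b -> fixup at 3b */ \
	"	.popsection"					\
	: "+r" (err), "=&r" (val)				\
	: "r" (uaddr), "Ir" (-EFAULT))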
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 00f46d9ce29..6e8f05c8a1c 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -149,10 +149,10 @@ #define USER(x...) \ 9999: x; \ - .section __ex_table,"a"; \ + .pushsection __ex_table,"a"; \ .align 3; \ .long 9999b,9001f; \ - .previous + .popsection /* * SMP data memory barrier @@ -193,10 +193,10 @@ .error "Unsupported inc macro argument" .endif - .section __ex_table,"a" + .pushsection __ex_table,"a" .align 3 .long 9999b, \abort - .previous + .popsection .endm .macro usracc, instr, reg, ptr, inc, cond, rept, abort @@ -234,10 +234,10 @@ .error "Unsupported inc macro argument" .endif - .section __ex_table,"a" + .pushsection __ex_table,"a" .align 3 .long 9999b, \abort - .previous + .popsection .endr .endm diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index d0daeab2234..7e79503ab89 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -24,7 +24,7 @@ * strex/ldrex monitor on some implementations. The reason we can use it for * atomic_set() is the clrex or dummy strex done on every exception return. */ -#define atomic_read(v) ((v)->counter) +#define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) (((v)->counter) = (i)) #if __LINUX_ARM_ARCH__ >= 6 @@ -40,12 +40,12 @@ static inline void atomic_add(int i, atomic_t *v) int result; __asm__ __volatile__("@ atomic_add\n" -"1: ldrex %0, [%2]\n" -" add %0, %0, %3\n" -" strex %1, %0, [%2]\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "Ir" (i) : "cc"); } @@ -58,12 +58,12 @@ static inline int atomic_add_return(int i, atomic_t *v) smp_mb(); __asm__ __volatile__("@ atomic_add_return\n" -"1: ldrex %0, [%2]\n" -" add %0, %0, %3\n" -" strex %1, %0, [%2]\n" +"1: ldrex %0, [%3]\n" +" add %0, %0, %4\n" +" strex %1, %0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "Ir" (i) : "cc"); @@ -78,12 +78,12 @@ static inline void atomic_sub(int i, atomic_t *v) int result; __asm__ __volatile__("@ atomic_sub\n" -"1: ldrex %0, [%2]\n" -" sub %0, %0, %3\n" -" strex %1, %0, [%2]\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "Ir" (i) : "cc"); } @@ -96,12 +96,12 @@ static inline int atomic_sub_return(int i, atomic_t *v) smp_mb(); __asm__ __volatile__("@ atomic_sub_return\n" -"1: ldrex %0, [%2]\n" -" sub %0, %0, %3\n" -" strex %1, %0, [%2]\n" +"1: ldrex %0, [%3]\n" +" sub %0, %0, %4\n" +" strex %1, %0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (result), "=&r" (tmp) + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) : "r" (&v->counter), "Ir" (i) : "cc"); @@ -118,11 +118,11 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) do { __asm__ __volatile__("@ atomic_cmpxchg\n" - "ldrex %1, [%2]\n" + "ldrex %1, [%3]\n" "mov %0, #0\n" - "teq %1, %3\n" - "strexeq %0, %4, [%2]\n" - : "=&r" (res), "=&r" (oldval) + "teq %1, %4\n" + "strexeq %0, %5, [%3]\n" + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) : "r" (&ptr->counter), "Ir" (old), "r" (new) : "cc"); } while (res); @@ -137,12 +137,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 
unsigned long tmp, tmp2; __asm__ __volatile__("@ atomic_clear_mask\n" -"1: ldrex %0, [%2]\n" -" bic %0, %0, %3\n" -" strex %1, %0, [%2]\n" +"1: ldrex %0, [%3]\n" +" bic %0, %0, %4\n" +" strex %1, %0, [%3]\n" " teq %1, #0\n" " bne 1b" - : "=&r" (tmp), "=&r" (tmp2) + : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) : "r" (addr), "Ir" (mask) : "cc"); } @@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() +#ifndef CONFIG_GENERIC_ATOMIC64 +typedef struct { + u64 __aligned(8) counter; +} atomic64_t; + +#define ATOMIC64_INIT(i) { (i) } + +static inline u64 atomic64_read(atomic64_t *v) +{ + u64 result; + + __asm__ __volatile__("@ atomic64_read\n" +" ldrexd %0, %H0, [%1]" + : "=&r" (result) + : "r" (&v->counter), "Qo" (v->counter) + ); + + return result; +} + +static inline void atomic64_set(atomic64_t *v, u64 i) +{ + u64 tmp; + + __asm__ __volatile__("@ atomic64_set\n" +"1: ldrexd %0, %H0, [%2]\n" +" strexd %0, %3, %H3, [%2]\n" +" teq %0, #0\n" +" bne 1b" + : "=&r" (tmp), "=Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline void atomic64_add(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + __asm__ __volatile__("@ atomic64_add\n" +"1: ldrexd %0, %H0, [%3]\n" +" adds %0, %0, %4\n" +" adc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline u64 atomic64_add_return(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_return\n" +"1: ldrexd %0, %H0, [%3]\n" +" adds %0, %0, %4\n" +" adc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline void atomic64_sub(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + __asm__ __volatile__("@ atomic64_sub\n" +"1: ldrexd %0, %H0, [%3]\n" +" subs %0, %0, %4\n" +" sbc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); +} + +static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_sub_return\n" +"1: ldrexd %0, %H0, [%3]\n" +" subs %0, %0, %4\n" +" sbc %H0, %H0, %H4\n" +" strexd %1, %0, %H0, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (i) + : "cc"); + + smp_mb(); + + return result; +} + +static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) +{ + u64 oldval; + unsigned long res; + + smp_mb(); + + do { + __asm__ __volatile__("@ atomic64_cmpxchg\n" + "ldrexd %1, %H1, [%3]\n" + "mov %0, #0\n" + "teq %1, %4\n" + "teqeq %H1, %H4\n" + "strexdeq %0, %5, %H5, [%3]" + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) + : "r" (&ptr->counter), "r" (old), "r" (new) + : "cc"); + } while (res); + + smp_mb(); + + return oldval; +} + +static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_xchg\n" +"1: ldrexd %0, %H0, [%3]\n" +" strexd %1, %4, %H4, [%3]\n" +" teq %1, #0\n" +" bne 1b" + : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter) + : "r" (&ptr->counter), "r" (new) + : 
"cc"); + + smp_mb(); + + return result; +} + +static inline u64 atomic64_dec_if_positive(atomic64_t *v) +{ + u64 result; + unsigned long tmp; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_dec_if_positive\n" +"1: ldrexd %0, %H0, [%3]\n" +" subs %0, %0, #1\n" +" sbc %H0, %H0, #0\n" +" teq %H0, #0\n" +" bmi 2f\n" +" strexd %1, %0, %H0, [%3]\n" +" teq %1, #0\n" +" bne 1b\n" +"2:" + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter) + : "cc"); + + smp_mb(); + + return result; +} + +static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) +{ + u64 val; + unsigned long tmp; + int ret = 1; + + smp_mb(); + + __asm__ __volatile__("@ atomic64_add_unless\n" +"1: ldrexd %0, %H0, [%4]\n" +" teq %0, %5\n" +" teqeq %H0, %H5\n" +" moveq %1, #0\n" +" beq 2f\n" +" adds %0, %0, %6\n" +" adc %H0, %H0, %H6\n" +" strexd %2, %0, %H0, [%4]\n" +" teq %2, #0\n" +" bne 1b\n" +"2:" + : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) + : "r" (&v->counter), "r" (u), "r" (a) + : "cc"); + + if (ret) + smp_mb(); + + return ret; +} + +#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) +#define atomic64_inc(v) atomic64_add(1LL, (v)) +#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) +#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) +#define atomic64_dec(v) atomic64_sub(1LL, (v)) +#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) +#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) + +#else /* !CONFIG_GENERIC_ATOMIC64 */ +#include <asm-generic/atomic64.h> +#endif #include <asm-generic/atomic-long.h> #endif #endif diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index c77d2fa1f6e..4656a24058d 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -15,6 +15,7 @@ #include <asm/glue.h> #include <asm/shmparam.h> #include <asm/cachetype.h> +#include <asm/outercache.h> #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) @@ -42,7 +43,8 @@ #endif #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \ - defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) + defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \ + defined(CONFIG_CPU_ARM1026) # define MULTI_CACHE 1 #endif @@ -196,21 +198,6 @@ * DMA Cache Coherency * =================== * - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - start - virtual start address - * - end - virtual end address - * - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - start - virtual start address - * - end - virtual end address - * * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
@@ -227,15 +214,10 @@ struct cpu_cache_fns { void (*coherent_user_range)(unsigned long, unsigned long); void (*flush_kern_dcache_area)(void *, size_t); - void (*dma_inv_range)(const void *, const void *); - void (*dma_clean_range)(const void *, const void *); - void (*dma_flush_range)(const void *, const void *); -}; + void (*dma_map_area)(const void *, size_t, int); + void (*dma_unmap_area)(const void *, size_t, int); -struct outer_cache_fns { - void (*inv_range)(unsigned long, unsigned long); - void (*clean_range)(unsigned long, unsigned long); - void (*flush_range)(unsigned long, unsigned long); + void (*dma_flush_range)(const void *, const void *); }; /* @@ -258,8 +240,8 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_inv_range cpu_cache.dma_inv_range -#define dmac_clean_range cpu_cache.dma_clean_range +#define dmac_map_area cpu_cache.dma_map_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -284,58 +266,23 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ -#define dmac_inv_range __glue(_CACHE,_dma_inv_range) -#define dmac_clean_range __glue(_CACHE,_dma_clean_range) +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) -extern void dmac_inv_range(const void *, const void *); -extern void dmac_clean_range(const void *, const void *); +extern void dmac_map_area(const void *, size_t, int); +extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); #endif -#ifdef CONFIG_OUTER_CACHE - -extern struct outer_cache_fns outer_cache; - -static inline void outer_inv_range(unsigned long start, unsigned long end) -{ - if (outer_cache.inv_range) - outer_cache.inv_range(start, end); -} -static inline void outer_clean_range(unsigned long start, unsigned long end) -{ - if (outer_cache.clean_range) - outer_cache.clean_range(start, end); -} -static inline void outer_flush_range(unsigned long start, unsigned long end) -{ - if (outer_cache.flush_range) - outer_cache.flush_range(start, end); -} - -#else - -static inline void outer_inv_range(unsigned long start, unsigned long end) -{ } -static inline void outer_clean_range(unsigned long start, unsigned long end) -{ } -static inline void outer_flush_range(unsigned long start, unsigned long end) -{ } - -#endif - /* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user * space" model to handle this. 
*/ -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - memcpy(dst, src, len); \ - flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ - } while (0) - +extern void copy_to_user_page(struct vm_area_struct *, struct page *, + unsigned long, void *, const void *, unsigned long); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ @@ -369,17 +316,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig } } -static inline void -vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) -{ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = (unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); - } -} - #ifndef CONFIG_CPU_CACHE_VIPT #define flush_cache_mm(mm) \ vivt_flush_cache_mm(mm) @@ -387,15 +323,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, vivt_flush_cache_range(vma,start,end) #define flush_cache_page(vma,addr,pfn) \ vivt_flush_cache_page(vma,addr,pfn) -#define flush_ptrace_access(vma,page,ua,ka,len,write) \ - vivt_flush_ptrace_access(vma,page,ua,ka,len,write) #else extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); -extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write); #endif #define flush_cache_dup_mm(mm) flush_cache_mm(mm) @@ -440,12 +371,26 @@ static inline void __flush_icache_all(void) #ifdef CONFIG_ARM_ERRATA_411920 extern void v6_icache_inval_all(void); v6_icache_inval_all(); +#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7 + asm("mcr p15, 0, %0, c7, c1, 0 @ invalidate I-cache inner shareable\n" + : + : "r" (0)); #else asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n" : : "r" (0)); #endif } +static inline void flush_kernel_vmap_range(void *addr, int size) +{ + if ((cache_is_vivt() || cache_is_vipt_aliasing())) + __cpuc_flush_dcache_area(addr, (size_t)size); +} +static inline void invalidate_kernel_vmap_range(void *addr, int size) +{ + if ((cache_is_vivt() || cache_is_vipt_aliasing())) + __cpuc_flush_dcache_area(addr, (size_t)size); +} #define ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h index b6ec7c627b3..b56c1389b6f 100644 --- a/arch/arm/include/asm/clkdev.h +++ b/arch/arm/include/asm/clkdev.h @@ -13,6 +13,7 @@ #define __ASM_CLKDEV_H struct clk; +struct device; struct clk_lookup { struct list_head node; @@ -27,4 +28,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id, void clkdev_add(struct clk_lookup *cl); void clkdev_drop(struct clk_lookup *cl); +void clkdev_add_table(struct clk_lookup *, size_t); +int clk_add_alias(const char *, const char *, char *, struct device *); + #endif diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a96300bf83f..69ce0727edb 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) #endif /* - * DMA-consistent mapping functions. 
These allocate/free a region of - * uncached, unwrite-buffered mapped memory space for use with DMA - * devices. This is the "generic" version. The PCI specific version - * is in pci.h + * The DMA API is built upon the notion of "buffer ownership". A buffer + * is either exclusively owned by the CPU (and therefore may be accessed + * by it) or exclusively owned by the DMA device. These helper functions + * represent the transitions between these two ownership states. * - * Note: Drivers should NOT use this function directly, as it will break - * platforms with CONFIG_DMABOUNCE. - * Use the driver DMA support - see dma-mapping.h (dma_sync_*) + * Note, however, that on later ARMs, this notion does not work due to + * speculative prefetches. We model our approach on the assumption that + * the CPU does do speculative prefetches, which means we clean caches + * before transfers and delay cache invalidation until transfer completion. + * + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. */ -extern void dma_cache_maint(const void *kaddr, size_t size, int rw); -extern void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int rw); +static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_cpu_to_dev(kaddr, size, dir); +} + +static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + extern void ___dma_single_dev_to_cpu(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_dev_to_cpu(kaddr, size, dir); +} + +static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_cpu_to_dev(page, off, size, dir); +} + +static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_dev_to_cpu(page, off, size, dir); +} /* * Return whether the given device DMA address mask can be supported @@ -88,6 +128,14 @@ static inline int dma_supported(struct device *dev, u64 mask) static inline int dma_set_mask(struct device *dev, u64 dma_mask) { +#ifdef CONFIG_DMABOUNCE + if (dev->archdata.dmabounce) { + if (dma_mask >= ISA_DMA_THRESHOLD) + return 0; + else + return -EIO; + } +#endif if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; @@ -304,8 +352,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint(cpu_addr, size, dir); + __dma_single_cpu_to_dev(cpu_addr, size, dir); return virt_to_dma(dev, cpu_addr); } @@ -329,8 +376,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint_page(page, offset, size, dir); + __dma_page_cpu_to_dev(page, offset, size, dir); return page_to_dma(dev, page) + offset; } @@ -352,7 +398,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, static inline void 
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); } /** @@ -372,7 +418,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, + size, dir); } #endif /* CONFIG_DMABOUNCE */ @@ -400,7 +447,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, { BUG_ON(!valid_dma_direction(dir)); - dmabounce_sync_for_cpu(dev, handle, offset, size, dir); + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) + return; + + __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -412,8 +462,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; - if (!arch_is_coherent()) - dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); + __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_for_cpu(struct device *dev, diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index a399bb5730f..51662feb9f1 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -9,6 +9,8 @@ #include <asm/ptrace.h> #include <asm/user.h> +struct task_struct; + typedef unsigned long elf_greg_t; typedef unsigned long elf_freg_t[3]; @@ -98,6 +100,7 @@ extern int elf_check_arch(const struct elf32_hdr *); extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) +struct task_struct; int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define ELF_CORE_COPY_TASK_REGS dump_task_regs diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S new file mode 100644 index 00000000000..3ceb85e4385 --- /dev/null +++ b/arch/arm/include/asm/entry-macro-vic2.S @@ -0,0 +1,57 @@ +/* arch/arm/include/asm/entry-macro-vic2.S + * + * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * Low-level IRQ helper macros for a device with two VICs + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. +*/ + +/* This should be included from <mach/entry-macro.S> with the necessary + * defines for virtual addresses and IRQ bases for the two vics. + * + * The code needs the following defined: + * IRQ_VIC0_BASE IRQ number of VIC0's first IRQ + * IRQ_VIC1_BASE IRQ number of VIC1's first IRQ + * VA_VIC0 Virtual address of VIC0 + * VA_VIC1 Virtual address of VIC1 + * + * Note, code assumes VIC0's virtual address is an ARM immediate constant + * away from VIC1. 
+*/ + +#include <asm/hardware/vic.h> + + .macro disable_fiq + .endm + + .macro get_irqnr_preamble, base, tmp + ldr \base, =VA_VIC0 + .endm + + .macro arch_ret_to_user, tmp1, tmp2 + .endm + + .macro get_irqnr_and_base, irqnr, irqstat, base, tmp + + @ check the vic0 + mov \irqnr, #IRQ_VIC0_BASE + 31 + ldr \irqstat, [ \base, # VIC_IRQ_STATUS ] + teq \irqstat, #0 + + @ otherwise try vic1 + addeq \tmp, \base, #(VA_VIC1 - VA_VIC0) + addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE) + ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ] + teqeq \irqstat, #0 + + clzne \irqstat, \irqstat + subne \irqnr, \irqnr, \irqstat + .endm diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index bfcc15929a7..540a044153a 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h @@ -21,14 +21,14 @@ "2: strt %0, [%2]\n" \ " mov %0, #0\n" \ "3:\n" \ - " .section __ex_table,\"a\"\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4f, 2b, 4f\n" \ - " .previous\n" \ - " .section .fixup,\"ax\"\n" \ + " .popsection\n" \ + " .pushsection .fixup,\"ax\"\n" \ "4: mov %0, %4\n" \ " b 3b\n" \ - " .previous" \ + " .popsection" \ : "=&r" (ret), "=&r" (oldval) \ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ : "cc", "memory") @@ -102,14 +102,14 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " it eq @ explicit IT needed for the 2b label\n" "2: streqt %2, [%3]\n" "3:\n" - " .section __ex_table,\"a\"\n" + " .pushsection __ex_table,\"a\"\n" " .align 3\n" " .long 1b, 4f, 2b, 4f\n" - " .previous\n" - " .section .fixup,\"ax\"\n" + " .popsection\n" + " .pushsection .fixup,\"ax\"\n" "4: mov %0, %4\n" " b 3b\n" - " .previous" + " .popsection" : "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h index 182310b9919..6d7485aff95 100644 --- a/arch/arm/include/asm/hardirq.h +++ b/arch/arm/include/asm/hardirq.h @@ -12,7 +12,9 @@ typedef struct { #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ -#if NR_IRQS > 256 +#if NR_IRQS > 512 +#define HARDIRQ_BITS 10 +#elif NR_IRQS > 256 #define HARDIRQ_BITS 9 #else #define HARDIRQ_BITS 8 diff --git a/arch/arm/include/asm/hardware/arm_timer.h b/arch/arm/include/asm/hardware/arm_timer.h index 04be3bdf46b..c0f4e7bf22d 100644 --- a/arch/arm/include/asm/hardware/arm_timer.h +++ b/arch/arm/include/asm/hardware/arm_timer.h @@ -1,21 +1,30 @@ #ifndef __ASM_ARM_HARDWARE_ARM_TIMER_H #define __ASM_ARM_HARDWARE_ARM_TIMER_H -#define TIMER_LOAD 0x00 -#define TIMER_VALUE 0x04 -#define TIMER_CTRL 0x08 -#define TIMER_CTRL_ONESHOT (1 << 0) -#define TIMER_CTRL_32BIT (1 << 1) -#define TIMER_CTRL_DIV1 (0 << 2) -#define TIMER_CTRL_DIV16 (1 << 2) -#define TIMER_CTRL_DIV256 (2 << 2) -#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable (versatile only) */ -#define TIMER_CTRL_PERIODIC (1 << 6) -#define TIMER_CTRL_ENABLE (1 << 7) +/* + * ARM timer implementation, found in Integrator, Versatile and Realview + * platforms. Not all platforms support all registers and bits in these + * registers, so we mark them with A for Integrator AP, C for Integrator + * CP, V for Versatile and R for Realview. + * + * Integrator AP has 16-bit timers, Integrator CP, Versatile and Realview + * can have 16-bit or 32-bit selectable via a bit in the control register. 
+ */ +#define TIMER_LOAD 0x00 /* ACVR rw */ +#define TIMER_VALUE 0x04 /* ACVR ro */ +#define TIMER_CTRL 0x08 /* ACVR rw */ +#define TIMER_CTRL_ONESHOT (1 << 0) /* CVR */ +#define TIMER_CTRL_32BIT (1 << 1) /* CVR */ +#define TIMER_CTRL_DIV1 (0 << 2) /* ACVR */ +#define TIMER_CTRL_DIV16 (1 << 2) /* ACVR */ +#define TIMER_CTRL_DIV256 (2 << 2) /* ACVR */ +#define TIMER_CTRL_IE (1 << 5) /* VR */ +#define TIMER_CTRL_PERIODIC (1 << 6) /* ACVR */ +#define TIMER_CTRL_ENABLE (1 << 7) /* ACVR */ -#define TIMER_INTCLR 0x0c -#define TIMER_RIS 0x10 -#define TIMER_MIS 0x14 -#define TIMER_BGLOAD 0x18 +#define TIMER_INTCLR 0x0c /* ACVR wo */ +#define TIMER_RIS 0x10 /* CVR ro */ +#define TIMER_MIS 0x14 /* CVR ro */ +#define TIMER_BGLOAD 0x18 /* CVR rw */ #endif diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index cdb9022716f..6bcba48800f 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -21,6 +21,9 @@ #define __ASM_ARM_HARDWARE_L2X0_H #define L2X0_CACHE_ID 0x000 +#define L2X0_CACHE_ID_PART_MASK (0xf << 6) +#define L2X0_CACHE_ID_PART_L210 (1 << 6) +#define L2X0_CACHE_ID_PART_L310 (3 << 6) #define L2X0_CACHE_TYPE 0x004 #define L2X0_CTRL 0x100 #define L2X0_AUX_CTRL 0x104 diff --git a/arch/arm/include/asm/hardware/icst.h b/arch/arm/include/asm/hardware/icst.h new file mode 100644 index 00000000000..10382a3dcec --- /dev/null +++ b/arch/arm/include/asm/hardware/icst.h @@ -0,0 +1,59 @@ +/* + * arch/arm/include/asm/hardware/icst.h + * + * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Support functions for calculating clocks/divisors for the ICST + * clock generators. See http://www.icst.com/ for more information + * on these devices. + */ +#ifndef ASMARM_HARDWARE_ICST_H +#define ASMARM_HARDWARE_ICST_H + +struct icst_params { + unsigned long ref; + unsigned long vco_max; /* inclusive */ + unsigned long vco_min; /* exclusive */ + unsigned short vd_min; /* inclusive */ + unsigned short vd_max; /* inclusive */ + unsigned char rd_min; /* inclusive */ + unsigned char rd_max; /* inclusive */ + const unsigned char *s2div; /* chip specific s2div array */ + const unsigned char *idx2s; /* chip specific idx2s array */ +}; + +struct icst_vco { + unsigned short v; + unsigned char r; + unsigned char s; +}; + +unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco); +struct icst_vco icst_hz_to_vco(const struct icst_params *p, unsigned long freq); + +/* + * ICST307 VCO frequency must be between 6MHz and 200MHz (3.3 or 5V). + * This frequency is pre-output divider. + */ +#define ICST307_VCO_MIN 6000000 +#define ICST307_VCO_MAX 200000000 + +extern const unsigned char icst307_s2div[]; +extern const unsigned char icst307_idx2s[]; + +/* + * ICST525 VCO frequency must be between 10MHz and 200MHz (3V) or 320MHz (5V). + * This frequency is pre-output divider. 
+ */ +#define ICST525_VCO_MIN 10000000 +#define ICST525_VCO_MAX_3V 200000000 +#define ICST525_VCO_MAX_5V 320000000 + +extern const unsigned char icst525_s2div[]; +extern const unsigned char icst525_idx2s[]; + +#endif diff --git a/arch/arm/include/asm/hardware/icst307.h b/arch/arm/include/asm/hardware/icst307.h deleted file mode 100644 index 554f128a104..00000000000 --- a/arch/arm/include/asm/hardware/icst307.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * arch/arm/include/asm/hardware/icst307.h - * - * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Support functions for calculating clocks/divisors for the ICS307 - * clock generators. See http://www.icst.com/ for more information - * on these devices. - * - * This file is similar to the icst525.h file - */ -#ifndef ASMARM_HARDWARE_ICST307_H -#define ASMARM_HARDWARE_ICST307_H - -struct icst307_params { - unsigned long ref; - unsigned long vco_max; /* inclusive */ - unsigned short vd_min; /* inclusive */ - unsigned short vd_max; /* inclusive */ - unsigned char rd_min; /* inclusive */ - unsigned char rd_max; /* inclusive */ -}; - -struct icst307_vco { - unsigned short v; - unsigned char r; - unsigned char s; -}; - -unsigned long icst307_khz(const struct icst307_params *p, struct icst307_vco vco); -struct icst307_vco icst307_khz_to_vco(const struct icst307_params *p, unsigned long freq); -struct icst307_vco icst307_ps_to_vco(const struct icst307_params *p, unsigned long period); - -#endif diff --git a/arch/arm/include/asm/hardware/icst525.h b/arch/arm/include/asm/hardware/icst525.h deleted file mode 100644 index 58f0dc43e2e..00000000000 --- a/arch/arm/include/asm/hardware/icst525.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * arch/arm/include/asm/hardware/icst525.h - * - * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Support functions for calculating clocks/divisors for the ICST525 - * clock generators. See http://www.icst.com/ for more information - * on these devices. 
- */ -#ifndef ASMARM_HARDWARE_ICST525_H -#define ASMARM_HARDWARE_ICST525_H - -struct icst525_params { - unsigned long ref; - unsigned long vco_max; /* inclusive */ - unsigned short vd_min; /* inclusive */ - unsigned short vd_max; /* inclusive */ - unsigned char rd_min; /* inclusive */ - unsigned char rd_max; /* inclusive */ -}; - -struct icst525_vco { - unsigned short v; - unsigned char r; - unsigned char s; -}; - -unsigned long icst525_khz(const struct icst525_params *p, struct icst525_vco vco); -struct icst525_vco icst525_khz_to_vco(const struct icst525_params *p, unsigned long freq); -struct icst525_vco icst525_ps_to_vco(const struct icst525_params *p, unsigned long period); - -#endif diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 1a8c7279a28..9b28f1243bd 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -366,8 +366,7 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - if (len) - slot_cnt += *slots_per_op; + slot_cnt += *slots_per_op; return slot_cnt; } @@ -389,8 +388,7 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - if (len) - slot_cnt += *slots_per_op; + slot_cnt += *slots_per_op; return slot_cnt; } @@ -737,10 +735,8 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) i += slots_per_op; } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); - if (len) { - iter = iop_hw_desc_slot_idx(hw_desc, i); - iter->byte_count = len; - } + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = len; } } diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h index 74b5fff7f57..6700c7fc7eb 100644 --- a/arch/arm/include/asm/hardware/it8152.h +++ b/arch/arm/include/asm/hardware/it8152.h @@ -75,6 +75,18 @@ extern unsigned long it8152_base_address; IT8152_PD_IRQ(1) USB (USBR) IT8152_PD_IRQ(0) Audio controller (ACR) */ +#define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) + +/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ +#define IT8152_LD_IRQ_COUNT 9 +#define IT8152_LP_IRQ_COUNT 16 +#define IT8152_PD_IRQ_COUNT 15 + +/* Priorities: */ +#define IT8152_PD_IRQ(i) IT8152_IRQ(i) +#define IT8152_LP_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT) +#define IT8152_LD_IRQ(i) (IT8152_IRQ(i) + IT8152_PD_IRQ_COUNT + IT8152_LP_IRQ_COUNT) + /* frequently used interrupts */ #define IT8152_PCISERR IT8152_PD_IRQ(14) #define IT8152_H2PTADR IT8152_PD_IRQ(13) diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index 954b1be991b..74e51d6bd93 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -214,4 +214,8 @@ void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int /* Frontlight control */ void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf); +struct locomo_platform_data { + int irq_base; /* IRQ base for cascaded on-chip IRQs */ +}; + #endif diff --git a/arch/arm/include/asm/hardware/pl330.h b/arch/arm/include/asm/hardware/pl330.h new file mode 100644 index 00000000000..575fa8186ca --- /dev/null +++ b/arch/arm/include/asm/hardware/pl330.h @@ -0,0 +1,217 @@ +/* linux/include/asm/hardware/pl330.h + * + * Copyright (C) 2010 Samsung Electronics Co. Ltd. 
+ * Jaswinder Singh <jassi.brar@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __PL330_CORE_H +#define __PL330_CORE_H + +#define PL330_MAX_CHAN 8 +#define PL330_MAX_IRQS 32 +#define PL330_MAX_PERI 32 + +enum pl330_srccachectrl { + SCCTRL0 = 0, /* Noncacheable and nonbufferable */ + SCCTRL1, /* Bufferable only */ + SCCTRL2, /* Cacheable, but do not allocate */ + SCCTRL3, /* Cacheable and bufferable, but do not allocate */ + SINVALID1, + SINVALID2, + SCCTRL6, /* Cacheable write-through, allocate on reads only */ + SCCTRL7, /* Cacheable write-back, allocate on reads only */ +}; + +enum pl330_dstcachectrl { + DCCTRL0 = 0, /* Noncacheable and nonbufferable */ + DCCTRL1, /* Bufferable only */ + DCCTRL2, /* Cacheable, but do not allocate */ + DCCTRL3, /* Cacheable and bufferable, but do not allocate */ + DINVALID1 = 8, + DINVALID2, + DCCTRL6, /* Cacheable write-through, allocate on writes only */ + DCCTRL7, /* Cacheable write-back, allocate on writes only */ +}; + +/* Populated by the PL330 core driver for DMA API driver's info */ +struct pl330_config { + u32 periph_id; + u32 pcell_id; +#define DMAC_MODE_NS (1 << 0) + unsigned int mode; + unsigned int data_bus_width:10; /* In number of bits */ + unsigned int data_buf_dep:10; + unsigned int num_chan:4; + unsigned int num_peri:6; + u32 peri_ns; + unsigned int num_events:6; + u32 irq_ns; +}; + +/* Handle to the DMAC provided to the PL330 core */ +struct pl330_info { + /* Owning device */ + struct device *dev; + /* Size of MicroCode buffers for each channel. */ + unsigned mcbufsz; + /* ioremap'ed address of PL330 registers. */ + void __iomem *base; + /* Client can freely use it. */ + void *client_data; + /* PL330 core data, Client must not touch it. */ + void *pl330_data; + /* Populated by the PL330 core driver during pl330_add */ + struct pl330_config pcfg; + /* + * If the DMAC has some reset mechanism, then the + * client may want to provide pointer to the method. + */ + void (*dmac_reset)(struct pl330_info *pi); +}; + +enum pl330_byteswap { + SWAP_NO = 0, + SWAP_2, + SWAP_4, + SWAP_8, + SWAP_16, +}; + +/** + * Request Configuration. + * The PL330 core does not modify this and uses the last + * working configuration if the request doesn't provide any. + * + * The Client may want to provide this info only for the + * first request and a request with new settings. + */ +struct pl330_reqcfg { + /* Address Incrementing */ + unsigned dst_inc:1; + unsigned src_inc:1; + + /* + * For now, the SRC & DST protection levels + * and burst size/length are assumed same. + */ + bool nonsecure; + bool privileged; + bool insnaccess; + unsigned brst_len:5; + unsigned brst_size:3; /* in power of 2 */ + + enum pl330_dstcachectrl dcctl; + enum pl330_srccachectrl scctl; + enum pl330_byteswap swap; +}; + +/* + * One cycle of DMAC operation. + * There may be more than one xfer in a request. 
+ */ +struct pl330_xfer { + u32 src_addr; + u32 dst_addr; + /* Size to xfer */ + u32 bytes; + /* + * Pointer to next xfer in the list. + * The last xfer in the req must point to NULL. + */ + struct pl330_xfer *next; +}; + +/* The xfer callbacks are made with one of these arguments. */ +enum pl330_op_err { + /* The all xfers in the request were success. */ + PL330_ERR_NONE, + /* If req aborted due to global error. */ + PL330_ERR_ABORT, + /* If req failed due to problem with Channel. */ + PL330_ERR_FAIL, +}; + +enum pl330_reqtype { + MEMTOMEM, + MEMTODEV, + DEVTOMEM, + DEVTODEV, +}; + +/* A request defining Scatter-Gather List ending with NULL xfer. */ +struct pl330_req { + enum pl330_reqtype rqtype; + /* Index of peripheral for the xfer. */ + unsigned peri:5; + /* Unique token for this xfer, set by the client. */ + void *token; + /* Callback to be called after xfer. */ + void (*xfer_cb)(void *token, enum pl330_op_err err); + /* If NULL, req will be done at last set parameters. */ + struct pl330_reqcfg *cfg; + /* Pointer to first xfer in the request. */ + struct pl330_xfer *x; +}; + +/* + * To know the status of the channel and DMAC, the client + * provides a pointer to this structure. The PL330 core + * fills it with current information. + */ +struct pl330_chanstatus { + /* + * If the DMAC engine halted due to some error, + * the client should remove-add DMAC. + */ + bool dmac_halted; + /* + * If channel is halted due to some error, + * the client should ABORT/FLUSH and START the channel. + */ + bool faulting; + /* Location of last load */ + u32 src_addr; + /* Location of last store */ + u32 dst_addr; + /* + * Pointer to the currently active req, NULL if channel is + * inactive, even though the requests may be present. + */ + struct pl330_req *top_req; + /* Pointer to req waiting second in the queue if any. 
*/ + struct pl330_req *wait_req; +}; + +enum pl330_chan_op { + /* Start the channel */ + PL330_OP_START, + /* Abort the active xfer */ + PL330_OP_ABORT, + /* Stop xfer and flush queue */ + PL330_OP_FLUSH, +}; + +extern int pl330_add(struct pl330_info *); +extern void pl330_del(struct pl330_info *pi); +extern int pl330_update(const struct pl330_info *pi); +extern void pl330_release_channel(void *ch_id); +extern void *pl330_request_channel(const struct pl330_info *pi); +extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus); +extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op); +extern int pl330_submit_req(void *ch_id, struct pl330_req *r); + +#endif /* __PL330_CORE_H */ diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h index 5da2595759e..92ed254c175 100644 --- a/arch/arm/include/asm/hardware/sa1111.h +++ b/arch/arm/include/asm/hardware/sa1111.h @@ -578,4 +578,8 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev, unsigned int bits, unsigned int void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v); +struct sa1111_platform_data { + int irq_base; /* base for cascaded on-chip IRQs */ +}; + #endif /* _ASM_ARCH_SA1111 */ diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h new file mode 100644 index 00000000000..a101f10bb5b --- /dev/null +++ b/arch/arm/include/asm/hardware/sp810.h @@ -0,0 +1,59 @@ +/* + * arch/arm/include/asm/hardware/sp810.h + * + * ARM PrimeXsys System Controller SP810 header file + * + * Copyright (C) 2009 ST Microelectronics + * Viresh Kumar<viresh.kumar@st.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#ifndef __ASM_ARM_SP810_H +#define __ASM_ARM_SP810_H + +#include <linux/io.h> + +/* sysctl registers offset */ +#define SCCTRL 0x000 +#define SCSYSSTAT 0x004 +#define SCIMCTRL 0x008 +#define SCIMSTAT 0x00C +#define SCXTALCTRL 0x010 +#define SCPLLCTRL 0x014 +#define SCPLLFCTRL 0x018 +#define SCPERCTRL0 0x01C +#define SCPERCTRL1 0x020 +#define SCPEREN 0x024 +#define SCPERDIS 0x028 +#define SCPERCLKEN 0x02C +#define SCPERSTAT 0x030 +#define SCSYSID0 0xEE0 +#define SCSYSID1 0xEE4 +#define SCSYSID2 0xEE8 +#define SCSYSID3 0xEEC +#define SCITCR 0xF00 +#define SCITIR0 0xF04 +#define SCITIR1 0xF08 +#define SCITOR 0xF0C +#define SCCNTCTRL 0xF10 +#define SCCNTDATA 0xF14 +#define SCCNTSTEP 0xF18 +#define SCPERIPHID0 0xFE0 +#define SCPERIPHID1 0xFE4 +#define SCPERIPHID2 0xFE8 +#define SCPERIPHID3 0xFEC +#define SCPCELLID0 0xFF0 +#define SCPCELLID1 0xFF4 +#define SCPCELLID2 0xFF8 +#define SCPCELLID3 0xFFC + +static inline void sysctl_soft_reset(void __iomem *base) +{ + /* writing any value to SCSYSSTAT reg will reset system */ + writel(0, base + SCSYSSTAT); +} + +#endif /* __ASM_ARM_SP810_H */ diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 7f36d00600b..feb988a7ec3 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h @@ -11,7 +11,11 @@ #define kmap_prot PAGE_KERNEL -#define flush_cache_kmaps() flush_cache_all() +#define flush_cache_kmaps() \ + do { \ + if (cache_is_vivt()) \ + flush_cache_all(); \ + } while (0) extern pte_t *pkmap_page_table; @@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page); extern void *kmap_high_get(struct page *page); extern void kunmap_high(struct page *page); +extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte); +extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte); + +/* + * The following functions are already defined by <linux/highmem.h> + * when CONFIG_HIGHMEM is not set. + */ +#ifdef CONFIG_HIGHMEM extern void *kmap(struct page *page); extern void kunmap(struct page *page); extern void *kmap_atomic(struct page *page, enum km_type type); extern void kunmap_atomic(void *kvaddr, enum km_type type); extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); extern struct page *kmap_atomic_to_page(const void *ptr); +#endif #endif diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h index d2a59cfc30c..1261b1f928d 100644 --- a/arch/arm/include/asm/io.h +++ b/arch/arm/include/asm/io.h @@ -26,6 +26,7 @@ #include <linux/types.h> #include <asm/byteorder.h> #include <asm/memory.h> +#include <asm/system.h> /* * ISA I/O bus memory addresses are 1:1 with the physical address. @@ -69,9 +70,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); /* * __arm_ioremap takes CPU physical address. * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page + * The _caller variety takes a __builtin_return_address(0) value for + * /proc/vmalloc to use - and should only be used in non-inline functions. 
*/ -extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); -extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long, + size_t, unsigned int, void *); +extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int, + void *); + +extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int); +extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int); extern void __iounmap(volatile void __iomem *addr); /* @@ -172,25 +180,38 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * IO port primitives for more information. */ #ifdef __mem_pci -#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; }) -#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \ +#define readb_relaxed(c) ({ u8 __v = __raw_readb(__mem_pci(c)); __v; }) +#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16) \ __raw_readw(__mem_pci(c))); __v; }) -#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \ +#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32) \ __raw_readl(__mem_pci(c))); __v; }) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) + +#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c))) +#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ + cpu_to_le16(v),__mem_pci(c))) +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \ + cpu_to_le32(v),__mem_pci(c))) + +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE +#define __iormb() rmb() +#define __iowmb() wmb() +#else +#define __iormb() do { } while (0) +#define __iowmb() do { } while (0) +#endif + +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); }) #define readsb(p,d,l) __raw_readsb(__mem_pci(p),d,l) #define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l) #define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) -#define writeb(v,c) __raw_writeb(v,__mem_pci(c)) -#define writew(v,c) __raw_writew((__force __u16) \ - cpu_to_le16(v),__mem_pci(c)) -#define writel(v,c) __raw_writel((__force __u32) \ - cpu_to_le32(v),__mem_pci(c)) - #define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l) #define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l) #define writesl(p,d,l) __raw_writesl(__mem_pci(p),d,l) @@ -237,13 +258,13 @@ extern void _memset_io(volatile void __iomem *, int, size_t); * io{read,write}{8,16,32} macros */ #ifndef ioread8 -#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; }) -#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; }) -#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; }) +#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; }) +#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; }) +#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; }) -#define iowrite8(v,p) __raw_writeb(v, p) -#define iowrite16(v,p) __raw_writew((__force __u16)cpu_to_le16(v), p) -#define iowrite32(v,p) 
__raw_writel((__force __u32)cpu_to_le32(v), p) +#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); }) +#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); }) +#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); }) #define ioread8_rep(p,d,c) __raw_readsb(p,d,c) #define ioread16_rep(p,d,c) __raw_readsw(p,d,c) diff --git a/arch/arm/include/asm/ioctls.h b/arch/arm/include/asm/ioctls.h index a91d8a1523c..7f0b6d13296 100644 --- a/arch/arm/include/asm/ioctls.h +++ b/arch/arm/include/asm/ioctls.h @@ -53,6 +53,9 @@ #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ +#define TIOCGRS485 0x542E +#define TIOCSRS485 0x542F + #define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ #define FIOCLEX 0x5451 #define FIOASYNC 0x5452 diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index 328f14a8b79..237282f7c76 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -17,6 +17,7 @@ #ifndef __ASSEMBLY__ struct irqaction; +struct pt_regs; extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h index c019949a518..e51b1e81df0 100644 --- a/arch/arm/include/asm/kmap_types.h +++ b/arch/arm/include/asm/kmap_types.h @@ -18,7 +18,9 @@ enum km_type { KM_IRQ1, KM_SOFTIRQ0, KM_SOFTIRQ1, + KM_L1_CACHE, KM_L2_CACHE, + KM_KDB, KM_TYPE_NR }; diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index a38bdc7afa3..52f0da1e97d 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -8,10 +8,16 @@ * published by the Free Software Foundation. */ +#ifndef __ASM_MACH_PCI_H +#define __ASM_MACH_PCI_H + struct pci_sys_data; struct pci_bus; struct hw_pci { +#ifdef CONFIG_PCI_DOMAINS + int domain; +#endif struct list_head buses; int nr_controllers; int (*setup)(int nr, struct pci_sys_data *); @@ -26,6 +32,9 @@ struct hw_pci { * Per-controller structure */ struct pci_sys_data { +#ifdef CONFIG_PCI_DOMAINS + int domain; +#endif struct list_head node; int busnr; /* primary bus number */ u64 mem_offset; /* bus->cpu memory mapping offset */ @@ -70,3 +79,5 @@ extern int pci_v3_setup(int nr, struct pci_sys_data *); extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *); extern void pci_v3_preinit(void); extern void pci_v3_postinit(void); + +#endif /* __ASM_MACH_PCI_H */ diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h index b2cc1fcd040..35d408f6dcc 100644 --- a/arch/arm/include/asm/mach/time.h +++ b/arch/arm/include/asm/mach/time.h @@ -38,7 +38,7 @@ struct sys_timer { void (*init)(void); void (*suspend)(void); void (*resume)(void); -#ifndef CONFIG_GENERIC_TIME +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET unsigned long (*offset)(void); #endif }; @@ -46,12 +46,4 @@ struct sys_timer { extern struct sys_timer *system_timer; extern void timer_tick(void); -/* - * Kernel time keeping support. 
- */ -struct timespec; -extern int (*set_rtc)(void); -extern void save_time_delta(struct timespec *delta, struct timespec *rtc); -extern void restore_time_delta(struct timespec *delta, struct timespec *rtc); - #endif diff --git a/arch/arm/include/asm/mach/udc_pxa2xx.h b/arch/arm/include/asm/mach/udc_pxa2xx.h index f3eabf1ecec..833306ee9e7 100644 --- a/arch/arm/include/asm/mach/udc_pxa2xx.h +++ b/arch/arm/include/asm/mach/udc_pxa2xx.h @@ -21,8 +21,8 @@ struct pxa2xx_udc_mach_info { * here. Note that sometimes the signals go through inverters... */ bool gpio_vbus_inverted; - u16 gpio_vbus; /* high == vbus present */ + int gpio_vbus; /* high == vbus present */ bool gpio_pullup_inverted; - u16 gpio_pullup; /* high == pullup activated */ + int gpio_pullup; /* high == pullup activated */ }; diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 5421d82a257..4312ee5e3d0 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -76,6 +76,17 @@ */ #define IOREMAP_MAX_ORDER 24 +/* + * Size of DMA-consistent memory region. Must be multiple of 2M, + * between 2MB and 14MB inclusive. + */ +#ifndef CONSISTENT_DMA_SIZE +#define CONSISTENT_DMA_SIZE SZ_2M +#endif + +#define CONSISTENT_END (0xffe00000UL) +#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) + #else /* CONFIG_MMU */ /* @@ -93,11 +104,11 @@ #endif #ifndef PHYS_OFFSET -#define PHYS_OFFSET (CONFIG_DRAM_BASE) +#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) #endif #ifndef END_MEM -#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) +#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET @@ -113,14 +124,6 @@ #endif /* !CONFIG_MMU */ /* - * Size of DMA-consistent memory region. Must be multiple of 2M, - * between 2MB and 14MB inclusive. - */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE SZ_2M -#endif - -/* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b561584d04a..68870c77667 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,6 +6,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID unsigned int id; + spinlock_t id_lock; #endif unsigned int kvm_seq; } mm_context_t; diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index de6cefb329d..a0b3cac0547 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm); #define ASID_FIRST_VERSION (1 << ASID_BITS) extern unsigned int cpu_last_asid; +#ifdef CONFIG_SMP +DECLARE_PER_CPU(struct mm_struct *, current_mm); +#endif void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); static inline void check_context(struct mm_struct *mm) { + /* + * This code is executed with interrupts enabled. Therefore, + * mm->context.id cannot be updated to the latest ASID version + * on a different CPU (and condition below not triggered) + * without first getting an IPI to reset the context. The + * alternative is to take a read_lock on mm->context.id_lock + * (after changing its type to rwlock_t). 
+ */ if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) __new_context(mm); @@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, __flush_icache_all(); #endif if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { +#ifdef CONFIG_SMP + struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); + *crt_mm = next; +#endif check_context(next); cpu_switch_mm(next->pgd, next); if (cache_is_vivt()) diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h new file mode 100644 index 00000000000..25f76bae57a --- /dev/null +++ b/arch/arm/include/asm/outercache.h @@ -0,0 +1,75 @@ +/* + * arch/arm/include/asm/outercache.h + * + * Copyright (C) 2010 ARM Ltd. + * Written by Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __ASM_OUTERCACHE_H +#define __ASM_OUTERCACHE_H + +struct outer_cache_fns { + void (*inv_range)(unsigned long, unsigned long); + void (*clean_range)(unsigned long, unsigned long); + void (*flush_range)(unsigned long, unsigned long); +#ifdef CONFIG_OUTER_CACHE_SYNC + void (*sync)(void); +#endif +}; + +#ifdef CONFIG_OUTER_CACHE + +extern struct outer_cache_fns outer_cache; + +static inline void outer_inv_range(unsigned long start, unsigned long end) +{ + if (outer_cache.inv_range) + outer_cache.inv_range(start, end); +} +static inline void outer_clean_range(unsigned long start, unsigned long end) +{ + if (outer_cache.clean_range) + outer_cache.clean_range(start, end); +} +static inline void outer_flush_range(unsigned long start, unsigned long end) +{ + if (outer_cache.flush_range) + outer_cache.flush_range(start, end); +} + +#else + +static inline void outer_inv_range(unsigned long start, unsigned long end) +{ } +static inline void outer_clean_range(unsigned long start, unsigned long end) +{ } +static inline void outer_flush_range(unsigned long start, unsigned long end) +{ } + +#endif + +#ifdef CONFIG_OUTER_CACHE_SYNC +static inline void outer_sync(void) +{ + if (outer_cache.sync) + outer_cache.sync(); +} +#else +static inline void outer_sync(void) +{ } +#endif + +#endif /* __ASM_OUTERCACHE_H */ diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 3a32af4cce3..a485ac3c869 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -117,11 +117,12 @@ #endif struct page; +struct vm_area_struct; struct cpu_user_fns { void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct *vma); }; #ifdef MULTI_USER @@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user; extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr); + unsigned long vaddr, struct vm_area_struct 
*vma); #endif #define clear_user_highpage(page,vaddr) \ @@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ - __cpu_copy_user_highpage(to, from, vaddr) + __cpu_copy_user_highpage(to, from, vaddr, vma) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, const void *from); diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 226cddd2fb6..92e2a833693 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -4,8 +4,23 @@ #ifdef __KERNEL__ #include <asm-generic/pci-dma-compat.h> +#include <asm/mach/pci.h> /* for pci_sys_data */ #include <mach/hardware.h> /* for PCIBIOS_MIN_* */ +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_domain_nr(struct pci_bus *bus) +{ + struct pci_sys_data *root = bus->sysdata; + + return root->domain; +} + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return pci_domain_nr(bus); +} +#endif /* CONFIG_PCI_DOMAINS */ + #ifdef CONFIG_PCI_HOST_ITE8152 /* ITE bridge requires setting latency timer to avoid early bus access termination by PIC bus mater devices @@ -30,17 +45,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active) */ #define PCI_DMA_BUS_IS_PHYS (1) -/* - * Whether pci_unmap_{single,page} is a nop depends upon the - * configuration. - */ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME; -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME; -#define pci_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) -#define pci_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h new file mode 100644 index 00000000000..48837e6d888 --- /dev/null +++ b/arch/arm/include/asm/perf_event.h @@ -0,0 +1,48 @@ +/* + * linux/arch/arm/include/asm/perf_event.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PERF_EVENT_H__ +#define __ARM_PERF_EVENT_H__ + +/* + * NOP: on *most* (read: all supported) ARM platforms, the performance + * counter interrupts are regular interrupts and not an NMI. This + * means that when we receive the interrupt we can call + * perf_event_do_pending() that handles all of the work with + * interrupts enabled. + */ +static inline void +set_perf_event_pending(void) +{ +} + +/* ARM performance counters start from 1 (in the cp15 accesses) so use the + * same indexes here for consistency. */ +#define PERF_EVENT_INDEX_OFFSET 1 + +/* ARM perf PMU IDs for use by internal perf clients. 
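The new outercache.h above routes L2 maintenance through optional function pointers: each outer_*_range() wrapper simply skips the call when no outer-cache driver has filled in the corresponding member of outer_cache. A hedged, stand-alone illustration of that dispatch pattern (a user-space mock-up with local stand-ins for the kernel types; the l2_clean_range callback is hypothetical and only the clean hook is shown):

#include <stdio.h>

struct outer_cache_fns {
        void (*inv_range)(unsigned long, unsigned long);
        void (*clean_range)(unsigned long, unsigned long);
        void (*flush_range)(unsigned long, unsigned long);
};

static struct outer_cache_fns outer_cache;      /* all NULL => no outer cache */

/* Callers never test for an outer cache themselves; the wrapper does. */
static void outer_clean_range(unsigned long start, unsigned long end)
{
        if (outer_cache.clean_range)
                outer_cache.clean_range(start, end);
}

/* A hypothetical L2 driver's callback. */
static void l2_clean_range(unsigned long start, unsigned long end)
{
        printf("clean L2 lines %#lx..%#lx\n", start, end);
}

int main(void)
{
        outer_clean_range(0x80000000UL, 0x80001000UL);  /* no-op, nothing registered */
        outer_cache.clean_range = l2_clean_range;       /* e.g. done at L2 cache init */
        outer_clean_range(0x80000000UL, 0x80001000UL);  /* now dispatches to the driver */
        return 0;
}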
*/ +enum arm_perf_pmu_ids { + ARM_PERF_PMU_ID_XSCALE1 = 0, + ARM_PERF_PMU_ID_XSCALE2, + ARM_PERF_PMU_ID_V6, + ARM_PERF_PMU_ID_V6MP, + ARM_PERF_PMU_ID_CA8, + ARM_PERF_PMU_ID_CA9, + ARM_NUM_PMU_IDS, +}; + +extern enum arm_perf_pmu_ids +armpmu_get_pmu_id(void); + +extern int +armpmu_get_max_events(void); + +#endif /* __ARM_PERF_EVENT_H__ */ diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index b011f2e939a..ffc0e85775b 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -67,6 +67,7 @@ static inline int pte_file(pte_t pte) { return 0; } */ #define pgprot_noncached(prot) __pgprot(0) #define pgprot_writecombine(prot) __pgprot(0) +#define pgprot_dmacoherent(prot) __pgprot(0) /* @@ -86,8 +87,8 @@ extern unsigned int kobjsize(const void *objp); * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff +#define VMALLOC_START 0UL +#define VMALLOC_END 0xffffffffUL #define FIRST_USER_ADDRESS (0) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 11397687f42..ab68cf1ef80 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -314,7 +314,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) #define pgprot_writecombine(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) -#if __LINUX_ARM_ARCH__ >= 7 +#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE #define pgprot_dmacoherent(prot) \ __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) #else diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h new file mode 100644 index 00000000000..8ccea012722 --- /dev/null +++ b/arch/arm/include/asm/pmu.h @@ -0,0 +1,77 @@ +/* + * linux/arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __ARM_PMU_H__ +#define __ARM_PMU_H__ + +enum arm_pmu_type { + ARM_PMU_DEVICE_CPU = 0, + ARM_NUM_PMU_DEVICES, +}; + +#ifdef CONFIG_CPU_HAS_PMU + +/** + * reserve_pmu() - reserve the hardware performance counters + * + * Reserve the hardware performance counters in the system for exclusive use. + * The platform_device for the system is returned on success, ERR_PTR() + * encoded error on failure. + */ +extern struct platform_device * +reserve_pmu(enum arm_pmu_type device); + +/** + * release_pmu() - Relinquish control of the performance counters + * + * Release the performance counters and allow someone else to use them. + * Callers must have disabled the counters and released IRQs before calling + * this. The platform_device returned from reserve_pmu() must be passed as + * a cookie. + */ +extern int +release_pmu(struct platform_device *pdev); + +/** + * init_pmu() - Initialise the PMU. + * + * Initialise the system ready for PMU enabling. This should typically set the + * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do + * the actual hardware initialisation. 
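The pmu.h kerneldoc above describes a reserve/init/release life cycle for the hardware counters, with ERR_PTR()-encoded failures. A hedged sketch of how a client (for instance a profiling backend) might claim them; example_pmu_claim() and its error handling are illustrative, only reserve_pmu(), release_pmu(), init_pmu() (declared just below) and ARM_PMU_DEVICE_CPU come from the header:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static struct platform_device *pmu_device;

static int example_pmu_claim(void)
{
        int err;

        pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
        if (IS_ERR(pmu_device))
                return PTR_ERR(pmu_device);     /* e.g. -ENODEV without CONFIG_CPU_HAS_PMU */

        err = init_pmu(ARM_PMU_DEVICE_CPU);     /* set IRQ affinity etc. */
        if (err) {
                release_pmu(pmu_device);
                return err;
        }

        /* ... request the IRQs listed in pmu_device->resource[] and program the PMU ... */
        return 0;
}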
+ */ +extern int +init_pmu(enum arm_pmu_type device); + +#else /* CONFIG_CPU_HAS_PMU */ + +#include <linux/err.h> + +static inline struct platform_device * +reserve_pmu(enum arm_pmu_type device) +{ + return ERR_PTR(-ENODEV); +} + +static inline int +release_pmu(struct platform_device *pdev) +{ + return -ENODEV; +} + +static inline int +init_pmu(enum arm_pmu_type device) +{ + return -ENODEV; +} + +#endif /* CONFIG_CPU_HAS_PMU */ + +#endif /* __ARM_PMU_H__ */ diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 6a89567ffc5..7bed3daf83b 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -91,7 +91,11 @@ extern void release_thread(struct task_struct *); unsigned long get_wchan(struct task_struct *p); +#if __LINUX_ARM_ARCH__ == 6 +#define cpu_relax() smp_mb() +#else #define cpu_relax() barrier() +#endif /* * Create a new kernel thread diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index eec6e897ceb..9dcb11e5902 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -128,6 +128,8 @@ struct pt_regs { #ifdef __KERNEL__ +#define arch_has_single_step() (1) + #define user_mode(regs) \ (((regs)->ARM_cpsr & 0xf) == 0) diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index ca0a37d0340..2f87870d934 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h @@ -3,25 +3,6 @@ #include <asm/memory.h> #include <asm/types.h> - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; /* buffer offset */ - dma_addr_t dma_address; /* dma address */ - unsigned int length; /* length */ -}; - -/* - * These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns, or alternatively stop on the first sg_dma_len(sg) which - * is 0. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) +#include <asm-generic/scatterlist.h> #endif /* _ASMARM_SCATTERLIST_H */ diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 5ccce0a9b03..f392fb4437a 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -223,18 +223,6 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -/* - * Early command line parameters. 
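The hunk just below drops the ARM-private __early_param() registration from setup.h; the kernel-wide early_param() mechanism covers the same ground, and the sketch here assumes callers migrate to it. The parameter name and handler are hypothetical:

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long example_size;

static int __init parse_example_size(char *p)
{
        example_size = memparse(p, &p);         /* accepts suffixes such as K, M, G */
        return 0;
}
early_param("example_size", parse_example_size);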
- */ -struct early_params { - const char *arg; - void (*fn)(char **p); -}; - -#define __early_param(name,fn) \ -static struct early_params __early_##fn __used \ -__attribute__((__section__(".early_param.init"))) = { name, fn } - #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index e0d763be184..3d05190797c 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -82,7 +82,7 @@ struct secondary_data { extern struct secondary_data secondary_data; extern int __cpu_disable(void); -extern int mach_cpu_disable(unsigned int cpu); +extern int platform_cpu_disable(unsigned int cpu); extern void __cpu_die(unsigned int cpu); extern void cpu_die(void); diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 59303e20084..e6215305544 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +static inline int cache_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; +} + #endif diff --git a/arch/arm/include/asm/smp_twd.h b/arch/arm/include/asm/smp_twd.h index 7be0978b262..634f357be6b 100644 --- a/arch/arm/include/asm/smp_twd.h +++ b/arch/arm/include/asm/smp_twd.h @@ -1,6 +1,23 @@ #ifndef __ASMARM_SMP_TWD_H #define __ASMARM_SMP_TWD_H +#define TWD_TIMER_LOAD 0x00 +#define TWD_TIMER_COUNTER 0x04 +#define TWD_TIMER_CONTROL 0x08 +#define TWD_TIMER_INTSTAT 0x0C + +#define TWD_WDOG_LOAD 0x20 +#define TWD_WDOG_COUNTER 0x24 +#define TWD_WDOG_CONTROL 0x28 +#define TWD_WDOG_INTSTAT 0x2C +#define TWD_WDOG_RESETSTAT 0x30 +#define TWD_WDOG_DISABLE 0x34 + +#define TWD_TIMER_CONTROL_ENABLE (1 << 0) +#define TWD_TIMER_CONTROL_ONESHOT (0 << 1) +#define TWD_TIMER_CONTROL_PERIODIC (1 << 1) +#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) + struct clock_event_device; extern void __iomem *twd_base; diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c91c64cab92..17eb355707d 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -5,6 +5,22 @@ #error SMP not supported on pre-ARMv6 CPUs #endif +static inline void dsb_sev(void) +{ +#if __LINUX_ARM_ARCH__ >= 7 + __asm__ __volatile__ ( + "dsb\n" + "sev" + ); +#elif defined(CONFIG_CPU_32v6K) + __asm__ __volatile__ ( + "mcr p15, 0, %0, c7, c10, 4\n" + "sev" + : : "r" (0) + ); +#endif +} + /* * ARMv6 Spin-locking. * @@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) __asm__ __volatile__( " str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev" -#endif : : "r" (&lock->lock), "r" (0) : "cc"); + + dsb_sev(); } /* @@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) __asm__ __volatile__( "str %1, [%0]\n" -#ifdef CONFIG_CPU_32v6K -" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */ -" sev\n" -#endif : : "r" (&rw->lock), "r" (0) : "cc"); + + dsb_sev(); } /* write_can_lock - would write_trylock() succeed? 
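Earlier in this range, smp_twd.h gains the TWD register offsets and control bits. A hedged sketch of how a local-timer driver might use them to start a periodic tick and acknowledge its interrupt; the function names and reload value are illustrative, while twd_base and the TWD_* constants come from the header:

#include <linux/io.h>
#include <asm/smp_twd.h>

static void example_twd_start_periodic(unsigned long reload)
{
        __raw_writel(reload, twd_base + TWD_TIMER_LOAD);
        __raw_writel(TWD_TIMER_CONTROL_ENABLE |
                     TWD_TIMER_CONTROL_PERIODIC |
                     TWD_TIMER_CONTROL_IT_ENABLE,
                     twd_base + TWD_TIMER_CONTROL);
}

static void example_twd_ack_irq(void)
{
        /* Writing 1 clears the event flag in the interrupt status register. */
        __raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
}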
*/ @@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " strex %1, %0, [%2]\n" " teq %1, #0\n" " bne 1b" -#ifdef CONFIG_CPU_32v6K -"\n cmp %0, #0\n" -" mcreq p15, 0, %0, c7, c10, 4\n" -" seveq" -#endif : "=&r" (tmp), "=&r" (tmp2) : "r" (&rw->lock) : "cc"); + + if (tmp == 0) + dsb_sev(); } static inline int arch_read_trylock(arch_rwlock_t *rw) diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 058e7e90881..5f4f4800273 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -60,6 +60,8 @@ #include <linux/linkage.h> #include <linux/irqflags.h> +#include <asm/outercache.h> + #define __exception __attribute__((section(".exception.text"))) struct thread_info; @@ -73,8 +75,7 @@ extern unsigned int mem_fclk_21285; struct pt_regs; -void die(const char *msg, struct pt_regs *regs, int err) - __attribute__((noreturn)); +void die(const char *msg, struct pt_regs *regs, int err); struct siginfo; void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, @@ -138,10 +139,12 @@ extern unsigned int user_debug; #define dmb() __asm__ __volatile__ ("" : : : "memory") #endif -#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP) -#define mb() dmb() +#ifdef CONFIG_ARCH_HAS_BARRIERS +#include <mach/barriers.h> +#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() do { dsb(); outer_sync(); } while (0) #define rmb() dmb() -#define wmb() dmb() +#define wmb() mb() #else #define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) #define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0) @@ -153,9 +156,9 @@ extern unsigned int user_debug; #define smp_rmb() barrier() #define smp_wmb() barrier() #else -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() +#define smp_mb() dmb() +#define smp_rmb() dmb() +#define smp_wmb() dmb() #endif #define read_barrier_depends() do { } while(0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 2dfb7d7a66e..763e29fa853 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *); extern void iwmmxt_task_release(struct thread_info *); extern void iwmmxt_task_switch(struct thread_info *); -extern void vfp_sync_state(struct thread_info *thread); +extern void vfp_sync_hwstate(struct thread_info *); +extern void vfp_flush_hwstate(struct thread_info *); #endif @@ -140,7 +141,7 @@ extern void vfp_sync_state(struct thread_info *thread); #define TIF_SYSCALL_TRACE 8 #define TIF_POLLING_NRFLAG 16 #define TIF_USING_IWMMXT 17 -#define TIF_MEMDIE 18 +#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index c2f1605de35..bd863d8608c 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -46,6 +46,9 @@ #define TLB_V7_UIS_FULL (1 << 20) #define TLB_V7_UIS_ASID (1 << 21) +/* Inner Shareable BTB operation (ARMv7 MP extensions) */ +#define TLB_V7_IS_BTB (1 << 22) + #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ #define TLB_DCLEAN (1 << 30) #define TLB_WB (1 << 31) @@ -183,7 +186,7 @@ #endif #ifdef CONFIG_SMP -#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ +#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) #else #define 
v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ @@ -339,6 +342,12 @@ static inline void local_flush_tlb_all(void) dsb(); isb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void local_flush_tlb_mm(struct mm_struct *mm) @@ -376,6 +385,12 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); dsb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void @@ -416,6 +431,12 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); dsb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } static inline void local_flush_tlb_kernel_page(unsigned long kaddr) @@ -454,6 +475,12 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) dsb(); isb(); } + if (tlb_flag(TLB_V7_IS_BTB)) { + /* flush the branch target cache */ + asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); + dsb(); + isb(); + } } /* @@ -529,7 +556,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); * cache entries for the kernels virtual memory range are written * back to the page. */ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); #endif diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 1d6bd40a432..33e4a48fe10 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -229,16 +229,16 @@ do { \ __asm__ __volatile__( \ "1: ldrbt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " mov %1, #0\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT) \ : "cc") @@ -265,16 +265,16 @@ do { \ __asm__ __volatile__( \ "1: ldrt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " mov %1, #0\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT) \ : "cc") @@ -310,15 +310,15 @@ do { \ __asm__ __volatile__( \ "1: strbt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") @@ -343,15 +343,15 @@ do { \ __asm__ __volatile__( \ "1: strt %1,[%2]\n" \ "2:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, %3\n" \ " b 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection 
__ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 3b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err) \ : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \ : "cc") @@ -371,16 +371,16 @@ do { \ THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \ THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \ "3:\n" \ - " .section .fixup,\"ax\"\n" \ + " .pushsection .fixup,\"ax\"\n" \ " .align 2\n" \ "4: mov %0, %3\n" \ " b 3b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ + " .popsection\n" \ + " .pushsection __ex_table,\"a\"\n" \ " .align 3\n" \ " .long 1b, 4b\n" \ " .long 2b, 4b\n" \ - " .previous" \ + " .popsection" \ : "+r" (err), "+r" (__pu_addr) \ : "r" (x), "i" (-EFAULT) \ : "cc") diff --git a/arch/arm/include/asm/ucontext.h b/arch/arm/include/asm/ucontext.h index bf65e9f4525..47f023aa849 100644 --- a/arch/arm/include/asm/ucontext.h +++ b/arch/arm/include/asm/ucontext.h @@ -59,23 +59,22 @@ struct iwmmxt_sigframe { #endif /* CONFIG_IWMMXT */ #ifdef CONFIG_VFP -#if __LINUX_ARM_ARCH__ < 6 -/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra - * word after the registers, and a word of padding at the end for - * alignment. */ #define VFP_MAGIC 0x56465001 -#define VFP_STORAGE_SIZE 152 -#else -#define VFP_MAGIC 0x56465002 -#define VFP_STORAGE_SIZE 144 -#endif struct vfp_sigframe { unsigned long magic; unsigned long size; - union vfp_state storage; -}; + struct user_vfp ufp; + struct user_vfp_exc ufp_exc; +} __attribute__((__aligned__(8))); + +/* + * 8 byte for magic and size, 264 byte for ufp, 12 bytes for ufp_exc, + * 4 bytes padding. + */ +#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe) + #endif /* CONFIG_VFP */ /* @@ -91,7 +90,7 @@ struct aux_sigframe { #ifdef CONFIG_IWMMXT struct iwmmxt_sigframe iwmmxt; #endif -#if 0 && defined CONFIG_VFP /* Not yet saved. */ +#ifdef CONFIG_VFP struct vfp_sigframe vfp; #endif /* Something that isn't a valid magic number for any coprocessor. */ diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index cf9cdaa2d4d..dd2bf53000f 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -443,9 +443,12 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_RT_SIGACTION #define __ARCH_WANT_SYS_RT_SIGSUSPEND +#define __ARCH_WANT_SYS_OLD_MMAP +#define __ARCH_WANT_SYS_OLD_SELECT #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) #define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_ALARM #define __ARCH_WANT_SYS_UTIME diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h index df95e050f9d..05ac4b06876 100644 --- a/arch/arm/include/asm/user.h +++ b/arch/arm/include/asm/user.h @@ -83,11 +83,21 @@ struct user{ /* * User specific VFP registers. If only VFPv2 is present, registers 16 to 31 - * are ignored by the ptrace system call. + * are ignored by the ptrace system call and the signal handler. */ struct user_vfp { unsigned long long fpregs[32]; unsigned long fpscr; }; +/* + * VFP exception registers exposed to user space during signal delivery. + * Fields not relavant to the current VFP architecture are ignored. + */ +struct user_vfp_exc { + unsigned long fpexc; + unsigned long fpinst; + unsigned long fpinst2; +}; + #endif /* _ARM_USER_H */ |