Diffstat (limited to 'arch/xtensa/include/asm')
30 files changed, 733 insertions, 198 deletions
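Every XCHAL_HAVE_S32C1I path added below follows the same shape: l32i loads the old value, wsr stages it in the SCOMPARE1 special register, s32c1i conditionally stores the new value (the store happens only if memory still equals SCOMPARE1, and the register receives the value actually observed), and bne loops on failure. A minimal C11 sketch of that retry loop, with s32c1i modeled by a compare-exchange builtin; the sketch and its names are illustrative, not part of the patch:

#include <stdatomic.h>

/* Sketch of the retry loop behind the new atomic_add(): the load is the
 * "l32i", the compare-exchange is the "wsr scompare1" + "s32c1i" pair,
 * and the while() is the "bne %0, %1, 1b" branch back to the top. */
static inline void sketch_atomic_add(int i, _Atomic int *v)
{
        int old = atomic_load_explicit(v, memory_order_relaxed);

        /* Store old + i only if *v still equals 'old'; on failure 'old'
         * is refreshed with the observed value and we retry. */
        while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                ;
}

This is also why the interrupt-masking rsil/wsr-ps fallback stays under #else in each function: cores without S32C1I have no compare-and-swap, so atomicity is obtained by raising the interrupt level to LOCKLEVEL around a plain load/modify/store.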
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 24f50cada70..c3f289174c1 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -66,19 +66,35 @@ */ static inline void atomic_add(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "add %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " add %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " add %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +#endif } /** @@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v) */ static inline void atomic_sub(int i, atomic_t *v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "sub %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " sub %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " sub %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); +#endif } /* @@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v) static inline int atomic_add_return(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "add %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); - - return vval; +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " add %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + " add %0, %0, %2\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); + + return result; +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " add %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +#endif } static inline int atomic_sub_return(int i, atomic_t * v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "sub %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (i), "a" (v) - : "a15", "memory" - ); - - return vval; +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " sub %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + " sub %0, 
%0, %2\n" + : "=&a" (result), "=&a" (tmp) + : "a" (i), "a" (v) + : "memory" + ); + + return result; +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " sub %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (i), "a" (v) + : "a15", "memory" + ); + + return vval; +#endif } /** @@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) { - unsigned int all_f = -1; - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "xor %1, %4, %3 \n\t" - "and %0, %0, %4 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval), "=a" (mask) - : "a" (v), "a" (all_f), "1" (mask) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (~mask), "a" (v) + : "memory" + ); +#else + unsigned int all_f = -1; + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " xor %1, %4, %3\n" + " and %0, %0, %4\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval), "=a" (mask) + : "a" (v), "a" (all_f), "1" (mask) + : "a15", "memory" + ); +#endif } static inline void atomic_set_mask(unsigned int mask, atomic_t *v) { - unsigned int vval; - - __asm__ __volatile__( - "rsil a15,"__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %2, 0 \n\t" - "or %0, %0, %1 \n\t" - "s32i %0, %2, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n" - : "=&a" (vval) - : "a" (mask), "a" (v) - : "a15", "memory" - ); +#if XCHAL_HAVE_S32C1I + unsigned long tmp; + int result; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (mask), "a" (v) + : "memory" + ); +#else + unsigned int vval; + + __asm__ __volatile__( + " rsil a15,"__stringify(LOCKLEVEL)"\n" + " l32i %0, %2, 0\n" + " or %0, %0, %1\n" + " s32i %0, %2, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (vval) + : "a" (mask), "a" (v) + : "a15", "memory" + ); +#endif } /* Atomic operations are already serializing */ @@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ - diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h index 55707a8009d..ef021677d53 100644 --- a/arch/xtensa/include/asm/barrier.h +++ b/arch/xtensa/include/asm/barrier.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2012 Tensilica Inc. 
*/ #ifndef _XTENSA_SYSTEM_H @@ -12,8 +12,8 @@ #define smp_read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0) -#define mb() barrier() -#define rmb() mb() +#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); }) +#define rmb() barrier() #define wmb() mb() #ifdef CONFIG_SMP diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h index 5270197ddd3..84afe58d5d3 100644 --- a/arch/xtensa/include/asm/bitops.h +++ b/arch/xtensa/include/asm/bitops.h @@ -29,7 +29,6 @@ #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() -#include <asm-generic/bitops/atomic.h> #include <asm-generic/bitops/non-atomic.h> #if XCHAL_HAVE_NSA @@ -104,6 +103,132 @@ static inline unsigned long __fls(unsigned long word) #endif #include <asm-generic/bitops/fls64.h> + +#if XCHAL_HAVE_S32C1I + +static inline void set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); +} + +static inline void clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (~mask), "a" (p) + : "memory"); +} + +static inline void change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " xor %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); +} + +static inline int +test_and_set_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " or %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +static inline int +test_and_clear_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " and %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (~mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +static inline int +test_and_change_bit(unsigned int bit, volatile unsigned long *p) +{ + unsigned long tmp, value; + unsigned long mask = 1UL << (bit & 31); + + p += bit >> 5; + + __asm__ __volatile__( + "1: l32i %1, %3, 0\n" + " wsr %1, scompare1\n" + " xor %0, %1, %2\n" + " s32c1i %0, %3, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp), "=&a" (value) + : "a" (mask), "a" (p) + : "memory"); + + return tmp & mask; +} + +#else + +#include <asm-generic/bitops/atomic.h> + +#endif /* XCHAL_HAVE_S32C1I */ + #include <asm-generic/bitops/find.h> #include <asm-generic/bitops/le.h> diff --git a/arch/xtensa/include/asm/bootparam.h b/arch/xtensa/include/asm/bootparam.h index 9983f2c1b7e..0c25799faca 100644 
--- a/arch/xtensa/include/asm/bootparam.h +++ b/arch/xtensa/include/asm/bootparam.h @@ -22,6 +22,7 @@ #define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */ #define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */ #define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */ +#define BP_TAG_FDT 0x1006 /* flat device tree addr */ #define BP_TAG_FIRST 0x7B0B /* first tag with a version number */ #define BP_TAG_LAST 0x7E0B /* last tag */ @@ -31,15 +32,15 @@ /* All records are aligned to 4 bytes */ typedef struct bp_tag { - unsigned short id; /* tag id */ - unsigned short size; /* size of this record excluding the structure*/ - unsigned long data[0]; /* data */ + unsigned short id; /* tag id */ + unsigned short size; /* size of this record excluding the structure*/ + unsigned long data[0]; /* data */ } bp_tag_t; typedef struct meminfo { - unsigned long type; - unsigned long start; - unsigned long end; + unsigned long type; + unsigned long start; + unsigned long end; } meminfo_t; #define SYSMEM_BANKS_MAX 5 @@ -48,14 +49,11 @@ typedef struct meminfo { #define MEMORY_TYPE_NONE 0x2000 typedef struct sysmem_info { - int nr_banks; - meminfo_t bank[SYSMEM_BANKS_MAX]; + int nr_banks; + meminfo_t bank[SYSMEM_BANKS_MAX]; } sysmem_info_t; extern sysmem_info_t sysmem; #endif #endif - - - diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h index 2c20a58f94c..60e18773ecb 100644 --- a/arch/xtensa/include/asm/cacheasm.h +++ b/arch/xtensa/include/asm/cacheasm.h @@ -174,4 +174,3 @@ __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH .endm - diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 569fec4f9a2..127cd48883c 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page*); extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); -extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); +extern void flush_cache_page(struct vm_area_struct*, + unsigned long, unsigned long); #else diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h index e4d831a3077..aed7ad68ca4 100644 --- a/arch/xtensa/include/asm/checksum.h +++ b/arch/xtensa/include/asm/checksum.h @@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); * better 64-bit) boundary */ -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); +asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone @@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, static inline __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) + int len, __wsum sum, int *err_ptr) { return csum_partial_copy_generic((__force const void *)src, dst, len, sum, err_ptr, NULL); @@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) /* Since the input registers which are loaded with iph and ihl are modified, we must also specify them as outputs, or gcc will assume they contain their original values. 
*/ - : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr) + : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), + "=&r" (endaddr) : "1" (iph), "2" (ihl) : "memory"); @@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, static __inline__ __sum16 ip_compute_csum(const void *buff, int len) { - return csum_fold (csum_partial(buff, len, 0)); + return csum_fold (csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM @@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, - int len, __wsum sum, int *err_ptr) +static __inline__ __wsum csum_and_copy_to_user(const void *src, + void __user *dst, int len, + __wsum sum, int *err_ptr) { if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); + return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr); if (len) *err_ptr = -EFAULT; diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 64dad04a9d2..d9ab131bc1a 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -22,17 +22,30 @@ static inline unsigned long __cmpxchg_u32(volatile int *p, int old, int new) { - __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %1, 0 \n\t" - "bne %0, %2, 1f \n\t" - "s32i %3, %1, 0 \n\t" - "1: \n\t" - "wsr a15, ps \n\t" - "rsync \n\t" - : "=&a" (old) - : "a" (p), "a" (old), "r" (new) - : "a15", "memory"); - return old; +#if XCHAL_HAVE_S32C1I + __asm__ __volatile__( + " wsr %2, scompare1\n" + " s32c1i %0, %1, 0\n" + : "+a" (new) + : "a" (p), "a" (old) + : "memory" + ); + + return new; +#else + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %1, 0\n" + " bne %0, %2, 1f\n" + " s32i %3, %1, 0\n" + "1:\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (old) + : "a" (p), "a" (old), "r" (new) + : "a15", "memory"); + return old; +#endif } /* This function doesn't exist, so you'll get a linker error * if something tries to do an invalid cmpxchg(). */ @@ -93,19 +106,36 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, static inline unsigned long xchg_u32(volatile int * m, unsigned long val) { - unsigned long tmp; - __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" - "l32i %0, %1, 0 \n\t" - "s32i %2, %1, 0 \n\t" - "wsr a15, ps \n\t" - "rsync \n\t" - : "=&a" (tmp) - : "a" (m), "a" (val) - : "a15", "memory"); - return tmp; +#if XCHAL_HAVE_S32C1I + unsigned long tmp, result; + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " mov %0, %3\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (m), "a" (val) + : "memory" + ); + return result; +#else + unsigned long tmp; + __asm__ __volatile__( + " rsil a15, "__stringify(LOCKLEVEL)"\n" + " l32i %0, %1, 0\n" + " s32i %2, %1, 0\n" + " wsr a15, ps\n" + " rsync\n" + : "=&a" (tmp) + : "a" (m), "a" (val) + : "a15", "memory"); + return tmp; +#endif } -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define xchg(ptr,x) \ + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) /* * This only works if the compiler isn't horribly bad at optimizing. 
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h index 8d1eb5d7864..47e46dcf5d4 100644 --- a/arch/xtensa/include/asm/current.h +++ b/arch/xtensa/include/asm/current.h @@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void) #define GET_CURRENT(reg,sp) \ GET_THREAD_INFO(reg,sp); \ - l32i reg, reg, TI_TASK \ + l32i reg, reg, TI_TASK \ #endif diff --git a/arch/xtensa/include/asm/delay.h b/arch/xtensa/include/asm/delay.h index 58c0a4fd400..61fc5faeb46 100644 --- a/arch/xtensa/include/asm/delay.h +++ b/arch/xtensa/include/asm/delay.h @@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy; static inline void __delay(unsigned long loops) { - /* 2 cycles per loop. */ - __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" - : "=r" (loops) : "0" (loops)); + /* 2 cycles per loop. */ + __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b" + : "=r" (loops) : "0" (loops)); } static __inline__ u32 xtensa_get_ccount(void) @@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs) } #endif - diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h index 492c95790ad..4acb5feba1f 100644 --- a/arch/xtensa/include/asm/dma-mapping.h +++ b/arch/xtensa/include/asm/dma-mapping.h @@ -16,6 +16,8 @@ #include <linux/mm.h> #include <linux/scatterlist.h> +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) + /* * DMA-consistent mapping functions. */ @@ -98,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, } static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) +dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction direction) { consistent_sync((void *)bus_to_virt(dma_handle), size, direction); } diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h index 5293312bc6a..264d5fa450d 100644 --- a/arch/xtensa/include/asm/elf.h +++ b/arch/xtensa/include/asm/elf.h @@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); */ #define ELF_PLAT_INIT(_r, load_addr) \ - do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ - _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ - _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ - _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ - } while (0) + do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \ + _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \ + _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \ + _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \ + } while (0) typedef struct { xtregs_opt_t opt; diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 0a046ca5a68..80be1512469 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -14,4 +14,3 @@ extern void flush_cache_kmaps(void); #endif - diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h new file mode 100644 index 00000000000..e1f8ba4061e --- /dev/null +++ b/arch/xtensa/include/asm/initialize_mmu.h @@ -0,0 +1,55 @@ +/* + * arch/xtensa/include/asm/initialize_mmu.h + * + * Initializes MMU: + * + * For the new V3 MMU we remap the TLB from virtual == physical + * to the standard Linux mapping used in earlier MMU's. 
+ * + * For the MMU we also support a new configuration register that + * specifies how the S32C1I instruction operates with the cache + * controller. + * + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of + * this archive for more details. + * + * Copyright (C) 2008 - 2012 Tensilica, Inc. + * + * Marc Gauthier <marc@tensilica.com> + * Pete Delaney <piet@tensilica.com> + */ + +#ifndef _XTENSA_INITIALIZE_MMU_H +#define _XTENSA_INITIALIZE_MMU_H + +#ifdef __ASSEMBLY__ + +#define XTENSA_HWVERSION_RC_2009_0 230000 + + .macro initialize_mmu + +#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) +/* + * We have an Atomic Operation Control (ATOMCTL) register; initialize it. + * For details see Documentation/xtensa/atomctl.txt + */ +#if XCHAL_DCACHE_IS_COHERENT + movi a3, 0x25 /* For SMP/MX -- internal for writeback, + * RCW otherwise + */ +#else + movi a3, 0x29 /* non-MX -- Most cores use Std Memory + * Controllers which usually can't use RCW + */ +#endif + wsr a3, atomctl +#endif /* XCHAL_HAVE_S32C1I && + * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) + */ + + .endm + +#endif /*__ASSEMBLY__*/ + +#endif /* _XTENSA_INITIALIZE_MMU_H */ diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h index feb10af9651..d43525a286b 100644 --- a/arch/xtensa/include/asm/mmu_context.h +++ b/arch/xtensa/include/asm/mmu_context.h @@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, - struct task_struct *tsk) + struct task_struct *tsk) { unsigned long asid = asid_cache; diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h index 599e7a2e729..3407cf7989b 100644 --- a/arch/xtensa/include/asm/nommu_context.h +++ b/arch/xtensa/include/asm/nommu_context.h @@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } -static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm) { return 0; } diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 7a5591a71f8..47f582333f6 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -29,19 +29,19 @@ * PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_SHIFT 12 +#define PAGE_SIZE (__XTENSA_UL_CONST(1) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) #ifdef CONFIG_MMU -#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR -#define MAX_MEM_PFN XCHAL_KSEG_SIZE +#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR +#define MAX_MEM_PFN XCHAL_KSEG_SIZE #else -#define PAGE_OFFSET 0 -#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) +#define PAGE_OFFSET 0 +#define MAX_MEM_PFN (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE) #endif -#define PGTABLE_START 0x80000000 +#define PGTABLE_START 0x80000000 /* * Cache aliasing: @@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*); #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) -#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr) +#define pfn_valid(pfn) \ + ((pfn) >= ARCH_PFN_OFFSET &&
((pfn) - ARCH_PFN_OFFSET) < max_mapnr) + #ifdef CONFIG_DISCONTIGMEM # error CONFIG_DISCONTIGMEM not supported #endif diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h index 00fcbd7c534..0b68c76ec1e 100644 --- a/arch/xtensa/include/asm/pci-bridge.h +++ b/arch/xtensa/include/asm/pci-bridge.h @@ -35,7 +35,7 @@ struct pci_space { struct pci_controller { int index; /* used for pci_controller_num */ struct pci_controller *next; - struct pci_bus *bus; + struct pci_bus *bus; void *arch_data; int first_busno; diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index 05244f07dd3..614be031a79 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -53,7 +53,7 @@ struct pci_dev; /* Map a range of PCI memory or I/O space for a device into user space */ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); + enum pci_mmap_state mmap_state, int write_combine); /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ #define HAVE_PCI_MMAP 1 diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index 40cf9bceda2..cf914c8c249 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) extern struct kmem_cache *pgtable_cache; -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT); diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index b03c043ce75..c90ea5bfa1b 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -284,7 +284,7 @@ struct vm_area_struct; static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, - pte_t *ptep) + pte_t *ptep) { pte_t pte = *ptep; if (!pte_young(pte)) @@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_t pte = *ptep; - update_pte(ptep, pte_wrprotect(pte)); + pte_t pte = *ptep; + update_pte(ptep, pte_wrprotect(pte)); } /* to find an entry in a kernel page-table-directory */ @@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma, */ #define io_remap_pfn_range(vma,from,pfn,size,prot) \ - remap_pfn_range(vma, from, pfn, size, prot) + remap_pfn_range(vma, from, pfn, size, prot) typedef pte_t *pte_addr_t; diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h index 7d936e58e9b..ec098b68fb9 100644 --- a/arch/xtensa/include/asm/platform.h +++ b/arch/xtensa/include/asm/platform.h @@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void); extern void platform_calibrate_ccount (void); #endif /* _XTENSA_PLATFORM_H */ - diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index 2d630e7399c..e5fb6b0abdf 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -89,7 +89,7 @@ #define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) typedef struct { - unsigned long seg; + unsigned long seg; } mm_segment_t; struct thread_struct { @@ -145,10 +145,10 @@ struct thread_struct { * set_thread_state in signal.c depends on it. 
*/ #define USER_PS_VALUE ((1 << PS_WOE_BIT) | \ - (1 << PS_CALLINC_SHIFT) | \ - (USER_RING << PS_RING_SHIFT) | \ - (1 << PS_UM_BIT) | \ - (1 << PS_EXCM_BIT)) + (1 << PS_CALLINC_SHIFT) | \ + (USER_RING << PS_RING_SHIFT) | \ + (1 << PS_UM_BIT) | \ + (1 << PS_EXCM_BIT)) /* Clearing a0 terminates the backtrace. */ #define start_thread(regs, new_pc, new_sp) \ diff --git a/arch/xtensa/include/asm/prom.h b/arch/xtensa/include/asm/prom.h new file mode 100644 index 00000000000..f3d7cd2c0de --- /dev/null +++ b/arch/xtensa/include/asm/prom.h @@ -0,0 +1,6 @@ +#ifndef _XTENSA_ASM_PROM_H +#define _XTENSA_ASM_PROM_H + +#define HAVE_ARCH_DEVTREE_FIXUPS + +#endif /* _XTENSA_ASM_PROM_H */ diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index da21c17f23a..58bf6fd3f91 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h @@ -37,7 +37,7 @@ struct pt_regs { unsigned long windowstart; /* 52 */ unsigned long syscall; /* 56 */ unsigned long icountlevel; /* 60 */ - int reserved[1]; /* 64 */ + unsigned long scompare1; /* 64 */ /* Additional configurable registers that are used by the compiler. */ xtregs_opt_t xtregs_opt; @@ -55,7 +55,7 @@ struct pt_regs { # define arch_has_single_step() (1) # define task_pt_regs(tsk) ((struct pt_regs*) \ - (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) + (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) # define instruction_pointer(regs) ((regs)->pc) diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h index 8a8aa61ccc8..76096a4e5b8 100644 --- a/arch/xtensa/include/asm/regs.h +++ b/arch/xtensa/include/asm/regs.h @@ -52,6 +52,10 @@ #define EXCCAUSE_SPECULATION 7 #define EXCCAUSE_PRIVILEGED 8 #define EXCCAUSE_UNALIGNED 9 +#define EXCCAUSE_INSTR_DATA_ERROR 12 +#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13 +#define EXCCAUSE_INSTR_ADDR_ERROR 14 +#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15 #define EXCCAUSE_ITLB_MISS 16 #define EXCCAUSE_ITLB_MULTIHIT 17 #define EXCCAUSE_ITLB_PRIVILEGE 18 @@ -105,4 +109,3 @@ #define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */ #endif /* _XTENSA_SPECREG_H */ - diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index 8ff23649581..03975906b36 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h @@ -11,6 +11,192 @@ #ifndef _XTENSA_SPINLOCK_H #define _XTENSA_SPINLOCK_H -#include <linux/spinlock.h> +/* + * spinlock + * + * There is at most one owner of a spinlock. There are not different + * types of spinlock owners like there are for rwlocks (see below). + * + * When trying to obtain a spinlock, the function "spins" forever, or busy- + * waits, until the lock is obtained. When spinning, presumably some other + * owner will soon give up the spinlock making it available to others. Use + * the trylock functions to avoid spinning forever. 
+ * + * possible values: + * + * 0 nobody owns the spinlock + * 1 somebody owns the spinlock + */ + +#define __raw_spin_is_locked(x) ((x)->slock != 0) +#define __raw_spin_unlock_wait(lock) \ + do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) + +#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) + +static inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + "1: movi %0, 1\n" + " s32c1i %0, %1, 0\n" + " bnez %0, 1b\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + " movi %0, 1\n" + " s32c1i %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); + + return tmp == 0 ? 1 : 0; +} + +static inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " s32ri %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&lock->slock) + : "memory"); +} + +/* + * rwlock + * + * Read-write locks are really a more flexible spinlock. They allow + * multiple readers but only one writer. Write ownership is exclusive + * (i.e., all other readers and writers are blocked from ownership while + * there is a write owner). These rwlocks are unfair to writers. Writers + * can be starved for an indefinite time by readers. + * + * possible values: + * + * 0 nobody owns the rwlock + * >0 one or more readers own the rwlock + * (the positive value is the actual number of readers) + * 0x80000000 one writer owns the rwlock, no other writers, no readers + */ + +#define __raw_write_can_lock(x) ((x)->lock == 0) + +static inline void __raw_write_lock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + "1: movi %0, 1\n" + " slli %0, %0, 31\n" + " s32c1i %0, %1, 0\n" + " bnez %0, 1b\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. */ + +static inline int __raw_write_trylock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " wsr %0, scompare1\n" + " movi %0, 1\n" + " slli %0, %0, 31\n" + " s32c1i %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); + + return tmp == 0 ? 1 : 0; +} + +static inline void __raw_write_unlock(raw_rwlock_t *rw) +{ + unsigned long tmp; + + __asm__ __volatile__( + " movi %0, 0\n" + " s32ri %0, %1, 0\n" + : "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +static inline void __raw_read_lock(raw_rwlock_t *rw) +{ + unsigned long tmp; + unsigned long result; + + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " bltz %1, 1b\n" + " wsr %1, scompare1\n" + " addi %0, %1, 1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (result), "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); +} + +/* Returns 1 if the lock is obtained, 0 otherwise. 
*/ + +static inline int __raw_read_trylock(raw_rwlock_t *rw) +{ + unsigned long result; + unsigned long tmp; + + __asm__ __volatile__( + " l32i %1, %2, 0\n" + " addi %0, %1, 1\n" + " bltz %0, 1f\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " sub %0, %0, %1\n" + "1:\n" + : "=&a" (result), "=&a" (tmp) + : "a" (&rw->lock) + : "memory"); + + return result == 0; +} + +static inline void __raw_read_unlock(raw_rwlock_t *rw) +{ + unsigned long tmp1, tmp2; + + __asm__ __volatile__( + "1: l32i %1, %2, 0\n" + " addi %0, %1, -1\n" + " wsr %1, scompare1\n" + " s32c1i %0, %2, 0\n" + " bne %0, %1, 1b\n" + : "=&a" (tmp1), "=&a" (tmp2) + : "a" (&rw->lock) + : "memory"); +} #endif /* _XTENSA_SPINLOCK_H */ diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h index b00c928d4cc..8d5e47fad09 100644 --- a/arch/xtensa/include/asm/syscall.h +++ b/arch/xtensa/include/asm/syscall.h @@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int, /* Should probably move to linux/syscalls.h */ struct pollfd; asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timespec __user *tsp, void __user *sig); + fd_set __user *exp, struct timespec __user *tsp, + void __user *sig); asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, - struct timespec __user *tsp, const sigset_t __user *sigmask, - size_t sigsetsize); -asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, - size_t sigsetsize); + struct timespec __user *tsp, + const sigset_t __user *sigmask, + size_t sigsetsize); +asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h new file mode 100644 index 00000000000..54f70440185 --- /dev/null +++ b/arch/xtensa/include/asm/traps.h @@ -0,0 +1,23 @@ +/* + * arch/xtensa/include/asm/traps.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 Tensilica Inc. 
+ */ +#ifndef _XTENSA_TRAPS_H +#define _XTENSA_TRAPS_H + +#include <asm/ptrace.h> + +/* + * handler must be either of the following: + * void (*)(struct pt_regs *regs); + * void (*)(struct pt_regs *regs, unsigned long exccause); + */ +extern void * __init trap_set_handler(int cause, void *handler); +extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); + +#endif /* _XTENSA_TRAPS_H */ diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 6e4bb3b791a..fd686dc45d1 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -180,7 +180,8 @@ #define segment_eq(a,b) ((a).seg == (b).seg) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) +#define __user_ok(addr,size) \ + (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) @@ -234,10 +235,10 @@ do { \ int __cb; \ retval = 0; \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ - case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ - case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ - case 8: { \ + case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ + case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ + case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ + case 8: { \ __typeof__(*ptr) __v64 = x; \ retval = __copy_to_user(ptr,&__v64,8); \ break; \ @@ -291,7 +292,7 @@ do { \ * __check_align_* macros still work. */ #define __put_user_asm(x, addr, err, align, insn, cb) \ - __asm__ __volatile__( \ +__asm__ __volatile__( \ __check_align_##align \ "1: "insn" %2, %3, 0 \n" \ "2: \n" \ @@ -301,8 +302,8 @@ do { \ " .long 2b \n" \ "5: \n" \ " l32r %1, 4b \n" \ - " movi %0, %4 \n" \ - " jx %1 \n" \ + " movi %0, %4 \n" \ + " jx %1 \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .long 1b, 5b \n" \ @@ -334,13 +335,13 @@ extern long __get_user_bad(void); do { \ int __cb; \ retval = 0; \ - switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ - case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ - case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ - case 8: retval = __copy_from_user(&x,ptr,8); break; \ - default: (x) = __get_user_bad(); \ - } \ + switch (size) { \ + case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ + case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ + case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ + case 8: retval = __copy_from_user(&x,ptr,8); break; \ + default: (x) = __get_user_bad(); \ + } \ } while (0) @@ -349,7 +350,7 @@ do { \ * __check_align_* macros still work. 
*/ #define __get_user_asm(x, addr, err, align, insn, cb) \ - __asm__ __volatile__( \ +__asm__ __volatile__( \ __check_align_##align \ "1: "insn" %2, %3, 0 \n" \ "2: \n" \ @@ -360,8 +361,8 @@ do { \ "5: \n" \ " l32r %1, 4b \n" \ " movi %2, 0 \n" \ - " movi %0, %4 \n" \ - " jx %1 \n" \ + " movi %0, %4 \n" \ + " jx %1 \n" \ " .previous \n" \ " .section __ex_table,\"a\" \n" \ " .long 1b, 5b \n" \ @@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) -#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) -#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) +#define __copy_to_user(to,from,n) \ + __generic_copy_to_user_nocheck((to),(from),(n)) +#define __copy_from_user(to,from,n) \ + __generic_copy_from_user_nocheck((to),(from),(n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user
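The spinlock and rwlock comments earlier in this patch fully specify the lock-word protocol: a spinlock word is 0 (free) or 1 (held); an rwlock word is 0 (free), a positive reader count, or 0x80000000 (one writer). A standalone C11 sketch of the spinlock half, with s32c1i again modeled by compare-exchange; names are illustrative and this is not kernel code:

#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic uint32_t slock; } sketch_spinlock_t;

/* Mirrors __raw_spin_lock: expect 0 (free) in SCOMPARE1, try to store 1
 * (held), and spin while the conditional store keeps failing. */
static void sketch_spin_lock(sketch_spinlock_t *l)
{
        uint32_t expected;

        do {
                expected = 0;
        } while (!atomic_compare_exchange_weak(&l->slock, &expected, 1));
}

/* Mirrors __raw_spin_unlock: a plain store of 0 (the "s32ri") releases
 * the lock; no compare-and-swap is needed on the unlock side. */
static void sketch_spin_unlock(sketch_spinlock_t *l)
{
        atomic_store_explicit(&l->slock, 0, memory_order_release);
}

Trylock is the same sequence without the loop, which is why __raw_spin_trylock returns 'tmp == 0': s32c1i leaves 0 in the register exactly when it observed the lock free and claimed it.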