Diffstat (limited to 'include/asm-avr32')
-rw-r--r--   include/asm-avr32/atomic.h   |  8
-rw-r--r--   include/asm-avr32/checksum.h | 40
-rw-r--r--   include/asm-avr32/device.h   |  7
-rw-r--r--   include/asm-avr32/io.h       | 33
-rw-r--r--   include/asm-avr32/unistd.h   |  3
5 files changed, 64 insertions(+), 27 deletions(-)
diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h
index e0b9c44c126..c40b6032c48 100644
--- a/include/asm-avr32/atomic.h
+++ b/include/asm-avr32/atomic.h
@@ -41,7 +41,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
" stcond %1, %0\n"
" brne 1b"
: "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "ir"(i)
+ : "m"(v->counter), "rKs21"(i)
: "cc");
return result;
@@ -58,7 +58,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
{
int result;
- if (__builtin_constant_p(i))
+ if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
result = atomic_sub_return(-i, v);
else
asm volatile(
@@ -101,7 +101,7 @@ static inline int atomic_sub_unless(atomic_t *v, int a, int u)
" mov %1, 1\n"
"1:"
: "=&r"(tmp), "=&r"(result), "=o"(v->counter)
- : "m"(v->counter), "ir"(a), "ir"(u)
+ : "m"(v->counter), "rKs21"(a), "rKs21"(u)
: "cc", "memory");
return result;
@@ -121,7 +121,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int tmp, result;
- if (__builtin_constant_p(a))
+ if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
result = atomic_sub_unless(v, -a, u);
else {
result = 0;
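
The "rKs21" constraint accepts either a register or a signed 21-bit constant
(-1048576..1048575), the immediate range the AVR32 "sub" instruction can
encode. Because atomic_add_return(i, v) delegates to atomic_sub_return(-i, v),
it is -i, not i, that must fit that range, which explains the asymmetric
bounds in the added guard. A minimal sketch of the equivalence (fits_ks21()
is illustrative, not part of the patch):

/* Illustrative only: "Ks21" matches a signed 21-bit immediate. */
static inline int fits_ks21(long imm)
{
	return imm >= -1048576 && imm <= 1048575;
}

/*
 * i in [-1048575, 1048576]  <=>  fits_ks21(-i),
 * so the guarded atomic_add_return(i, v) may safely
 * delegate to atomic_sub_return(-i, v).
 */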
diff --git a/include/asm-avr32/checksum.h b/include/asm-avr32/checksum.h
index 41b7af09edc..af9d53f0f5d 100644
--- a/include/asm-avr32/checksum.h
+++ b/include/asm-avr32/checksum.h
@@ -20,8 +20,7 @@
*
* it's best to have buff aligned on a 32-bit boundary
*/
-unsigned int csum_partial(const unsigned char * buff, int len,
- unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
@@ -30,8 +29,8 @@ unsigned int csum_partial(const unsigned char * buff, int len,
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
-unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
- int sum, int *src_err_ptr,
+__wsum csum_partial_copy_generic(const void *src, void *dst, int len,
+ __wsum sum, int *src_err_ptr,
int *dst_err_ptr);
/*
@@ -42,17 +41,17 @@ unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
* verify_area().
*/
static inline
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
- int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline
-unsigned int csum_partial_copy_from_user (const char __user *src, char *dst,
- int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
{
- return csum_partial_copy_generic((const char __force *)src, dst, len,
+ return csum_partial_copy_generic((const void __force *)src, dst, len,
sum, err_ptr, NULL);
}
@@ -60,8 +59,7 @@ unsigned int csum_partial_copy_from_user (const char __user *src, char *dst,
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
-static inline unsigned short ip_fast_csum(unsigned char *iph,
- unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum, tmp;
@@ -90,14 +88,14 @@ static inline unsigned short ip_fast_csum(unsigned char *iph,
: "=r"(sum), "=r"(iph), "=r"(ihl), "=r"(tmp)
: "1"(iph), "2"(ihl)
: "memory", "cc");
- return sum;
+ return (__force __sum16)sum;
}
/*
* Fold a partial checksum
*/
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
{
unsigned int tmp;
@@ -109,21 +107,20 @@ static inline unsigned int csum_fold(unsigned int sum)
: "=&r"(sum), "=&r"(tmp)
: "0"(sum));
- return ~sum;
+ return (__force __sum16)~sum;
}
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
- unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
- unsigned int sum)
+ __wsum sum)
{
asm(" add %0, %1\n"
" adc %0, %0, %2\n"
" adc %0, %0, %3\n"
" acr %0"
: "=r"(sum)
- : "r"(daddr), "r"(saddr), "r"(ntohs(len) | (proto << 16)),
+ : "r"(daddr), "r"(saddr), "r"(len + proto),
"0"(sum)
: "cc");
@@ -134,11 +131,10 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
- unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
- unsigned int sum)
+ __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
@@ -148,7 +144,7 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
* in icmp.c
*/
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
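
The __wsum/__sum16 typedefs are sparse-checked (__bitwise) types, so mixing
them with plain integers now triggers warnings instead of passing silently.
For reference, the fold that the AVR32 asm in csum_fold() performs can be
sketched in portable C (assuming the conventional semantics of the kernel's
generic lib/checksum.c, with types from <linux/types.h>): add the two 16-bit
halves of the 32-bit partial sum, absorb the carry, then complement.

static inline __sum16 csum_fold_sketch(__wsum csum)
{
	u32 sum = (__force u32)csum;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb a possible carry */
	return (__force __sum16)~sum;		/* one's-complement result */
}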
diff --git a/include/asm-avr32/device.h b/include/asm-avr32/device.h
new file mode 100644
index 00000000000..d8f9872b0e2
--- /dev/null
+++ b/include/asm-avr32/device.h
@@ -0,0 +1,7 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#include <asm-generic/device.h>
+
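
The new header just pulls in the generic version; at the time,
<asm-generic/device.h> provided nothing beyond an empty arch-private
struct embedded in struct device (shown here as an assumption about the
contemporary generic header, for context):

/* Assumed contents of the contemporary <asm-generic/device.h>: */
struct dev_archdata {
};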
diff --git a/include/asm-avr32/io.h b/include/asm-avr32/io.h
index 2fc8f111dce..eec47500fa6 100644
--- a/include/asm-avr32/io.h
+++ b/include/asm-avr32/io.h
@@ -76,6 +76,39 @@ static inline unsigned int readl(const volatile void __iomem *addr)
#define readsw(p, d, l) __raw_readsw((unsigned int)p, d, l)
#define readsl(p, d, l) __raw_readsl((unsigned int)p, d, l)
+
+/*
+ * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be
+ */
+#ifndef ioread8
+
+#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; })
+
+#define ioread16(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(p)); __v; })
+#define ioread16be(p) ({ unsigned int __v = be16_to_cpu(__raw_readw(p)); __v; })
+
+#define ioread32(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(p)); __v; })
+#define ioread32be(p) ({ unsigned int __v = be32_to_cpu(__raw_readl(p)); __v; })
+
+#define iowrite8(v,p) __raw_writeb(v, p)
+
+#define iowrite16(v,p) __raw_writew(cpu_to_le16(v), p)
+#define iowrite16be(v,p) __raw_writew(cpu_to_be16(v), p)
+
+#define iowrite32(v,p) __raw_writel(cpu_to_le32(v), p)
+#define iowrite32be(v,p) __raw_writel(cpu_to_be32(v), p)
+
+#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
+#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
+#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
+
+#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c)
+#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c)
+#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c)
+
+#endif
+
+
/*
* These two are only here because ALSA _thinks_ it needs them...
*/
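
In use, the plain ioread*/iowrite* accessors byte-swap to little-endian for
PCI-style peripherals, while the *be variants keep native big-endian order.
A hypothetical driver fragment (the regs pointer, offsets, and function
names are invented for illustration):

#include <linux/io.h>

static u16 read_status(void __iomem *regs)
{
	return ioread16(regs + 0x04);	/* little-endian register block */
}

static void write_ctrl(void __iomem *regs, u32 val)
{
	iowrite32be(val, regs + 0x08);	/* native big-endian block */
}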
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index a50e5004550..56ed1f9d348 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -280,9 +280,10 @@
#define __NR_sync_file_range 262
#define __NR_tee 263
#define __NR_vmsplice 264
+#define __NR_epoll_pwait 265
#ifdef __KERNEL__
-#define NR_syscalls 265
+#define NR_syscalls 266
#define __ARCH_WANT_IPC_PARSE_VERSION
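
The new slot wires up epoll_pwait(2), which behaves like epoll_wait(2) but
atomically applies a signal mask for the duration of the call. Until libc
grows a wrapper, it can be invoked through syscall(2); a hedged userspace
sketch follows (my_epoll_pwait() is a hypothetical wrapper, and note the
kernel expects its own sigset size, _NSIG/8 bytes, not sizeof(sigset_t)):

#include <sys/epoll.h>
#include <sys/syscall.h>
#include <signal.h>
#include <unistd.h>

#ifndef __NR_epoll_pwait
# define __NR_epoll_pwait 265	/* from the hunk above */
#endif

static int my_epoll_pwait(int epfd, struct epoll_event *events,
			  int maxevents, int timeout, const sigset_t *mask)
{
	/* _NSIG/8 is the sigset size the kernel checks against. */
	return syscall(__NR_epoll_pwait, epfd, events, maxevents,
		       timeout, mask, _NSIG / 8);
}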