From 9d40ee200a527ce08ab8c793ba8ae3e242edbb0e Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 7 Oct 2009 10:54:19 -0700 Subject: [IA64] Squeeze ticket locks back into 4 bytes. Linus pointed out that other people have spent large amounts of time and effort to optimize the layout of frequently used structures. Often these have embedded locks, and the assumption is that a lock takes 4 bytes. Linus also pointed out how to work with the limited options for atomic instructions on Itanium. Signed-off-by: Tony Luck --- arch/ia64/include/asm/spinlock.h | 45 ++++++++++++++++++++-------------- arch/ia64/include/asm/spinlock_types.h | 2 +- 2 files changed, 27 insertions(+), 20 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 30bb930e111..4fa502739d6 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -25,61 +25,68 @@ * by atomically noting the tail and incrementing it by one (thus adding * ourself to the queue and noting our position), then waiting until the head * becomes equal to the the initial value of the tail. + * The pad bits in the middle are used to prevent the next_ticket number + * overflowing into the now_serving number. * - * 63 32 31 0 + * 31 17 16 15 14 0 * +----------------------------------------------------+ - * | next_ticket_number | now_serving | + * | now_serving | padding | next_ticket | * +----------------------------------------------------+ */ -#define TICKET_SHIFT 32 +#define TICKET_SHIFT 17 +#define TICKET_BITS 15 +#define TICKET_MASK ((1 << TICKET_BITS) - 1) static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) { - int *p = (int *)&lock->lock, turn, now_serving; + int *p = (int *)&lock->lock, ticket, serve; - now_serving = *p; - turn = ia64_fetchadd(1, p+1, acq); + ticket = ia64_fetchadd(1, p, acq); - if (turn == now_serving) + if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) return; - do { + ia64_invala(); + + for (;;) { + asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory"); + + if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) + return; cpu_relax(); - } while (ACCESS_ONCE(*p) != turn); + } } static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) { - long tmp = ACCESS_ONCE(lock->lock), try; + int tmp = ACCESS_ONCE(lock->lock); - if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) { - try = tmp + (1L << TICKET_SHIFT); - - return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp; - } + if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK)) + return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp; return 0; } static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) { - int *p = (int *)&lock->lock; + unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; - (void)ia64_fetchadd(1, p, rel); + asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); + ACCESS_ONCE(*p) = (tmp + 2) & ~1; } static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); - return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1)); + return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); - return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1; + return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } static inline int __raw_spin_is_locked(raw_spinlock_t *lock) diff --git 
a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index b61d136d9bc..474e46f1ab4 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -6,7 +6,7 @@ #endif typedef struct { - volatile unsigned long lock; + volatile unsigned int lock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } -- cgit v1.2.3-70-g09d2 From 3b885787ea4112eaa80945999ea0901bf742707f Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Mon, 12 Oct 2009 13:26:31 -0700 Subject: net: Generalize socket rx gap / receive queue overflow cmsg Create a new socket level option to report number of queue overflows Recently I augmented the AF_PACKET protocol to report the number of frames lost on the socket receive queue between any two enqueued frames. This value was exported via a SOL_PACKET level cmsg. After I completed that work it was requested that this feature be generalized so that any datagram oriented socket could make use of this option. As such I've created this patch. It creates a new SOL_SOCKET level option called SO_RXQ_OVFL, which when enabled exports a SOL_SOCKET level cmsg that reports the number of times the sk_receive_queue overflowed between any two given frames. It also augments the AF_PACKET protocol to take advantage of this new feature (as it previously did not touch sk->sk_drops, which this patch uses to record the overflow count). Tested successfully by me. Notes: 1) Unlike my previous patch, this patch simply records the sk_drops value, which is not a number of drops between packets, but rather a total number of drops. Deltas must be computed in user space. 2) While this patch currently works with datagram oriented protocols, it will also be accepted by non-datagram oriented protocols. I'm not sure if that's agreeable to everyone, but my argument in favor of doing so is that, for those protocols which aren't applicable to this option, sk_drops will always be zero, and reporting no drops on a receive queue that isn't used for those non-participating protocols seems reasonable to me. This also saves us having to code in a per-protocol opt-in mechanism. 3) This applies cleanly to net-next assuming that commit 977750076d98c7ff6cbda51858bb5a5894a9d9ab (my af packet cmsg patch) is reverted Signed-off-by: Neil Horman Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- arch/alpha/include/asm/socket.h | 2 ++ arch/arm/include/asm/socket.h | 2 ++ arch/avr32/include/asm/socket.h | 2 ++ arch/cris/include/asm/socket.h | 2 ++ arch/frv/include/asm/socket.h | 2 ++ arch/h8300/include/asm/socket.h | 2 ++ arch/ia64/include/asm/socket.h | 2 ++ arch/m32r/include/asm/socket.h | 2 ++ arch/m68k/include/asm/socket.h | 2 ++ arch/mips/include/asm/socket.h | 2 ++ arch/mn10300/include/asm/socket.h | 2 ++ arch/parisc/include/asm/socket.h | 2 ++ arch/powerpc/include/asm/socket.h | 2 ++ arch/s390/include/asm/socket.h | 2 ++ arch/sparc/include/asm/socket.h | 2 ++ arch/xtensa/include/asm/socket.h | 2 ++ include/asm-generic/socket.h | 1 + include/linux/skbuff.h | 6 ++++-- include/net/sock.h | 3 +++ net/atm/common.c | 2 +- net/bluetooth/af_bluetooth.c | 2 +- net/bluetooth/rfcomm/sock.c | 2 +- net/can/bcm.c | 2 +- net/can/raw.c | 2 +- net/core/sock.c | 17 ++++++++++++++++- net/ieee802154/dgram.c | 2 +- net/ieee802154/raw.c | 2 +- net/ipv4/raw.c | 2 +- net/ipv4/udp.c | 2 +- net/ipv6/raw.c | 2 +- net/ipv6/udp.c | 2 +- net/key/af_key.c | 2 +- net/packet/af_packet.c | 7 +++---- net/rxrpc/ar-recvmsg.c | 2 +- net/sctp/socket.c | 2 +- net/socket.c | 15 +++++++++++++++ 36 files changed, 88 insertions(+), 21 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h index 26773e3246e..06edfefc337 100644 --- a/arch/alpha/include/asm/socket.h +++ b/arch/alpha/include/asm/socket.h @@ -67,6 +67,8 @@ #define SO_TIMESTAMPING 37 #define SCM_TIMESTAMPING SO_TIMESTAMPING +#define SO_RXQ_OVFL 40 + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ diff --git a/arch/arm/include/asm/socket.h b/arch/arm/include/asm/socket.h index 92ac61d294f..90ffd04b8e7 100644 --- a/arch/arm/include/asm/socket.h +++ b/arch/arm/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/avr32/include/asm/socket.h b/arch/avr32/include/asm/socket.h index fe863f9794d..c8d1fae4947 100644 --- a/arch/avr32/include/asm/socket.h +++ b/arch/avr32/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* __ASM_AVR32_SOCKET_H */ diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h index 45ec49bdb7b..1a4a61909ca 100644 --- a/arch/cris/include/asm/socket.h +++ b/arch/cris/include/asm/socket.h @@ -62,6 +62,8 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/frv/include/asm/socket.h b/arch/frv/include/asm/socket.h index 2dea726095c..a6b26880c1e 100644 --- a/arch/frv/include/asm/socket.h +++ b/arch/frv/include/asm/socket.h @@ -60,5 +60,7 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h index 1547f01c8e2..04c0f4596eb 100644 --- a/arch/h8300/include/asm/socket.h +++ b/arch/h8300/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h index 0b0d5ff062e..51427eaa51b 100644 --- a/arch/ia64/include/asm/socket.h +++ b/arch/ia64/include/asm/socket.h @@ -69,4 +69,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_IA64_SOCKET_H 
*/ diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h index 3390a864f22..469787c3009 100644 --- a/arch/m32r/include/asm/socket.h +++ b/arch/m32r/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h index eee01cce921..9bf49c87d95 100644 --- a/arch/m68k/include/asm/socket.h +++ b/arch/m68k/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index ae05accd9fe..9de5190f248 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h @@ -80,6 +80,8 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */ #define SO_TIMESTAMPING 37 #define SCM_TIMESTAMPING SO_TIMESTAMPING +#define SO_RXQ_OVFL 40 + #ifdef __KERNEL__ /** sock_type - Socket types diff --git a/arch/mn10300/include/asm/socket.h b/arch/mn10300/include/asm/socket.h index 4df75af29d7..4e60c428128 100644 --- a/arch/mn10300/include/asm/socket.h +++ b/arch/mn10300/include/asm/socket.h @@ -60,4 +60,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h index 960b1e5d8e1..225b7d6a1a0 100644 --- a/arch/parisc/include/asm/socket.h +++ b/arch/parisc/include/asm/socket.h @@ -59,6 +59,8 @@ #define SO_TIMESTAMPING 0x4020 #define SCM_TIMESTAMPING SO_TIMESTAMPING +#define SO_RXQ_OVFL 0x4021 + /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. 
*/ diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h index 3ab8b3e6feb..866f7606da6 100644 --- a/arch/powerpc/include/asm/socket.h +++ b/arch/powerpc/include/asm/socket.h @@ -67,4 +67,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_POWERPC_SOCKET_H */ diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h index e42df89a0b8..fdff1e995c7 100644 --- a/arch/s390/include/asm/socket.h +++ b/arch/s390/include/asm/socket.h @@ -68,4 +68,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/include/asm/socket.h b/arch/sparc/include/asm/socket.h index 3a5ae3d1208..9d3fefcff2f 100644 --- a/arch/sparc/include/asm/socket.h +++ b/arch/sparc/include/asm/socket.h @@ -56,6 +56,8 @@ #define SO_TIMESTAMPING 0x0023 #define SCM_TIMESTAMPING SO_TIMESTAMPING +#define SO_RXQ_OVFL 0x0024 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h index beb3a6bdb61..cbdf2ffaacf 100644 --- a/arch/xtensa/include/asm/socket.h +++ b/arch/xtensa/include/asm/socket.h @@ -71,4 +71,6 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 + #endif /* _XTENSA_SOCKET_H */ diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h index 538991cef6f..9a6115e7cf6 100644 --- a/include/asm-generic/socket.h +++ b/include/asm-generic/socket.h @@ -63,4 +63,5 @@ #define SO_PROTOCOL 38 #define SO_DOMAIN 39 +#define SO_RXQ_OVFL 40 #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index df7b23ac66e..8c866b5cb97 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -389,8 +389,10 @@ struct sk_buff { #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; #endif - - __u32 mark; + union { + __u32 mark; + __u32 dropcount; + }; __u16 vlan_tci; diff --git a/include/net/sock.h b/include/net/sock.h index 98398bdec57..10669b01eea 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -505,6 +505,7 @@ enum sock_flags { SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */ SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */ SOCK_FASYNC, /* fasync() active */ + SOCK_RXQ_OVFL, }; static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) @@ -1493,6 +1494,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) sk->sk_stamp = kt; } +extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); + /** * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @msg: outgoing packet diff --git a/net/atm/common.c b/net/atm/common.c index 950bd16d238..d61e051e0a3 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -496,7 +496,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (error) return error; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); atm_return(vcc, skb->truesize); skb_free_datagram(sk, skb); diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 1f6e49c1cde..399e59c9c6c 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -257,7 +257,7 @@ int 
bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, skb_reset_transport_header(skb); err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err == 0) - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); skb_free_datagram(sk, skb); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c7078650385..d3bfc1b0afb 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -703,7 +703,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, copied += chunk; size -= chunk; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (!(flags & MSG_PEEK)) { atomic_sub(chunk, &sk->sk_rmem_alloc); diff --git a/net/can/bcm.c b/net/can/bcm.c index 597da4f8f88..2f47039c79d 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1534,7 +1534,7 @@ static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, return err; } - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { msg->msg_namelen = sizeof(struct sockaddr_can); diff --git a/net/can/raw.c b/net/can/raw.c index b5e897922d3..962fc9f1d0c 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -702,7 +702,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, return err; } - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { msg->msg_namelen = sizeof(struct sockaddr_can); diff --git a/net/core/sock.c b/net/core/sock.c index 7626b6aacd6..43ca2c99539 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -276,6 +276,8 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err = 0; int skb_len; + unsigned long flags; + struct sk_buff_head *list = &sk->sk_receive_queue; /* Cast sk->rcvbuf to unsigned... 
It's pointless, but reduces number of warnings when compiling with -W --ANK @@ -305,7 +307,10 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) */ skb_len = skb->len; - skb_queue_tail(&sk->sk_receive_queue, skb); + spin_lock_irqsave(&list->lock, flags); + skb->dropcount = atomic_read(&sk->sk_drops); + __skb_queue_tail(list, skb); + spin_unlock_irqrestore(&list->lock, flags); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb_len); @@ -702,6 +707,12 @@ set_rcvbuf: /* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */ + case SO_RXQ_OVFL: + if (valbool) + sock_set_flag(sk, SOCK_RXQ_OVFL); + else + sock_reset_flag(sk, SOCK_RXQ_OVFL); + break; default: ret = -ENOPROTOOPT; break; @@ -901,6 +912,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_mark; break; + case SO_RXQ_OVFL: + v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); + break; + default: return -ENOPROTOOPT; } diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index a413b1bf446..25ad956a39d 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -303,7 +303,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, if (err) goto done; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (flags & MSG_TRUNC) copied = skb->len; diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 30e74eee07d..769c8d138fc 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c @@ -191,7 +191,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (err) goto done; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (flags & MSG_TRUNC) copied = skb->len; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 757c9171e7c..f18172b0761 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -682,7 +682,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (err) goto done; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 194bcdc6d9f..71e5353b30c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -955,7 +955,7 @@ try_again: UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 4f24570b086..d8375bc7f2d 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -497,7 +497,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, sin6->sin6_scope_id = IP6CB(skb)->iif; } - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index ff778c172ef..1f8e2afa449 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -252,7 +252,7 @@ try_again: UDP_MIB_INDATAGRAMS, is_udplite); } - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (msg->msg_name) { diff --git a/net/key/af_key.c b/net/key/af_key.c index c078ae6e975..472f6594184 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3606,7 +3606,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, if (err) goto out_free; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); err = (flags & MSG_TRUNC) ? 
skb->len : copied; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index f87ed4803c1..bf3a2954cd4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -627,15 +627,14 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, spin_lock(&sk->sk_receive_queue.lock); po->stats.tp_packets++; + skb->dropcount = atomic_read(&sk->sk_drops); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock(&sk->sk_receive_queue.lock); sk->sk_data_ready(sk, skb->len); return 0; drop_n_acct: - spin_lock(&sk->sk_receive_queue.lock); - po->stats.tp_drops++; - spin_unlock(&sk->sk_receive_queue.lock); + po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); drop_n_restore: if (skb_head != skb->data && skb_shared(skb)) { @@ -1478,7 +1477,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, if (err) goto out_free; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index a39bf97f883..60c2b94e6b5 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -146,7 +146,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, memcpy(msg->msg_name, &call->conn->trans->peer->srx, sizeof(call->conn->trans->peer->srx)); - sock_recv_timestamp(msg, &rx->sk, skb); + sock_recv_ts_and_drops(msg, &rx->sk, skb); } /* receive the message */ diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c8d05758661..0970e92c6ac 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1958,7 +1958,7 @@ SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, if (err) goto out_free; - sock_recv_timestamp(msg, sk, skb); + sock_recv_ts_and_drops(msg, sk, skb); if (sctp_ulpevent_is_notification(event)) { msg->msg_flags |= MSG_NOTIFICATION; sp->pf->event_msgname(event, msg->msg_name, addr_len); diff --git a/net/socket.c b/net/socket.c index 954f3381cc8..80793569384 100644 --- a/net/socket.c +++ b/net/socket.c @@ -668,6 +668,21 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, EXPORT_SYMBOL_GPL(__sock_recv_timestamp); +inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) +{ + if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount) + put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL, + sizeof(__u32), &skb->dropcount); +} + +void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ + sock_recv_timestamp(msg, sk, skb); + sock_recv_drops(msg, sk, skb); +} +EXPORT_SYMBOL_GPL(sock_recv_ts_and_drops); + static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { -- cgit v1.2.3-70-g09d2 From 1502f08edc040b6ba4b986454416564088995e79 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 12 Oct 2009 09:51:41 -0700 Subject: [IA64] SMT friendly version of spin_unlock_wait() We can be kinder to SMT systems in spin_unlock_wait. 
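To make the ticket comparison in these ia64 patches easier to follow, here is a minimal C sketch of the test that __ticket_spin_is_locked() and the new unlock_wait loop both build on, assuming the 4-byte layout from the earlier patch (next_ticket in bits 0-14, now_serving in bits 17-31). The function names below are illustrative only; the kernel versions use ia64 fetchadd and ld4.c.nc speculative loads rather than plain loads.

/* Minimal sketch, not kernel code: models the lock word layout used above. */
#include <stdint.h>

#define TICKET_SHIFT 17
#define TICKET_BITS  15
#define TICKET_MASK  ((1 << TICKET_BITS) - 1)

/* The lock is held while now_serving != next_ticket. */
static int ticket_is_locked(uint32_t lock_word)
{
	return (((lock_word >> TICKET_SHIFT) ^ lock_word) & TICKET_MASK) != 0;
}

/* spin_unlock_wait() semantics: wait until any current holder releases the lock. */
static void ticket_unlock_wait(const volatile uint32_t *lock_word)
{
	while (ticket_is_locked(*lock_word))
		; /* cpu_relax() in the kernel */
}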
Signed-off-by: Tony Luck --- arch/ia64/include/asm/spinlock.h | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 4fa502739d6..239ecdc9516 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -75,6 +75,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } +static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +{ + int *p = (int *)&lock->lock, ticket; + + ia64_invala(); + + for (;;) { + asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory"); + if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK)) + return; + cpu_relax(); + } +} + static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); @@ -123,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) - cpu_relax(); + __ticket_spin_unlock_wait(lock); } #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -- cgit v1.2.3-70-g09d2 From 67ca0e5fa884f483d655ef19f22ad8509138dd3e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 14 Oct 2009 13:11:30 -0700 Subject: ia64: Fix up the syscall table for recvmmsg Reported-by: "Tony Luck" Signed-off-by: Arnaldo Carvalho de Melo Acked-by: Tony Luck Signed-off-by: David S. Miller --- arch/ia64/include/asm/unistd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 5a5347f5c4e..9c72e36c528 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -311,11 +311,12 @@ #define __NR_preadv 1319 #define __NR_pwritev 1320 #define __NR_rt_tgsigqueueinfo 1321 +#define __NR_rt_recvmmsg 1322 #ifdef __KERNEL__ -#define NR_syscalls 298 /* length of syscall table */ +#define NR_syscalls 299 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about -- cgit v1.2.3-70-g09d2 From b94b08081fcecf83fa690d6c5664f6316fe72208 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 14 Oct 2009 15:10:03 -0700 Subject: [IA64] fix percpu warnings Fix percpu types warning in ia64/sn: arch/ia64/sn/kernel/setup.c:74: error: conflicting types for '__pcpu_scope___sn_cnodeid_to_nasid' arch/ia64/include/asm/sn/arch.h:74: error: previous declaration of '__pcpu_scope___sn_cnodeid_to_nasid' was here Signed-off-by: Randy Dunlap Cc: Jes Sorensen Acked-by: Tejun Heo Signed-off-by: Tony Luck --- arch/ia64/include/asm/sn/arch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h index 7caa1f44cd9..f5f493b0c07 100644 --- a/arch/ia64/include/asm/sn/arch.h +++ b/arch/ia64/include/asm/sn/arch.h @@ -71,7 +71,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); * Compact node ID to nasid mappings kept in the per-cpu data areas of each * cpu. 
*/ -DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); +DECLARE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) -- cgit v1.2.3-70-g09d2 From e8c93fc7b7221b6ac7e7ddbd0e21e205bf9e801a Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Mon, 2 Nov 2009 09:23:08 -0800 Subject: Revert "[IA64] fix percpu warnings" This reverts commit b94b08081fcecf83fa690d6c5664f6316fe72208. genksyms currently cannot handle complicated types for exported percpu variables. Drop this patch for now as it prevents a module from being loaded on sn2 systems: xpc: no symbol version for per_cpu____sn_cnodeid_to_nasid xpc: Unknown symbol per_cpu____sn_cnodeid_to_nasid Signed-off-by: Tony Luck --- arch/ia64/include/asm/sn/arch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h index f5f493b0c07..7caa1f44cd9 100644 --- a/arch/ia64/include/asm/sn/arch.h +++ b/arch/ia64/include/asm/sn/arch.h @@ -71,7 +71,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); * Compact node ID to nasid mappings kept in the per-cpu data areas of each * cpu. */ -DECLARE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid); +DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); #define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) -- cgit v1.2.3-70-g09d2 From 5b5d94487d934be6b0aa966c9acbdf15b07ef627 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 6 Nov 2009 11:11:43 +0900 Subject: ia64/xen: compilation fix This patch fixes the following compilation error, introduced by PCI-related features. The change set of 5dd1af9f84c79bedd589db89e71ca733f3bf0ebd moves some xen-related definitions from the arch header file (x86/include/asm/xen/hypervisor.h) to the common header file (include/xen/xen.h). So ia64/xen also follows it.
In file included from linux-next/include/xen/grant_table.h:41, from linux-next/drivers/block/xen-blkfront.c:48: linux-next/arch/ia64/include/asm/xen/hypervisor.h:43: error: nested redefinition of 'enum xen_domain_type' linux-next/arch/ia64/include/asm/xen/hypervisor.h:43: error: redeclaration of 'enum xen_domain_type' linux-next/arch/ia64/include/asm/xen/hypervisor.h:44: error: redeclaration of enumerator 'XEN_NATIVE' linux-next/include/xen/xen.h:5: error: previous definition of 'XEN_NATIVE' was here linux-next/arch/ia64/include/asm/xen/hypervisor.h:45: error: redeclaration of enumerator 'XEN_PV_DOMAIN' linux-next/include/xen/xen.h:6: error: previous definition of 'XEN_PV_DOMAIN' was here linux-next/arch/ia64/include/asm/xen/hypervisor.h:46: error: redeclaration of enumerator 'XEN_HVM_DOMAIN' linux-next/include/xen/xen.h:7: error: previous definition of 'XEN_HVM_DOMAIN' was here Signed-off-by: Isaku Yamahata Signed-off-by: Jesse Barnes --- arch/ia64/include/asm/xen/hypervisor.h | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h index 88afb54501e..67455c2ed2b 100644 --- a/arch/ia64/include/asm/xen/hypervisor.h +++ b/arch/ia64/include/asm/xen/hypervisor.h @@ -37,35 +37,9 @@ #include #include /* to compile feature.c */ #include /* to comiple xen-netfront.c */ +#include #include -/* xen_domain_type is set before executing any C code by early_xen_setup */ -enum xen_domain_type { - XEN_NATIVE, /* running on bare hardware */ - XEN_PV_DOMAIN, /* running in a PV domain */ - XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/ -}; - -#ifdef CONFIG_XEN -extern enum xen_domain_type xen_domain_type; -#else -#define xen_domain_type XEN_NATIVE -#endif - -#define xen_domain() (xen_domain_type != XEN_NATIVE) -#define xen_pv_domain() (xen_domain() && \ - xen_domain_type == XEN_PV_DOMAIN) -#define xen_hvm_domain() (xen_domain() && \ - xen_domain_type == XEN_HVM_DOMAIN) - -#ifdef CONFIG_XEN_DOM0 -#define xen_initial_domain() (xen_pv_domain() && \ - (xen_start_info->flags & SIF_INITDOMAIN)) -#else -#define xen_initial_domain() (0) -#endif - - #ifdef CONFIG_XEN extern struct shared_info *HYPERVISOR_shared_info; extern struct start_info *xen_start_info; -- cgit v1.2.3-70-g09d2 From 6959450e567c1f17d3ce8489099fc56c3721d577 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Sat, 14 Nov 2009 20:46:38 +0900 Subject: swiotlb: Remove duplicate swiotlb_force extern declarations Signed-off-by: FUJITA Tomonori Cc: tony.luck@intel.com LKML-Reference: <1258199198-16657-4-git-send-email-fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Ingo Molnar --- arch/ia64/include/asm/swiotlb.h | 2 -- arch/x86/include/asm/swiotlb.h | 4 ---- include/linux/swiotlb.h | 2 ++ 3 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h index dcbaea7ce12..f0acde68aae 100644 --- a/arch/ia64/include/asm/swiotlb.h +++ b/arch/ia64/include/asm/swiotlb.h @@ -4,8 +4,6 @@ #include #include -extern int swiotlb_force; - #ifdef CONFIG_SWIOTLB extern int swiotlb; extern void pci_swiotlb_init(void); diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 940f13a213f..87ffcb12a1b 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h @@ -3,10 +3,6 @@ #include -/* SWIOTLB interface */ - -extern int swiotlb_force; - #ifdef CONFIG_SWIOTLB extern int swiotlb; 
extern int pci_swiotlb_init(void); diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index eb9bdb4d485..febedcf67c7 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -7,6 +7,8 @@ struct device; struct dma_attrs; struct scatterlist; +extern int swiotlb_force; + /* * Maximum allowable number of contiguous slabs to map, * must be a power of 2. What is the appropriate value ? -- cgit v1.2.3-70-g09d2 From 2d4dc890b5c8fabd818a8586607e6843c4375e62 Mon Sep 17 00:00:00 2001 From: Ilya Loginov Date: Thu, 26 Nov 2009 09:16:19 +0100 Subject: block: add helpers to run flush_dcache_page() against a bio and a request's pages The mtdblock driver doesn't call flush_dcache_page() for pages in a request. This causes problems on architectures where the icache doesn't fill from the dcache, or which have dcache aliases. The patch fixes this. The ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE symbol was introduced to avoid pointless empty cache-thrashing loops on architectures for which flush_dcache_page() is a no-op. Every architecture now defines this symbol; the new helpers flush pages on architectures where ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 1 and do nothing otherwise. See the "fix mtd_blkdevs problem with caches on some architectures" discussion on LKML for more information. Signed-off-by: Ilya Loginov Cc: Ingo Molnar Cc: David Woodhouse Cc: Peter Horton Cc: "Ed L. Cashin" Signed-off-by: Jens Axboe --- arch/alpha/include/asm/cacheflush.h | 1 + arch/arm/include/asm/cacheflush.h | 1 + arch/avr32/include/asm/cacheflush.h | 1 + arch/blackfin/include/asm/cacheflush.h | 2 ++ arch/cris/include/asm/cacheflush.h | 1 + arch/frv/include/asm/cacheflush.h | 1 + arch/h8300/include/asm/cacheflush.h | 1 + arch/ia64/include/asm/cacheflush.h | 1 + arch/m32r/include/asm/cacheflush.h | 3 +++ arch/m68k/include/asm/cacheflush_mm.h | 1 + arch/m68k/include/asm/cacheflush_no.h | 1 + arch/microblaze/include/asm/cacheflush.h | 1 + arch/mips/include/asm/cacheflush.h | 1 + arch/mn10300/include/asm/cacheflush.h | 1 + arch/parisc/include/asm/cacheflush.h | 1 + arch/powerpc/include/asm/cacheflush.h | 1 + arch/s390/include/asm/cacheflush.h | 1 + arch/score/include/asm/cacheflush.h | 1 + arch/sh/include/asm/cacheflush.h | 1 + arch/sparc/include/asm/cacheflush_32.h | 1 + arch/sparc/include/asm/cacheflush_64.h | 1 + arch/x86/include/asm/cacheflush.h | 1 + arch/xtensa/include/asm/cacheflush.h | 1 + block/blk-core.c | 19 +++++++++++++++++++ drivers/mtd/mtd_blkdevs.c | 2 ++ fs/bio.c | 12 ++++++++++++ include/asm-generic/cacheflush.h | 1 + include/linux/bio.h | 12 ++++++++++++ include/linux/blkdev.h | 11 +++++++++++ 29 files changed, 83 insertions(+) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/cacheflush.h b/arch/alpha/include/asm/cacheflush.h index b686cc7fc44..01d71e1c8a9 100644 --- a/arch/alpha/include/asm/cacheflush.h +++ b/arch/alpha/include/asm/cacheflush.h @@ -9,6 +9,7 @@ #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index fd03fb63a33..247b7b0adc2 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -408,6 +408,7 @@ extern void flush_ptrace_access(struct vm_area_struct *vma,
struct page *page, * about to change to user space. This is the same method as used on SPARC64. * See update_mmu_cache for the user space part. */ +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *); extern void __flush_dcache_page(struct address_space *mapping, struct page *page); diff --git a/arch/avr32/include/asm/cacheflush.h b/arch/avr32/include/asm/cacheflush.h index 670674749b2..96e53820bbb 100644 --- a/arch/avr32/include/asm/cacheflush.h +++ b/arch/avr32/include/asm/cacheflush.h @@ -107,6 +107,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page); * do something here, but only for certain configurations. No such * configurations exist at this time. */ +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(page) do { } while (0) #define flush_dcache_mmap_unlock(page) do { } while (0) diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h index af03a36c7a4..417eaac7fe9 100644 --- a/arch/blackfin/include/asm/cacheflush.h +++ b/arch/blackfin/include/asm/cacheflush.h @@ -68,9 +68,11 @@ do { memcpy(dst, src, len); \ #endif #if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK) # define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end)) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 # define flush_dcache_page(page) blackfin_dflush_page(page_address(page)) #else # define flush_dcache_range(start,end) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 # define flush_dcache_page(page) do { } while (0) #endif diff --git a/arch/cris/include/asm/cacheflush.h b/arch/cris/include/asm/cacheflush.h index cf60e3f69f8..36795bca605 100644 --- a/arch/cris/include/asm/cacheflush.h +++ b/arch/cris/include/asm/cacheflush.h @@ -12,6 +12,7 @@ #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/frv/include/asm/cacheflush.h b/arch/frv/include/asm/cacheflush.h index 432a69e7f3d..edbac54ae01 100644 --- a/arch/frv/include/asm/cacheflush.h +++ b/arch/frv/include/asm/cacheflush.h @@ -47,6 +47,7 @@ static inline void __flush_cache_all(void) } /* dcache/icache coherency... 
*/ +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #ifdef CONFIG_MMU extern void flush_dcache_page(struct page *page); #else diff --git a/arch/h8300/include/asm/cacheflush.h b/arch/h8300/include/asm/cacheflush.h index 5ffdca217b9..4cf2df20c1c 100644 --- a/arch/h8300/include/asm/cacheflush.h +++ b/arch/h8300/include/asm/cacheflush.h @@ -15,6 +15,7 @@ #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma,a,b) #define flush_cache_page(vma,p,pfn) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) #define flush_dcache_mmap_lock(mapping) #define flush_dcache_mmap_unlock(mapping) diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h index c8ce2719fee..429eefc93ee 100644 --- a/arch/ia64/include/asm/cacheflush.h +++ b/arch/ia64/include/asm/cacheflush.h @@ -25,6 +25,7 @@ #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define flush_dcache_page(page) \ do { \ clear_bit(PG_arch_1, &(page)->flags); \ diff --git a/arch/m32r/include/asm/cacheflush.h b/arch/m32r/include/asm/cacheflush.h index 78587c95814..8e8e04516c3 100644 --- a/arch/m32r/include/asm/cacheflush.h +++ b/arch/m32r/include/asm/cacheflush.h @@ -12,6 +12,7 @@ extern void _flush_cache_copyback_all(void); #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) @@ -33,6 +34,7 @@ extern void smp_flush_cache_all(void); #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) @@ -46,6 +48,7 @@ extern void smp_flush_cache_all(void); #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h index 16bf375fdbe..73de7c89d8e 100644 --- a/arch/m68k/include/asm/cacheflush_mm.h +++ b/arch/m68k/include/asm/cacheflush_mm.h @@ -128,6 +128,7 @@ static inline void __flush_page_to_ram(void *vaddr) } } +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define flush_dcache_page(page) __flush_page_to_ram(page_address(page)) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h index c65f00a9455..89f195656be 100644 --- a/arch/m68k/include/asm/cacheflush_no.h +++ b/arch/m68k/include/asm/cacheflush_no.h @@ -12,6 +12,7 @@ #define flush_cache_range(vma, start, end) __flush_cache_all() #define flush_cache_page(vma, vmaddr) do { } while (0) #define flush_dcache_range(start,len) __flush_cache_all() +#define 
ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index f989d6aad64..088076e657b 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h @@ -37,6 +37,7 @@ #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) #define flush_dcache_range(start, end) __invalidate_dcache_range(start, end) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index 03b1d69b142..40bb9fde205 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -38,6 +38,7 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma, extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn); extern void __flush_dcache_page(struct page *page); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 static inline void flush_dcache_page(struct page *page) { if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h index 1a55d61f0d0..29e692f7f03 100644 --- a/arch/mn10300/include/asm/cacheflush.h +++ b/arch/mn10300/include/asm/cacheflush.h @@ -26,6 +26,7 @@ #define flush_cache_page(vma, vmaddr, pfn) do {} while (0) #define flush_cache_vmap(start, end) do {} while (0) #define flush_cache_vunmap(start, end) do {} while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do {} while (0) #define flush_dcache_mmap_lock(mapping) do {} while (0) #define flush_dcache_mmap_unlock(mapping) do {} while (0) diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 724395143f2..7a73b615c23 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -42,6 +42,7 @@ void flush_cache_mm(struct mm_struct *mm); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) \ diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index ba667a383b8..ab9e402518e 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -25,6 +25,7 @@ #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h index 49d5af916d0..405cc97c624 100644 --- a/arch/s390/include/asm/cacheflush.h +++ b/arch/s390/include/asm/cacheflush.h @@ -10,6 +10,7 @@ #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while 
(0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/score/include/asm/cacheflush.h b/arch/score/include/asm/cacheflush.h index 07cc8fc457c..caaba24036e 100644 --- a/arch/score/include/asm/cacheflush.h +++ b/arch/score/include/asm/cacheflush.h @@ -16,6 +16,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_dcache_range(unsigned long start, unsigned long end); #define flush_cache_dup_mm(mm) do {} while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do {} while (0) #define flush_dcache_mmap_lock(mapping) do {} while (0) #define flush_dcache_mmap_unlock(mapping) do {} while (0) diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index c29918f3c81..dda96eb3e7c 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -42,6 +42,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); extern void flush_icache_range(unsigned long start, unsigned long end); extern void flush_icache_page(struct vm_area_struct *vma, diff --git a/arch/sparc/include/asm/cacheflush_32.h b/arch/sparc/include/asm/cacheflush_32.h index 68ac1091027..2e468773f25 100644 --- a/arch/sparc/include/asm/cacheflush_32.h +++ b/arch/sparc/include/asm/cacheflush_32.h @@ -75,6 +75,7 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long) extern void sparc_flush_page_to_ram(struct page *page); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 #define flush_dcache_page(page) sparc_flush_page_to_ram(page) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h index c43321729b3..b95384033e8 100644 --- a/arch/sparc/include/asm/cacheflush_64.h +++ b/arch/sparc/include/asm/cacheflush_64.h @@ -37,6 +37,7 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page); #endif extern void __flush_dcache_range(unsigned long start, unsigned long end); +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page *page); #define flush_icache_page(vma, pg) do { } while(0) diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h index b54f6afe7ec..9076add593a 100644 --- a/arch/x86/include/asm/cacheflush.h +++ b/arch/x86/include/asm/cacheflush.h @@ -12,6 +12,7 @@ static inline void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) { } +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 static inline void flush_dcache_page(struct page *page) { } static inline void flush_dcache_mmap_lock(struct address_space *mapping) { } static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { } diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index b7b8fbe47c7..a508f2f73bd 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -101,6 +101,7 @@ static inline void __invalidate_icache_page_alias(unsigned long virt, #define flush_cache_vmap(start,end) 
flush_cache_all() #define flush_cache_vunmap(start,end) flush_cache_all() +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 extern void flush_dcache_page(struct page*); extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long); diff --git a/block/blk-core.c b/block/blk-core.c index 71da5111120..718897e6d37 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2358,6 +2358,25 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->rq_disk = bio->bi_bdev->bd_disk; } +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +/** + * rq_flush_dcache_pages - Helper function to flush all pages in a request + * @rq: the request to be flushed + * + * Description: + * Flush all pages in @rq. + */ +void rq_flush_dcache_pages(struct request *rq) +{ + struct req_iterator iter; + struct bio_vec *bvec; + + rq_for_each_segment(bvec, rq, iter) + flush_dcache_page(bvec->bv_page); +} +EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); +#endif + /** * blk_lld_busy - Check if underlying low-level drivers of a device are busy * @q : the queue of the device being checked diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 8ca17a3e96e..64e2b379a35 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -59,12 +59,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->readsect(dev, block, buf)) return -EIO; + rq_flush_dcache_pages(req); return 0; case WRITE: if (!tr->writesect) return -EIO; + rq_flush_dcache_pages(req); for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->writesect(dev, block, buf)) return -EIO; diff --git a/fs/bio.c b/fs/bio.c index 12da5db8682..e23a63f4f7d 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1393,6 +1393,18 @@ void bio_check_pages_dirty(struct bio *bio) } } +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +void bio_flush_dcache_pages(struct bio *bi) +{ + int i; + struct bio_vec *bvec; + + bio_for_each_segment(bvec, bi, i) + flush_dcache_page(bvec->bv_page); +} +EXPORT_SYMBOL(bio_flush_dcache_pages); +#endif + /** * bio_endio - end I/O on a bio * @bio: bio diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index ba4ec39a113..57b5c3c82e8 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -13,6 +13,7 @@ #define flush_cache_dup_mm(mm) do { } while (0) #define flush_cache_range(vma, start, end) do { } while (0) #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define flush_dcache_mmap_unlock(mapping) do { } while (0) diff --git a/include/linux/bio.h b/include/linux/bio.h index 474792b825d..7fc5606e6ea 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -391,6 +391,18 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, gfp_t, int); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void bio_flush_dcache_pages(struct bio *bi); +#else +static inline void bio_flush_dcache_pages(struct bio *bi) +{ +} +#endif + extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *, unsigned long, unsigned int, 
int, gfp_t); extern struct bio *bio_copy_user_iov(struct request_queue *, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1cc02972fbe..e727f6c44c4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -752,6 +752,17 @@ struct req_iterator { #define rq_iter_last(rq, _iter) \ (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1) +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" +#endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +extern void rq_flush_dcache_pages(struct request *rq); +#else +static inline void rq_flush_dcache_pages(struct request *rq) +{ +} +#endif + extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); extern void register_disk(struct gendisk *dev); -- cgit v1.2.3-70-g09d2 From 3e71f88bc90792a187703860cf22fbed7c12cbd9 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Aug 2009 11:54:21 +0300 Subject: KVM: Maintain back mapping from irqchip/pin to gsi Maintain back mapping from irqchip/pin to gsi to speedup interrupt acknowledgment notifications. [avi: build fix on non-x86/ia64] Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm.h | 1 + arch/x86/include/asm/kvm.h | 1 + include/linux/kvm_host.h | 9 +++++++++ virt/kvm/irq_comm.c | 31 ++++++++++++++----------------- 4 files changed, 25 insertions(+), 17 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h index 18a7e49abbc..bc90c75adf6 100644 --- a/arch/ia64/include/asm/kvm.h +++ b/arch/ia64/include/asm/kvm.h @@ -60,6 +60,7 @@ struct kvm_ioapic_state { #define KVM_IRQCHIP_PIC_MASTER 0 #define KVM_IRQCHIP_PIC_SLAVE 1 #define KVM_IRQCHIP_IOAPIC 2 +#define KVM_NR_IRQCHIPS 3 #define KVM_CONTEXT_SIZE 8*1024 diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h index 4a5fe914dc5..f02e87a5206 100644 --- a/arch/x86/include/asm/kvm.h +++ b/arch/x86/include/asm/kvm.h @@ -79,6 +79,7 @@ struct kvm_ioapic_state { #define KVM_IRQCHIP_PIC_MASTER 0 #define KVM_IRQCHIP_PIC_SLAVE 1 #define KVM_IRQCHIP_IOAPIC 2 +#define KVM_NR_IRQCHIPS 3 /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f403e66557f..cc2d7493598 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -131,7 +131,10 @@ struct kvm_kernel_irq_routing_entry { struct hlist_node link; }; +#ifdef __KVM_HAVE_IOAPIC + struct kvm_irq_routing_table { + int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS]; struct kvm_kernel_irq_routing_entry *rt_entries; u32 nr_rt_entries; /* @@ -141,6 +144,12 @@ struct kvm_irq_routing_table { struct hlist_head map[0]; }; +#else + +struct kvm_irq_routing_table {}; + +#endif + struct kvm { spinlock_t mmu_lock; spinlock_t requests_lock; diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 81950f6f6fd..59cf8dae006 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -175,25 +175,16 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_ack_notifier *kian; struct hlist_node *n; - unsigned gsi = pin; - int i; + int gsi; trace_kvm_ack_irq(irqchip, pin); - for (i = 0; i < kvm->irq_routing->nr_rt_entries; i++) { - struct kvm_kernel_irq_routing_entry *e; - e = &kvm->irq_routing->rt_entries[i]; - if (e->type == KVM_IRQ_ROUTING_IRQCHIP && - e->irqchip.irqchip == irqchip && - e->irqchip.pin == pin) { - gsi = e->gsi; - break; - } - } - - 
hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, link) - if (kian->gsi == gsi) - kian->irq_acked(kian); + gsi = kvm->irq_routing->chip[irqchip][pin]; + if (gsi != -1) + hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, + link) + if (kian->gsi == gsi) + kian->irq_acked(kian); } void kvm_register_irq_ack_notifier(struct kvm *kvm, @@ -332,6 +323,9 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt, } e->irqchip.irqchip = ue->u.irqchip.irqchip; e->irqchip.pin = ue->u.irqchip.pin + delta; + if (e->irqchip.pin >= KVM_IOAPIC_NUM_PINS) + goto out; + rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi; break; case KVM_IRQ_ROUTING_MSI: e->set = kvm_set_msi; @@ -356,7 +350,7 @@ int kvm_set_irq_routing(struct kvm *kvm, unsigned flags) { struct kvm_irq_routing_table *new, *old; - u32 i, nr_rt_entries = 0; + u32 i, j, nr_rt_entries = 0; int r; for (i = 0; i < nr; ++i) { @@ -377,6 +371,9 @@ int kvm_set_irq_routing(struct kvm *kvm, new->rt_entries = (void *)&new->map[nr_rt_entries]; new->nr_rt_entries = nr_rt_entries; + for (i = 0; i < 3; i++) + for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++) + new->chip[i][j] = -1; for (i = 0; i < nr; ++i) { r = -EINVAL; -- cgit v1.2.3-70-g09d2 From 136bdfeee7b5bc986fc94af3a40d7d13ea37bb95 Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Mon, 24 Aug 2009 11:54:23 +0300 Subject: KVM: Move irq ack notifier list to arch independent code Mask irq notifier list is already there. Signed-off-by: Gleb Natapov Signed-off-by: Avi Kivity --- arch/ia64/include/asm/kvm_host.h | 1 - arch/x86/include/asm/kvm_host.h | 1 - include/linux/kvm_host.h | 1 + virt/kvm/irq_comm.c | 5 ++--- virt/kvm/kvm_main.c | 1 + 5 files changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index d9b6325a932..a362e67e0ca 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h @@ -475,7 +475,6 @@ struct kvm_arch { struct list_head assigned_dev_head; struct iommu_domain *iommu_domain; int iommu_flags; - struct hlist_head irq_ack_notifier_list; unsigned long irq_sources_bitmap; unsigned long irq_states[KVM_IOAPIC_NUM_PINS]; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 35d3236c9de..a46e2dd9aca 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -397,7 +397,6 @@ struct kvm_arch{ struct kvm_pic *vpic; struct kvm_ioapic *vioapic; struct kvm_pit *vpit; - struct hlist_head irq_ack_notifier_list; int vapics_in_nmi_mode; unsigned int tss_addr; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cc2d7493598..4aa5e1d9a79 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -187,6 +187,7 @@ struct kvm { #ifdef CONFIG_HAVE_KVM_IRQCHIP struct kvm_irq_routing_table *irq_routing; struct hlist_head mask_notifier_list; + struct hlist_head irq_ack_notifier_list; #endif #ifdef KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index fb861dd956f..f0197259593 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c @@ -186,8 +186,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) rcu_read_unlock(); if (gsi != -1) - hlist_for_each_entry(kian, n, &kvm->arch.irq_ack_notifier_list, - link) + hlist_for_each_entry(kian, n, &kvm->irq_ack_notifier_list, link) if (kian->gsi == gsi) kian->irq_acked(kian); } @@ -196,7 +195,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm, struct 
kvm_irq_ack_notifier *kian) { mutex_lock(&kvm->irq_lock); - hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list); + hlist_add_head(&kian->link, &kvm->irq_ack_notifier_list); mutex_unlock(&kvm->irq_lock); } diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3bee9489277..6eca153e1a0 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -958,6 +958,7 @@ static struct kvm *kvm_create_vm(void) goto out; #ifdef CONFIG_HAVE_KVM_IRQCHIP INIT_HLIST_HEAD(&kvm->mask_notifier_list); + INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); #endif #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET -- cgit v1.2.3-70-g09d2 From af901ca181d92aac3a7dc265144a9081a86d8f39 Mon Sep 17 00:00:00 2001 From: André Goddard Rosa Date: Sat, 14 Nov 2009 13:09:05 -0200 Subject: tree-wide: fix assorted typos all over the place MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That is "success", "unknown", "through", "performance", "[re|un]mapping" , "access", "default", "reasonable", "[con]currently", "temperature" , "channel", "[un]used", "application", "example","hierarchy", "therefore" , "[over|under]flow", "contiguous", "threshold", "enough" and others. Signed-off-by: André Goddard Rosa Signed-off-by: Jiri Kosina --- Documentation/ABI/testing/procfs-diskstats | 2 +- Documentation/ABI/testing/sysfs-block | 2 +- Documentation/DocBook/mtdnand.tmpl | 2 +- Documentation/DocBook/v4l/videodev2.h.xml | 2 +- Documentation/DocBook/writing-an-alsa-driver.tmpl | 2 +- Documentation/dvb/README.dvb-usb | 2 +- Documentation/lguest/lguest.c | 2 +- Documentation/scsi/ChangeLog.megaraid_sas | 2 +- Documentation/spi/spi-summary | 2 +- Documentation/sysctl/vm.txt | 2 +- Documentation/video4linux/gspca.txt | 2 +- Documentation/vm/page-types.c | 2 +- arch/alpha/mm/numa.c | 2 +- arch/arm/common/scoop.c | 2 +- .../mach-bcmring/include/mach/csp/dmacHw_priv.h | 2 +- arch/arm/mach-bcmring/include/mach/dma.h | 2 +- arch/arm/mach-lh7a40x/include/mach/hardware.h | 2 +- arch/arm/mach-orion5x/pci.c | 2 +- arch/arm/mach-pxa/include/mach/palmld.h | 2 +- arch/arm/mach-pxa/include/mach/palmt5.h | 2 +- arch/arm/mach-pxa/include/mach/palmtc.h | 2 +- arch/arm/mach-pxa/include/mach/palmte2.h | 2 +- arch/arm/mach-pxa/include/mach/palmtx.h | 2 +- arch/arm/mach-pxa/include/mach/palmz72.h | 2 +- arch/arm/mach-s3c6400/setup-sdhci.c | 2 +- arch/arm/mach-s3c6410/setup-sdhci.c | 2 +- arch/arm/mach-sa1100/dma.c | 2 +- arch/arm/plat-mxc/include/mach/iomux-mx3.h | 2 +- arch/arm/plat-mxc/include/mach/iomux-mxc91231.h | 2 +- arch/arm/plat-mxc/pwm.c | 2 +- arch/arm/plat-omap/dma.c | 2 +- arch/arm/plat-omap/include/mach/omap16xx.h | 2 +- arch/arm/plat-s3c24xx/include/plat/map.h | 2 +- arch/avr32/boards/hammerhead/Kconfig | 2 +- arch/blackfin/kernel/traps.c | 2 +- .../mach-bf518/include/mach/defBF51x_base.h | 4 +- .../mach-bf527/include/mach/defBF52x_base.h | 4 +- arch/blackfin/mach-bf537/include/mach/defBF534.h | 4 +- arch/blackfin/mach-bf548/include/mach/defBF544.h | 4 +- arch/blackfin/mach-bf548/include/mach/defBF547.h | 4 +- arch/blackfin/mach-bf548/include/mach/defBF548.h | 4 +- arch/blackfin/mach-bf548/include/mach/defBF549.h | 4 +- arch/cris/mm/fault.c | 2 +- arch/ia64/hp/common/sba_iommu.c | 2 +- arch/ia64/ia32/ia32_entry.S | 2 +- arch/ia64/include/asm/perfmon_default_smpl.h | 2 +- arch/ia64/include/asm/sn/shubio.h | 2 +- arch/ia64/kernel/esi.c | 2 +- arch/ia64/kernel/perfmon.c | 2 +- arch/m68k/ifpsp060/src/fpsp.S | 28 +++++----- arch/m68k/ifpsp060/src/pfpsp.S | 26 ++++----- arch/m68k/include/asm/bootinfo.h | 2 +- 
arch/microblaze/lib/memcpy.c | 2 +- arch/microblaze/lib/memmove.c | 2 +- arch/microblaze/lib/memset.c | 2 +- arch/mips/include/asm/mach-pnx833x/gpio.h | 2 +- arch/mips/include/asm/sgi/ioc.h | 2 +- arch/mips/include/asm/sibyte/sb1250_mac.h | 2 +- arch/mips/include/asm/sn/sn0/hubio.h | 2 +- arch/mips/kernel/smtc.c | 2 +- arch/mips/math-emu/dp_sub.c | 2 +- arch/mips/txx9/generic/smsc_fdc37m81x.c | 2 +- arch/powerpc/include/asm/reg_fsl_emb.h | 2 +- arch/powerpc/kernel/kgdb.c | 2 +- arch/powerpc/kernel/tau_6xx.c | 2 +- arch/powerpc/oprofile/op_model_cell.c | 4 +- arch/powerpc/platforms/52xx/mpc52xx_pci.c | 2 +- arch/powerpc/platforms/powermac/pci.c | 2 +- arch/powerpc/sysdev/dart_iommu.c | 2 +- arch/s390/math-emu/math.c | 4 +- arch/x86/include/asm/desc_defs.h | 4 +- arch/x86/include/asm/mmzone_32.h | 2 +- arch/x86/include/asm/uv/uv_bau.h | 2 +- arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/amd_iommu.c | 4 +- arch/x86/kernel/cpu/perf_event.c | 2 +- arch/x86/kernel/kprobes.c | 4 +- arch/x86/mm/kmmio.c | 4 +- block/blk-iopoll.c | 2 +- drivers/ata/ata_piix.c | 2 +- drivers/ata/sata_fsl.c | 6 +-- drivers/atm/iphase.c | 2 +- drivers/base/dd.c | 2 +- drivers/bluetooth/btmrvl_sdio.c | 2 +- drivers/bluetooth/hci_ldisc.c | 2 +- drivers/char/mem.c | 2 +- drivers/char/mspec.c | 2 +- drivers/char/n_r3964.c | 2 +- drivers/char/rio/route.h | 2 +- drivers/crypto/hifn_795x.c | 2 +- drivers/dma/at_hdmac.c | 2 +- drivers/firewire/core-topology.c | 2 +- drivers/gpu/drm/drm_crtc.c | 4 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_fb.c | 2 +- drivers/gpu/drm/i915/intel_sdvo.c | 2 +- drivers/gpu/drm/radeon/r600.c | 4 +- drivers/gpu/drm/radeon/radeon_fb.c | 2 +- drivers/gpu/drm/radeon/radeon_state.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- drivers/gpu/drm/radeon/rv770.c | 4 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 2 +- drivers/hwmon/adm1029.c | 2 +- drivers/hwmon/lm93.c | 2 +- drivers/ieee1394/dv1394.c | 2 +- drivers/infiniband/hw/ipath/ipath_iba6110.c | 2 +- drivers/infiniband/hw/ipath/ipath_sd7220.c | 4 +- drivers/infiniband/hw/mlx4/qp.c | 2 +- drivers/input/serio/hp_sdc.c | 2 +- drivers/input/serio/hp_sdc_mlc.c | 2 +- drivers/input/touchscreen/atmel-wm97xx.c | 2 +- drivers/input/touchscreen/mainstone-wm97xx.c | 4 +- drivers/input/touchscreen/zylonite-wm97xx.c | 2 +- drivers/isdn/capi/capidrv.c | 2 +- drivers/isdn/hardware/eicon/di.c | 2 +- drivers/isdn/hardware/eicon/maintidi.c | 4 +- drivers/isdn/hardware/mISDN/hfcsusb.c | 2 +- drivers/isdn/hardware/mISDN/hfcsusb.h | 2 +- drivers/isdn/hardware/mISDN/mISDNisar.c | 2 +- drivers/isdn/hisax/hfc_usb.c | 2 +- drivers/isdn/i4l/isdn_ppp.c | 6 +-- drivers/isdn/i4l/isdn_ttyfax.c | 2 +- drivers/isdn/mISDN/dsp_core.c | 2 +- drivers/isdn/mISDN/tei.c | 2 +- drivers/macintosh/therm_windtunnel.c | 2 +- drivers/media/common/saa7146_i2c.c | 2 +- drivers/media/dvb/dvb-core/dvb_frontend.h | 2 +- drivers/media/dvb/dvb-usb/anysee.c | 2 +- drivers/media/dvb/dvb-usb/dibusb-mb.c | 2 +- drivers/media/dvb/dvb-usb/dvb-usb-remote.c | 2 +- drivers/media/dvb/dvb-usb/usb-urb.c | 4 +- drivers/media/dvb/frontends/au8522_decoder.c | 2 +- drivers/media/dvb/frontends/cx24110.c | 4 +- drivers/media/dvb/frontends/cx24113.c | 2 +- drivers/media/dvb/frontends/dib3000mb.c | 2 +- drivers/media/dvb/frontends/lgdt330x.c | 4 +- drivers/media/dvb/frontends/stb0899_drv.c | 2 +- drivers/media/dvb/ttpci/av7110.c | 4 +- drivers/media/dvb/ttpci/budget-patch.c | 2 +- drivers/media/radio/radio-mr800.c | 2 +- drivers/media/video/cx231xx/cx231xx-avcore.c | 8 +-- 
drivers/media/video/cx23885/cx23885-dvb.c | 2 +- drivers/media/video/cx88/cx88-core.c | 2 +- drivers/media/video/davinci/dm355_ccdc.c | 2 +- drivers/media/video/davinci/vpss.c | 2 +- drivers/media/video/gspca/sonixb.c | 2 +- drivers/media/video/gspca/spca500.c | 2 +- drivers/media/video/gspca/spca501.c | 6 +-- drivers/media/video/gspca/sunplus.c | 2 +- drivers/media/video/gspca/zc3xx.c | 2 +- drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h | 2 +- drivers/media/video/s2255drv.c | 2 +- drivers/media/video/zoran/zoran.h | 2 +- drivers/message/i2o/i2o_block.c | 2 +- drivers/message/i2o/iop.c | 4 +- drivers/misc/sgi-gru/grufile.c | 2 +- drivers/mmc/host/s3cmci.c | 2 +- drivers/mtd/devices/slram.c | 2 +- drivers/mtd/nand/diskonchip.c | 2 +- drivers/mtd/nand/nand_ecc.c | 2 +- drivers/mtd/nand/s3c2410.c | 2 +- drivers/net/82596.c | 2 +- drivers/net/amd8111e.c | 7 ++- drivers/net/appletalk/cops.c | 2 +- drivers/net/ariadne.h | 2 +- drivers/net/atl1c/atl1c_main.c | 2 +- drivers/net/benet/be_cmds.h | 2 +- drivers/net/benet/be_main.c | 2 +- drivers/net/bnx2x_reg.h | 2 +- drivers/net/cxgb3/sge.c | 2 +- drivers/net/ehea/ehea_ethtool.c | 4 +- drivers/net/hamradio/baycom_ser_fdx.c | 2 +- drivers/net/iseries_veth.c | 2 +- drivers/net/lasi_82596.c | 2 +- drivers/net/lib82596.c | 2 +- drivers/net/mlx4/en_rx.c | 2 +- drivers/net/mlx4/en_tx.c | 2 +- drivers/net/mlx4/mlx4_en.h | 2 +- drivers/net/ps3_gelic_net.c | 2 +- drivers/net/sis900.c | 4 +- drivers/net/skfp/h/smc.h | 8 +-- drivers/net/skfp/skfddi.c | 2 +- drivers/net/smsc911x.c | 2 +- drivers/net/smsc911x.h | 2 +- drivers/net/spider_net.c | 2 +- drivers/net/stmmac/gmac.c | 2 +- drivers/net/stmmac/gmac.h | 4 +- drivers/net/tokenring/smctr.c | 2 +- drivers/net/ucc_geth.c | 2 +- drivers/net/ucc_geth.h | 20 +++---- drivers/net/usb/smsc95xx.c | 2 +- drivers/net/wan/lmc/lmc_main.c | 2 +- drivers/net/wimax/i2400m/rx.c | 2 +- drivers/net/wireless/ath/ath5k/phy.c | 6 +-- drivers/net/wireless/ath/ath9k/rc.c | 2 +- drivers/net/wireless/ipw2x00/ipw2100.c | 6 +-- drivers/net/wireless/ipw2x00/ipw2200.c | 8 +-- drivers/net/wireless/ipw2x00/libipw_module.c | 2 +- drivers/net/wireless/iwmc3200wifi/hal.c | 2 +- drivers/net/wireless/iwmc3200wifi/rx.c | 2 +- drivers/net/wireless/libertas/if_sdio.c | 2 +- drivers/net/wireless/prism54/isl_ioctl.c | 4 +- drivers/net/wireless/rt2x00/rt2400pci.h | 2 +- drivers/net/wireless/rt2x00/rt2500pci.h | 2 +- drivers/net/wireless/rt2x00/rt2500usb.h | 2 +- drivers/net/wireless/rt2x00/rt61pci.h | 2 +- drivers/net/wireless/rt2x00/rt73usb.h | 2 +- drivers/net/wireless/wavelan_cs.c | 2 +- drivers/net/wireless/zd1211rw/zd_mac.c | 2 +- drivers/parisc/ccio-dma.c | 2 +- drivers/platform/x86/thinkpad_acpi.c | 2 +- drivers/pnp/pnpbios/rsparser.c | 8 +-- drivers/ps3/ps3-sys-manager.c | 2 +- drivers/rtc/rtc-v3020.c | 2 +- drivers/s390/char/fs3270.c | 2 +- drivers/s390/cio/chp.c | 2 +- drivers/s390/cio/cmf.c | 2 +- drivers/sbus/char/envctrl.c | 4 +- drivers/scsi/53c700.c | 2 +- drivers/scsi/aacraid/aacraid.h | 6 +-- drivers/scsi/aacraid/comminit.c | 2 +- drivers/scsi/aic7xxx/aic79xx.seq | 2 +- drivers/scsi/aic7xxx/aic79xx_core.c | 2 +- drivers/scsi/aic7xxx/aic7xxx_core.c | 2 +- drivers/scsi/bfa/include/defs/bfa_defs_pport.h | 2 +- drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h | 2 +- drivers/scsi/hptiop.c | 2 +- drivers/scsi/libfc/fc_lport.c | 2 +- drivers/scsi/libiscsi_tcp.c | 2 +- drivers/scsi/lpfc/lpfc_attr.c | 4 +- drivers/scsi/lpfc/lpfc_els.c | 4 +- drivers/scsi/lpfc/lpfc_init.c | 62 +++++++++++----------- drivers/scsi/lpfc/lpfc_sli.c | 10 ++-- 
drivers/scsi/megaraid.h | 2 +- drivers/scsi/megaraid/mbox_defs.h | 2 +- drivers/scsi/megaraid/megaraid_mbox.c | 2 +- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2 +- drivers/scsi/ncr53c8xx.c | 2 +- drivers/scsi/pmcraid.c | 6 +-- drivers/scsi/pmcraid.h | 6 +-- drivers/scsi/scsi_netlink.c | 2 +- drivers/scsi/scsi_transport_sas.c | 6 +-- drivers/scsi/sym53c8xx_2/sym_glue.c | 2 +- drivers/scsi/sym53c8xx_2/sym_hipd.c | 2 +- drivers/scsi/sym53c8xx_2/sym_hipd.h | 2 +- drivers/serial/8250_pnp.c | 4 +- drivers/serial/pmac_zilog.h | 2 +- drivers/serial/ucc_uart.c | 2 +- drivers/telephony/ixj.c | 4 +- drivers/usb/atm/ueagle-atm.c | 2 +- drivers/usb/class/usbtmc.c | 2 +- drivers/usb/core/message.c | 2 +- drivers/usb/gadget/f_acm.c | 2 +- drivers/usb/gadget/pxa27x_udc.c | 2 +- drivers/usb/host/fhci-sched.c | 2 +- drivers/usb/wusbcore/crypto.c | 2 +- drivers/usb/wusbcore/wa-xfer.c | 4 +- drivers/uwb/i1480/dfu/usb.c | 2 +- drivers/uwb/wlp/txrx.c | 2 +- drivers/video/aty/atyfb_base.c | 4 +- drivers/video/backlight/atmel-pwm-bl.c | 2 +- drivers/video/backlight/tosa_lcd.c | 2 +- drivers/video/console/sticore.c | 2 +- drivers/video/gbefb.c | 2 +- drivers/video/stifb.c | 4 +- drivers/video/tdfxfb.c | 2 +- drivers/video/via/dvi.c | 4 +- drivers/video/vt8623fb.c | 2 +- drivers/watchdog/coh901327_wdt.c | 2 +- drivers/watchdog/machzwd.c | 2 +- drivers/watchdog/wdrtas.c | 6 +-- fs/binfmt_elf.c | 2 +- fs/bio.c | 2 +- fs/btrfs/extent_map.c | 2 +- fs/cifs/README | 2 +- fs/cifs/cifsglob.h | 2 +- fs/cifs/inode.c | 4 +- fs/cifs/smbdes.c | 2 +- fs/dlm/plock.c | 2 +- fs/ext4/inode.c | 6 +-- fs/ext4/mballoc.c | 2 +- fs/jffs2/compr.c | 2 +- fs/jffs2/readinode.c | 2 +- fs/jffs2/xattr.c | 2 +- fs/jfs/jfs_dmap.c | 4 +- fs/ncpfs/ioctl.c | 2 +- fs/ntfs/compress.c | 2 +- fs/ntfs/file.c | 4 +- fs/ntfs/logfile.c | 2 +- fs/ocfs2/alloc.c | 2 +- fs/ocfs2/dlm/dlmmaster.c | 2 +- fs/ocfs2/dlmglue.c | 2 +- fs/ocfs2/journal.c | 2 +- fs/ocfs2/refcounttree.c | 2 +- fs/omfs/bitmap.c | 2 +- fs/ubifs/recovery.c | 2 +- fs/xfs/quota/xfs_dquot.h | 2 +- include/asm-generic/memory_model.h | 2 +- include/asm-generic/unistd.h | 2 +- include/linux/chio.h | 2 +- include/linux/mfd/ezx-pcap.h | 4 +- include/linux/pktcdvd.h | 2 +- include/linux/serial_reg.h | 8 +-- include/linux/videodev2.h | 2 +- include/net/sctp/structs.h | 2 +- include/net/tcp.h | 2 +- include/net/wimax.h | 2 +- include/sound/wm8993.h | 2 +- kernel/perf_event.c | 4 +- lib/Kconfig.debug | 2 +- lib/decompress_bunzip2.c | 2 +- lib/dma-debug.c | 2 +- lib/swiotlb.c | 2 +- mm/filemap.c | 2 +- mm/memcontrol.c | 4 +- mm/memory-failure.c | 2 +- net/ipv4/netfilter/ipt_ECN.c | 2 +- net/irda/irlap.c | 14 ++--- net/irda/irlap_event.c | 2 +- net/irda/irlmp.c | 4 +- net/mac80211/mesh_pathtbl.c | 4 +- net/netlabel/netlabel_domainhash.c | 2 +- net/sctp/sm_sideeffect.c | 2 +- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2 +- net/wimax/op-reset.c | 2 +- scripts/kconfig/mconf.c | 2 +- security/selinux/netlabel.c | 2 +- security/selinux/ss/services.c | 2 +- sound/Kconfig | 2 +- sound/isa/cs423x/cs4236.c | 2 +- sound/isa/opti9xx/miro.c | 2 +- sound/isa/opti9xx/opti92x-ad1848.c | 2 +- sound/oss/dmasound/dmasound_paula.c | 2 +- sound/pci/ca0106/ca0106_proc.c | 2 +- sound/pci/cs46xx/imgs/cwcdma.asp | 9 ++-- sound/pci/emu10k1/emu10k1x.c | 2 +- sound/pci/hda/patch_cmedia.c | 2 +- sound/pci/hda/patch_realtek.c | 2 +- sound/pci/rme9652/hdspm.c | 4 +- sound/soc/codecs/uda134x.c | 4 +- sound/soc/codecs/wm8903.c | 6 +-- sound/soc/codecs/wm8993.c | 4 +- sound/soc/s3c24xx/s3c24xx_simtec.c | 2 +- sound/soc/s6000/s6000-pcm.c | 2 +- 
sound/sound_core.c | 2 +- 345 files changed, 516 insertions(+), 508 deletions(-) (limited to 'arch/ia64/include') diff --git a/Documentation/ABI/testing/procfs-diskstats b/Documentation/ABI/testing/procfs-diskstats index 99233902e09..f91a973a37f 100644 --- a/Documentation/ABI/testing/procfs-diskstats +++ b/Documentation/ABI/testing/procfs-diskstats @@ -8,7 +8,7 @@ Description: 1 - major number 2 - minor mumber 3 - device name - 4 - reads completed succesfully + 4 - reads completed successfully 5 - reads merged 6 - sectors read 7 - time spent reading (ms) diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index 5f3bedaf8e3..d2f90334bb9 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -4,7 +4,7 @@ Contact: Jerome Marchand Description: The /sys/block//stat files displays the I/O statistics of disk . They contain 11 fields: - 1 - reads completed succesfully + 1 - reads completed successfully 2 - reads merged 3 - sectors read 4 - time spent reading (ms) diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl index df0d089d0fb..f508a8a27fe 100644 --- a/Documentation/DocBook/mtdnand.tmpl +++ b/Documentation/DocBook/mtdnand.tmpl @@ -362,7 +362,7 @@ module_exit(board_cleanup); Multiple chip control - The nand driver can control chip arrays. Therefor the + The nand driver can control chip arrays. Therefore the board driver must provide an own select_chip function. This function must (de)select the requested chip. The function pointer in the nand_chip structure must diff --git a/Documentation/DocBook/v4l/videodev2.h.xml b/Documentation/DocBook/v4l/videodev2.h.xml index 97002060ac4..26303e58345 100644 --- a/Documentation/DocBook/v4l/videodev2.h.xml +++ b/Documentation/DocBook/v4l/videodev2.h.xml @@ -492,7 +492,7 @@ struct v4l2_jpegcompression { * you do, leave them untouched. * Inluding less markers will make the * resulting code smaller, but there will - * be fewer aplications which can read it. + * be fewer applications which can read it. * The presence of the APP and COM marker * is influenced by APP_len and COM_len * ONLY, not by this property! */ diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl index 7a2e0e98986..0d0f7b4d4b1 100644 --- a/Documentation/DocBook/writing-an-alsa-driver.tmpl +++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl @@ -5318,7 +5318,7 @@ struct _snd_pcm_runtime { pages of the given size and map them onto the virtually contiguous memory. The virtual pointer is addressed in runtime->dma_area. The physical address (runtime->dma_addr) is set to zero, - because the buffer is physically non-contigous. + because the buffer is physically non-contiguous. The physical address table is set up in sgbuf->table. You can get the physical address at a certain offset via snd_pcm_sgbuf_get_addr(). 
diff --git a/Documentation/dvb/README.dvb-usb b/Documentation/dvb/README.dvb-usb index bf2a9cdfe7b..c8238e44ed6 100644 --- a/Documentation/dvb/README.dvb-usb +++ b/Documentation/dvb/README.dvb-usb @@ -85,7 +85,7 @@ http://www.linuxtv.org/wiki/index.php/DVB_USB - moved transfer control (pid filter, fifo control) from usb driver to frontend, it seems better settled there (added xfer_ops-struct) - created a common files for frontends (mc/p/mb) - 2004-09-28 - added support for a new device (Unkown, vendor ID is Hyper-Paltek) + 2004-09-28 - added support for a new device (Unknown, vendor ID is Hyper-Paltek) 2004-09-20 - added support for a new device (Compro DVB-U2000), thanks to Amaury Demol for reporting - changed usb TS transfer method (several urbs, stopping transfer diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c index 098de5bce00..42208511b5c 100644 --- a/Documentation/lguest/lguest.c +++ b/Documentation/lguest/lguest.c @@ -304,7 +304,7 @@ static void *map_zeroed_pages(unsigned int num) addr = mmap(NULL, getpagesize() * num, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0); if (addr == MAP_FAILED) - err(1, "Mmaping %u pages of /dev/zero", num); + err(1, "Mmapping %u pages of /dev/zero", num); /* * One neat mmap feature is that you can close the fd, and it diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas index c851ef49779..84524e0cf9c 100644 --- a/Documentation/scsi/ChangeLog.megaraid_sas +++ b/Documentation/scsi/ChangeLog.megaraid_sas @@ -185,7 +185,7 @@ ii. FW enables WCE bit in Mode Sense cmd for drives that are configured Disks are exposed with WCE=1. User is advised to enable Write Back mode only when the controller has battery backup. At this time Synhronize cache is not supported by the FW. Driver will short-cycle - the cmd and return sucess without sending down to FW. + the cmd and return success without sending down to FW. 1 Release Date : Sun Jan. 14 11:21:32 PDT 2007 - Sumant Patro /Bo Yang diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary index deab51ddc33..4884cb33845 100644 --- a/Documentation/spi/spi-summary +++ b/Documentation/spi/spi-summary @@ -538,7 +538,7 @@ SPI MESSAGE QUEUE The bulk of the driver will be managing the I/O queue fed by transfer(). That queue could be purely conceptual. For example, a driver used only -for low-frequency sensor acess might be fine using synchronous PIO. +for low-frequency sensor access might be fine using synchronous PIO. But the queue will probably be very real, using message->queue, PIO, often DMA (especially if the root filesystem is in SPI flash), and diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index a6e360d2055..fc5790d36cd 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -370,7 +370,7 @@ The default is 1 percent. mmap_min_addr This file indicates the amount of address space which a user process will -be restricted from mmaping. Since kernel null dereference bugs could +be restricted from mmapping. Since kernel null dereference bugs could accidentally operate based on the information in the first couple of pages of memory userspace processes should not be allowed to write to them. 
By default this value is set to 0 and no protections will be enforced by the diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt index 3f61825be49..6b29555b58b 100644 --- a/Documentation/video4linux/gspca.txt +++ b/Documentation/video4linux/gspca.txt @@ -6,7 +6,7 @@ The modules are: xxxx vend:prod ---- -spca501 0000:0000 MystFromOri Unknow Camera +spca501 0000:0000 MystFromOri Unknown Camera m5602 0402:5602 ALi Video Camera Controller spca501 040a:0002 Kodak DVC-325 spca500 040a:0300 Kodak EZ200 diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c index 3ec4f2a2258..aa7f4d0639c 100644 --- a/Documentation/vm/page-types.c +++ b/Documentation/vm/page-types.c @@ -301,7 +301,7 @@ static char *page_flag_name(uint64_t flags) present = (flags >> i) & 1; if (!page_flag_names[i]) { if (present) - fatal("unkown flag bit %d\n", i); + fatal("unknown flag bit %d\n", i); continue; } buf[j++] = present ? page_flag_names[i][0] : '_'; diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c index 10b403554b6..7b2c56d8f93 100644 --- a/arch/alpha/mm/numa.c +++ b/arch/alpha/mm/numa.c @@ -197,7 +197,7 @@ setup_memory_node(int nid, void *kernel_end) } if (bootmap_start == -1) - panic("couldn't find a contigous place for the bootmap"); + panic("couldn't find a contiguous place for the bootmap"); /* Allocate the bootmap and mark the whole MM as reserved. */ bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start, diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c index 7713a08bb10..37bda5f3dde 100644 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c @@ -82,7 +82,7 @@ static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset) { struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio); - /* XXX: I'm usure, but it seems so */ + /* XXX: I'm unsure, but it seems so */ return ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1)); } diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h index 375066ad018..cbf334d1c76 100644 --- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h +++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h @@ -83,7 +83,7 @@ typedef struct { * @brief Get next available transaction width * * -* @return On sucess : Next avail able transaction width +* @return On success : Next available transaction width * On failure : dmacHw_TRANSACTION_WIDTH_8 * * @note diff --git a/arch/arm/mach-bcmring/include/mach/dma.h b/arch/arm/mach-bcmring/include/mach/dma.h index 847980c85c8..1f2c5319c05 100644 --- a/arch/arm/mach-bcmring/include/mach/dma.h +++ b/arch/arm/mach-bcmring/include/mach/dma.h @@ -651,7 +651,7 @@ int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about t /** * Creates a descriptor ring from a memory mapping. * -* @return 0 on sucess, error code otherwise. +* @return 0 on success, error code otherwise. */ /****************************************************************************/ diff --git a/arch/arm/mach-lh7a40x/include/mach/hardware.h b/arch/arm/mach-lh7a40x/include/mach/hardware.h index 48e827d2fa5..59d2ace3521 100644 --- a/arch/arm/mach-lh7a40x/include/mach/hardware.h +++ b/arch/arm/mach-lh7a40x/include/mach/hardware.h @@ -31,7 +31,7 @@ /* * This __REG() version gives the same results as the one above, except * that we are fooling gcc somehow so it generates far better and smaller - * assembly code for access to contigous registers. 
It's a shame that gcc + * assembly code for access to contiguous registers. It's a shame that gcc * doesn't guess this by itself. */ #include diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c index 36dc5413cc9..bdf96eb523b 100644 --- a/arch/arm/mach-orion5x/pci.c +++ b/arch/arm/mach-orion5x/pci.c @@ -463,7 +463,7 @@ static void __init orion5x_setup_pci_wins(struct mbus_dram_target_info *dram) writel(win_enable, PCI_BAR_ENABLE); /* - * Disable automatic update of address remaping when writing to BARs. + * Disable automatic update of address remapping when writing to BARs. */ orion5x_setbits(PCI_ADDR_DECODE_CTRL, 1); } diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h index 8721b801022..ae536e86d8e 100644 --- a/arch/arm/mach-pxa/include/mach/palmld.h +++ b/arch/arm/mach-pxa/include/mach/palmld.h @@ -91,7 +91,7 @@ /* BATTERY */ #define PALMLD_BAT_MAX_VOLTAGE 4000 /* 4.00V maximum voltage */ #define PALMLD_BAT_MIN_VOLTAGE 3550 /* 3.55V critical voltage */ -#define PALMLD_BAT_MAX_CURRENT 0 /* unknokn */ +#define PALMLD_BAT_MAX_CURRENT 0 /* unknown */ #define PALMLD_BAT_MIN_CURRENT 0 /* unknown */ #define PALMLD_BAT_MAX_CHARGE 1 /* unknown */ #define PALMLD_BAT_MIN_CHARGE 1 /* unknown */ diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h index d15662aba00..6baf7469d4e 100644 --- a/arch/arm/mach-pxa/include/mach/palmt5.h +++ b/arch/arm/mach-pxa/include/mach/palmt5.h @@ -66,7 +66,7 @@ /* BATTERY */ #define PALMT5_BAT_MAX_VOLTAGE 4000 /* 4.00v current voltage */ #define PALMT5_BAT_MIN_VOLTAGE 3550 /* 3.55v critical voltage */ -#define PALMT5_BAT_MAX_CURRENT 0 /* unknokn */ +#define PALMT5_BAT_MAX_CURRENT 0 /* unknown */ #define PALMT5_BAT_MIN_CURRENT 0 /* unknown */ #define PALMT5_BAT_MAX_CHARGE 1 /* unknown */ #define PALMT5_BAT_MIN_CHARGE 1 /* unknown */ diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h index 3dc9b074ab4..3f9dd3fd463 100644 --- a/arch/arm/mach-pxa/include/mach/palmtc.h +++ b/arch/arm/mach-pxa/include/mach/palmtc.h @@ -68,7 +68,7 @@ /* BATTERY */ #define PALMTC_BAT_MAX_VOLTAGE 4000 /* 4.00V maximum voltage */ #define PALMTC_BAT_MIN_VOLTAGE 3550 /* 3.55V critical voltage */ -#define PALMTC_BAT_MAX_CURRENT 0 /* unknokn */ +#define PALMTC_BAT_MAX_CURRENT 0 /* unknown */ #define PALMTC_BAT_MIN_CURRENT 0 /* unknown */ #define PALMTC_BAT_MAX_CHARGE 1 /* unknown */ #define PALMTC_BAT_MIN_CHARGE 1 /* unknown */ diff --git a/arch/arm/mach-pxa/include/mach/palmte2.h b/arch/arm/mach-pxa/include/mach/palmte2.h index 12361341f9d..f89e989a763 100644 --- a/arch/arm/mach-pxa/include/mach/palmte2.h +++ b/arch/arm/mach-pxa/include/mach/palmte2.h @@ -59,7 +59,7 @@ /* BATTERY */ #define PALMTE2_BAT_MAX_VOLTAGE 4000 /* 4.00v current voltage */ #define PALMTE2_BAT_MIN_VOLTAGE 3550 /* 3.55v critical voltage */ -#define PALMTE2_BAT_MAX_CURRENT 0 /* unknokn */ +#define PALMTE2_BAT_MAX_CURRENT 0 /* unknown */ #define PALMTE2_BAT_MIN_CURRENT 0 /* unknown */ #define PALMTE2_BAT_MAX_CHARGE 1 /* unknown */ #define PALMTE2_BAT_MIN_CHARGE 1 /* unknown */ diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h index 1be0db6ed55..10abc4f2e8e 100644 --- a/arch/arm/mach-pxa/include/mach/palmtx.h +++ b/arch/arm/mach-pxa/include/mach/palmtx.h @@ -94,7 +94,7 @@ /* BATTERY */ #define PALMTX_BAT_MAX_VOLTAGE 4000 /* 4.00v current voltage */ #define PALMTX_BAT_MIN_VOLTAGE 3550 /* 3.55v critical voltage */ -#define 
PALMTX_BAT_MAX_CURRENT 0 /* unknokn */ +#define PALMTX_BAT_MAX_CURRENT 0 /* unknown */ #define PALMTX_BAT_MIN_CURRENT 0 /* unknown */ #define PALMTX_BAT_MAX_CHARGE 1 /* unknown */ #define PALMTX_BAT_MIN_CHARGE 1 /* unknown */ diff --git a/arch/arm/mach-pxa/include/mach/palmz72.h b/arch/arm/mach-pxa/include/mach/palmz72.h index 2806ef69ba5..2bbcf70dd93 100644 --- a/arch/arm/mach-pxa/include/mach/palmz72.h +++ b/arch/arm/mach-pxa/include/mach/palmz72.h @@ -49,7 +49,7 @@ /* Battery */ #define PALMZ72_BAT_MAX_VOLTAGE 4000 /* 4.00v current voltage */ #define PALMZ72_BAT_MIN_VOLTAGE 3550 /* 3.55v critical voltage */ -#define PALMZ72_BAT_MAX_CURRENT 0 /* unknokn */ +#define PALMZ72_BAT_MAX_CURRENT 0 /* unknown */ #define PALMZ72_BAT_MIN_CURRENT 0 /* unknown */ #define PALMZ72_BAT_MAX_CHARGE 1 /* unknown */ #define PALMZ72_BAT_MIN_CHARGE 1 /* unknown */ diff --git a/arch/arm/mach-s3c6400/setup-sdhci.c b/arch/arm/mach-s3c6400/setup-sdhci.c index b93dafbee1f..1039937403b 100644 --- a/arch/arm/mach-s3c6400/setup-sdhci.c +++ b/arch/arm/mach-s3c6400/setup-sdhci.c @@ -30,7 +30,7 @@ char *s3c6400_hsmmc_clksrcs[4] = { [0] = "hsmmc", [1] = "hsmmc", [2] = "mmc_bus", - /* [3] = "48m", - note not succesfully used yet */ + /* [3] = "48m", - note not successfully used yet */ }; void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev, diff --git a/arch/arm/mach-s3c6410/setup-sdhci.c b/arch/arm/mach-s3c6410/setup-sdhci.c index 20666f3bd47..816d2d9f9ef 100644 --- a/arch/arm/mach-s3c6410/setup-sdhci.c +++ b/arch/arm/mach-s3c6410/setup-sdhci.c @@ -30,7 +30,7 @@ char *s3c6410_hsmmc_clksrcs[4] = { [0] = "hsmmc", [1] = "hsmmc", [2] = "mmc_bus", - /* [3] = "48m", - note not succesfully used yet */ + /* [3] = "48m", - note not successfully used yet */ }; diff --git a/arch/arm/mach-sa1100/dma.c b/arch/arm/mach-sa1100/dma.c index cb4521a6f42..ad660350c29 100644 --- a/arch/arm/mach-sa1100/dma.c +++ b/arch/arm/mach-sa1100/dma.c @@ -65,7 +65,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) /** - * sa1100_request_dma - allocate one of the SA11x0's DMA chanels + * sa1100_request_dma - allocate one of the SA11x0's DMA channels * @device: The SA11x0 peripheral targeted by this request * @device_id: An ascii name for the claiming device * @callback: Function to be called when the DMA completes diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx3.h b/arch/arm/plat-mxc/include/mach/iomux-mx3.h index 446f8676381..0c7802bbecc 100644 --- a/arch/arm/plat-mxc/include/mach/iomux-mx3.h +++ b/arch/arm/plat-mxc/include/mach/iomux-mx3.h @@ -112,7 +112,7 @@ enum iomux_gp_func { * setups a single pin: * - reserves the pin so that it is not claimed by another driver * - setups the iomux according to the configuration - * - if the pin is configured as a GPIO, we claim it throug kernel gpiolib + * - if the pin is configured as a GPIO, we claim it through kernel gpiolib */ int mxc_iomux_alloc_pin(const unsigned int pin, const char *label); /* diff --git a/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h b/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h index 9f13061192c..3887f3fe29d 100644 --- a/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h +++ b/arch/arm/plat-mxc/include/mach/iomux-mxc91231.h @@ -48,7 +48,7 @@ * setups a single pin: * - reserves the pin so that it is not claimed by another driver * - setups the iomux according to the configuration - * - if the pin is configured as a GPIO, we claim it throug kernel gpiolib + * - if the pin is configured as a GPIO, we claim it through kernel gpiolib */ int 
mxc_iomux_alloc_pin(const unsigned int pin_mode, const char *label); /* diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c index 5cdbd605ac0..4ff6dfe0428 100644 --- a/arch/arm/plat-mxc/pwm.c +++ b/arch/arm/plat-mxc/pwm.c @@ -94,7 +94,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) * register to follow the ratio of duty_ns vs. period_ns * accordingly. * - * This is good enought for programming the brightness of + * This is good enough for programming the brightness of * the LCD backlight. * * The real implementation would divide PERCLK[0] first by diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index b53125f4129..0e308913291 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -1232,7 +1232,7 @@ static void create_dma_lch_chain(int lch_head, int lch_queue) * OMAP_DMA_DYNAMIC_CHAIN * @params - Channel parameters * - * @return - Succes : 0 + * @return - Success : 0 * Failure: -EINVAL/-ENOMEM */ int omap_request_dma_chain(int dev_id, const char *dev_name, diff --git a/arch/arm/plat-omap/include/mach/omap16xx.h b/arch/arm/plat-omap/include/mach/omap16xx.h index 0e69b504c25..7560b4d583a 100644 --- a/arch/arm/plat-omap/include/mach/omap16xx.h +++ b/arch/arm/plat-omap/include/mach/omap16xx.h @@ -124,7 +124,7 @@ #define TIPB_SWITCH_BASE (0xfffbc800) #define OMAP16XX_MMCSD2_SSW_MPU_CONF (TIPB_SWITCH_BASE + 0x160) -/* UART3 Registers Maping through MPU bus */ +/* UART3 Registers Mapping through MPU bus */ #define UART3_RHR (OMAP_UART3_BASE + 0) #define UART3_THR (OMAP_UART3_BASE + 0) #define UART3_DLL (OMAP_UART3_BASE + 0) diff --git a/arch/arm/plat-s3c24xx/include/plat/map.h b/arch/arm/plat-s3c24xx/include/plat/map.h index c4d133436fc..bd534d32b99 100644 --- a/arch/arm/plat-s3c24xx/include/plat/map.h +++ b/arch/arm/plat-s3c24xx/include/plat/map.h @@ -64,7 +64,7 @@ /* the calculation for the VA of this must ensure that * it is the same distance apart from the UART in the * phsyical address space, as the initial mapping for the IO - * is done as a 1:1 maping. This puts it (currently) at + * is done as a 1:1 mapping. This puts it (currently) at * 0xFA800000, which is not in the way of any current mapping * by the base system. */ diff --git a/arch/avr32/boards/hammerhead/Kconfig b/arch/avr32/boards/hammerhead/Kconfig index fda2331f978..5c13d785cc7 100644 --- a/arch/avr32/boards/hammerhead/Kconfig +++ b/arch/avr32/boards/hammerhead/Kconfig @@ -24,7 +24,7 @@ config BOARD_HAMMERHEAD_SND bool "Atmel AC97 Sound support" help This enables Sound support for the Hammerhead board. You may - also go trough the ALSA settings to get it working. + also go through the ALSA settings to get it working. Choose 'Y' here if you have ordered a Corona daugther board and want to make your board funky. 
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 6b7325d634a..78cb3d38f89 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -619,7 +619,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) /* * Similar to get_user, do some address checking, then dereference - * Return true on sucess, false on bad address + * Return true on success, false on bad address */ static bool get_instruction(unsigned short *val, unsigned short *address) { diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h index e06f4112c69..f9fd2b2a295 100644 --- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h +++ b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h @@ -542,7 +542,7 @@ #define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */ @@ -550,7 +550,7 @@ #define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */ diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h index f821700716e..b9dbb73d7ef 100644 --- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h +++ b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h @@ -544,7 +544,7 @@ #define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */ @@ -552,7 +552,7 @@ #define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xFFC03354 
/* HMDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */ diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h index cebb14feb1b..a6d20ca5768 100644 --- a/arch/blackfin/mach-bf537/include/mach/defBF534.h +++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h @@ -934,7 +934,7 @@ #define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */ @@ -942,7 +942,7 @@ #define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */ diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h index dd414ae4ba4..39f588dcd38 100644 --- a/arch/blackfin/mach-bf548/include/mach/defBF544.h +++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h @@ -491,7 +491,7 @@ #define HMDMA0_CONTROL 0xffc04500 /* Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xffc04504 /* Handshake MDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xffc04508 /* Handshake MDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xffc04510 /* Handshake MDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xffc04514 /* Handshake MDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xffc04518 /* Handshake MDMA0 Current Block Count Register */ @@ -501,7 +501,7 @@ #define HMDMA1_CONTROL 0xffc04540 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xffc04544 /* Handshake MDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xffc04548 /* Handshake MDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xffc04550 /* Handshake MDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xffc04554 /* Handshake MDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xffc04558 /* Handshake MDMA1 Current Block Count Register */ diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h index 5a9dbabe0a6..c4dcf302d9f 100644 --- 
a/arch/blackfin/mach-bf548/include/mach/defBF547.h +++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h @@ -470,7 +470,7 @@ #define HMDMA0_CONTROL 0xffc04500 /* Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xffc04504 /* Handshake MDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xffc04508 /* Handshake MDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xffc04510 /* Handshake MDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xffc04514 /* Handshake MDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xffc04518 /* Handshake MDMA0 Current Block Count Register */ @@ -480,7 +480,7 @@ #define HMDMA1_CONTROL 0xffc04540 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xffc04544 /* Handshake MDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xffc04548 /* Handshake MDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xffc04550 /* Handshake MDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xffc04554 /* Handshake MDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xffc04558 /* Handshake MDMA1 Current Block Count Register */ diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h index 82cd593f739..a5079980968 100644 --- a/arch/blackfin/mach-bf548/include/mach/defBF548.h +++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h @@ -853,7 +853,7 @@ #define HMDMA0_CONTROL 0xffc04500 /* Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xffc04504 /* Handshake MDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xffc04508 /* Handshake MDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xffc04510 /* Handshake MDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xffc04514 /* Handshake MDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xffc04518 /* Handshake MDMA0 Current Block Count Register */ @@ -863,7 +863,7 @@ #define HMDMA1_CONTROL 0xffc04540 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xffc04544 /* Handshake MDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xffc04548 /* Handshake MDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xffc04550 /* Handshake MDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xffc04554 /* Handshake MDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xffc04558 /* Handshake MDMA1 Current Block Count Register */ diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h index 6fc6e39ab61..f7f043560c6 100644 --- a/arch/blackfin/mach-bf548/include/mach/defBF549.h +++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h @@ -1024,7 +1024,7 @@ #define HMDMA0_CONTROL 0xffc04500 /* 
Handshake MDMA0 Control Register */ #define HMDMA0_ECINIT 0xffc04504 /* Handshake MDMA0 Initial Edge Count Register */ #define HMDMA0_BCINIT 0xffc04508 /* Handshake MDMA0 Initial Block Count Register */ -#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshhold Register */ +#define HMDMA0_ECURGENT 0xffc0450c /* Handshake MDMA0 Urgent Edge Count Threshold Register */ #define HMDMA0_ECOVERFLOW 0xffc04510 /* Handshake MDMA0 Edge Count Overflow Interrupt Register */ #define HMDMA0_ECOUNT 0xffc04514 /* Handshake MDMA0 Current Edge Count Register */ #define HMDMA0_BCOUNT 0xffc04518 /* Handshake MDMA0 Current Block Count Register */ @@ -1034,7 +1034,7 @@ #define HMDMA1_CONTROL 0xffc04540 /* Handshake MDMA1 Control Register */ #define HMDMA1_ECINIT 0xffc04544 /* Handshake MDMA1 Initial Edge Count Register */ #define HMDMA1_BCINIT 0xffc04548 /* Handshake MDMA1 Initial Block Count Register */ -#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshhold Register */ +#define HMDMA1_ECURGENT 0xffc0454c /* Handshake MDMA1 Urgent Edge Count Threshold Register */ #define HMDMA1_ECOVERFLOW 0xffc04550 /* Handshake MDMA1 Edge Count Overflow Interrupt Register */ #define HMDMA1_ECOUNT 0xffc04554 /* Handshake MDMA1 Current Edge Count Register */ #define HMDMA1_BCOUNT 0xffc04558 /* Handshake MDMA1 Current Block Count Register */ diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 4a7cdd9ea1e..380df1a73a6 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -209,7 +209,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs, /* Are we prepared to handle this kernel fault? * * (The kernel has valid exception-points in the source - * when it acesses user-memory. When it fails in one + * when it accesses user-memory. When it fails in one * of those points, we find it in a table and do a jump * to some fixup code that loads an appropriate error * code) diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 674a8374c6d..f332e3fe423 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1381,7 +1381,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, #endif /* - ** Not virtually contigous. + ** Not virtually contiguous. ** Terminate prev chunk. ** Start a new chunk. ** diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S index af9405cd70e..02d1fb73295 100644 --- a/arch/ia64/ia32/ia32_entry.S +++ b/arch/ia64/ia32/ia32_entry.S @@ -79,7 +79,7 @@ GLOBAL_ENTRY(ia32_ret_from_clone) (p6) br.cond.spnt .ia32_strace_check_retval ;; // prevent RAW on r8 END(ia32_ret_from_clone) - // fall thrugh + // fall through GLOBAL_ENTRY(ia32_ret_from_syscall) PT_REGS_UNWIND_INFO(0) diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h index 48822c0811d..74724b24c2b 100644 --- a/arch/ia64/include/asm/perfmon_default_smpl.h +++ b/arch/ia64/include/asm/perfmon_default_smpl.h @@ -67,7 +67,7 @@ typedef struct { unsigned long ip; /* where did the overflow interrupt happened */ unsigned long tstamp; /* ar.itc when entering perfmon intr. 
handler */ - unsigned short cpu; /* cpu on which the overfow occured */ + unsigned short cpu; /* cpu on which the overflow occured */ unsigned short set; /* event set active when overflow ocurred */ int tgid; /* thread group id (for NPTL, this is getpid()) */ } pfm_default_smpl_entry_t; diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h index 22a6f18a531..6052422a22b 100644 --- a/arch/ia64/include/asm/sn/shubio.h +++ b/arch/ia64/include/asm/sn/shubio.h @@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t; #define IIO_IIDSR_LVL_SHIFT 0 #define IIO_IIDSR_LVL_MASK 0x000000ff -/* Xtalk timeout threshhold register (IIO_IXTT) */ +/* Xtalk timeout threshold register (IIO_IXTT) */ #define IXTT_RRSP_TO_SHFT 55 /* read response timeout */ #define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT) #define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */ diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c index d5764a3d74a..b091111270c 100644 --- a/arch/ia64/kernel/esi.c +++ b/arch/ia64/kernel/esi.c @@ -84,7 +84,7 @@ static int __init esi_init (void) case ESI_DESC_ENTRY_POINT: break; default: - printk(KERN_WARNING "Unkown table type %d found in " + printk(KERN_WARNING "Unknown table type %d found in " "ESI table, ignoring rest of table\n", *p); return -ENODEV; } diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index f1782705b1f..b3a1cb3e6b2 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -3523,7 +3523,7 @@ pfm_use_debug_registers(struct task_struct *task) * IA64_THREAD_DBG_VALID set. This indicates a task which was * able to use the debug registers for debugging purposes via * ptrace(). Therefore we know it was not using them for - * perfmormance monitoring, so we only decrement the number + * performance monitoring, so we only decrement the number * of "ptraced" debug register users to keep the count up to date */ int diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S index 6c1a9a21788..73613b5f1ee 100644 --- a/arch/m68k/ifpsp060/src/fpsp.S +++ b/arch/m68k/ifpsp060/src/fpsp.S @@ -753,7 +753,7 @@ fovfl_ovfl_on: bra.l _real_ovfl -# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore, +# overflow occurred but is disabled. meanwhile, inexact is enabled. Therefore, # we must jump to real_inex(). fovfl_inex_on: @@ -1015,7 +1015,7 @@ funfl_unfl_on2: bra.l _real_unfl -# undeflow occurred but is disabled. meanwhile, inexact is enabled. therefore, +# underflow occurred but is disabled. meanwhile, inexact is enabled. Therefore, # we must jump to real_inex(). funfl_inex_on: @@ -2963,7 +2963,7 @@ iea_disabled: tst.w %d0 # is instr fmovm? bmi.b iea_dis_fmovm # yes -# instruction is using an extended precision immediate operand. therefore, +# instruction is using an extended precision immediate operand. Therefore, # the total instruction length is 16 bytes. iea_dis_immed: mov.l &0x10,%d0 # 16 bytes of instruction @@ -9624,7 +9624,7 @@ sok_dnrm: bge.b sok_norm2 # thank goodness no # the multiply factor that we're trying to create should be a denorm -# for the multiply to work. therefore, we're going to actually do a +# for the multiply to work. Therefore, we're going to actually do a # multiply with a denorm which will cause an unimplemented data type # exception to be put into the machine which will be caught and corrected # later. 
we don't do this with the DENORMs above because this method @@ -12216,7 +12216,7 @@ fin_sd_unfl_dis: # # operand will underflow AND underflow or inexact is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fin_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -12746,7 +12746,7 @@ fdiv_zero_load_p: # # The destination was In Range and the source was a ZERO. The result, -# therefore, is an INF w/ the proper sign. +# Therefore, is an INF w/ the proper sign. # So, determine the sign and return a new INF (w/ the j-bit cleared). # global fdiv_inf_load # global for fsgldiv @@ -12996,7 +12996,7 @@ fneg_sd_unfl_dis: # # operand will underflow AND underflow is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fneg_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -13611,7 +13611,7 @@ fabs_sd_unfl_dis: # # operand will underflow AND underflow is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fabs_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -14973,7 +14973,7 @@ fadd_zero_2: # # the ZEROes have opposite signs: -# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP. +# - Therefore, we return +ZERO if the rounding modes are RN,RZ, or RP. # - -ZERO is returned in the case of RM. # fadd_zero_2_chk_rm: @@ -15425,7 +15425,7 @@ fsub_zero_2: # # the ZEROes have the same signs: -# - therefore, we return +ZERO if the rounding mode is RN,RZ, or RP +# - Therefore, we return +ZERO if the rounding mode is RN,RZ, or RP # - -ZERO is returned in the case of RM. # fsub_zero_2_chk_rm: @@ -15693,7 +15693,7 @@ fsqrt_sd_unfl_dis: # # operand will underflow AND underflow is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fsqrt_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -21000,7 +21000,7 @@ fout_pack_type: tst.l %d0 bne.b fout_pack_set # "mantissa" is all zero which means that the answer is zero. but, the '040 -# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore, +# algorithm allows the exponent to be non-zero. the 881/2 do not. Therefore, # if the mantissa is zero, I will zero the exponent, too. # the question now is whether the exponents sign bit is allowed to be non-zero # for a zero, also... @@ -21743,7 +21743,7 @@ denorm_set_stky: rts # # -# dnrm_lp(): normalize exponent/mantissa to specified threshhold # +# dnrm_lp(): normalize exponent/mantissa to specified threshold # # # # INPUT: # # %a0 : points to the operand to be denormalized # @@ -22402,7 +22402,7 @@ unnorm_shift: bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0 # -# exponent would not go < 0. therefore, number stays normalized +# exponent would not go < 0. Therefore, number stays normalized # sub.w %d0, %d1 # shift exponent value mov.w FTEMP_EX(%a0), %d0 # load old exponent diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S index 51b9f7d879d..e71ba0ab013 100644 --- a/arch/m68k/ifpsp060/src/pfpsp.S +++ b/arch/m68k/ifpsp060/src/pfpsp.S @@ -752,7 +752,7 @@ fovfl_ovfl_on: bra.l _real_ovfl -# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore, +# overflow occurred but is disabled. meanwhile, inexact is enabled. 
Therefore, # we must jump to real_inex(). fovfl_inex_on: @@ -1014,7 +1014,7 @@ funfl_unfl_on2: bra.l _real_unfl -# undeflow occurred but is disabled. meanwhile, inexact is enabled. therefore, +# underflow occurred but is disabled. meanwhile, inexact is enabled. Therefore, # we must jump to real_inex(). funfl_inex_on: @@ -2962,7 +2962,7 @@ iea_disabled: tst.w %d0 # is instr fmovm? bmi.b iea_dis_fmovm # yes -# instruction is using an extended precision immediate operand. therefore, +# instruction is using an extended precision immediate operand. Therefore, # the total instruction length is 16 bytes. iea_dis_immed: mov.l &0x10,%d0 # 16 bytes of instruction @@ -5865,7 +5865,7 @@ denorm_set_stky: rts # # -# dnrm_lp(): normalize exponent/mantissa to specified threshhold # +# dnrm_lp(): normalize exponent/mantissa to specified threshold # # # # INPUT: # # %a0 : points to the operand to be denormalized # @@ -6524,7 +6524,7 @@ unnorm_shift: bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0 # -# exponent would not go < 0. therefore, number stays normalized +# exponent would not go < 0. Therefore, number stays normalized # sub.w %d0, %d1 # shift exponent value mov.w FTEMP_EX(%a0), %d0 # load old exponent @@ -7901,7 +7901,7 @@ fout_pack_type: tst.l %d0 bne.b fout_pack_set # "mantissa" is all zero which means that the answer is zero. but, the '040 -# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore, +# algorithm allows the exponent to be non-zero. the 881/2 do not. Therefore, # if the mantissa is zero, I will zero the exponent, too. # the question now is whether the exponents sign bit is allowed to be non-zero # for a zero, also... @@ -8647,7 +8647,7 @@ fin_sd_unfl_dis: # # operand will underflow AND underflow or inexact is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fin_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -9177,7 +9177,7 @@ fdiv_zero_load_p: # # The destination was In Range and the source was a ZERO. The result, -# therefore, is an INF w/ the proper sign. +# Therefore, is an INF w/ the proper sign. # So, determine the sign and return a new INF (w/ the j-bit cleared). # global fdiv_inf_load # global for fsgldiv @@ -9427,7 +9427,7 @@ fneg_sd_unfl_dis: # # operand will underflow AND underflow is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fneg_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -10042,7 +10042,7 @@ fabs_sd_unfl_dis: # # operand will underflow AND underflow is enabled. -# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fabs_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) @@ -11404,7 +11404,7 @@ fadd_zero_2: # # the ZEROes have opposite signs: -# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP. +# - Therefore, we return +ZERO if the rounding modes are RN,RZ, or RP. # - -ZERO is returned in the case of RM. # fadd_zero_2_chk_rm: @@ -11856,7 +11856,7 @@ fsub_zero_2: # # the ZEROes have the same signs: -# - therefore, we return +ZERO if the rounding mode is RN,RZ, or RP +# - Therefore, we return +ZERO if the rounding mode is RN,RZ, or RP # - -ZERO is returned in the case of RM. # fsub_zero_2_chk_rm: @@ -12124,7 +12124,7 @@ fsqrt_sd_unfl_dis: # # operand will underflow AND underflow is enabled. 
-# therefore, we must return the result rounded to extended precision. +# Therefore, we must return the result rounded to extended precision. # fsqrt_sd_unfl_ena: mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6) diff --git a/arch/m68k/include/asm/bootinfo.h b/arch/m68k/include/asm/bootinfo.h index fb8a06b9ab6..67e7a78ad96 100644 --- a/arch/m68k/include/asm/bootinfo.h +++ b/arch/m68k/include/asm/bootinfo.h @@ -145,7 +145,7 @@ struct bi_record { /* * Macintosh hardware profile data - unused, see macintosh.h for - * resonable type values + * reasonable type values */ #define BI_MAC_VIA1BASE 0x8010 /* Mac VIA1 base address (always present) */ diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c index 6a907c58a4b..cc2108b6b26 100644 --- a/arch/microblaze/lib/memcpy.c +++ b/arch/microblaze/lib/memcpy.c @@ -9,7 +9,7 @@ * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * - * Attempts were made, unsuccesfully, to contact the original + * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c index d4e9f49a71f..0929198c5e6 100644 --- a/arch/microblaze/lib/memmove.c +++ b/arch/microblaze/lib/memmove.c @@ -9,7 +9,7 @@ * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * - * Attempts were made, unsuccesfully, to contact the original + * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c index 941dc8f94b0..4df851d41a2 100644 --- a/arch/microblaze/lib/memset.c +++ b/arch/microblaze/lib/memset.c @@ -9,7 +9,7 @@ * It is based on demo code originally Copyright 2001 by Intel Corp, taken from * http://www.embedded.com/showArticle.jhtml?articleID=19205567 * - * Attempts were made, unsuccesfully, to contact the original + * Attempts were made, unsuccessfully, to contact the original * author of this code (Michael Morrow, Intel). Below is the original * copyright notice. * diff --git a/arch/mips/include/asm/mach-pnx833x/gpio.h b/arch/mips/include/asm/mach-pnx833x/gpio.h index 8de0eb9c98a..ed3a88da70f 100644 --- a/arch/mips/include/asm/mach-pnx833x/gpio.h +++ b/arch/mips/include/asm/mach-pnx833x/gpio.h @@ -24,7 +24,7 @@ /* BIG FAT WARNING: races danger! No protections exist here. Current users are only early init code, - when locking is not needed because no cuncurency yet exists there, + when locking is not needed because no concurrency yet exists there, and GPIO IRQ dispatcher, which does locking. 
However, if many uses will ever happen, proper locking will be needed - including locking between different uses diff --git a/arch/mips/include/asm/sgi/ioc.h b/arch/mips/include/asm/sgi/ioc.h index 343ed15f8dc..57a971904cf 100644 --- a/arch/mips/include/asm/sgi/ioc.h +++ b/arch/mips/include/asm/sgi/ioc.h @@ -164,7 +164,7 @@ struct sgioc_regs { u32 _unused5; u8 _write[3]; volatile u8 write; -#define SGIOC_WRITE_NTHRESH 0x01 /* use 4.5db threshhold */ +#define SGIOC_WRITE_NTHRESH 0x01 /* use 4.5db threshold */ #define SGIOC_WRITE_TPSPEED 0x02 /* use 100ohm TP speed */ #define SGIOC_WRITE_EPSEL 0x04 /* force cable mode: 1=AUI 0=TP */ #define SGIOC_WRITE_EASEL 0x08 /* 1=autoselect 0=manual cable selection */ diff --git a/arch/mips/include/asm/sibyte/sb1250_mac.h b/arch/mips/include/asm/sibyte/sb1250_mac.h index b6faf08ca81..591b9061fd8 100644 --- a/arch/mips/include/asm/sibyte/sb1250_mac.h +++ b/arch/mips/include/asm/sibyte/sb1250_mac.h @@ -212,7 +212,7 @@ #define G_MAC_TXD_WEIGHT1(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1) /* - * MAC Fifo Threshhold registers (Table 9-14) + * MAC Fifo Threshold registers (Table 9-14) * Register: MAC_THRSH_CFG_0 * Register: MAC_THRSH_CFG_1 * Register: MAC_THRSH_CFG_2 diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h index d0c29d4de08..31c76c021bb 100644 --- a/arch/mips/include/asm/sn/sn0/hubio.h +++ b/arch/mips/include/asm/sn/sn0/hubio.h @@ -825,7 +825,7 @@ typedef union iprb_u { struct { u64 rsvd1: 15, error: 1, /* Widget rcvd wr resp pkt w/ error */ - ovflow: 5, /* Over flow count. perf measurement */ + ovflow: 5, /* Overflow count. perf measurement */ fire_and_forget: 1, /* Launch Write without response */ mode: 2, /* Widget operation Mode */ rsvd2: 2, diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 24630fd8ef6..a38e3ee9551 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -1331,7 +1331,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) if (!((asid += ASID_INC) & ASID_MASK) ) { if (cpu_has_vtag_icache) flush_icache_all(); - /* Traverse all online CPUs (hack requires contigous range) */ + /* Traverse all online CPUs (hack requires contiguous range) */ for_each_online_cpu(i) { /* * We don't need to worry about our own CPU, nor those of diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c index b30c5b1f1a2..a2127d685a0 100644 --- a/arch/mips/math-emu/dp_sub.c +++ b/arch/mips/math-emu/dp_sub.c @@ -110,7 +110,7 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y) case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM): DPDNORMX; - /* FAAL THOROUGH */ + /* FALL THROUGH */ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM): /* normalize ym,ye */ diff --git a/arch/mips/txx9/generic/smsc_fdc37m81x.c b/arch/mips/txx9/generic/smsc_fdc37m81x.c index a2b2d62d88e..8ebc3848f3a 100644 --- a/arch/mips/txx9/generic/smsc_fdc37m81x.c +++ b/arch/mips/txx9/generic/smsc_fdc37m81x.c @@ -117,7 +117,7 @@ unsigned long __init smsc_fdc37m81x_init(unsigned long port) if (chip_id == SMSC_FDC37M81X_CHIP_ID) smsc_fdc37m81x_config_end(); else { - printk(KERN_WARNING "%s: unknow chip id 0x%02x\n", __func__, + printk(KERN_WARNING "%s: unknown chip id 0x%02x\n", __func__, chip_id); g_smsc_fdc37m81x_base = 0; } diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h index 1e180a59458..0de404dfee8 100644 --- a/arch/powerpc/include/asm/reg_fsl_emb.h +++ b/arch/powerpc/include/asm/reg_fsl_emb.h @@ -39,7 +39,7 @@ #define 
PMRN_PMLCB2 0x112 /* PM Local Control B2 */ #define PMRN_PMLCB3 0x113 /* PM Local Control B3 */ -#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshhold Multiple Field */ +#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshold Multiple Field */ #define PMLCB_THRESHMUL_SHIFT 8 #define PMLCB_THRESHOLD_MASK 0x003f /* Threshold Field */ diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 641c74bb8e2..b6bd1eaa1c2 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -52,7 +52,7 @@ static struct hard_trap_info { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */ { 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */ { 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */ - { 0x2060, 0x0e /* SIGILL */ }, /* performace monitor */ + { 0x2060, 0x0e /* SIGILL */ }, /* performance monitor */ { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */ { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */ { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */ diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index c3a56d65c5a..a753b72efbc 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c @@ -59,7 +59,7 @@ void set_thresholds(unsigned long cpu) mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID); /* setup THRM2, - * threshold, valid bit, enable interrupts, interrupt when above threshhold + * threshold, valid bit, enable interrupts, interrupt when above threshold */ mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE); #else diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 52c98edcd70..2c9e5226729 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c @@ -1594,7 +1594,7 @@ static void cell_handle_interrupt_spu(struct pt_regs *regs, * to a latch. The new values (interrupt setting bits, reset * counter value etc.) are not copied to the actual registers * until the performance monitor is enabled. In order to get - * this to work as desired, the permormance monitor needs to + * this to work as desired, the performance monitor needs to * be disabled while writing to the latches. This is a * HW design issue. */ @@ -1668,7 +1668,7 @@ static void cell_handle_interrupt_ppu(struct pt_regs *regs, * to a latch. The new values (interrupt setting bits, reset * counter value etc.) are not copied to the actual registers * until the performance monitor is enabled. In order to get - * this to work as desired, the permormance monitor needs to + * this to work as desired, the performance monitor needs to * be disabled while writing to the latches. This is a * HW design issue. 
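The two op_model_cell hunks above describe the same hardware quirk: new PMU settings land in a latch and only reach the real registers once the performance monitor is enabled again, so the monitor has to be disabled around the writes. A rough sketch of that ordering, not taken from the driver; pm_disable(), pm_write_latch(), pm_enable() and the register selectors are hypothetical stand-ins, not the Cell PMU API:

/*
 * Sketch only -- the disable/write/enable ordering the comments above
 * require.  All helpers and register names here are hypothetical.
 */
static void pmu_reprogram_sketch(int cpu, u32 reset_value, u32 int_mask)
{
        pm_disable(cpu);                /* settings written now stay in the latch */
        pm_write_latch(cpu, PMU_CTR_RESET, reset_value);
        pm_write_latch(cpu, PMU_INT_ENABLE, int_mask);
        pm_enable(cpu);                 /* latch contents reach the real registers */
}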
*/ diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c index dd43114e968..da110bd8834 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c @@ -100,7 +100,7 @@ const struct of_device_id mpc52xx_pci_ids[] __initdata = { }; /* ======================================================================== */ -/* PCI configuration acess */ +/* PCI configuration access */ /* ======================================================================== */ static int diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index e81403b245b..ab2027cdf89 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -302,7 +302,7 @@ static void __init setup_chaos(struct pci_controller *hose, * 1 -> Skip the device but act as if the access was successfull * (return 0xff's on reads, eventually, cache config space * accesses in a later version) - * -1 -> Hide the device (unsuccessful acess) + * -1 -> Hide the device (unsuccessful access) */ static int u3_ht_skip_device(struct pci_controller *hose, struct pci_bus *bus, unsigned int devfn) diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index ae3c4db86fe..bafc3f85360 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -160,7 +160,7 @@ static int dart_build(struct iommu_table *tbl, long index, dp = ((unsigned int*)tbl->it_base) + index; - /* On U3, all memory is contigous, so we can move this + /* On U3, all memory is contiguous, so we can move this * out of the loop. */ l = npages; diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c index 3ee78ccb617..cd4e9c168dd 100644 --- a/arch/s390/math-emu/math.c +++ b/arch/s390/math-emu/math.c @@ -2088,7 +2088,7 @@ int math_emu_ldr(__u8 *opcode) { __u16 opc = *((__u16 *) opcode); if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ - /* we got an exception therfore ry can't be in {0,2,4,6} */ + /* we got an exception therefore ry can't be in {0,2,4,6} */ asm volatile( /* load rx from fp_regs.fprs[ry] */ " bras 1,0f\n" " ld 0,0(%1)\n" @@ -2118,7 +2118,7 @@ int math_emu_ler(__u8 *opcode) { __u16 opc = *((__u16 *) opcode); if ((opc & 0x90) == 0) { /* test if rx in {0,2,4,6} */ - /* we got an exception therfore ry can't be in {0,2,4,6} */ + /* we got an exception therefore ry can't be in {0,2,4,6} */ asm volatile( /* load rx from fp_regs.fprs[ry] */ " bras 1,0f\n" " le 0,0(%1)\n" diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h index 9d6684849fd..278441f3985 100644 --- a/arch/x86/include/asm/desc_defs.h +++ b/arch/x86/include/asm/desc_defs.h @@ -12,9 +12,9 @@ #include /* - * FIXME: Acessing the desc_struct through its fields is more elegant, + * FIXME: Accessing the desc_struct through its fields is more elegant, * and should be the one valid thing to do. However, a lot of open code - * still touches the a and b acessors, and doing this allow us to do it + * still touches the a and b accessors, and doing this allow us to do it * incrementally. 
We keep the signature as a struct, rather than an union, * so we can get rid of it transparently in the future -- glommer */ diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h index ede6998bd92..91df7c51806 100644 --- a/arch/x86/include/asm/mmzone_32.h +++ b/arch/x86/include/asm/mmzone_32.h @@ -47,7 +47,7 @@ static inline void resume_map_numa_kva(pgd_t *pgd) {} /* * generic node memory support, the following assumptions apply: * - * 1) memory comes in 64Mb contigious chunks which are either present or not + * 1) memory comes in 64Mb contiguous chunks which are either present or not * 2) we will not have more than 64Gb in total * * for now assume that 64Gb is max amount of RAM for whole system diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 80e2984f521..b414d2b401f 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -55,7 +55,7 @@ #define DESC_STATUS_SOURCE_TIMEOUT 3 /* - * source side threshholds at which message retries print a warning + * source side thresholds at which message retries print a warning */ #define SOURCE_TIMEOUT_LIMIT 20 #define DESTINATION_TIMEOUT_LIMIT 20 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 67e929b8987..1c2c4838d35 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1122,7 +1122,7 @@ static int __init acpi_parse_madt_ioapic_entries(void) if (!acpi_sci_override_gsi) acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0); - /* Fill in identity legacy mapings where no override */ + /* Fill in identity legacy mappings where no override */ mp_config_acpi_legacy_irqs(); count = diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0285521e0a9..42ac5e00099 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1637,7 +1637,7 @@ retry: goto out; /* - * aperture was sucessfully enlarged by 128 MB, try + * aperture was successfully enlarged by 128 MB, try * allocation again */ goto retry; @@ -2396,7 +2396,7 @@ int __init amd_iommu_init_passthrough(void) struct pci_dev *dev = NULL; u16 devid, devid2; - /* allocate passthroug domain */ + /* allocate passthrough domain */ pt_domain = protection_domain_alloc(); if (!pt_domain) return -ENOMEM; diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index b5801c31184..35be5802ac1 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1229,7 +1229,7 @@ x86_perf_event_set_period(struct perf_event *event, return 0; /* - * If we are way outside a reasoable range then just skip forward: + * If we are way outside a reasonable range then just skip forward: */ if (unlikely(left <= -period)) { left = period; diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d2b00..7d377379fa4 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -514,7 +514,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they - * remain disabled thorough out this function. + * remain disabled throughout this function. */ static int __kprobes kprobe_handler(struct pt_regs *regs) { @@ -851,7 +851,7 @@ no_change: /* * Interrupts are disabled on entry as trap1 is an interrupt gate and they - * remain disabled thoroughout this function. + * remain disabled throughout this function. 
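The two kprobes.c comments just above state the same constraint: both handlers are entered through an interrupt gate and run with interrupts disabled throughout, so nothing in them may sleep. A minimal, self-contained kprobe module in the style of samples/kprobes, added only to illustrate that constraint ("do_fork" is just an example symbol):

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Pre-handler: runs with interrupts disabled, so no sleeping calls here. */
static int sketch_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("kprobe hit %s, ip=0x%lx\n", p->symbol_name,
                instruction_pointer(regs));
        return 0;                       /* continue with the probed instruction */
}

static struct kprobe sketch_kp = {
        .symbol_name = "do_fork",       /* example symbol only */
        .pre_handler = sketch_pre,
};

static int __init sketch_init(void)
{
        return register_kprobe(&sketch_kp);
}

static void __exit sketch_exit(void)
{
        unregister_kprobe(&sketch_kp);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");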
*/ static int __kprobes post_kprobe_handler(struct pt_regs *regs) { diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 16ccbd77917..d16d576beeb 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -203,7 +203,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f) */ /* * Interrupts are disabled on entry as trap3 is an interrupt gate - * and they remain disabled thorough out this function. + * and they remain disabled throughout this function. */ int kmmio_handler(struct pt_regs *regs, unsigned long addr) { @@ -302,7 +302,7 @@ no_kmmio: /* * Interrupts are disabled on entry as trap1 is an interrupt gate - * and they remain disabled thorough out this function. + * and they remain disabled throughout this function. * This must always get called as the pair to kmmio_handler(). */ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs) diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index ca564202ed7..58916afbbda 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -28,7 +28,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); * Description: * Add this blk_iopoll structure to the pending poll list and trigger the * raise of the blk iopoll softirq. The driver must already have gotten a - * succesful return from blk_iopoll_sched_prep() before calling this. + * successful return from blk_iopoll_sched_prep() before calling this. **/ void blk_iopoll_sched(struct blk_iopoll *iop) { diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 9ac4e378992..3aadded05a0 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -599,7 +599,7 @@ static const struct ich_laptop ich_laptop[] = { { 0x27DF, 0x1028, 0x02b0 }, /* ICH7 on unknown Dell */ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ - { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unkown HP */ + { 0x27DF, 0x103C, 0x361a }, /* ICH7 on unknown HP */ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ { 0x27DF, 0x152D, 0x0778 }, /* ICH7 on unknown Intel */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on ACER Aspire 2023WLMi */ diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index d344db42a00..0d9d2f20788 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -43,9 +43,9 @@ enum { /* * SATA-FSL host controller supports a max. of (15+1) direct PRDEs, and * chained indirect PRDEs upto a max count of 63. - * We are allocating an array of 63 PRDEs contigiously, but PRDE#15 will + * We are allocating an array of 63 PRDEs contiguously, but PRDE#15 will * be setup as an indirect descriptor, pointing to it's next - * (contigious) PRDE. Though chained indirect PRDE arrays are + * (contiguous) PRDE. Though chained indirect PRDE arrays are * supported,it will be more efficient to use a direct PRDT and * a single chain/link to indirect PRDE array/PRDT. 
*/ @@ -314,7 +314,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, u32 ttl_dwords = 0; /* - * NOTE : direct & indirect prdt's are contigiously allocated + * NOTE : direct & indirect prdt's are contiguously allocated */ struct prde *prd = (struct prde *)&((struct command_desc *) cmd_desc)->prdt; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index b2c1b37ab2e..f734b345ac7 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1132,7 +1132,7 @@ static int rx_pkt(struct atm_dev *dev) IF_ERR(printk(" cause: packet time out\n");) } else { - IF_ERR(printk(" cause: buffer over flow\n");) + IF_ERR(printk(" cause: buffer overflow\n");) } goto out_free_desc; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 979d159b5cd..ee95c76bfd3 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -188,7 +188,7 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); * @dev: device to try to bind to the driver * * This function returns -ENODEV if the device is not registered, - * 1 if the device is bound sucessfully and 0 otherwise. + * 1 if the device is bound successfully and 0 otherwise. * * This function must be called with @dev->sem held. When called for a * USB interface, @dev->parent->sem must be held as well. diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 5b33b85790f..63bfc543679 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -535,7 +535,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) break; default: - BT_ERR("Unknow packet type:%d", type); + BT_ERR("Unknown packet type:%d", type); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload, blksz * buf_block_len); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 4895f0e0532..aa0919386b8 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -214,7 +214,7 @@ static int hci_uart_send_frame(struct sk_buff *skb) struct hci_uart *hu; if (!hdev) { - BT_ERR("Frame for uknown device (hdev=NULL)"); + BT_ERR("Frame for unknown device (hdev=NULL)"); return -ENODEV; } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index a074fceb67d..42e65cf8ab5 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -5,7 +5,7 @@ * * Added devfs support. * Jan-11-1998, C. Scott Ananian - * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar + * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar */ #include diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index 1997270bb6f..ecb89d798e3 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c @@ -248,7 +248,7 @@ static const struct vm_operations_struct mspec_vm_ops = { /* * mspec_mmap * - * Called when mmaping the device. Initializes the vma with a fault handler + * Called when mmapping the device. Initializes the vma with a fault handler * and private data structure necessary to allocate, track, and free the * underlying pages. 
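The mspec_mmap comment above describes a common pattern: mmap() itself only wires up vm_ops and per-mapping private data, and the pages are produced on demand from the .fault handler. A minimal sketch, not taken from mspec.c, using the fault-handler signature of this era of the tree:

#include <linux/fs.h>
#include <linux/mm.h>

/* Sketch only: hand out a fresh page on each fault. */
static int sketch_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = alloc_page(GFP_KERNEL);

        if (!page)
                return VM_FAULT_OOM;
        vmf->page = page;               /* reference is handed to the core VM */
        return 0;
}

static const struct vm_operations_struct sketch_vm_ops = {
        .fault = sketch_vm_fault,
};

/* mmap() just installs the ops and the per-mapping private data. */
static int sketch_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &sketch_vm_ops;
        vma->vm_private_data = file->private_data;
        return 0;
}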
*/ diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c index 6934025a1ac..c1d8b54c816 100644 --- a/drivers/char/n_r3964.c +++ b/drivers/char/n_r3964.c @@ -602,7 +602,7 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c) } break; case R3964_WAIT_FOR_RX_REPEAT: - /* FALLTROUGH */ + /* FALLTHROUGH */ case R3964_IDLE: if (c == STX) { /* Prevent rx_queue from overflow: */ diff --git a/drivers/char/rio/route.h b/drivers/char/rio/route.h index 20ed73f3fd7..46e963771c3 100644 --- a/drivers/char/rio/route.h +++ b/drivers/char/rio/route.h @@ -67,7 +67,7 @@ typedef struct COST_ROUTE COST_ROUTE; struct COST_ROUTE { unsigned char cost; /* Cost down this link */ - unsigned char route[NODE_BYTES]; /* Nodes thorough this route */ + unsigned char route[NODE_BYTES]; /* Nodes through this route */ }; typedef struct ROUTE_STR ROUTE_STR; diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index 5f753fc0873..09ad9154d86 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -863,7 +863,7 @@ static int hifn_init_pubrng(struct hifn_device *dev) dev->dmareg |= HIFN_DMAIER_PUBDONE; hifn_write_1(dev, HIFN_1_DMA_IER, dev->dmareg); - dprintk("Chip %s: Public key engine has been sucessfully " + dprintk("Chip %s: Public key engine has been successfully " "initialised.\n", dev->name); } diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 7585c4164bd..c52ac9efd0b 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -99,7 +99,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, } /** - * atc_desc_get - get a unsused descriptor from free_list + * atc_desc_get - get an unused descriptor from free_list * @atchan: channel we want a new descriptor for */ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan) diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index fddf2b35893..d373d17257e 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -183,7 +183,7 @@ static inline struct fw_node *fw_node(struct list_head *l) * This function builds the tree representation of the topology given * by the self IDs from the latest bus reset. During the construction * of the tree, the function checks that the self IDs are valid and - * internally consistent. On succcess this function returns the + * internally consistent. On success this function returns the * fw_node corresponding to the local card otherwise NULL. */ static struct fw_node *build_tree(struct fw_card *card, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5cae0b3eee9..3f7c500b211 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -272,7 +272,7 @@ EXPORT_SYMBOL(drm_mode_object_find); * functions & device file and adds it to the master fd list. * * RETURNS: - * Zero on success, error code on falure. + * Zero on success, error code on failure. 
*/ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) @@ -2328,7 +2328,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, } else if (connector->funcs->set_property) ret = connector->funcs->set_property(connector, property, out_resp->value); - /* store the property value if succesful */ + /* store the property value if successful */ if (!ret) drm_connector_property_set_value(connector, property, out_resp->value); out: diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index abfc27b0c2e..a2a3fa59992 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1309,7 +1309,7 @@ out_free_list: * i915_gem_release_mmap - remove physical page mappings * @obj: obj in question * - * Preserve the reservation of the mmaping with the DRM core code, but + * Preserve the reservation of the mmapping with the DRM core code, but * relinquish ownership of the pages back to the system. * * It is vital that we remove the page mapping if we have mapped a tiled diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 2b0fe54cd92..40fcf6fdef3 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -70,7 +70,7 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { /** - * Curretly it is assumed that the old framebuffer is reused. + * Currently it is assumed that the old framebuffer is reused. * * LOCKING * caller should hold the mode config lock. diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 083bec2e50f..e7fa3279e2f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2726,7 +2726,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) /* Wrap with our custom algo which switches to DDC mode */ intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; - /* In defaut case sdvo lvds is false */ + /* In default case sdvo lvds is false */ intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); if (intel_sdvo_output_setup(intel_output, diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 609719490ec..00c739c4484 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -389,11 +389,11 @@ int r600_mc_init(struct radeon_device *rdev) * AGP so that GPU can catch out of VRAM/AGP access */ if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) { - /* Enought place before */ + /* Enough place before */ rdev->mc.vram_location = rdev->mc.gtt_location - rdev->mc.mc_vram_size; } else if (tmp > rdev->mc.mc_vram_size) { - /* Enought place after */ + /* Enough place after */ rdev->mc.vram_location = rdev->mc.gtt_location + rdev->mc.gtt_size; } else { diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index b38c4c8e2c6..d10eb43645c 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -59,7 +59,7 @@ static struct fb_ops radeonfb_ops = { }; /** - * Curretly it is assumed that the old framebuffer is reused. + * Currently it is assumed that the old framebuffer is reused. * * LOCKING * caller should hold the mode config lock. 
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 38537d971a3..067167cb39c 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -1950,7 +1950,7 @@ static void radeon_apply_surface_regs(int surf_index, * Note that refcount can be at most 2, since during a free refcount=3 * might mean we have to allocate a new surface which might not always * be available. - * For example : we allocate three contigous surfaces ABC. If B is + * For example : we allocate three contiguous surfaces ABC. If B is * freed, we suddenly need two surfaces to store A and C, which might * not always be available. */ diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 765bd184b6f..5a664000bf7 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -372,7 +372,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, new_mem->mem_type == TTM_PL_SYSTEM) || (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT)) { - /* bind is enought */ + /* bind is enough */ radeon_move_null(bo, new_mem); return 0; } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 595ac638039..9e9826ace30 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -807,11 +807,11 @@ int rv770_mc_init(struct radeon_device *rdev) * AGP so that GPU can catch out of VRAM/AGP access */ if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) { - /* Enought place before */ + /* Enough place before */ rdev->mc.vram_location = rdev->mc.gtt_location - rdev->mc.mc_vram_size; } else if (tmp > rdev->mc.mc_vram_size) { - /* Enought place after */ + /* Enough place after */ rdev->mc.vram_location = rdev->mc.gtt_location + rdev->mc.gtt_size; } else { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c70927ecda2..61c5572d2b9 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -427,7 +427,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, /* * We need to use vmap to get the desired page protection - * or to make the buffer object look contigous. + * or to make the buffer object look contiguous. */ prot = (mem->placement & TTM_PL_FLAG_CACHED) ? 
PAGE_KERNEL : diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c index 36718150b47..e845b75ccee 100644 --- a/drivers/hwmon/adm1029.c +++ b/drivers/hwmon/adm1029.c @@ -432,7 +432,7 @@ static int adm1029_remove(struct i2c_client *client) } /* -function that update the status of the chips (temperature for exemple) +function that update the status of the chips (temperature for example) */ static struct adm1029_data *adm1029_update_device(struct device *dev) { diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index fc36cadf36f..c48a284b831 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -928,7 +928,7 @@ static void lm93_update_client_common(struct lm93_data *data, data->prochot_interval = lm93_read_byte(client, LM93_REG_PROCHOT_INTERVAL); - /* Fan Boost Termperature registers */ + /* Fan Boost Temperature registers */ for (i = 0; i < 4; i++) data->boost[i] = lm93_read_byte(client, LM93_REG_BOOST(i)); diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index 2cd00b5b45b..9fd4a0d3206 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -125,7 +125,7 @@ 0 - no debugging messages 1 - some debugging messages, but none during DMA frame transmission 2 - lots of messages, including during DMA frame transmission - (will cause undeflows if your machine is too slow!) + (will cause underflows if your machine is too slow!) */ #define DV1394_DEBUG_LEVEL 0 diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 4bd39c8af80..37d12e5efa4 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c @@ -381,7 +381,7 @@ static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr = #define IPATH_GPIO_SCL \ (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) -/* keep the code below somewhat more readonable; not used elsewhere */ +/* keep the code below somewhat more readable; not used elsewhere */ #define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \ infinipath_hwe_htclnkabyte1crcerr) #define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr | \ diff --git a/drivers/infiniband/hw/ipath/ipath_sd7220.c b/drivers/infiniband/hw/ipath/ipath_sd7220.c index aa47eb54952..2a68d9f624d 100644 --- a/drivers/infiniband/hw/ipath/ipath_sd7220.c +++ b/drivers/infiniband/hw/ipath/ipath_sd7220.c @@ -614,7 +614,7 @@ static int epb_trans(struct ipath_devdata *dd, u16 reg, u64 i_val, u64 *o_vp) * @wd: Write Data - value to set in register * @mask: ones where data should be spliced into reg. * - * Basic register read/modify/write, with un-needed acesses elided. That is, + * Basic register read/modify/write, with un-needed accesses elided. That is, * a mask of zero will prevent write, while a mask of 0xFF will prevent read. * returns current (presumed, if a write was done) contents of selected * register, or <0 if errors. 
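The ipath_sd7220 kernel-doc above spells out a mask convention for its read/modify/write helper: a mask of zero suppresses the write, a mask of 0xFF makes the read unnecessary, and anything else splices the masked bits of the write data into the register. A sketch of that convention in isolation, not the driver's epb_trans(); rd()/wr() are hypothetical accessors:

#include <linux/types.h>

/*
 * Sketch only -- mask == 0 means "read only", mask == 0xff means
 * "write only", anything else is a read/modify/write splice.
 */
static int rmw_sketch(u8 (*rd)(u16 reg), void (*wr)(u16 reg, u8 val),
                      u16 reg, u8 wd, u8 mask)
{
        u8 val = 0;

        if (mask != 0xff)               /* full mask: the read is not needed */
                val = rd(reg);
        if (mask != 0) {                /* zero mask: the write is suppressed */
                val = (val & ~mask) | (wd & mask);
                wr(reg, val);
        }
        return val;                     /* presumed current register contents */
}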
@@ -989,7 +989,7 @@ static struct rxeq_init { /* Set DFELTHFDR/HDR thresholds */ RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR */ RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */ - /* Set TLTHFDR/HDR theshold */ + /* Set TLTHFDR/HDR threshold */ RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR */ RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR */ /* Set Preamp setting 2 (ZFR/ZCNT) */ diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 219b10397b4..256a00c6aee 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -352,7 +352,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, * anymore, so we do this only if selective signaling is off. * * Further, on 32-bit platforms, we can't use vmap() to make - * the QP buffer virtually contigious. Thus we have to use + * the QP buffer virtually contiguous. Thus we have to use * constant-sized WRs to make sure a WR is always fully within * a single page-sized chunk. * diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index 1c9410d1822..bcc2d30ec24 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -955,7 +955,7 @@ static int __init hp_sdc_init_hppa(struct parisc_device *d) INIT_DELAYED_WORK(&moduleloader_work, request_module_delayed); ret = hp_sdc_init(); - /* after sucessfull initialization give SDC some time to settle + /* after successfull initialization give SDC some time to settle * and then load the hp_sdc_mlc upper layer driver */ if (!ret) schedule_delayed_work(&moduleloader_work, diff --git a/drivers/input/serio/hp_sdc_mlc.c b/drivers/input/serio/hp_sdc_mlc.c index 820e51673b2..7d2b820ef58 100644 --- a/drivers/input/serio/hp_sdc_mlc.c +++ b/drivers/input/serio/hp_sdc_mlc.c @@ -125,7 +125,7 @@ static void hp_sdc_mlc_isr (int irq, void *dev_id, break; default: - printk(KERN_WARNING PREFIX "Unkown HIL Error status (%x)!\n", data); + printk(KERN_WARNING PREFIX "Unknown HIL Error status (%x)!\n", data); break; } diff --git a/drivers/input/touchscreen/atmel-wm97xx.c b/drivers/input/touchscreen/atmel-wm97xx.c index 35377f583e2..a12242f77e2 100644 --- a/drivers/input/touchscreen/atmel-wm97xx.c +++ b/drivers/input/touchscreen/atmel-wm97xx.c @@ -59,7 +59,7 @@ #define ATMEL_WM97XX_AC97C_IRQ (29) #define ATMEL_WM97XX_GPIO_DEFAULT (32+16) /* Pin 16 on port B. */ #else -#error Unkown CPU, this driver only supports AT32AP700X CPUs. +#error Unknown CPU, this driver only supports AT32AP700X CPUs. #endif struct continuous { diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index 8fc3b08deb3..6cdcf2a6e03 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c @@ -14,7 +14,7 @@ * * Notes: * This is a wm97xx extended touch driver to capture touch - * data in a continuous manner on the Intel XScale archictecture + * data in a continuous manner on the Intel XScale architecture * * Features: * - codecs supported:- WM9705, WM9712, WM9713 @@ -131,7 +131,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm) /* When the AC97 queue has been drained we need to allow time * to buffer up samples otherwise we end up spinning polling * for samples. The controller can't have a suitably low - * threashold set to use the notifications it gives. + * threshold set to use the notifications it gives. 
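Both wm97xx hunks around here fix the same comment, which explains a simple polling back-off: once the AC97 sample queue has been drained, sleep for a tick rather than busy-polling, because the controller's watermark cannot be set low enough to rely on its notifications. A rough sketch of that loop shape; fifo_has_sample(), read_sample() and handle_sample() are hypothetical helpers:

/* Sketch only -- back off for a tick when the sample FIFO runs dry. */
static void poll_loop_sketch(void)
{
        for (;;) {
                if (!fifo_has_sample()) {
                        /* let samples accumulate instead of spinning */
                        schedule_timeout_uninterruptible(1);
                        continue;
                }
                handle_sample(read_sample());
        }
}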
*/ schedule_timeout_uninterruptible(1); diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c index 41e4359c277..eca54dbdf49 100644 --- a/drivers/input/touchscreen/zylonite-wm97xx.c +++ b/drivers/input/touchscreen/zylonite-wm97xx.c @@ -96,7 +96,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm) /* When the AC97 queue has been drained we need to allow time * to buffer up samples otherwise we end up spinning polling * for samples. The controller can't have a suitably low - * threashold set to use the notifications it gives. + * threshold set to use the notifications it gives. */ msleep(1); diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c index 3e6d17f42a9..66b7d7a8647 100644 --- a/drivers/isdn/capi/capidrv.c +++ b/drivers/isdn/capi/capidrv.c @@ -830,7 +830,7 @@ static void handle_controller(_cmsg * cmsg) case 0: break; case 1: s = "unknown class"; break; case 2: s = "unknown function"; break; - default: s = "unkown error"; break; + default: s = "unknown error"; break; } if (s) printk(KERN_INFO "capidrv-%d: %s from controller 0x%x function %d: %s\n", diff --git a/drivers/isdn/hardware/eicon/di.c b/drivers/isdn/hardware/eicon/di.c index b029d130eb2..cb14ae3e715 100644 --- a/drivers/isdn/hardware/eicon/di.c +++ b/drivers/isdn/hardware/eicon/di.c @@ -806,7 +806,7 @@ static void xdi_xlog_request (byte Adapter, byte Id, DELIVERY - indication entered isdn_rc function RNR=... - application had returned RNR=... after the look ahead callback - RNum=0 - aplication had not returned any buffer to copy + RNum=0 - application had not returned any buffer to copy this indication and will copy it self COMPLETE - XDI had copied the data to the buffers provided bu the application and is about to issue the diff --git a/drivers/isdn/hardware/eicon/maintidi.c b/drivers/isdn/hardware/eicon/maintidi.c index 23960cb6eaa..e7cfb3b5647 100644 --- a/drivers/isdn/hardware/eicon/maintidi.c +++ b/drivers/isdn/hardware/eicon/maintidi.c @@ -385,7 +385,7 @@ static int SuperTraceMessageInput (void* hLib) { } break; default: - diva_mnt_internal_dprintf (0, DLI_ERR, "Unknon IDI Ind (DMA mode): %02x", Ind); + diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind (DMA mode): %02x", Ind); } p += (this_ind_length+1); total_length -= (4 + this_ind_length); @@ -420,7 +420,7 @@ static int SuperTraceMessageInput (void* hLib) { } break; default: - diva_mnt_internal_dprintf (0, DLI_ERR, "Unknon IDI Ind: %02x", Ind); + diva_mnt_internal_dprintf (0, DLI_ERR, "Unknown IDI Ind: %02x", Ind); } } } diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index fc46a26cb14..a64bb6c67ba 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -721,7 +721,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol) switch (protocol) { case (-1): /* used for init */ bch->state = -1; - /* fall trough */ + /* fall through */ case (ISDN_P_NONE): if (bch->state == ISDN_P_NONE) return 0; /* already in idle state */ diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h index 43efe7358fa..369196adae0 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.h +++ b/drivers/isdn/hardware/mISDN/hfcsusb.h @@ -150,7 +150,7 @@ symbolic(struct hfcusb_symbolic_list list[], const int num) for (i = 0; list[i].name != NULL; i++) if (list[i].num == num) return list[i].name; - return ""; + return ""; } /* USB descriptor need to contain one of the following EndPoint combination: */ diff --git 
a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index de352a17673..09095c74711 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -860,7 +860,7 @@ isar_pump_statev_modem(struct isar_ch *ch, u8 devt) { pr_debug("%s: pump stev GSTN CLEAR\n", ch->is->name); break; default: - pr_info("u%s: nknown pump stev %x\n", ch->is->name, devt); + pr_info("u%s: unknown pump stev %x\n", ch->is->name, devt); break; } } diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index 9de54202c90..ad5831f37d8 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -1086,7 +1086,7 @@ hfc_usb_l2l1(struct hisax_if *my_hisax_if, int pr, void *arg) break; default: DBG(HFCUSB_DBG_STATES, - "HFC_USB: hfc_usb_d_l2l1: unkown state : %#x", pr); + "HFC_USB: hfc_usb_d_l2l1: unknown state : %#x", pr); break; } } diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index 2d14b64202a..0f4ea7d16a1 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c @@ -836,7 +836,7 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count) unsigned short hl; struct sk_buff *skb; /* - * we need to reserve enought space in front of + * we need to reserve enough space in front of * sk_buff. old call to dev_alloc_skb only reserved * 16 bytes, now we are looking what the driver want */ @@ -1326,7 +1326,7 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev) struct sk_buff *new_skb; unsigned short hl; /* - * we need to reserve enought space in front of + * we need to reserve enough space in front of * sk_buff. old call to dev_alloc_skb only reserved * 16 bytes, now we are looking what the driver want. */ @@ -1685,7 +1685,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, * * try to accomplish several tasks: * - reassemble any complete fragment sequence (non-null 'start' - * indicates there is a continguous sequence present) + * indicates there is a contiguous sequence present) * - discard any incomplete sequences that are below minseq -- due * to the fact that sender always increment sequence number, if there * is an incomplete sequence below minseq, no new fragments would diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c index 78f7660c1d0..4c41f191d4e 100644 --- a/drivers/isdn/i4l/isdn_ttyfax.c +++ b/drivers/isdn/i4l/isdn_ttyfax.c @@ -470,7 +470,7 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info) } return 0; } - /* BADMUL=value - dummy 0=disable errorchk disabled (treshold multiplier) */ + /* BADMUL=value - dummy 0=disable errorchk disabled (threshold multiplier) */ if (!strncmp(p[0], "BADMUL", 6)) { p[0] += 6; switch (*p[0]) { diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 77ee2867c8b..43ff4d3b046 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -110,7 +110,7 @@ * crossconnections and conferences via software if not possible through * hardware. If hardware capability is available, hardware is used. * - * Echo: Is generated by CMX and is used to check performane of hard and + * Echo: Is generated by CMX and is used to check performance of hard and * software CMX. 
* * The CMX has special functions for conferences with one, two and more diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c index e04bad6c5ba..6d4da609588 100644 --- a/drivers/isdn/mISDN/tei.c +++ b/drivers/isdn/mISDN/tei.c @@ -725,7 +725,7 @@ tei_id_ver_tout_net(struct FsmInst *fi, int event, void *arg) if (tm->rcnt == 1) { if (*debug & DEBUG_L2_TEI) tm->tei_m.printdebug(fi, - "check req for tei %d sucessful\n", tm->l2->tei); + "check req for tei %d successful\n", tm->l2->tei); mISDN_FsmChangeState(fi, ST_TEI_NOP); } else if (tm->rcnt > 1) { /* duplicate assignment; remove */ diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index 8b9364434aa..3fbe41b0ac0 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -15,7 +15,7 @@ * * WARNING: This driver has only been testen on Apple's * 1.25 MHz Dual G4 (March 03). It is tuned for a CPU - * temperatur around 57 C. + * temperature around 57 C. * * Copyright (C) 2003, 2004 Samuel Rydh (samuel@ibrium.se) * diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c index 7e8f5681599..48cb154c7a4 100644 --- a/drivers/media/common/saa7146_i2c.c +++ b/drivers/media/common/saa7146_i2c.c @@ -98,7 +98,7 @@ static int saa7146_i2c_msg_cleanup(const struct i2c_msg *m, int num, __le32 *op) op_count++; - /* loop throgh all bytes of message i */ + /* loop through all bytes of message i */ for(j = 0; j < m[i].len; j++) { /* write back all bytes that could have been read */ m[i].buf[j] = (le32_to_cpu(op[op_count/3]) >> ((3-(op_count%3))*8)); diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h index 810f07d6324..52e4ce4304e 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.h +++ b/drivers/media/dvb/dvb-core/dvb_frontend.h @@ -160,7 +160,7 @@ struct tuner_state { * search callback possible return status * * DVBFE_ALGO_SEARCH_SUCCESS - * The frontend search algorithm completed and returned succesfully + * The frontend search algorithm completed and returned successfully * * DVBFE_ALGO_SEARCH_ASLEEP * The frontend search algorithm is sleeping diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c index 2ae7f648eff..bb69f3719f9 100644 --- a/drivers/media/dvb/dvb-usb/anysee.c +++ b/drivers/media/dvb/dvb-usb/anysee.c @@ -344,7 +344,7 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap) if (ret) return ret; - err("Unkown Anysee version: %02x %02x %02x. "\ + err("Unknown Anysee version: %02x %02x %02x. "\ "Please report the .", hw_info[0], hw_info[1], hw_info[2]); diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c index eeef50bff4f..5c0126dc1ff 100644 --- a/drivers/media/dvb/dvb-usb/dibusb-mb.c +++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c @@ -242,7 +242,7 @@ static struct dvb_usb_device_properties dibusb1_1_properties = { { &dibusb_dib3000mb_table[9], &dibusb_dib3000mb_table[11], NULL }, { &dibusb_dib3000mb_table[10], &dibusb_dib3000mb_table[12], NULL }, }, - { "Unkown USB1.1 DVB-T device ???? please report the name to the author", + { "Unknown USB1.1 DVB-T device ???? 
please report the name to the author", { &dibusb_dib3000mb_table[13], NULL }, { &dibusb_dib3000mb_table[14], NULL }, }, diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c index edde87c6aa3..6b5ded9e7d5 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c +++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c @@ -259,7 +259,7 @@ int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *d, *state = REMOTE_KEY_REPEAT; break; default: - deb_err("unkown type of remote status: %d\n",keybuf[0]); + deb_err("unknown type of remote status: %d\n",keybuf[0]); break; } return 0; diff --git a/drivers/media/dvb/dvb-usb/usb-urb.c b/drivers/media/dvb/dvb-usb/usb-urb.c index 9da2cc95ca1..f9702e3756b 100644 --- a/drivers/media/dvb/dvb-usb/usb-urb.c +++ b/drivers/media/dvb/dvb-usb/usb-urb.c @@ -56,7 +56,7 @@ static void usb_urb_complete(struct urb *urb) stream->complete(stream, b, urb->actual_length); break; default: - err("unkown endpoint type in completition handler."); + err("unknown endpoint type in completition handler."); return; } usb_submit_urb(urb,GFP_ATOMIC); @@ -228,7 +228,7 @@ int usb_urb_init(struct usb_data_stream *stream, struct usb_data_stream_properti case USB_ISOC: return usb_isoc_urb_init(stream); default: - err("unkown URB-type for data transfer."); + err("unknown URB-type for data transfer."); return -EINVAL; } } diff --git a/drivers/media/dvb/frontends/au8522_decoder.c b/drivers/media/dvb/frontends/au8522_decoder.c index 74981ee923c..7c6431fe33e 100644 --- a/drivers/media/dvb/frontends/au8522_decoder.c +++ b/drivers/media/dvb/frontends/au8522_decoder.c @@ -315,7 +315,7 @@ static void setup_decoder_defaults(struct au8522_state *state, u8 input_mode) if (input_mode == AU8522_INPUT_CONTROL_REG081H_SVIDEO_CH13 || input_mode == AU8522_INPUT_CONTROL_REG081H_SVIDEO_CH24) { /* Despite what the table says, for the HVR-950q we still need - to be in CVBS mode for the S-Video input (reason uknown). */ + to be in CVBS mode for the S-Video input (reason unknown). 
*/ /* filter_coef_type = 3; */ filter_coef_type = 5; } else { diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c index ffbcfabd83f..00a4e8f0330 100644 --- a/drivers/media/dvb/frontends/cx24110.c +++ b/drivers/media/dvb/frontends/cx24110.c @@ -1,4 +1,4 @@ - /* +/* cx24110 - Single Chip Satellite Channel Receiver driver module Copyright (C) 2002 Peter Hettkamp based on @@ -96,7 +96,7 @@ static struct {u8 reg; u8 data;} cx24110_regdata[]= {0x42,0x00}, /* @ middle bytes " */ {0x43,0x00}, /* @ LSB " */ /* leave the carrier tracking loop parameters on default */ - /* leave the bit timing loop parameters at gefault */ + /* leave the bit timing loop parameters at default */ {0x56,0x4d}, /* set the filtune voltage to 2.7V, as recommended by */ /* the cx24108 data sheet for symbol rates above 15MS/s */ {0x57,0x00}, /* @ Filter sigma delta enabled, positive */ diff --git a/drivers/media/dvb/frontends/cx24113.c b/drivers/media/dvb/frontends/cx24113.c index 075b2b57cf0..e9ee55592fd 100644 --- a/drivers/media/dvb/frontends/cx24113.c +++ b/drivers/media/dvb/frontends/cx24113.c @@ -589,7 +589,7 @@ struct dvb_frontend *cx24113_attach(struct dvb_frontend *fe, info("detected CX24113 variant\n"); break; case REV_CX24113: - info("sucessfully detected\n"); + info("successfully detected\n"); break; default: err("unsupported device id: %x\n", state->rev); diff --git a/drivers/media/dvb/frontends/dib3000mb.c b/drivers/media/dvb/frontends/dib3000mb.c index 136b9d2164d..ad4c8cfd809 100644 --- a/drivers/media/dvb/frontends/dib3000mb.c +++ b/drivers/media/dvb/frontends/dib3000mb.c @@ -154,7 +154,7 @@ static int dib3000mb_set_frontend(struct dvb_frontend* fe, case BANDWIDTH_AUTO: return -EOPNOTSUPP; default: - err("unkown bandwidth value."); + err("unknown bandwidth value."); return -EINVAL; } } diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c index 056387b41a8..43971e63baa 100644 --- a/drivers/media/dvb/frontends/lgdt330x.c +++ b/drivers/media/dvb/frontends/lgdt330x.c @@ -479,7 +479,7 @@ static int lgdt3302_read_status(struct dvb_frontend* fe, fe_status_t* status) switch (state->current_modulation) { case QAM_256: case QAM_64: - /* Need to undestand why there are 3 lock levels here */ + /* Need to understand why there are 3 lock levels here */ if ((buf[0] & 0x07) == 0x07) *status |= FE_HAS_CARRIER; break; @@ -520,7 +520,7 @@ static int lgdt3303_read_status(struct dvb_frontend* fe, fe_status_t* status) switch (state->current_modulation) { case QAM_256: case QAM_64: - /* Need to undestand why there are 3 lock levels here */ + /* Need to understand why there are 3 lock levels here */ if ((buf[0] & 0x07) == 0x07) *status |= FE_HAS_CARRIER; else diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c index a04c782fff8..1570669837e 100644 --- a/drivers/media/dvb/frontends/stb0899_drv.c +++ b/drivers/media/dvb/frontends/stb0899_drv.c @@ -1571,7 +1571,7 @@ static enum dvbfe_search stb0899_search(struct dvb_frontend *fe, struct dvb_fron * stb0899_track * periodically check the signal level against a specified * threshold level and perform derotator centering. - * called once we have a lock from a succesful search + * called once we have a lock from a successful search * event. 
* * Will be called periodically called to maintain the diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index 8d65c652ba5..baf3159a3aa 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c @@ -2425,7 +2425,7 @@ static int __devinit av7110_attach(struct saa7146_dev* dev, * use 0x15 to track VPE interrupts - increase by 1 every vpeirq() is called */ saa7146_write(dev, EC1SSR, (0x03<<2) | 3 ); - /* set event counter 1 treshold to maximum allowed value (rEC p55) */ + /* set event counter 1 threshold to maximum allowed value (rEC p55) */ saa7146_write(dev, ECT1R, 0x3fff ); #endif /* Set RPS1 Address register to point to RPS code (r108 p42) */ @@ -2559,7 +2559,7 @@ static int __devinit av7110_attach(struct saa7146_dev* dev, * use 0x15 to track VPE interrupts - increase by 1 every vpeirq() is called */ saa7146_write(dev, EC1SSR, (0x03<<2) | 3 ); - /* set event counter 1 treshold to maximum allowed value (rEC p55) */ + /* set event counter 1 threshold to maximum allowed value (rEC p55) */ saa7146_write(dev, ECT1R, 0x3fff ); #endif /* Setup BUDGETPATCH MAIN RPS1 "program" (p35) */ diff --git a/drivers/media/dvb/ttpci/budget-patch.c b/drivers/media/dvb/ttpci/budget-patch.c index 60136688a9a..9c92f9ddd22 100644 --- a/drivers/media/dvb/ttpci/budget-patch.c +++ b/drivers/media/dvb/ttpci/budget-patch.c @@ -456,7 +456,7 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte // use 0x03 to track RPS1 interrupts - increase by 1 every gpio3 is toggled // use 0x15 to track VPE interrupts - increase by 1 every vpeirq() is called saa7146_write(dev, EC1SSR, (0x03<<2) | 3 ); - // set event counter 1 treshold to maximum allowed value (rEC p55) + // set event counter 1 threshold to maximum allowed value (rEC p55) saa7146_write(dev, ECT1R, 0x3fff ); #endif // Fix VSYNC level diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c index a1239083472..5f79acb56e4 100644 --- a/drivers/media/radio/radio-mr800.c +++ b/drivers/media/radio/radio-mr800.c @@ -28,7 +28,7 @@ * http://av-usbradio.sourceforge.net/index.php * http://sourceforge.net/projects/av-usbradio/ * Latest release of theirs project was in 2005. - * Probably, this driver could be improved trough using their + * Probably, this driver could be improved through using their * achievements (specifications given). * Also, Faidon Liambotis wrote nice driver for this radio * in 2007. He allowed to use his driver to improve current mr800 radio driver. diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c index 28f48f41f21..c2174413ab2 100644 --- a/drivers/media/video/cx231xx/cx231xx-avcore.c +++ b/drivers/media/video/cx231xx/cx231xx-avcore.c @@ -2414,9 +2414,11 @@ int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev) cx231xx_info("No ACK after %d msec -GPIO I2C failed!", nInit * 10); - /* readAck - throuth clock stretch ,slave has given a SCL signal, - so the SDA data can be directly read. */ + /* + * readAck + * through clock stretch, slave has given a SCL signal, + * so the SDA data can be directly read. 
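The reworked cx231xx comment above describes the standard way an ACK is read on a bit-banged I2C bus: release SCL, wait out any clock stretching by the slave, then sample SDA directly; a low SDA means the slave acknowledged. A generic sketch of that step, not the driver's GPIO code; scl_release(), scl_read() and sda_read() are hypothetical helpers:

/* Sketch only -- read the ACK bit after the slave finishes clock stretching. */
static int i2c_bitbang_read_ack(void)
{
        scl_release();                  /* let SCL float high for the ACK clock */
        while (!scl_read())             /* slave may stretch the clock here */
                cpu_relax();
        return sda_read() == 0;         /* ACK: slave pulls SDA low */
}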
+ */ status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, (u8 *)&dev->gpio_val); if ((dev->gpio_val & 1 << dev->board.tuner_sda_gpio) == 0) { diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c index 45e13ee66dc..16c6a921f40 100644 --- a/drivers/media/video/cx23885/cx23885-dvb.c +++ b/drivers/media/video/cx23885/cx23885-dvb.c @@ -940,7 +940,7 @@ int cx23885_dvb_register(struct cx23885_tsport *port) int err, i; /* Here we need to allocate the correct number of frontends, - * as reflected in the cards struct. The reality is that currrently + * as reflected in the cards struct. The reality is that currently * no cx23885 boards support this - yet. But, if we don't modify this * code then the second frontend would never be allocated (later) * and fail with error before the attach in dvb_register(). diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c index cf634606ba9..b35411160f0 100644 --- a/drivers/media/video/cx88/cx88-core.c +++ b/drivers/media/video/cx88/cx88-core.c @@ -624,7 +624,7 @@ int cx88_reset(struct cx88_core *core) /* setup image format */ cx_andor(MO_COLOR_CTRL, 0x4000, 0x4000); - /* setup FIFO Threshholds */ + /* setup FIFO Thresholds */ cx_write(MO_PDMA_STHRSH, 0x0807); cx_write(MO_PDMA_DTHRSH, 0x0807); diff --git a/drivers/media/video/davinci/dm355_ccdc.c b/drivers/media/video/davinci/dm355_ccdc.c index 56fbefe036a..31439001637 100644 --- a/drivers/media/video/davinci/dm355_ccdc.c +++ b/drivers/media/video/davinci/dm355_ccdc.c @@ -285,7 +285,7 @@ static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam) if ((ccdcparam->med_filt_thres < 0) || (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) { - dev_dbg(dev, "Invalid value of median filter thresold\n"); + dev_dbg(dev, "Invalid value of median filter threshold\n"); return -EINVAL; } diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c index 6d709ca8cfb..453236bd755 100644 --- a/drivers/media/video/davinci/vpss.c +++ b/drivers/media/video/davinci/vpss.c @@ -53,7 +53,7 @@ struct vpss_hw_ops { int (*enable_clock)(enum vpss_clock_sel clock_sel, int en); /* select input to ccdc */ void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel); - /* clear wbl overlflow bit */ + /* clear wbl overflow bit */ int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel); }; diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c index cf3af8de6e9..e39efb45fa1 100644 --- a/drivers/media/video/gspca/sonixb.c +++ b/drivers/media/video/gspca/sonixb.c @@ -304,7 +304,7 @@ static const __u8 initOv6650[] = { }; static const __u8 ov6650_sensor_init[][8] = { - /* Bright, contrast, etc are set througth SCBB interface. + /* Bright, contrast, etc are set through SCBB interface. * AVCAP on win2 do not send any data on this controls. 
*/ /* Anyway, some registers appears to alter bright and constrat */ diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c index fab7ef85a6c..7dbd5eea6cc 100644 --- a/drivers/media/video/gspca/spca500.c +++ b/drivers/media/video/gspca/spca500.c @@ -589,7 +589,7 @@ static void spca500_reinit(struct gspca_dev *gspca_dev) int err; __u8 Data; - /* some unknow command from Aiptek pocket dv and family300 */ + /* some unknown command from Aiptek pocket dv and family300 */ reg_w(gspca_dev, 0x00, 0x0d01, 0x01); reg_w(gspca_dev, 0x00, 0x0d03, 0x00); diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c index b74a34218da..66f9f005614 100644 --- a/drivers/media/video/gspca/spca501.c +++ b/drivers/media/video/gspca/spca501.c @@ -1636,7 +1636,7 @@ static const __u16 spca501c_arowana_init_data[][3] = { {} }; -/* Unknow camera from Ori Usbid 0x0000:0x0000 */ +/* Unknown camera from Ori Usbid 0x0000:0x0000 */ /* Based on snoops from Ori Cohen */ static const __u16 spca501c_mysterious_open_data[][3] = { {0x02, 0x000f, 0x0005}, @@ -1945,7 +1945,7 @@ static int sd_init(struct gspca_dev *gspca_dev) goto error; break; case MystFromOriUnknownCamera: - /* UnKnow Ori CMOS Camera data */ + /* Unknown Ori CMOS Camera data */ if (write_vector(gspca_dev, spca501c_mysterious_open_data)) goto error; break; @@ -1978,7 +1978,7 @@ static int sd_start(struct gspca_dev *gspca_dev) write_vector(gspca_dev, spca501c_arowana_open_data); break; case MystFromOriUnknownCamera: - /* UnKnow CMOS Camera data */ + /* Unknown CMOS Camera data */ write_vector(gspca_dev, spca501c_mysterious_init_data); break; default: diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c index aa8f995ce04..1a9af2ebdbe 100644 --- a/drivers/media/video/gspca/sunplus.c +++ b/drivers/media/video/gspca/sunplus.c @@ -631,7 +631,7 @@ static void spca504A_acknowledged_command(struct gspca_dev *gspca_dev, count = 200; while (--count > 0) { msleep(10); - /* gsmart mini2 write a each wait setting 1 ms is enought */ + /* gsmart mini2 write a each wait setting 1 ms is enough */ /* reg_w_riv(dev, req, idx, val); */ status = reg_r_12(gspca_dev, 0x01, 0x0001, 1); if (status == endcode) { diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c index cdf3357b4c9..49c3c1226e0 100644 --- a/drivers/media/video/gspca/zc3xx.c +++ b/drivers/media/video/gspca/zc3xx.c @@ -7065,7 +7065,7 @@ static int sd_config(struct gspca_dev *gspca_dev, break; default: PDEBUG(D_PROBE, - "Sensor UNKNOW_0 force Tas5130"); + "Sensor UNKNOWN_0 force Tas5130"); sd->sensor = SENSOR_TAS5130CXX; } break; diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h index 5b152ff20bd..5fcad28211d 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h @@ -332,7 +332,7 @@ struct pvr2_hdw { /* Bit mask of PVR2_CVAL_INPUT choices which are valid for the hardware */ unsigned int input_avail_mask; - /* Bit mask of PVR2_CVAL_INPUT choices which are currenly allowed */ + /* Bit mask of PVR2_CVAL_INPUT choices which are currently allowed */ unsigned int input_allowed_mask; /* Location of eeprom or a negative number if none */ diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index 9e3262c0ba3..a4c84368eb1 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c @@ -1985,7 +1985,7 @@ static int save_frame(struct s2255_dev 
*dev, struct s2255_pipeinfo *pipe_info) wake_up(&dev->fw_data->wait_fw); break; default: - printk(KERN_INFO "s2255 unknwn resp\n"); + printk(KERN_INFO "s2255 unknown resp\n"); } default: pdata++; diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h index d439c76b27e..cb1de7ea197 100644 --- a/drivers/media/video/zoran/zoran.h +++ b/drivers/media/video/zoran/zoran.h @@ -106,7 +106,7 @@ struct zoran_params { unsigned long jpeg_markers; /* Which markers should go into the JPEG output. * Unless you exactly know what you do, leave them untouched. * Inluding less markers will make the resulting code - * smaller, but there will be fewer aplications + * smaller, but there will be fewer applications * which can read it. * The presence of the APP and COM marker is * influenced by APP0_len and COM_len ONLY! */ diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index d505b68cd37..e39986a7827 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -940,7 +940,7 @@ static const struct block_device_operations i2o_block_fops = { * Allocate memory for the i2o_block_device struct, gendisk and request * queue and initialize them as far as no additional information is needed. * - * Returns a pointer to the allocated I2O Block device on succes or a + * Returns a pointer to the allocated I2O Block device on success or a * negative error code on failure. */ static struct i2o_block_device *i2o_block_device_alloc(void) diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 27cf4af0e13..e5ab6214150 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c @@ -132,7 +132,7 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) * Removes a previously added pointer from the context list and returns * the matching context id. * - * Returns context id on succes or 0 on failure. + * Returns context id on success or 0 on failure. */ u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr) { @@ -198,7 +198,7 @@ void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) * @c: controller to which the context list belong * @ptr: pointer to which the context id should be fetched * - * Returns context id which matches to the pointer on succes or 0 on + * Returns context id which matches to the pointer on success or 0 on * failure. */ u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr) diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index 41c8fe2a928..ce5eda985ab 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -92,7 +92,7 @@ static void gru_vma_close(struct vm_area_struct *vma) /* * gru_file_mmap * - * Called when mmaping the device. Initializes the vma with a fault handler + * Called when mmapping the device. Initializes the vma with a fault handler * and private data structure necessary to allocate, track, and free the * underlying pages. */ diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 99b74a35102..941a4d35ef8 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1360,7 +1360,7 @@ static struct mmc_host_ops s3cmci_ops = { static struct s3c24xx_mci_pdata s3cmci_def_pdata = { /* This is currently here to avoid a number of if (host->pdata) - * checks. Any zero fields to ensure reaonable defaults are picked. */ + * checks. Any zero fields to ensure reasonable defaults are picked. 
*/ }; #ifdef CONFIG_CPU_FREQ diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 3aa05cd18ea..592016a0668 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -18,7 +18,7 @@ to specify the offset instead of the absolute address NOTE: - With slram it's only possible to map a contigous memory region. Therfore + With slram it's only possible to map a contiguous memory region. Therefore if there's a device mapped somewhere in the region specified slram will fail to load (see kernel log if modprobe fails). diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index e51c1ed7ac1..b126cf88747 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -1056,7 +1056,7 @@ static struct nand_ecclayout doc200x_oobinfo = { }; /* Find the (I)NFTL Media Header, and optionally also the mirror media header. - On sucessful return, buf will contain a copy of the media header for + On successful return, buf will contain a copy of the media header for further processing. id is the string to scan for, and will presumably be either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media header. The page #s of the found media headers are placed in mh0_page and diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index db7ae9d6a29..92320a64327 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c @@ -475,7 +475,7 @@ int __nand_correct_data(unsigned char *buf, * * The b2 shift is there to get rid of the lowest two bits. * We could also do addressbits[b2] >> 1 but for the - * performace it does not make any difference + * performance it does not make any difference */ if (eccsize_mult == 1) byte_addr = (addressbits[b1] << 4) + addressbits[b0]; diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 11dc7e69c4f..68b5b3a486a 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -875,7 +875,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, * @info: The controller instance. * @nmtd: The driver version of the MTD instance. * - * This routine is called after the chip probe has succesfully completed + * This routine is called after the chip probe has successfully completed * and the relevant per-chip information updated. This call ensure that * we update the internal state accordingly. * diff --git a/drivers/net/82596.c b/drivers/net/82596.c index ea6b139b812..1663bc9e45d 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -19,7 +19,7 @@ TBD: * look at deferring rx frames rather than discarding (as per tulip) * handle tx ring full as per tulip - * performace test to tune rx_copybreak + * performance test to tune rx_copybreak Most of my modifications relate to the braindead big-endian implementation by Intel. When the i596 is operating in diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index 4e6359fff0e..766aabfdfc7 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -1633,8 +1633,13 @@ static int amd8111e_enable_link_change(struct amd8111e_priv* lp) readl(lp->mmio + CMD7); return 0; } -/* This function is called when a packet transmission fails to complete within a resonable period, on the assumption that an interrupts have been failed or the interface is locked up. 
This function will reinitialize the hardware */ +/* + * This function is called when a packet transmission fails to complete + * within a reasonable period, on the assumption that an interrupt have + * failed or the interface is locked up. This function will reinitialize + * the hardware. + */ static void amd8111e_tx_timeout(struct net_device *dev) { struct amd8111e_priv* lp = netdev_priv(dev); diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index b5dc7f55072..9d828aed968 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -120,7 +120,7 @@ static int irq = 5; /* Default IRQ */ * DAYNA driver mode: * Dayna DL2000/DaynaTalk PC (Half Length), COPS LT-95, * Farallon PhoneNET PC III, Farallon PhoneNET PC II - * Other cards possibly supported mode unkown though: + * Other cards possibly supported mode unknown though: * Dayna DL2000 (Full length), COPS LT/M (Micro-Channel) * * Cards NOT supported by this driver but supported by the ltpc.c diff --git a/drivers/net/ariadne.h b/drivers/net/ariadne.h index bb613f292e0..727be5cdd1e 100644 --- a/drivers/net/ariadne.h +++ b/drivers/net/ariadne.h @@ -244,7 +244,7 @@ struct Am79C960 { #define DLNKTST 0x0010 /* Disable Link Status */ #define DAPC 0x0008 /* Disable Automatic Polarity Correction */ #define MENDECL 0x0004 /* MENDEC Loopback Mode */ -#define LRTTSEL 0x0002 /* Low Receive Treshold/Transmit Mode Select */ +#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */ #define PORTSEL1 0x0001 /* Port Select Bits */ #define PORTSEL2 0x8000 /* Port Select Bits */ #define INTL 0x4000 /* Internal Loopback */ diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 1372e9a99f5..96506eacc13 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -1543,7 +1543,7 @@ static irqreturn_t atl1c_intr(int irq, void *data) if (status & ISR_OVER) if (netif_msg_intr(adapter)) dev_warn(&pdev->dev, - "TX/RX over flow (status = 0x%x)\n", + "TX/RX overflow (status = 0x%x)\n", status & ISR_OVER); /* link event */ diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 49953787e41..f0bb62b5ca9 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -435,7 +435,7 @@ enum be_if_flags { * filtering capabilities. */ struct be_cmd_req_if_create { struct be_cmd_req_hdr hdr; - u32 version; /* ignore currntly */ + u32 version; /* ignore currently */ u32 capability_flags; u32 enable_flags; u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 1f941f02771..02a0908707e 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1876,7 +1876,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) goto fw_exit; } - dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n"); + dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); fw_exit: release_firmware(fw); diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h index aa76cbada5e..732eafdeb0f 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x_reg.h @@ -2536,7 +2536,7 @@ /* [RC 1] A flag to indicate that overflow error occurred in one of the queues. 
*/ #define QM_REG_OVFERROR 0x16805c -/* [RC 7] the Q were the qverflow occurs */ +/* [RC 7] the Q where the overflow occurs */ #define QM_REG_OVFQNUM 0x168058 /* [R 16] Pause state for physical queues 15-0 */ #define QM_REG_PAUSESTATE0 0x168410 diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index f86612857a7..56ba872be9c 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -1285,7 +1285,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) /* * We do not use Tx completion interrupts to free DMAd Tx packets. - * This is good for performamce but means that we rely on new Tx + * This is good for performance but means that we rely on new Tx * packets arriving to run the destructors of completed packets, * which open up space in their sockets' send queues. Sometimes * we do not get such new packets causing Tx to stall. A single diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index d7688522336..75b099ce49c 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -118,7 +118,7 @@ doit: ret = ehea_set_portspeed(port, sp); if (!ret) - ehea_info("%s: Port speed succesfully set: %dMbps " + ehea_info("%s: Port speed successfully set: %dMbps " "%s Duplex", port->netdev->name, port->port_speed, port->full_duplex == 1 ? "Full" : "Half"); @@ -134,7 +134,7 @@ static int ehea_nway_reset(struct net_device *dev) ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); if (!ret) - ehea_info("%s: Port speed succesfully set: %dMbps " + ehea_info("%s: Port speed successfully set: %dMbps " "%s Duplex", port->netdev->name, port->port_speed, port->full_duplex == 1 ? "Full" : "Half"); diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index ed60fd66427..0cab992b3d1 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -35,7 +35,7 @@ * driver only supports standard serial hardware (8250, 16450, 16550A) * * This modem usually draws its supply current out of the otherwise unused - * TXD pin of the serial port. Thus a contignuous stream of 0x00-bytes + * TXD pin of the serial port. Thus a contiguous stream of 0x00-bytes * is transmitted to achieve a positive supply voltage. * * hsk: This is a 4800 baud FSK modem, designed for TNC use. It works fine diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index aa7286bc436..8739ba850f8 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -1384,7 +1384,7 @@ static inline void veth_build_dma_list(struct dma_chunk *list, unsigned long done; int i = 1; - /* FIXME: skbs are continguous in real addresses. Do we + /* FIXME: skbs are contiguous in real addresses. Do we * really need to break it into PAGE_SIZE chunks, or can we do * it just at the granularity of iSeries real->absolute * mapping? Indeed, given the way the allocator works, can we diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c index a0c578585a5..b77238dbafb 100644 --- a/drivers/net/lasi_82596.c +++ b/drivers/net/lasi_82596.c @@ -47,7 +47,7 @@ TBD: * look at deferring rx frames rather than discarding (as per tulip) * handle tx ring full as per tulip - * performace test to tune rx_copybreak + * performance test to tune rx_copybreak Most of my modifications relate to the braindead big-endian implementation by Intel. 
When the i596 is operating in diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 51e11c3e53e..c0dbfc185b5 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -47,7 +47,7 @@ TBD: * look at deferring rx frames rather than discarding (as per tulip) * handle tx ring full as per tulip - * performace test to tune rx_copybreak + * performance test to tune rx_copybreak Most of my modifications relate to the braindead big-endian implementation by Intel. When the i596 is operating in diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c index 03b781a7a18..829b9ec9ff6 100644 --- a/drivers/net/mlx4/en_rx.c +++ b/drivers/net/mlx4/en_rx.c @@ -204,7 +204,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv, en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); dma = be64_to_cpu(rx_desc->data[nr].addr); - en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma); + en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma); pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, PCI_DMA_FROMDEVICE); put_page(skb_frags[nr].page); diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 8c7279965b4..3d1396af946 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c @@ -47,7 +47,7 @@ enum { static int inline_thold __read_mostly = MAX_INLINE; module_param_named(inline_thold, inline_thold, int, 0444); -MODULE_PARM_DESC(inline_thold, "treshold for using inline data"); +MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 size, diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index 4376147b0ea..82c3ebc584e 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h @@ -162,7 +162,7 @@ enum { #define MLX4_EN_DEF_RX_PAUSE 1 #define MLX4_EN_DEF_TX_PAUSE 1 -/* Interval between sucessive polls in the Tx routine when polling is used +/* Interval between successive polls in the Tx routine when polling is used instead of interrupts (in per-core Tx rings) - should be power of 2 */ #define MLX4_EN_TX_POLL_MODER 16 #define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4) diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index b211613e9db..86fde1a90a5 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -296,7 +296,7 @@ static void gelic_card_reset_chain(struct gelic_card *card, * @card: card structure * @descr: descriptor to re-init * - * return 0 on succes, <0 on failure + * return 0 on success, <0 on failure * * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. * Activate the descriptor state-wise diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index c072f7f36ac..9d94a141555 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -1760,7 +1760,7 @@ static int sis900_rx(struct net_device *net_dev) sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - /* refill the Rx buffer, what if there is not enought + /* refill the Rx buffer, what if there is not enough * memory for new socket buffer ?? 
*/ if ((skb = dev_alloc_skb(RX_BUF_SIZE)) == NULL) { /* @@ -1775,7 +1775,7 @@ static int sis900_rx(struct net_device *net_dev) } /* This situation should never happen, but due to - some unknow bugs, it is possible that + some unknown bugs, it is possible that we are working on NULL sk_buff :-( */ if (sis_priv->rx_skbuff[entry] == NULL) { if (netif_msg_rx_err(sis_priv)) diff --git a/drivers/net/skfp/h/smc.h b/drivers/net/skfp/h/smc.h index 1758d954836..026a83b9f74 100644 --- a/drivers/net/skfp/h/smc.h +++ b/drivers/net/skfp/h/smc.h @@ -393,10 +393,10 @@ struct smt_config { */ u_long mac_d_max ; /* MAC : D_Max timer value */ - u_long lct_short ; /* LCT : error threshhold */ - u_long lct_medium ; /* LCT : error threshhold */ - u_long lct_long ; /* LCT : error threshhold */ - u_long lct_extended ; /* LCT : error threshhold */ + u_long lct_short ; /* LCT : error threshold */ + u_long lct_medium ; /* LCT : error threshold */ + u_long lct_long ; /* LCT : error threshold */ + u_long lct_extended ; /* LCT : error threshold */ } ; #ifdef DEBUG diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index b27156eaf26..db216a72850 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -1002,7 +1002,7 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } break; default: - printk("ioctl for %s: unknow cmd: %04x\n", dev->name, ioc.cmd); + printk("ioctl for %s: unknown cmd: %04x\n", dev->name, ioc.cmd); status = -EOPNOTSUPP; } // switch diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index ccdd196f529..4a00940d0a5 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -816,7 +816,7 @@ static int smsc911x_mii_probe(struct net_device *dev) SMSC_TRACE(HW, "Passed Loop Back Test"); #endif /* USE_PHY_WORK_AROUND */ - SMSC_TRACE(HW, "phy initialised succesfully"); + SMSC_TRACE(HW, "phy initialised successfully"); return 0; } diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h index b5716bd8a59..016360c65ce 100644 --- a/drivers/net/smsc911x.h +++ b/drivers/net/smsc911x.h @@ -30,7 +30,7 @@ #define SMSC_NAPI_WEIGHT 16 /* implements a PHY loopback test at initialisation time, to ensure a packet - * can be succesfully looped back */ + * can be successfully looped back */ #define USE_PHY_WORK_AROUND #define DPRINTK(nlevel, klevel, fmt, args...) \ diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 90e663f4515..40b51e6bc77 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -409,7 +409,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) * @card: card structure * @descr: descriptor to re-init * - * Return 0 on succes, <0 on failure. + * Return 0 on success, <0 on failure. * * Allocates a new rx skb, iommu-maps it and attaches it to the * descriptor. Mark the descriptor as activated, ready-to-use. 
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c index b624bb5bae0..52586ee6895 100644 --- a/drivers/net/stmmac/gmac.c +++ b/drivers/net/stmmac/gmac.c @@ -112,7 +112,7 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, " (threshold = %d)\n", txmode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; - /* Set the transmit threashold */ + /* Set the transmit threshold */ if (txmode <= 32) csr6 |= DMA_CONTROL_TTC_32; else if (txmode <= 64) diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h index 684a363120a..2e82d6c9a14 100644 --- a/drivers/net/stmmac/gmac.h +++ b/drivers/net/stmmac/gmac.h @@ -154,14 +154,14 @@ enum rx_tx_priority_ratio { #define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */ #define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ #define DMA_CONTROL_DFF 0x01000000 /* Disaable flushing */ -/* Theshold for Activating the FC */ +/* Threshold for Activating the FC */ enum rfa { act_full_minus_1 = 0x00800000, act_full_minus_2 = 0x00800200, act_full_minus_3 = 0x00800400, act_full_minus_4 = 0x00800600, }; -/* Theshold for Deactivating the FC */ +/* Threshold for Deactivating the FC */ enum rfd { deac_full_minus_1 = 0x00400000, deac_full_minus_2 = 0x00400800, diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c index ebda61bc4c2..78e12b5e3ac 100644 --- a/drivers/net/tokenring/smctr.c +++ b/drivers/net/tokenring/smctr.c @@ -426,7 +426,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev) smctr_malloc(dev, 1L); /* Allocate Non-MAC receive data buffers. - * To guarantee a minimum of 256 contigous memory to + * To guarantee a minimum of 256 contiguous memory to * UM_Receive_Packet's lookahead pointer, before a page * change or ring end is encountered, place each rx buffer on * a 256 byte boundary. 
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 4469f2451a6..5e9adbaf674 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -3798,7 +3798,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma prop = of_get_property(np, "tx-clock", NULL); if (!prop) { printk(KERN_ERR - "ucc_geth: mising tx-clock-name property\n"); + "ucc_geth: missing tx-clock-name property\n"); return -EINVAL; } if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 03a6ca016d5..a007e2acf65 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -80,16 +80,16 @@ struct ucc_geth { frames) received that were between 128 (Including FCS length==4) and 255 octets */ u32 txok; /* Total number of octets residing in frames - that where involved in succesfull + that where involved in successfull transmission */ u16 txcf; /* Total number of PAUSE control frames transmitted by this MAC */ u8 res4[0x2]; u32 tmca; /* Total number of frames that were transmitted - succesfully with the group address bit set + successfully with the group address bit set that are not broadcast frames */ u32 tbca; /* Total number of frames transmitted - succesfully that had destination address + successfully that had destination address field equal to the broadcast address */ u32 rxfok; /* Total number of frames received OK */ u32 rxbok; /* Total number of octets received OK */ @@ -98,9 +98,9 @@ struct ucc_geth { HW because it includes octets in frames that never even reach the UCC */ u32 rmca; /* Total number of frames that were received - succesfully with the group address bit set + successfully with the group address bit set that are not broadcast frames */ - u32 rbca; /* Total number of frames received succesfully + u32 rbca; /* Total number of frames received successfully that had destination address equal to the broadcast address */ u32 scar; /* Statistics carry register */ @@ -759,15 +759,15 @@ struct ucc_geth_hardware_statistics { frames) received that were between 128 (Including FCS length==4) and 255 octets */ u32 txok; /* Total number of octets residing in frames - that where involved in succesfull + that where involved in successfull transmission */ u16 txcf; /* Total number of PAUSE control frames transmitted by this MAC */ u32 tmca; /* Total number of frames that were transmitted - succesfully with the group address bit set + successfully with the group address bit set that are not broadcast frames */ u32 tbca; /* Total number of frames transmitted - succesfully that had destination address + successfully that had destination address field equal to the broadcast address */ u32 rxfok; /* Total number of frames received OK */ u32 rxbok; /* Total number of octets received OK */ @@ -776,9 +776,9 @@ struct ucc_geth_hardware_statistics { HW because it includes octets in frames that never even reach the UCC */ u32 rmca; /* Total number of frames that were received - succesfully with the group address bit set + successfully with the group address bit set that are not broadcast frames */ - u32 rbca; /* Total number of frames received succesfully + u32 rbca; /* Total number of frames received successfully that had destination address equal to the broadcast address */ } __attribute__ ((packed)); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index c6c922247d0..0c3c738d741 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -748,7 +748,7 @@ static int 
smsc95xx_phy_initialize(struct usbnet *dev) mii_nway_restart(&dev->mii); if (netif_msg_ifup(dev)) - devdbg(dev, "phy initialised succesfully"); + devdbg(dev, "phy initialised successfully"); return 0; } diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 7ea71b33d2e..ee784e091f6 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -927,7 +927,7 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, sc->lmc_media = &lmc_t1_media; break; default: - printk(KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); + printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name); break; } diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 07c32e68909..99d27473ba3 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1114,7 +1114,7 @@ error: * device. See the file header for the format. Run all checks on the * buffer header, then run over each payload's descriptors, verify * their consistency and act on each payload's contents. If - * everything is succesful, update the device's statistics. + * everything is successful, update the device's statistics. * * Note: You need to set the skb to contain only the length of the * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE). diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 1a039f2bd73..6f04cc758dc 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c @@ -117,7 +117,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah, /* * This code is used to optimize rf gain on different environments - * (temprature mostly) based on feedback from a power detector. + * (temperature mostly) based on feedback from a power detector. * * It's only used on RF5111 and RF5112, later RF chips seem to have * auto adjustment on hw -notice they have a much smaller BANK 7 and @@ -2675,7 +2675,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah, /* Fill curves in reverse order * from lower power (max gain) * to higher power. Use curve -> idx - * backmaping we did on eeprom init */ + * backmapping we did on eeprom init */ u8 idx = pdg_curve_to_idx[pdg]; /* Grab the needed curves by index */ @@ -2777,7 +2777,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah, /* Now we have a set of curves for this * channel on tmpL (x range is table_max - table_min * and y values are tmpL[pdg][]) sorted in the same - * order as EEPROM (because we've used the backmaping). + * order as EEPROM (because we've used the backmapping). * So for RF5112 it's from higher power to lower power * and for RF2413 it's from lower power to higher power. * For RF5111 we only have one curve. */ diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 1895d63aad0..0a35ee62a02 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -969,7 +969,7 @@ static bool ath_rc_update_per(struct ath_softc *sc, * Since this probe succeeded, we allow the next * probe twice as soon. This allows the maxRate * to move up faster if the probes are - * succesful. + * successful. 
*/ ath_rc_priv->probe_time = now_msec - rate_table->probe_interval / 2; diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index a741d37fd96..e1b33002320 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -551,7 +551,7 @@ static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, /* get number of entries */ field_count = *(((u16 *) & field_info) + 1); - /* abort if no enought memory */ + /* abort if no enough memory */ total_length = field_len * field_count; if (total_length > *len) { *len = total_length; @@ -3044,7 +3044,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) IPW_MAX_BDS)) { /* TODO: Support merging buffers if more than * IPW_MAX_BDS are used */ - IPW_DEBUG_INFO("%s: Maximum BD theshold exceeded. " + IPW_DEBUG_INFO("%s: Maximum BD threshold exceeded. " "Increase fragmentation level.\n", priv->net_dev->name); } @@ -6823,7 +6823,7 @@ static int ipw2100_wx_get_range(struct net_device *dev, range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */ - /* TODO: Find real 'good' to 'bad' threshol value for RSSI */ + /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM; range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 9b0f2c0646e..b2aa960b834 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -787,7 +787,7 @@ static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len) /* get number of entries */ field_count = *(((u16 *) & field_info) + 1); - /* abort if not enought memory */ + /* abort if not enough memory */ total_len = field_len * field_count; if (total_len > *len) { *len = total_len; @@ -7751,7 +7751,7 @@ static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv, case SEC_LEVEL_0: break; default: - printk(KERN_ERR "Unknow security level %d\n", + printk(KERN_ERR "Unknown security level %d\n", priv->ieee->sec.level); break; } @@ -8917,7 +8917,7 @@ static int ipw_wx_get_range(struct net_device *dev, range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 70; - /* TODO: Find real 'good' to 'bad' threshol value for RSSI */ + /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ range->avg_qual.level = 0; /* FIXME to real average level */ range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ @@ -10290,7 +10290,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, case SEC_LEVEL_0: break; default: - printk(KERN_ERR "Unknow security level %d\n", + printk(KERN_ERR "Unknown security level %d\n", priv->ieee->sec.level); break; } diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c index be5b809ec97..20b8a8a2064 100644 --- a/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/ipw2x00/libipw_module.c @@ -199,7 +199,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) ieee->host_decrypt = 1; ieee->host_mc_decrypt = 1; - /* Host fragementation in Open mode. Default is enabled. + /* Host fragmentation in Open mode. Default is enabled. * Note: host fragmentation is always enabled if host encryption * is enabled. For cards can do hardware encryption, they must do * hardware fragmentation as well. 
So we don't need a variable diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c index c430418248b..d13c8853ee8 100644 --- a/drivers/net/wireless/iwmc3200wifi/hal.c +++ b/drivers/net/wireless/iwmc3200wifi/hal.c @@ -411,7 +411,7 @@ static void iwm_build_lmac_hdr(struct iwm_priv *iwm, struct iwm_lmac_hdr *hdr, /* * iwm_hal_send_host_cmd(): sends commands to the UMAC or the LMAC. * Sending command to the LMAC is equivalent to sending a - * regular UMAC command with the LMAC passtrough or the LMAC + * regular UMAC command with the LMAC passthrough or the LMAC * wrapper UMAC command IDs. */ int iwm_hal_send_host_cmd(struct iwm_priv *iwm, diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 771a301003c..8ddb51a2a97 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -1448,7 +1448,7 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm, kfree_skb(packet->skb); break; default: - IWM_ERR(iwm, "Unknow ticket action: %d\n", + IWM_ERR(iwm, "Unknown ticket action: %d\n", le16_to_cpu(ticket_node->ticket->action)); kfree_skb(packet->skb); } diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 485a8d40652..afe6abecc04 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -934,7 +934,7 @@ static int if_sdio_probe(struct sdio_func *func, } if (i == ARRAY_SIZE(if_sdio_models)) { - lbs_pr_err("unkown card model 0x%x\n", card->model); + lbs_pr_err("unknown card model 0x%x\n", card->model); ret = -ENODEV; goto free; } diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index bc08464d832..f7f5c793514 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -1897,7 +1897,7 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info, return 0; } -/* Setting policy also clears the MAC acl, even if we don't change the defaut +/* Setting policy also clears the MAC acl, even if we don't change the default * policy */ @@ -2323,7 +2323,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, case DOT11_OID_BEACON: send_formatted_event(priv, - "Received a beacon from an unkown AP", + "Received a beacon from an unknown AP", mlme, 0); break; diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h index ccd644104ad..aced0577569 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.h +++ b/drivers/net/wireless/rt2x00/rt2400pci.h @@ -35,7 +35,7 @@ /* * Signal information. - * Defaul offset is required for RSSI <-> dBm conversion. + * Default offset is required for RSSI <-> dBm conversion. */ #define DEFAULT_RSSI_OFFSET 100 diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h index 54d37957883..3db9041838a 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.h +++ b/drivers/net/wireless/rt2x00/rt2500pci.h @@ -46,7 +46,7 @@ /* * Signal information. - * Defaul offset is required for RSSI <-> dBm conversion. + * Default offset is required for RSSI <-> dBm conversion. */ #define DEFAULT_RSSI_OFFSET 121 diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h index b01edca4258..d3000827883 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/rt2x00/rt2500usb.h @@ -46,7 +46,7 @@ /* * Signal information. 
- * Defaul offset is required for RSSI <-> dBm conversion. + * Default offset is required for RSSI <-> dBm conversion. */ #define DEFAULT_RSSI_OFFSET 120 diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h index 93eb699165c..77b5116f549 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.h +++ b/drivers/net/wireless/rt2x00/rt61pci.h @@ -37,7 +37,7 @@ /* * Signal information. - * Defaul offset is required for RSSI <-> dBm conversion. + * Default offset is required for RSSI <-> dBm conversion. */ #define DEFAULT_RSSI_OFFSET 120 diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h index 81fe0be51c4..e194332dac5 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.h +++ b/drivers/net/wireless/rt2x00/rt73usb.h @@ -37,7 +37,7 @@ /* * Signal information. - * Defaul offset is required for RSSI <-> dBm conversion. + * Default offset is required for RSSI <-> dBm conversion. */ #define DEFAULT_RSSI_OFFSET 120 diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 431a20ec6db..b3b0b5b685c 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -4011,7 +4011,7 @@ wavelan_interrupt(int irq, #endif /* Prevent reentrancy. We need to do that because we may have - * multiple interrupt handler running concurently. + * multiple interrupt handler running concurrently. * It is safe because interrupts are disabled before aquiring * the spinlock. */ spin_lock(&lp->spinlock); diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 6d666359a42..2b7f9659437 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -312,7 +312,7 @@ static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, * zd_mac_tx_failed - callback for failed frames * @dev: the mac80211 wireless device * - * This function is called if a frame couldn't be succesfully be + * This function is called if a frame couldn't be successfully be * transferred. The first frame from the tx queue, will be selected and * reported as error to the upper layers. */ diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index a6b4a5a53d4..f511e70d454 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -650,7 +650,7 @@ ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt) * Mark the I/O Pdir entries invalid and blow away the corresponding I/O * TLB entries. * - * FIXME: at some threshhold it might be "cheaper" to just blow + * FIXME: at some threshold it might be "cheaper" to just blow * away the entire I/O TLB instead of individual entries. * * FIXME: Uturn has 256 TLB entries. We don't need to purge every diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index d93108d148f..36db20a8f89 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -6533,7 +6533,7 @@ static struct ibm_struct volume_driver_data = { * The speeds are stored on handles * (FANA:FAN9), (FANC:FANB), (FANE:FAND). 
* - * There are three default speed sets, acessible as handles: + * There are three default speed sets, accessible as handles: * FS1L,FS1M,FS1H; FS2L,FS2M,FS2H; FS3L,FS3M,FS3H * * ACPI DSDT switches which set is in use depending on various diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c index 87b4f49a525..a5135ebe5f0 100644 --- a/drivers/pnp/pnpbios/rsparser.c +++ b/drivers/pnp/pnpbios/rsparser.c @@ -191,7 +191,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(struct pnp_dev *dev, return (unsigned char *)p; break; - default: /* an unkown tag */ + default: /* an unknown tag */ len_err: dev_err(&dev->dev, "unknown tag %#x length %d\n", tag, len); @@ -405,7 +405,7 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end, case SMALL_TAG_END: return p + 2; - default: /* an unkown tag */ + default: /* an unknown tag */ len_err: dev_err(&dev->dev, "unknown tag %#x length %d\n", tag, len); @@ -475,7 +475,7 @@ static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p, return (unsigned char *)p; break; - default: /* an unkown tag */ + default: /* an unknown tag */ len_err: dev_err(&dev->dev, "unknown tag %#x length %d\n", tag, len); @@ -744,7 +744,7 @@ static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev return (unsigned char *)p; break; - default: /* an unkown tag */ + default: /* an unknown tag */ len_err: dev_err(&dev->dev, "unknown tag %#x length %d\n", tag, len); diff --git a/drivers/ps3/ps3-sys-manager.c b/drivers/ps3/ps3-sys-manager.c index 88cb7408861..3cbaf1811bd 100644 --- a/drivers/ps3/ps3-sys-manager.c +++ b/drivers/ps3/ps3-sys-manager.c @@ -46,7 +46,7 @@ /** * struct ps3_sys_manager_header - System manager message header. * @version: Header version, currently 1. - * @size: Header size in bytes, curently 16. + * @size: Header size in bytes, currently 16. * @payload_size: Message payload size in bytes. * @service_id: Message type, one of enum ps3_sys_manager_service_id. * @request_tag: Unique number to identify reply. diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c index ad164056feb..434e92fdb96 100644 --- a/drivers/rtc/rtc-v3020.c +++ b/drivers/rtc/rtc-v3020.c @@ -335,7 +335,7 @@ static int rtc_probe(struct platform_device *pdev) goto err_io; } - /* Make sure frequency measurment mode, test modes, and lock + /* Make sure frequency measurement mode, test modes, and lock * are all disabled */ v3020_set_reg(chip, V3020_STATUS_0, 0x0); diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 097d3846a82..f54b1eec6dd 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -74,7 +74,7 @@ fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) } rc = raw3270_start(view, rq); if (rc == 0) { - /* Started sucessfully. Now wait for completion. */ + /* Started successfully. Now wait for completion. */ wait_event(fp->wait, raw3270_request_final(rq)); } } while (rc == -EACCES); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 8ab51608da5..c268a2e5b7c 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -65,7 +65,7 @@ static void set_chp_logically_online(struct chp_id chpid, int onoff) chpid_to_chp(chpid)->state = onoff; } -/* On succes return 0 if channel-path is varied offline, 1 if it is varied +/* On success return 0 if channel-path is varied offline, 1 if it is varied * online. Return -ENODEV if channel-path is not registered. 
*/ int chp_get_status(struct chp_id chpid) { diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 30f51611130..2985eb43948 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -462,7 +462,7 @@ static struct cmb_area cmb_area = { * block of memory, which can not be moved as long as any channel * is active. Therefore, a maximum number of subchannels needs to * be defined somewhere. This is a module parameter, defaulting to - * a resonable value of 1024, or 32 kb of memory. + * a reasonable value of 1024, or 32 kb of memory. * Current kernels don't allow kmalloc with more than 128kb, so the * maximum is 4096. */ diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 58e583b61e6..aa2b60a868b 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -92,11 +92,11 @@ #define ENVCTRL_CPUTEMP_MON 1 /* cpu temperature monitor */ #define ENVCTRL_CPUVOLTAGE_MON 2 /* voltage monitor */ #define ENVCTRL_FANSTAT_MON 3 /* fan status monitor */ -#define ENVCTRL_ETHERTEMP_MON 4 /* ethernet temperarture */ +#define ENVCTRL_ETHERTEMP_MON 4 /* ethernet temperature */ /* monitor */ #define ENVCTRL_VOLTAGESTAT_MON 5 /* voltage status monitor */ #define ENVCTRL_MTHRBDTEMP_MON 6 /* motherboard temperature */ -#define ENVCTRL_SCSITEMP_MON 7 /* scsi temperarture */ +#define ENVCTRL_SCSITEMP_MON 7 /* scsi temperature */ #define ENVCTRL_GLOBALADDR_MON 8 /* global address */ /* Child device type. diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index f5a9addb705..07ce9bfcdf0 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1491,7 +1491,7 @@ NCR_700_intr(int irq, void *dev_id) unsigned long flags; int handled = 0; - /* Use the host lock to serialise acess to the 53c700 + /* Use the host lock to serialise access to the 53c700 * hardware. Note: In future, we may need to take the queue * lock to enter the done routines. When that happens, we * need to ensure that for this driver, the host lock and the diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index cdbdec9f4fb..83986ed8655 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -526,10 +526,10 @@ struct aac_driver_ident /* * The adapter interface specs all queues to be located in the same - * physically contigous block. The host structure that defines the + * physically contiguous block. The host structure that defines the * commuication queues will assume they are each a separate physically - * contigous memory region that will support them all being one big - * contigous block. + * contiguous memory region that will support them all being one big + * contiguous block. * There is a command and response queue for each level and direction of * commuication. These regions are accessed by both the host and adapter. */ diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index d598eba630d..666d5151d62 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -226,7 +226,7 @@ static int aac_comm_init(struct aac_dev * dev) spin_lock_init(&dev->fib_lock); /* - * Allocate the physically contigous space for the commuication + * Allocate the physically contiguous space for the commuication * queue headers. 
*/ diff --git a/drivers/scsi/aic7xxx/aic79xx.seq b/drivers/scsi/aic7xxx/aic79xx.seq index 3b66b5ae3d9..2fb78e35a9e 100644 --- a/drivers/scsi/aic7xxx/aic79xx.seq +++ b/drivers/scsi/aic7xxx/aic79xx.seq @@ -217,7 +217,7 @@ BEGIN_CRITICAL; scbdma_tohost_done: test CCSCBCTL, CCARREN jz fill_qoutfifo_dmadone; /* - * An SCB has been succesfully uploaded to the host. + * An SCB has been successfully uploaded to the host. * If the SCB was uploaded for some reason other than * bad SCSI status (currently only for underruns), we * queue the SCB for normal completion. Otherwise, we diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 63b521d615f..4d419c155ce 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -2487,7 +2487,7 @@ ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat) /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy - * LED does. SELINGO is only cleared by a sucessfull + * LED does. SELINGO is only cleared by a successfull * selection, so we must manually clear it to insure * the LED turns off just incase no future successful * selections occur (e.g. no devices on the bus). diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 8dfb59d5899..45aa728a76b 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -1733,7 +1733,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat) /* * Although the driver does not care about the * 'Selection in Progress' status bit, the busy - * LED does. SELINGO is only cleared by a sucessfull + * LED does. SELINGO is only cleared by a successfull * selection, so we must manually clear it to insure * the LED turns off just incase no future successful * selections occur (e.g. no devices on the bus). diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h index a000bc4e2d4..bf320412ee2 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h @@ -61,7 +61,7 @@ enum bfa_pport_speed { * Port operational type (in sync with SNIA port type). 
*/ enum bfa_pport_type { - BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unkown */ + BFA_PPORT_TYPE_UNKNOWN = 1, /* port type is unknown */ BFA_PPORT_TYPE_TRUNKED = 2, /* Trunked mode */ BFA_PPORT_TYPE_NPORT = 5, /* P2P with switched fabric */ BFA_PPORT_TYPE_NLPORT = 6, /* public loop */ diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h index 31881d21851..ade763dbc8c 100644 --- a/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h +++ b/drivers/scsi/bfa/include/defs/bfa_defs_tsensor.h @@ -25,7 +25,7 @@ * Temperature sensor status values */ enum bfa_tsensor_status { - BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unkown status */ + BFA_TSENSOR_STATUS_UNKNOWN = 1, /* unknown status */ BFA_TSENSOR_STATUS_FAULTY = 2, /* sensor is faulty */ BFA_TSENSOR_STATUS_BELOW_MIN = 3, /* temperature below mininum */ BFA_TSENSOR_STATUS_NOMINAL = 4, /* normal temperature */ diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index a0e7e711ff9..5be67a6fca9 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -834,7 +834,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba) atomic_read(&hba->resetting) == 0, 60 * HZ); if (atomic_read(&hba->resetting)) { - /* IOP is in unkown state, abort reset */ + /* IOP is in unknown state, abort reset */ printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no); return -1; } diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index bd2f7719744..6486ae4591b 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -56,7 +56,7 @@ * at the same time. * * When discovery succeeds or fails a callback is made to the lport as - * notification. Currently, succesful discovery causes the lport to take no + * notification. Currently, successful discovery causes the lport to take no * action. A failure will cause the lport to reset. There is likely a circular * locking problem with this implementation. */ diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 2e0746d7030..ca25ee5190b 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -1004,7 +1004,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) * iscsi_tcp_task_xmit - xmit normal PDU task * @task: iscsi command task * - * We're expected to return 0 when everything was transmitted succesfully, + * We're expected to return 0 when everything was transmitted successfully, * -EAGAIN if there's still data in the queue, or != 0 for any other kind * of error. */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index e1a30a16a9f..9bd19aa1424 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -654,7 +654,7 @@ lpfc_selective_reset(struct lpfc_hba *phba) * Notes: * Assumes any error from lpfc_selective_reset() will be negative. * If lpfc_selective_reset() returns zero then the length of the buffer - * is returned which indicates succcess + * is returned which indicates success * * Returns: * -EINVAL if the buffer does not contain the string "selective" @@ -3147,7 +3147,7 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr, * sysfs_ctlreg_read - Read method for reading from ctlreg * @kobj: kernel kobject that contains the kernel class device. * @bin_attr: kernel attributes passed to us. - * @buf: if succesful contains the data from the adapter IOREG space. + * @buf: if successful contains the data from the adapter IOREG space. 
* @off: offset into buffer to beginning of data. * @count: bytes to transfer. * diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 45337cd23fe..a14ab4580d4 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -802,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes sucessfully " + "0101 FLOGI completes successfully " "Data: x%x x%x x%x x%x\n", irsp->un.ulpWord[4], sp->cmn.e_d_tov, sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); @@ -4133,7 +4133,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Indicate we are walking fc_rscn_id_list on this vport */ vport->fc_rscn_flush = 1; spin_unlock_irq(shost->host_lock); - /* Get the array count after sucessfully have the token */ + /* Get the array count after successfully have the token */ rscn_cnt = vport->fc_rscn_id_cnt; /* If we are already processing an RSCN, save the received * RSCN payload buffer, cmdiocb->context2 to process later. diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 562d8cee874..82f8ab5c72c 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -645,7 +645,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) * down the SLI Layer. * * Return codes - * 0 - sucess. + * 0 - success. * Any other value - error. **/ static int @@ -700,7 +700,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) * down the SLI Layer. * * Return codes - * 0 - sucess. + * 0 - success. * Any other value - error. **/ static int @@ -755,7 +755,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) * uninitialization after the HBA is reset when bring down the SLI Layer. * * Return codes - * 0 - sucess. + * 0 - success. * Any other value - error. **/ int @@ -1254,7 +1254,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) * routine from the API jump table function pointer from the lpfc_hba struct. * * Return codes - * 0 - sucess. + * 0 - success. * Any other value - error. **/ void @@ -3124,7 +3124,7 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) * PCI devices. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3220,7 +3220,7 @@ lpfc_reset_hba(struct lpfc_hba *phba) * support the SLI-3 HBA device it attached to. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3321,7 +3321,7 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) * support the SLI-4 HBA device it attached to. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3642,7 +3642,7 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) * device specific resource setup to support the HBA device it attached to. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3688,7 +3688,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) * device specific resource setup to support the HBA device it attached to. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3753,7 +3753,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba) * list and set up the IOCB tag array accordingly. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3872,7 +3872,7 @@ lpfc_free_active_sgl(struct lpfc_hba *phba) * list and set up the sgl xritag tag array accordingly. 
* * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -3986,7 +3986,7 @@ out_free_mem: * enabled and the driver is reinitializing the device. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -4146,7 +4146,7 @@ lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) * PCI device data structure is set. * * Return codes - * pointer to @phba - sucessful + * pointer to @phba - successful * NULL - error **/ static struct lpfc_hba * @@ -4202,7 +4202,7 @@ lpfc_hba_free(struct lpfc_hba *phba) * host with it. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -4365,7 +4365,7 @@ lpfc_post_init_setup(struct lpfc_hba *phba) * with SLI-3 interface spec. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -4662,7 +4662,7 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) * this routine. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - could not allocated memory. **/ static int @@ -4761,7 +4761,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) * allocation for the port. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -4861,7 +4861,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) * HBA consistent with the SLI-4 interface spec. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -4910,7 +4910,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) * we just use some constant number as place holder. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -5218,7 +5218,7 @@ out_error: * operation. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -5286,7 +5286,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) * operation. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -5552,7 +5552,7 @@ out_error: * operation. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -5599,7 +5599,7 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) * Later, this can be used for all the slow-path events. * * Return codes - * 0 - sucessful + * 0 - successful * -ENOMEM - No availble memory **/ static int @@ -5760,7 +5760,7 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) * all resources assigned to the PCI function which originates this request. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No availble memory * EIO - The mailbox failed to complete successfully. **/ @@ -5923,7 +5923,7 @@ lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) * with SLI-4 interface spec. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -6052,7 +6052,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) * will be left with MSI-X enabled and leaks its vectors. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -6184,7 +6184,7 @@ lpfc_sli_disable_msix(struct lpfc_hba *phba) * is done in this function. 
* * Return codes - * 0 - sucessful + * 0 - successful * other values - error */ static int @@ -6243,7 +6243,7 @@ lpfc_sli_disable_msi(struct lpfc_hba *phba) * MSI-X -> MSI -> IRQ. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static uint32_t @@ -6333,7 +6333,7 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) * enabled and leaks its vectors. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -6443,7 +6443,7 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) * which is done in this function. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static int @@ -6508,7 +6508,7 @@ lpfc_sli4_disable_msi(struct lpfc_hba *phba) * MSI-X -> MSI -> IRQ. * * Return codes - * 0 - sucessful + * 0 - successful * other values - error **/ static uint32_t diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 43cbe336f1f..42d4f3dae1d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1794,7 +1794,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) */ if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) { - /* Unknow mailbox command compl */ + /* Unknown mailbox command compl */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0323 Unknown Mailbox command " "x%x (x%x) Cmpl\n", @@ -4163,7 +4163,7 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, * addition, this routine gets the port vpd data. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - could not allocated memory. **/ static int @@ -11091,7 +11091,7 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) * sequential. * * Return codes - * 0 - sucessful + * 0 - successful * EIO - The mailbox failed to complete successfully. * When this error occurs, the driver is not guaranteed * to have any rpi regions posted to the device and @@ -11129,7 +11129,7 @@ lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) * maps up to 64 rpi context regions. * * Return codes - * 0 - sucessful + * 0 - successful * ENOMEM - No available memory * EIO - The mailbox failed to complete successfully. **/ @@ -11191,7 +11191,7 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) * PAGE_SIZE modulo 64 rpi context headers. * * Returns - * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful * LPFC_RPI_ALLOC_ERROR if no rpis are available. 
**/ int diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 512c2cc1a33..d310f49d077 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -381,7 +381,7 @@ typedef struct { u8 battery_status; /* * BIT 0: battery module missing * BIT 1: VBAD - * BIT 2: temprature high + * BIT 2: temperature high * BIT 3: battery pack missing * BIT 4,5: * 00 - charge complete diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h index b25b74764ec..ce2487a888e 100644 --- a/drivers/scsi/megaraid/mbox_defs.h +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -497,7 +497,7 @@ typedef struct { * @inserted_drive : channel:Id of inserted drive * @battery_status : bit 0: battery module missing * bit 1: VBAD - * bit 2: temprature high + * bit 2: temperature high * bit 3: battery pack missing * bit 4,5: * 00 - charge complete diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 234f0b7eb21..f9ae8037a71 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -2704,7 +2704,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp) } else { con_log(CL_ANN, (KERN_NOTICE - "megaraid mbox: reset sequence completed sucessfully\n")); + "megaraid mbox: reset sequence completed successfully\n")); } diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 86ab32d7ab1..756e509d495 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2894,7 +2894,7 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) #ifdef CONFIG_SCSI_MPT2SAS_LOGGING /** - * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request + * _scsih_scsi_ioc_info - translated non-successfull SCSI_IO request * @ioc: per adapter object * @scmd: pointer to scsi command object * @mpi_reply: reply mf payload returned from firmware diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index e3c482aa87b..a2d56982830 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -6495,7 +6495,7 @@ static void ncr_int_ma (struct ncb *np) ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids ** bloat for such a should_not_happen situation). ** In all other situation, we reset the BUS. - ** Are these assumptions reasonnable ? (Wait and see ...) + ** Are these assumptions reasonable ? (Wait and see ...) 
*/ unexpected_phase: dsp -= 8; diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index f7c70e2a822..d3d39f86fcf 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3342,7 +3342,7 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode) * @direction : data transfer direction * * Return value - * 0 on sucess, non-zero error code on failure + * 0 on success, non-zero error code on failure */ static int pmcraid_build_passthrough_ioadls( struct pmcraid_cmd *cmd, @@ -3401,7 +3401,7 @@ static int pmcraid_build_passthrough_ioadls( * @direction: data transfer direction * * Return value - * 0 on sucess, non-zero error code on failure + * 0 on success, non-zero error code on failure */ static void pmcraid_release_passthrough_ioadls( struct pmcraid_cmd *cmd, @@ -3429,7 +3429,7 @@ static void pmcraid_release_passthrough_ioadls( * @arg: pointer to pmcraid_passthrough_buffer user buffer * * Return value - * 0 on sucess, non-zero error code on failure + * 0 on success, non-zero error code on failure */ static long pmcraid_ioctl_passthrough( struct pmcraid_instance *pinstance, diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 3441b3f9082..2752b56cad5 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -771,11 +771,11 @@ static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = { {0x01180600, IOASC_LOG_LEVEL_MUST, "Recovered Error, soft media error, sector reassignment suggested"}, {0x015D0000, IOASC_LOG_LEVEL_MUST, - "Recovered Error, failure prediction thresold exceeded"}, + "Recovered Error, failure prediction threshold exceeded"}, {0x015D9200, IOASC_LOG_LEVEL_MUST, - "Recovered Error, soft Cache Card Battery error thresold"}, + "Recovered Error, soft Cache Card Battery error threshold"}, {0x015D9200, IOASC_LOG_LEVEL_MUST, - "Recovered Error, soft Cache Card Battery error thresold"}, + "Recovered Error, soft Cache Card Battery error threshold"}, {0x02048000, IOASC_LOG_LEVEL_MUST, "Not Ready, IOA Reset Required"}, {0x02408500, IOASC_LOG_LEVEL_MUST, diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 723fdecd91b..0fd6ae6911a 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(scsi_nl_send_transport_msg); * @data_buf: pointer to vendor unique data buffer * * Returns: - * 0 on succesful return + * 0 on successful return * otherwise, failing error code * * Notes: diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index fd47cb1bee1..f27e52d963d 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -666,7 +666,7 @@ EXPORT_SYMBOL(sas_phy_add); * * Note: * This function must only be called on a PHY that has not - * sucessfully been added using sas_phy_add(). + * successfully been added using sas_phy_add(). */ void sas_phy_free(struct sas_phy *phy) { @@ -896,7 +896,7 @@ EXPORT_SYMBOL(sas_port_add); * * Note: * This function must only be called on a PORT that has not - * sucessfully been added using sas_port_add(). + * successfully been added using sas_port_add(). 
*/ void sas_port_free(struct sas_port *port) { @@ -1476,7 +1476,7 @@ EXPORT_SYMBOL(sas_rphy_add); * * Note: * This function must only be called on a remote - * PHY that has not sucessfully been added using + * PHY that has not successfully been added using * sas_rphy_add() (or has been sas_rphy_remove()'d) */ void sas_rphy_free(struct sas_rphy *rphy) diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 45374d66d26..2b38f6ad6e1 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1864,7 +1864,7 @@ static pci_ers_result_t sym2_io_slot_dump(struct pci_dev *pdev) * * This routine is similar to sym_set_workarounds(), except * that, at this point, we already know that the device was - * succesfully intialized at least once before, and so most + * successfully intialized at least once before, and so most * of the steps taken there are un-needed here. */ static void sym2_reset_workarounds(struct pci_dev *pdev) diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 297deb817a5..a7bc8b7b09a 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -2692,7 +2692,7 @@ static void sym_int_ma (struct sym_hcb *np) * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids * bloat for such a should_not_happen situation). * In all other situation, we reset the BUS. - * Are these assumptions reasonnable ? (Wait and see ...) + * Are these assumptions reasonable ? (Wait and see ...) */ unexpected_phase: dsp -= 8; diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h index 053e63c8682..5a80cbac3f9 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h @@ -54,7 +54,7 @@ * * SYM_OPT_LIMIT_COMMAND_REORDERING * When this option is set, the driver tries to limit tagged - * command reordering to some reasonnable value. + * command reordering to some reasonable value. * (set for Linux) */ #if 0 diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index d71dfe39894..36ede02ceac 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -361,9 +361,9 @@ static const struct pnp_device_id pnp_dev_table[] = { { "LTS0001", 0 }, /* Rockwell's (PORALiNK) 33600 INT PNP */ { "WCI0003", 0 }, - /* Unkown PnP modems */ + /* Unknown PnP modems */ { "PNPCXXX", UNKNOWN_DEV }, - /* More unkown PnP modems */ + /* More unknown PnP modems */ { "PNPDXXX", UNKNOWN_DEV }, { "", 0 } }; diff --git a/drivers/serial/pmac_zilog.h b/drivers/serial/pmac_zilog.h index 570b0d925e8..f6e77f12acd 100644 --- a/drivers/serial/pmac_zilog.h +++ b/drivers/serial/pmac_zilog.h @@ -73,7 +73,7 @@ static inline struct uart_pmac_port *pmz_get_port_A(struct uart_pmac_port *uap) } /* - * Register acessors. Note that we don't need to enforce a recovery + * Register accessors. Note that we don't need to enforce a recovery * delay on PCI PowerMac hardware, it's dealt in HW by the MacIO chip, * though if we try to use this driver on older machines, we might have * to add it back diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c index 0c08f286a2e..46de564aaea 100644 --- a/drivers/serial/ucc_uart.c +++ b/drivers/serial/ucc_uart.c @@ -313,7 +313,7 @@ static void qe_uart_stop_tx(struct uart_port *port) * This function will attempt to stuff of all the characters from the * kernel's transmit buffer into TX BDs. 
* - * A return value of non-zero indicates that it sucessfully stuffed all + * A return value of non-zero indicates that it successfully stuffed all * characters from the kernel buffer. * * A return value of zero indicates that there are still characters in the diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index 40de151f278..e89304c7256 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -4190,7 +4190,7 @@ static void ixj_aec_start(IXJ *j, int level) ixj_WriteDSPCommand(0x1224, j); ixj_WriteDSPCommand(0xE014, j); - ixj_WriteDSPCommand(0x0003, j); /* Lock threashold at 3dB */ + ixj_WriteDSPCommand(0x0003, j); /* Lock threshold at 3dB */ ixj_WriteDSPCommand(0xE338, j); /* Set Echo Suppresser Attenuation to 0dB */ @@ -4235,7 +4235,7 @@ static void ixj_aec_start(IXJ *j, int level) ixj_WriteDSPCommand(0x1224, j); ixj_WriteDSPCommand(0xE014, j); - ixj_WriteDSPCommand(0x0003, j); /* Lock threashold at 3dB */ + ixj_WriteDSPCommand(0x0003, j); /* Lock threshold at 3dB */ ixj_WriteDSPCommand(0xE338, j); /* Set Echo Suppresser Attenuation to 0dB */ diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index d171b563e94..bba4d3eabe0 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -1958,7 +1958,7 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr) goto bad1; /* FIXME : ADI930 reply wrong preambule (func = 2, sub = 2) to - * the first MEMACESS cmv. Ignore it... + * the first MEMACCESS cmv. Ignore it... */ if (cmv->bFunction != dsc->function) { if (UEA_CHIP_VERSION(sc) == ADI930 diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 2473cf0c6b1..b4bd2411c66 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -1,5 +1,5 @@ /** - * drivers/usb/class/usbtmc.c - USB Test & Measurment class driver + * drivers/usb/class/usbtmc.c - USB Test & Measurement class driver * * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany * Copyright (C) 2008 Novell, Inc. diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index da718e84d58..e80f1af438c 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1890,7 +1890,7 @@ static void cancel_async_set_config(struct usb_device *udev) * routine gets around the normal restrictions by using a work thread to * submit the change-config request. * - * Returns 0 if the request was succesfully queued, error code otherwise. + * Returns 0 if the request was successfully queued, error code otherwise. * The caller has no way to know whether the queued request will eventually * succeed. */ diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index 7953948bfe4..4e3657808b0 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -432,7 +432,7 @@ static void acm_disable(struct usb_function *f) * @length: size of data * Context: irqs blocked, acm->lock held, acm_notify_req non-null * - * Returns zero on sucess or a negative errno. + * Returns zero on success or a negative errno. * * See section 6.3.5 of the CDC 1.1 specification for information * about the only notification we issue: SerialState change. 
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c index 1937d8c7b43..adda1208a1e 100644 --- a/drivers/usb/gadget/pxa27x_udc.c +++ b/drivers/usb/gadget/pxa27x_udc.c @@ -1524,7 +1524,7 @@ static int pxa_udc_get_frame(struct usb_gadget *_gadget) * pxa_udc_wakeup - Force udc device out of suspend * @_gadget: usb gadget * - * Returns 0 if succesfull, error code otherwise + * Returns 0 if successfull, error code otherwise */ static int pxa_udc_wakeup(struct usb_gadget *_gadget) { diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index 62a226b6167..00a29855d0c 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c @@ -627,7 +627,7 @@ irqreturn_t fhci_irq(struct usb_hcd *hcd) /* - * Process normal completions(error or sucess) and clean the schedule. + * Process normal completions(error or success) and clean the schedule. * * This is the main path for handing urbs back to drivers. The only other patth * is process_del_list(),which unlinks URBs by scanning EDs,instead of scanning diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index 9ec7fd5da48..9579cf4c38b 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c @@ -111,7 +111,7 @@ struct aes_ccm_b1 { * * CCM uses Ax blocks to generate a keystream with which the MIC and * the message's payload are encoded. A0 always encrypts/decrypts the - * MIC. Ax (x>0) are used for the sucesive payload blocks. + * MIC. Ax (x>0) are used for the successive payload blocks. * * The x is the counter, and is increased for each block. */ diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 613a5fc490d..489b47833e2 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -558,7 +558,7 @@ static void wa_seg_dto_cb(struct urb *urb) /* * Callback for the segment request * - * If succesful transition state (unless already transitioned or + * If successful transition state (unless already transitioned or * outbound transfer); otherwise, take a note of the error, mark this * segment done and try completion. * @@ -1364,7 +1364,7 @@ segment_aborted: /* * Callback for the IN data phase * - * If succesful transition state; otherwise, take a note of the + * If successful transition state; otherwise, take a note of the * error, mark this segment done and try completion. * * Note we don't access until we are sure that the transfer hasn't diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index c7080d49731..0bb665a0c02 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c @@ -229,7 +229,7 @@ void i1480_usb_neep_cb(struct urb *urb) * will verify it. * * Set i1480->evt_result with the result of getting the event or its - * size (if succesful). + * size (if successful). * * Delivers the data directly to i1480->evt_buf */ diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c index 86a853b8411..7350ed6909f 100644 --- a/drivers/uwb/wlp/txrx.c +++ b/drivers/uwb/wlp/txrx.c @@ -282,7 +282,7 @@ EXPORT_SYMBOL_GPL(wlp_receive_frame); * and transmission will be done by the calling function. * @dst: On return this will contain the device address to which the * frame is destined. 
- * @returns: 0 on success no tx : WLP header sucessfully applied to skb buffer, + * @returns: 0 on success no tx : WLP header successfully applied to skb buffer, * calling function can proceed with tx * 1 on success with tx : WLP will take over transmission of this * frame diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c index 913b4a47ae5..1ddeb4c3476 100644 --- a/drivers/video/aty/atyfb_base.c +++ b/drivers/video/aty/atyfb_base.c @@ -3276,7 +3276,7 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base) txtformat = "24 bit interface"; break; default: - txtformat = "unkown format"; + txtformat = "unknown format"; } } else { switch (format & 7) { @@ -3299,7 +3299,7 @@ static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base) txtformat = "262144 colours (FDPI-2 mode)"; break; default: - txtformat = "unkown format"; + txtformat = "unknown format"; } } PRINTKI("%s%s %s monitor detected: %s\n", diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c index 505c0823a10..2cf7ba52f67 100644 --- a/drivers/video/backlight/atmel-pwm-bl.c +++ b/drivers/video/backlight/atmel-pwm-bl.c @@ -158,7 +158,7 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev) goto err_free_pwm; } - /* Turn display off by defatult. */ + /* Turn display off by default. */ retval = gpio_direction_output(pwmbl->gpio_on, 0 ^ pdata->on_active_low); if (retval) diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c index 50ec17dfc51..fa32b94a454 100644 --- a/drivers/video/backlight/tosa_lcd.c +++ b/drivers/video/backlight/tosa_lcd.c @@ -177,7 +177,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi) if (!data) return -ENOMEM; - data->is_vga = true; /* defaut to VGA mode */ + data->is_vga = true; /* default to VGA mode */ /* * bits_per_word cannot be configured in platform data diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index 857b3668b3b..6468a297e34 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -436,7 +436,7 @@ sti_init_glob_cfg(struct sti_struct *sti, (offs < PCI_BASE_ADDRESS_0 || offs > PCI_BASE_ADDRESS_5)) { printk (KERN_WARNING - "STI pci region maping for region %d (%02x) can't be mapped\n", + "STI pci region mapping for region %d (%02x) can't be mapped\n", i,sti->rm_entry[i]); continue; } diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c index 1a83709f961..492e6e64b65 100644 --- a/drivers/video/gbefb.c +++ b/drivers/video/gbefb.c @@ -701,7 +701,7 @@ static int gbefb_set_par(struct fb_info *info) blocks of 512x128, 256x128 or 128x128 pixels, respectively for 8bit, 16bit and 32 bit modes (64 kB). They cover the screen with partial tiles on the right and/or bottom of the screen if needed. 
- For exemple in 640x480 8 bit mode the mapping is: + For example in 640x480 8 bit mode the mapping is: <-------- 640 -----> <---- 512 ----><128|384 offscreen> diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c index 6120f0c526f..876648e15e9 100644 --- a/drivers/video/stifb.c +++ b/drivers/video/stifb.c @@ -756,9 +756,9 @@ hyperResetPlanes(struct stifb_info *fb, int enable) if (fb->info.var.bits_per_pixel == 32) controlPlaneReg = 0x04000F00; else - controlPlaneReg = 0x00000F00; /* 0x00000800 should be enought, but lets clear all 4 bits */ + controlPlaneReg = 0x00000F00; /* 0x00000800 should be enough, but lets clear all 4 bits */ else - controlPlaneReg = 0x00000F00; /* 0x00000100 should be enought, but lets clear all 4 bits */ + controlPlaneReg = 0x00000F00; /* 0x00000100 should be enough, but lets clear all 4 bits */ switch (enable) { case ENABLE: diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c index ff43c888502..98054839004 100644 --- a/drivers/video/tdfxfb.c +++ b/drivers/video/tdfxfb.c @@ -52,7 +52,7 @@ * * 0.1.3 (released 1999-11-02) added Attila's panning support, code * reorg, hwcursor address page size alignment - * (for mmaping both frame buffer and regs), + * (for mmapping both frame buffer and regs), * and my changes to get rid of hardcoded * VGA i/o register locations (uses PCI * configuration info now) diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c index c5c32b6b6e6..67b36932212 100644 --- a/drivers/video/via/dvi.c +++ b/drivers/video/via/dvi.c @@ -467,7 +467,7 @@ static int dvi_get_panel_size_from_DDCv1(void) default: viaparinfo->tmds_setting_info->dvi_panel_size = VIA_RES_1024X768; - DEBUG_MSG(KERN_INFO "Unknow panel size max resolution = %d !\ + DEBUG_MSG(KERN_INFO "Unknown panel size max resolution = %d !\ set default panel size.\n", max_h); break; } @@ -534,7 +534,7 @@ static int dvi_get_panel_size_from_DDCv2(void) default: viaparinfo->tmds_setting_info->dvi_panel_size = VIA_RES_1024X768; - DEBUG_MSG(KERN_INFO "Unknow panel size max resolution = %d!\ + DEBUG_MSG(KERN_INFO "Unknown panel size max resolution = %d!\ set default panel size.\n", HSize); break; } diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c index 3df17dc8c3d..65ccd215d49 100644 --- a/drivers/video/vt8623fb.c +++ b/drivers/video/vt8623fb.c @@ -446,7 +446,7 @@ static int vt8623fb_set_par(struct fb_info *info) svga_wseq_mask(0x1E, 0xF0, 0xF0); // DI/DVP bus svga_wseq_mask(0x2A, 0x0F, 0x0F); // DI/DVP bus - svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read treshold + svga_wseq_mask(0x16, 0x08, 0xBF); // FIFO read threshold vga_wseq(NULL, 0x17, 0x1F); // FIFO depth vga_wseq(NULL, 0x18, 0x4E); svga_wseq_mask(0x1A, 0x08, 0x08); // enable MMIO ? diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c index 381026c0bd7..923cc68dba2 100644 --- a/drivers/watchdog/coh901327_wdt.c +++ b/drivers/watchdog/coh901327_wdt.c @@ -508,7 +508,7 @@ void coh901327_watchdog_reset(void) * deactivating the watchdog before it is shut down by it. * * NOTE: on future versions of the watchdog, this restriction is - * gone: the watchdog will be reloaded with a defaul value (1 min) + * gone: the watchdog will be reloaded with a default value (1 min) * instead of last value, and you can conveniently set the watchdog * timeout to 10ms (value = 1) without any problems. 
*/ diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index b6b3f59ab44..47d719717a3 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -21,7 +21,7 @@ * wd#1 - 2 seconds; * wd#2 - 7.2 ms; * After the expiration of wd#1, it can generate a NMI, SCI, SMI, or - * a system RESET and it starts wd#2 that unconditionaly will RESET + * a system RESET and it starts wd#2 that unconditionally will RESET * the system when the counter reaches zero. * * 14-Dec-2001 Matt Domsch diff --git a/drivers/watchdog/wdrtas.c b/drivers/watchdog/wdrtas.c index 3bde56bce63..5bfb1f2c531 100644 --- a/drivers/watchdog/wdrtas.c +++ b/drivers/watchdog/wdrtas.c @@ -542,7 +542,7 @@ static struct notifier_block wdrtas_notifier = { /** * wdrtas_get_tokens - reads in RTAS tokens * - * returns 0 on succes, <0 on failure + * returns 0 on success, <0 on failure * * wdrtas_get_tokens reads in the tokens for the RTAS calls used in * this watchdog driver. It tolerates, if "get-sensor-state" and @@ -598,7 +598,7 @@ static void wdrtas_unregister_devs(void) /** * wdrtas_register_devs - registers the misc dev handlers * - * returns 0 on succes, <0 on failure + * returns 0 on success, <0 on failure * * wdrtas_register_devs registers the watchdog and temperature watchdog * misc devs @@ -630,7 +630,7 @@ static int wdrtas_register_devs(void) /** * wdrtas_init - init function of the watchdog driver * - * returns 0 on succes, <0 on failure + * returns 0 on success, <0 on failure * * registers the file handlers and the reboot notifier */ diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index b9b3bb51b1e..d15ea1790bf 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -767,7 +767,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) current->mm->start_stack = bprm->p; - /* Now we do a little grungy work by mmaping the ELF image into + /* Now we do a little grungy work by mmapping the ELF image into the correct location in memory. */ for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { diff --git a/fs/bio.c b/fs/bio.c index 12da5db8682..2bd671a08e3 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -272,7 +272,7 @@ EXPORT_SYMBOL(bio_init); * for a &struct bio to become free. If a %NULL @bs is passed in, we will * fall back to just using @kmalloc to allocate the required memory. * - * Note that the caller must set ->bi_destructor on succesful return + * Note that the caller must set ->bi_destructor on successful return * of a bio, to do the appropriate freeing of the bio once the reference * count drops to zero. **/ diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 2c726b7b9fa..6815d2a84b9 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -256,7 +256,7 @@ out: * Insert @em into @tree or perform a simple forward/backward merge with * existing mappings. The extent_map struct passed in will be inserted * into the tree directly, with an additional reference taken, or a - * reference dropped if the merge attempt was sucessfull. + * reference dropped if the merge attempt was successfull. */ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) diff --git a/fs/cifs/README b/fs/cifs/README index 79c1a93400b..a727b7cb075 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -423,7 +423,7 @@ A partial list of the supported mount options follows: source name to use to represent the client netbios machine name when doing the RFC1001 netbios session initialize. direct Do not do inode data caching on files opened on this mount. 
- This precludes mmaping files on this mount. In some cases + This precludes mmapping files on this mount. In some cases with fast networks and little or no caching benefits on the client (e.g. when the application is doing large sequential reads bigger than page size without rereading the same data) diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 5d0fde18039..4b35f7ec058 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -39,7 +39,7 @@ /* * MAX_REQ is the maximum number of requests that WE will send - * on one socket concurently. It also matches the most common + * on one socket concurrently. It also matches the most common * value of max multiplex returned by servers. We may * eventually want to use the negotiated value (in case * future servers can handle more) when we are more confident that diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5e2492535da..83580213fca 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -917,8 +917,8 @@ undo_setattr: /* * If dentry->d_inode is null (usually meaning the cached dentry * is a negative dentry) then we would attempt a standard SMB delete, but - * if that fails we can not attempt the fall back mechanisms on EACESS - * but will return the EACESS to the caller. Note that the VFS does not call + * if that fails we can not attempt the fall back mechanisms on EACCESS + * but will return the EACCESS to the caller. Note that the VFS does not call * unlink on negative dentries currently. */ int cifs_unlink(struct inode *dir, struct dentry *dentry) diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c index 224a1f47896..b6b6dcb500b 100644 --- a/fs/cifs/smbdes.c +++ b/fs/cifs/smbdes.c @@ -371,7 +371,7 @@ E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24) smbhash(p24 + 16, c8, p21 + 14, 1); } -#if 0 /* currently unsued */ +#if 0 /* currently unused */ static void D_P16(unsigned char *p14, unsigned char *in, unsigned char *out) { diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index 16f682e26c0..b540aa5d1f6 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -143,7 +143,7 @@ out: } EXPORT_SYMBOL_GPL(dlm_posix_lock); -/* Returns failure iff a succesful lock operation should be canceled */ +/* Returns failure iff a successful lock operation should be canceled */ static int dlm_plock_callback(struct plock_op *op) { struct file *file; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 618ca95cbb5..0282ec78cf8 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2932,7 +2932,7 @@ retry: ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd); /* - * If we have a contigous extent of pages and we + * If we have a contiguous extent of pages and we * haven't done the I/O yet, map the blocks and submit * them for I/O. */ @@ -5370,7 +5370,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) * worse case, the indexs blocks spread over different block groups * * If datablocks are discontiguous, they are possible to spread over - * different block groups too. If they are contiugous, with flexbg, + * different block groups too. If they are contiuguous, with flexbg, * they could still across block group boundary. * * Also account for superblock, inode, quota and xattr blocks @@ -5446,7 +5446,7 @@ int ext4_writepage_trans_blocks(struct inode *inode) * Calculate the journal credits for a chunk of data modification. * * This is called from DIO, fallocate or whoever calling - * ext4_get_blocks() to map/allocate a chunk of contigous disk blocks. 
+ * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks. * * journal buffers for data blocks are not included here, as DIO * and fallocate do no need to journal data buffers. diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index bba12824def..74e495dabe0 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -142,7 +142,7 @@ * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The * value of s_mb_order2_reqs can be tuned via * /sys/fs/ext4//mb_order2_req. If the request len is equal to - * stripe size (sbi->s_stripe), we try to search for contigous block in + * stripe size (sbi->s_stripe), we try to search for contiguous block in * stripe size. This should result in better allocation on RAID setups. If * not, we search in the specific group using bitmap for best extents. The * tunable min_to_scan and max_to_scan control the behaviour here. diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c index f25e70c1b51..f0294410868 100644 --- a/fs/jffs2/compr.c +++ b/fs/jffs2/compr.c @@ -177,7 +177,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, spin_unlock(&jffs2_compressor_list_lock); break; default: - printk(KERN_ERR "JFFS2: unknow compression mode.\n"); + printk(KERN_ERR "JFFS2: unknown compression mode.\n"); } out: if (ret == JFFS2_COMPR_NONE) { diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 1a80301004b..378991cfe40 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -931,7 +931,7 @@ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_re * Helper function for jffs2_get_inode_nodes(). * The function detects whether more data should be read and reads it if yes. * - * Returns: 0 on succes; + * Returns: 0 on success; * negative error code on failure. */ static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index 082e844ab2d..4b107881acd 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c @@ -31,7 +31,7 @@ * is used to release xattr name/value pair and detach from c->xattrindex. * reclaim_xattr_datum(c) * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when - * memory usage by cache is over c->xdatum_mem_threshold. Currentry, this threshold + * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold * is hard coded as 32KiB. * do_verify_xattr_datum(c, xd) * is used to load the xdatum informations without name/value pair from the medium. diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 2bc7d8aa574..d9b031cf69f 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -755,7 +755,7 @@ int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results) * allocation group. */ if ((blkno & (bmp->db_agsize - 1)) == 0) - /* check if the AG is currenly being written to. + /* check if the AG is currently being written to. * if so, call dbNextAG() to find a non-busy * AG with sufficient free space. 
*/ @@ -3337,7 +3337,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks) for (i = 0, n = 0; i < agno; n++) { bmp->db_agfree[n] = 0; /* init collection point */ - /* coalesce cotiguous k AGs; */ + /* coalesce contiguous k AGs; */ for (j = 0; j < k && i < agno; j++, i++) { /* merge AGi to AGn */ bmp->db_agfree[n] += bmp->db_agfree[i]; diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index 0d58caf4a6e..ec8f45f12e0 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -835,7 +835,7 @@ static int ncp_ioctl_need_write(unsigned int cmd) case NCP_IOC_SETROOT: return 0; default: - /* unkown IOCTL command, assume write */ + /* unknown IOCTL command, assume write */ return 1; } } diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index 9669541d011..08f7530e934 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c @@ -927,7 +927,7 @@ lock_retry_remap: return 0; ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ? - "EOVERFLOW" : (!err ? "EIO" : "unkown error")); + "EOVERFLOW" : (!err ? "EIO" : "unknown error")); return err < 0 ? err : -EIO; read_err: diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 663c0e341f8..43179ddd336 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -399,7 +399,7 @@ static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov, * @cached_page: allocated but as yet unused page * @lru_pvec: lru-buffering pagevec of caller * - * Obtain @nr_pages locked page cache pages from the mapping @maping and + * Obtain @nr_pages locked page cache pages from the mapping @mapping and * starting at index @index. * * If a page is newly created, increment its refcount and add it to the @@ -1281,7 +1281,7 @@ rl_not_mapped_enoent: /* * Copy as much as we can into the pages and return the number of bytes which - * were sucessfully copied. If a fault is encountered then clear the pages + * were successfully copied. If a fault is encountered then clear the pages * out to (ofs + bytes) and return the number of bytes which were copied. */ static inline size_t ntfs_copy_from_user(struct page **pages, diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index 89b02985c05..4dadcdf3d45 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c @@ -338,7 +338,7 @@ err_out: * copy of the complete multi sector transfer deprotected page. On failure, * *@wrp is undefined. * - * Simillarly, if @lsn is not NULL, on succes *@lsn will be set to the current + * Simillarly, if @lsn is not NULL, on success *@lsn will be set to the current * logfile lsn according to this restart page. On failure, *@lsn is undefined. * * The following error codes are defined: diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 38a42f5d59f..7c7198a5bc9 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -2398,7 +2398,7 @@ static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos) * * The array is assumed to be large enough to hold an entire path (tree depth). * - * Upon succesful return from this function: + * Upon successful return from this function: * * - The 'right_path' array will contain a path to the leaf block * whose range contains e_cpos. diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 83bcaf266b3..03ccf9a7b1f 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2586,7 +2586,7 @@ fail: * is complete everywhere. if the target dies while this is * going on, some nodes could potentially see the target as the * master, so it is important that my recovery finds the migration - * mle and sets the master to UNKNONWN. 
*/ + * mle and sets the master to UNKNOWN. */ /* wait for new node to assert master */ diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 0d38d67194c..c5e4a49e3a1 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1855,7 +1855,7 @@ int ocfs2_file_lock(struct file *file, int ex, int trylock) * outstanding lock request, so a cancel convert is * required. We intentionally overwrite 'ret' - if the * cancel fails and the lock was granted, it's easier - * to just bubble sucess back up to the user. + * to just bubble success back up to the user. */ ret = ocfs2_flock_handle_signal(lockres, level); } else if (!ret && (level > lockres->l_level)) { diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 54c16b66327..bf34c491ae9 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -659,7 +659,7 @@ static int __ocfs2_journal_access(handle_t *handle, default: status = -EINVAL; - mlog(ML_ERROR, "Uknown access type!\n"); + mlog(ML_ERROR, "Unknown access type!\n"); } if (!status && ocfs2_meta_ecc(osb) && triggers) jbd2_journal_set_triggers(bh, &triggers->ot_triggers); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 60287fc56bc..d00c658ca15 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2431,7 +2431,7 @@ out: * we gonna touch and whether we need to create new blocks. * * Normally the refcount blocks store these refcount should be - * continguous also, so that we can get the number easily. + * contiguous also, so that we can get the number easily. * As for meta_ac, we will at most add split 2 refcount record and * 2 more refcount block, so just check it in a rough way. * diff --git a/fs/omfs/bitmap.c b/fs/omfs/bitmap.c index e1c0ec0ae98..082234581d0 100644 --- a/fs/omfs/bitmap.c +++ b/fs/omfs/bitmap.c @@ -85,7 +85,7 @@ out: } /* - * Tries to allocate exactly one block. Returns true if sucessful. + * Tries to allocate exactly one block. Returns true if successful. */ int omfs_allocate_block(struct super_block *sb, u64 block) { diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index f94ddf7efba..868a55ee080 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -23,7 +23,7 @@ /* * This file implements functions needed to recover from unclean un-mounts. * When UBIFS is mounted, it checks a flag on the master node to determine if - * an un-mount was completed sucessfully. If not, the process of mounting + * an un-mount was completed successfully. If not, the process of mounting * incorparates additional checking and fixing of on-flash data structures. * UBIFS always cleans away all remnants of an unclean un-mount, so that * errors do not accumulate. However UBIFS defers recovery if it is mounted diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index 6533ead9b88..a2c16bcee90 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h @@ -98,7 +98,7 @@ typedef struct xfs_dquot { #define dq_flags q_lists.dqm_flags /* - * Lock hierachy for q_qlock: + * Lock hierarchy for q_qlock: * XFS_QLOCK_NORMAL is the implicit default, * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2 */ diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 4c8d0afae71..fb2d63f13f4 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -47,7 +47,7 @@ #elif defined(CONFIG_SPARSEMEM_VMEMMAP) -/* memmap is virtually contigious. */ +/* memmap is virtually contiguous. 
*/ #define __pfn_to_page(pfn) (vmemmap + (pfn)) #define __page_to_pfn(page) (unsigned long)((page) - vmemmap) diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h index d76b66acea9..7c38c147e5e 100644 --- a/include/asm-generic/unistd.h +++ b/include/asm-generic/unistd.h @@ -631,7 +631,7 @@ __SYSCALL(__NR_perf_event_open, sys_perf_event_open) * these are provided for both review and as a porting * help for the C library version. * - * Last chance: are any of these important enought to + * Last chance: are any of these important enough to * enable by default? */ #ifdef __ARCH_WANT_SYSCALL_NO_AT diff --git a/include/linux/chio.h b/include/linux/chio.h index 519248d8b2b..d9bac7f9728 100644 --- a/include/linux/chio.h +++ b/include/linux/chio.h @@ -21,7 +21,7 @@ * query vendor-specific element types * * accessing elements works by specifing type and unit of the element. - * for eample, storage elements are addressed with type = CHET_ST and + * for example, storage elements are addressed with type = CHET_ST and * unit = 0 .. cp_nslots-1 * */ diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index e5124ceea76..3402042ddc3 100644 --- a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -45,7 +45,7 @@ void pcap_set_ts_bits(struct pcap_chip *, u32); #define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff #define PCAP_MASK_ALL_INTERRUPT 0x01ffffff -/* registers acessible by both pcap ports */ +/* registers accessible by both pcap ports */ #define PCAP_REG_ISR 0x0 /* Interrupt Status */ #define PCAP_REG_MSR 0x1 /* Interrupt Mask */ #define PCAP_REG_PSTAT 0x2 /* Processor Status */ @@ -67,7 +67,7 @@ void pcap_set_ts_bits(struct pcap_chip *, u32); #define PCAP_REG_VENDOR_TEST1 0x1e #define PCAP_REG_VENDOR_TEST2 0x1f -/* registers acessible by pcap port 1 only (a1200, e2 & e6) */ +/* registers accessible by pcap port 1 only (a1200, e2 & e6) */ #define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */ #define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */ #define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */ diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index d745f5b6c7b..76e5053e1fa 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h @@ -30,7 +30,7 @@ /* * use drive write caching -- we need deferred error handling to be - * able to sucessfully recover with this option (drive will return good + * able to successfully recover with this option (drive will return good * status as soon as the cdb is validated). 
*/ #if defined(CONFIG_CDROM_PKTCDVD_WCACHE) diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h index 850db2e8051..cf9327c051a 100644 --- a/include/linux/serial_reg.h +++ b/include/linux/serial_reg.h @@ -216,10 +216,10 @@ #define UART_IIR_TOD 0x08 /* Character Timeout Indication Detected */ -#define UART_FCR_PXAR1 0x00 /* receive FIFO treshold = 1 */ -#define UART_FCR_PXAR8 0x40 /* receive FIFO treshold = 8 */ -#define UART_FCR_PXAR16 0x80 /* receive FIFO treshold = 16 */ -#define UART_FCR_PXAR32 0xc0 /* receive FIFO treshold = 32 */ +#define UART_FCR_PXAR1 0x00 /* receive FIFO threshold = 1 */ +#define UART_FCR_PXAR8 0x40 /* receive FIFO threshold = 8 */ +#define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */ +#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */ diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index b59e78c5716..dfd4745a955 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -490,7 +490,7 @@ struct v4l2_jpegcompression { * you do, leave them untouched. * Inluding less markers will make the * resulting code smaller, but there will - * be fewer aplications which can read it. + * be fewer applications which can read it. * The presence of the APP and COM marker * is influenced by APP_len and COM_len * ONLY, not by this property! */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6e5f0e0c796..ca4789e4f1e 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -988,7 +988,7 @@ struct sctp_transport { int init_sent_count; /* state : The current state of this destination, - * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKOWN. + * : i.e. SCTP_ACTIVE, SCTP_INACTIVE, SCTP_UNKNOWN. */ int state; diff --git a/include/net/tcp.h b/include/net/tcp.h index 03a49c70337..1827e7f217d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1260,7 +1260,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) /* This function calculates a "timeout" which is equivalent to the timeout of a - * TCP connection after "boundary" unsucessful, exponentially backed-off + * TCP connection after "boundary" unsuccessful, exponentially backed-off * retransmissions with an initial RTO of TCP_RTO_MIN. */ static inline bool retransmits_timed_out(const struct sock *sk, diff --git a/include/net/wimax.h b/include/net/wimax.h index 2af7bf839f2..3b07f6aad10 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -79,7 +79,7 @@ * drivers have to only report state changes due to external * conditions. * - * All API operations are 'atomic', serialized thorough a mutex in the + * All API operations are 'atomic', serialized through a mutex in the * `struct wimax_dev`. 
* * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK diff --git a/include/sound/wm8993.h b/include/sound/wm8993.h index 9c661f2f8cd..eee19f63c0d 100644 --- a/include/sound/wm8993.h +++ b/include/sound/wm8993.h @@ -36,7 +36,7 @@ struct wm8993_platform_data { unsigned int micbias1_lvl:1; unsigned int micbias2_lvl:1; - /* Jack detect threashold levels, see datasheet for values */ + /* Jack detect threshold levels, see datasheet for values */ unsigned int jd_scthr:2; unsigned int jd_thr:2; }; diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 7f29643c898..759a629cc4b 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -419,7 +419,7 @@ static void perf_event_remove_from_context(struct perf_event *event) if (!task) { /* * Per cpu events are removed via an smp call and - * the removal is always sucessful. + * the removal is always successful. */ smp_call_function_single(event->cpu, __perf_event_remove_from_context, @@ -827,7 +827,7 @@ perf_install_in_context(struct perf_event_context *ctx, if (!task) { /* * Per cpu events are installed via an smp call and - * the install is always sucessful. + * the install is always successful. */ smp_call_function_single(cpu, __perf_install_in_context, event, 1); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 234ceb10861..456b2bc6b1f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -105,7 +105,7 @@ config DEBUG_SECTION_MISMATCH bool "Enable full Section mismatch analysis" depends on UNDEFINED # This option is on purpose disabled for now. - # It will be enabled when we are down to a resonable number + # It will be enabled when we are down to a reasonable number # of section mismatch warnings (< 10 for an allyesconfig build) help The section mismatch analysis checks if there are illegal diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 600f473a561..76074209f9a 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd) again when using them (during symbol decoding).*/ base = hufGroup->base-1; limit = hufGroup->limit-1; - /* Calculate permute[]. Concurently, initialize + /* Calculate permute[]. Concurrently, initialize * temp[] and limit[]. */ pp = 0; for (i = minLen; i <= maxLen; i++) { diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ce6b7eabf67..d9b08e0f7f5 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, * times. Without a hardware IOMMU this results in the * same device addresses being put into the dma-debug * hash multiple times too. This can result in false - * positives being reported. Therfore we implement a + * positives being reported. Therefore we implement a * best-fit algorithm here which returns the entry from * the hash which fits best to the reference value * instead of the first-fit. diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ac25cd28e80..853907e4586 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -453,7 +453,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir) /* * Return the buffer to the free list by setting the corresponding - * entries to indicate the number of contigous entries available. + * entries to indicate the number of contiguous entries available. * While returning the entries to the free list, we merge the entries * with slots below and above the pool being returned. 
*/ diff --git a/mm/filemap.c b/mm/filemap.c index ef169f37156..c3d3506ecab 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1844,7 +1844,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr, /* * Copy as much as we can into the page and return the number of bytes which - * were sucessfully copied. If a fault is encountered then return the number of + * were successfully copied. If a fault is encountered then return the number of * bytes which were copied. */ size_t iov_iter_copy_from_user_atomic(struct page *page, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7226e60e52a..c31a310aa14 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -209,7 +209,7 @@ struct mem_cgroup { int prev_priority; /* for recording reclaim priority */ /* - * While reclaiming in a hiearchy, we cache the last child we + * While reclaiming in a hierarchy, we cache the last child we * reclaimed from. */ int last_scanned_child; @@ -2466,7 +2466,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, cgroup_lock(); /* - * If parent's use_hiearchy is set, we can't make any modifications + * If parent's use_hierarchy is set, we can't make any modifications * in the child subtrees. If it is unset, then the change can * occur, provided the current cgroup has no children. * diff --git a/mm/memory-failure.c b/mm/memory-failure.c index dacc6418387..1ac49fef95a 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -174,7 +174,7 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, list_for_each_entry_safe (tk, next, to_kill, nd) { if (doit) { /* - * In case something went wrong with munmaping + * In case something went wrong with munmapping * make sure the process doesn't catch the * signal and then access the memory. Just kill it. * the signal handlers diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c index f7e2fa0974d..16ad251c972 100644 --- a/net/ipv4/netfilter/ipt_ECN.c +++ b/net/ipv4/netfilter/ipt_ECN.c @@ -50,7 +50,7 @@ set_ect_tcp(struct sk_buff *skb, const struct ipt_ECN_info *einfo) struct tcphdr _tcph, *tcph; __be16 oldval; - /* Not enought header? */ + /* Not enough header? 
*/ tcph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); if (!tcph) return false; diff --git a/net/irda/irlap.c b/net/irda/irlap.c index 356e65b1dc4..783c5f367d2 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -450,10 +450,10 @@ void irlap_disconnect_request(struct irlap_cb *self) /* Check if we are in the right state for disconnecting */ switch (self->state) { - case LAP_XMIT_P: /* FALLTROUGH */ - case LAP_XMIT_S: /* FALLTROUGH */ - case LAP_CONN: /* FALLTROUGH */ - case LAP_RESET_WAIT: /* FALLTROUGH */ + case LAP_XMIT_P: /* FALLTHROUGH */ + case LAP_XMIT_S: /* FALLTHROUGH */ + case LAP_CONN: /* FALLTHROUGH */ + case LAP_RESET_WAIT: /* FALLTHROUGH */ case LAP_RESET_CHECK: irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); break; @@ -485,9 +485,9 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); irlap_do_event(self, RESET_REQUEST, NULL, NULL); break; - case LAP_NO_RESPONSE: /* FALLTROUGH */ - case LAP_DISC_INDICATION: /* FALLTROUGH */ - case LAP_FOUND_NONE: /* FALLTROUGH */ + case LAP_NO_RESPONSE: /* FALLTHROUGH */ + case LAP_DISC_INDICATION: /* FALLTHROUGH */ + case LAP_FOUND_NONE: /* FALLTHROUGH */ case LAP_MEDIA_BUSY: irlmp_link_disconnect_indication(self->notify.instance, self, reason, NULL); diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index c5c51959e3c..94a9884d714 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c @@ -1741,7 +1741,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, * Function irlap_state_xmit_s (event, skb, info) * * XMIT_S, The secondary station has been given the right to transmit, - * and we therefor do not expect to receive any transmissions from other + * and we therefore do not expect to receive any transmissions from other * stations. 
*/ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 7bf5b913828..0e7d8bde145 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -105,7 +105,7 @@ int __init irlmp_init(void) init_timer(&irlmp->discovery_timer); - /* Do discovery every 3 seconds, conditionaly */ + /* Do discovery every 3 seconds, conditionally */ if (sysctl_discovery) irlmp_start_discovery_timer(irlmp, sysctl_discovery_timeout*HZ); @@ -1842,7 +1842,7 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason) reason = LM_CONNECT_FAILURE; break; default: - IRDA_DEBUG(1, "%s(), Unknow IrLAP disconnect reason %d!\n", + IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n", __func__, lap_reason); reason = LM_LAP_DISCONNECT; break; diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 751c4d0e2b3..719ddbc9e48 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -244,7 +244,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data * @addr: destination address of the path (ETH_ALEN length) * @sdata: local subif * - * Returns: 0 on sucess + * Returns: 0 on success * * State: the initial state of the new path is set to 0 */ @@ -530,7 +530,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) * @addr: dst address (ETH_ALEN length) * @sdata: local subif * - * Returns: 0 if succesful + * Returns: 0 if successful */ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) { diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index 7a10bbe02c1..c5d9f97ef21 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -682,7 +682,7 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain, * buckets and @skip_chain entries. For each entry in the table call * @callback, if @callback returns a negative value stop 'walking' through the * table and return. Updates the values in @skip_bkt and @skip_chain on - * return. Returns zero on succcess, negative values on failure. + * return. Returns zero on success, negative values on failure. * */ int netlbl_domhsh_walk(u32 *skip_bkt, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 8674d491955..29d8501bf15 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -719,7 +719,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, if (sctp_style(sk, TCP)) { /* Change the sk->sk_state of a TCP-style socket that has - * sucessfully completed a connect() call. + * successfully completed a connect() call. */ if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) sk->sk_state = SCTP_SS_ESTABLISHED; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index f11be72a1a8..b15e1ebb2bf 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -54,7 +54,7 @@ * Assumptions: * - head[0] is physically contiguous. * - tail[0] is physically contiguous. - * - pages[] is not physically or virtually contigous and consists of + * - pages[] is not physically or virtually contiguous and consists of * PAGE_SIZE elements. * * Output: diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c index ca269178c4d..35f370091f4 100644 --- a/net/wimax/op-reset.c +++ b/net/wimax/op-reset.c @@ -62,7 +62,7 @@ * Called when wanting to reset the device for any reason. Device is * taken back to power on status. 
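An aside on the netlbl_domhsh_walk() interface whose kernel-doc is touched a few hunks up: the walk resumes from *skip_bkt/*skip_chain, hands each entry to the callback, and stops early if the callback returns a negative value. A hypothetical caller follows, assuming the remaining parameters are the callback (taking a struct netlbl_dom_map pointer plus an opaque argument) and that argument; locking details are omitted and the helper names are invented:

    /* Hypothetical callback: count entries, stop once 100 have been seen. */
    static int count_cb(struct netlbl_dom_map *entry, void *arg)
    {
            u32 *count = arg;

            (*count)++;
            return (*count < 100) ? 0 : -ENOSPC;  /* negative stops the walk */
    }

    static void count_domains(void)
    {
            u32 skip_bkt = 0, skip_chain = 0, count = 0;

            if (netlbl_domhsh_walk(&skip_bkt, &skip_chain, count_cb, &count) == 0)
                    pr_info("saw %u domain mappings\n", count);
            /* skip_bkt/skip_chain now record where the walk stopped, so a
             * later call can resume from that position. */
    }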
* - * This call blocks; on succesful return, the device has completed the + * This call blocks; on successful return, the device has completed the * reset process and is ready to operate. */ int wimax_reset(struct wimax_dev *wimax_dev) diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c index d8295357358..8413cf38ed2 100644 --- a/scripts/kconfig/mconf.c +++ b/scripts/kconfig/mconf.c @@ -213,7 +213,7 @@ load_config_help[] = N_( "to modify that configuration.\n" "\n" "If you are uncertain, then you have probably never used alternate\n" - "configuration files. You should therefor leave this blank to abort.\n"), + "configuration files. You should therefore leave this blank to abort.\n"), save_config_text[] = N_( "Enter a filename to which this configuration should be saved " "as an alternate. Leave blank to abort."), diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index e68823741ad..2534400317c 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c @@ -204,7 +204,7 @@ int selinux_netlbl_skbuff_getsid(struct sk_buff *skb, * * Description * Call the NetLabel mechanism to set the label of a packet using @sid. - * Returns zero on auccess, negative values on failure. + * Returns zero on success, negative values on failure. * */ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb, diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index ff17820d35e..5914eeb0b33 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -741,7 +741,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) goto out; } - /* type/domain unchaned */ + /* type/domain unchanged */ if (old_context->type == new_context->type) { rc = 0; goto out; diff --git a/sound/Kconfig b/sound/Kconfig index 4b5365ad6b4..fcad760f569 100644 --- a/sound/Kconfig +++ b/sound/Kconfig @@ -55,7 +55,7 @@ config SOUND_OSS_CORE_PRECLAIM Please read Documentation/feature-removal-schedule.txt for details. - If unusre, say Y. + If unsure, say Y. 
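On the wimax_reset() semantics spelled out just above: because the call blocks until the reset has finished, a caller can treat a successful return as "device is back at power-on state". A minimal sketch; recover_device() is a made-up helper and the error handling only assumes the usual negative-errno convention:

    /* Sketch: last-resort recovery for a wedged device. */
    static int recover_device(struct wimax_dev *wimax_dev)
    {
            int result;

            result = wimax_reset(wimax_dev);    /* blocks until done */
            if (result < 0)
                    return result;              /* reset failed or unsupported */

            /* Device is at power-on state; reconfigure it from scratch. */
            return 0;
    }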
source "sound/oss/dmasound/Kconfig" diff --git a/sound/isa/cs423x/cs4236.c b/sound/isa/cs423x/cs4236.c index a076a6ce807..a828baaab63 100644 --- a/sound/isa/cs423x/cs4236.c +++ b/sound/isa/cs423x/cs4236.c @@ -177,7 +177,7 @@ static struct pnp_card_device_id snd_cs423x_pnpids[] = { { .id = "CSC0437", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Digital PC 5000 Onboard - CS4236B */ { .id = "CSC0735", .devs = { { "CSC0000" }, { "CSC0010" } } }, - /* some uknown CS4236B */ + /* some unknown CS4236B */ { .id = "CSC0b35", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, /* Intel PR440FX Onboard sound */ { .id = "CSC0b36", .devs = { { "CSC0000" }, { "CSC0010" }, { "CSC0003" } } }, diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c index 02e30d7c6a9..ddad60ef3f3 100644 --- a/sound/isa/opti9xx/miro.c +++ b/sound/isa/opti9xx/miro.c @@ -137,7 +137,7 @@ struct snd_miro { static void snd_miro_proc_init(struct snd_miro * miro); static char * snd_opti9xx_names[] = { - "unkown", + "unknown", "82C928", "82C929", "82C924", "82C925", "82C930", "82C931", "82C933" diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c index 5cd555325b9..848007508ff 100644 --- a/sound/isa/opti9xx/opti92x-ad1848.c +++ b/sound/isa/opti9xx/opti92x-ad1848.c @@ -185,7 +185,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids); #endif static char * snd_opti9xx_names[] = { - "unkown", + "unknown", "82C928", "82C929", "82C924", "82C925", "82C930", "82C931", "82C933" diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c index 06e9e88e4c0..bb14e4c67e8 100644 --- a/sound/oss/dmasound/dmasound_paula.c +++ b/sound/oss/dmasound/dmasound_paula.c @@ -657,7 +657,7 @@ static int AmiStateInfo(char *buffer, size_t space) len += sprintf(buffer+len, "\tsound.volume_right = %d [0...64]\n", dmasound.volume_right); if (len >= space) { - printk(KERN_ERR "dmasound_paula: overlowed state buffer alloc.\n") ; + printk(KERN_ERR "dmasound_paula: overflowed state buffer alloc.\n") ; len = space ; } return len; diff --git a/sound/pci/ca0106/ca0106_proc.c b/sound/pci/ca0106/ca0106_proc.c index c62b7d10ec6..8d13092300d 100644 --- a/sound/pci/ca0106/ca0106_proc.c +++ b/sound/pci/ca0106/ca0106_proc.c @@ -233,7 +233,7 @@ static void snd_ca0106_proc_dump_iec958( struct snd_info_buffer *buffer, u32 val snd_iprintf(buffer, "user-defined\n"); break; default: - snd_iprintf(buffer, "unkown\n"); + snd_iprintf(buffer, "unknown\n"); break; } snd_iprintf(buffer, "Sample Bits: "); diff --git a/sound/pci/cs46xx/imgs/cwcdma.asp b/sound/pci/cs46xx/imgs/cwcdma.asp index 09d24c76f03..a65e1193c89 100644 --- a/sound/pci/cs46xx/imgs/cwcdma.asp +++ b/sound/pci/cs46xx/imgs/cwcdma.asp @@ -26,10 +26,11 @@ // // // The purpose of this code is very simple: make it possible to tranfser -// the samples 'as they are' with no alteration from a PCMreader SCB (DMA from host) -// to any other SCB. This is useful for AC3 throug SPDIF. SRC (source rate converters) -// task always alters the samples in some how, however it's from 48khz -> 48khz. The -// alterations are not audible, but AC3 wont work. +// the samples 'as they are' with no alteration from a PCMreader +// SCB (DMA from host) to any other SCB. This is useful for AC3 through SPDIF. +// SRC (source rate converters) task always alters the samples in somehow, +// however it's from 48khz -> 48khz. +// The alterations are not audible, but AC3 wont work. // // ... 
// | diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c index 36e08bd2b3c..360e3809a60 100644 --- a/sound/pci/emu10k1/emu10k1x.c +++ b/sound/pci/emu10k1/emu10k1x.c @@ -184,7 +184,7 @@ MODULE_PARM_DESC(enable, "Enable the EMU10K1X soundcard."); * The hardware has 3 channels for playback and 1 for capture. * - channel 0 is the front channel * - channel 1 is the rear channel - * - channel 2 is the center/lfe chanel + * - channel 2 is the center/lfe channel * Volume is controlled by the AC97 for the front and rear channels by * the PCM Playback Volume, Sigmatel Surround Playback Volume and * Surround Playback Volume. The Sigmatel 4-Speaker Stereo switch affects diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c index 780e1a72114..8917071d5b6 100644 --- a/sound/pci/hda/patch_cmedia.c +++ b/sound/pci/hda/patch_cmedia.c @@ -66,7 +66,7 @@ struct cmi_spec { struct hda_pcm pcm_rec[2]; /* PCM information */ - /* pin deafault configuration */ + /* pin default configuration */ hda_nid_t pin_nid[NUM_PINS]; unsigned int def_conf[NUM_PINS]; unsigned int pin_def_confs; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ff20048504b..872731eb49e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6619,7 +6619,7 @@ static struct hda_input_mux alc889A_mb31_capture_source = { /* Front Mic (0x01) unused */ { "Line", 0x2 }, /* Line 2 (0x03) unused */ - /* CD (0x04) unsused? */ + /* CD (0x04) unused? */ }, }; diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 0dce331a2a3..a1b10d1a384 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -3017,7 +3017,7 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry, insel = "Coaxial"; break; default: - insel = "Unkown"; + insel = "Unknown"; } switch (hdspm->control_register & HDSPM_SyncRefMask) { @@ -3028,7 +3028,7 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry, syncref = "MADI"; break; default: - syncref = "Unkown"; + syncref = "Unknown"; } snd_iprintf(buffer, "Inputsel = %s, SyncRef = %s\n", insel, syncref); diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index c33b92edbde..8ce1c9b2e5b 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c @@ -101,7 +101,7 @@ static int uda134x_write(struct snd_soc_codec *codec, unsigned int reg, pr_debug("%s reg: %02X, value:%02X\n", __func__, reg, value); if (reg >= UDA134X_REGS_NUM) { - printk(KERN_ERR "%s unkown register: reg: %u", + printk(KERN_ERR "%s unknown register: reg: %u", __func__, reg); return -EINVAL; } @@ -552,7 +552,7 @@ static int uda134x_soc_probe(struct platform_device *pdev) ARRAY_SIZE(uda1341_snd_controls)); break; default: - printk(KERN_ERR "%s unkown codec type: %d", + printk(KERN_ERR "%s unknown codec type: %d", __func__, pd->model); return -EINVAL; } diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index fe1307b500c..d72347d90b7 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -607,7 +607,7 @@ SOC_SINGLE("Right Input PGA Common Mode Switch", WM8903_ANALOGUE_RIGHT_INPUT_1, SOC_SINGLE("DRC Switch", WM8903_DRC_0, 15, 1, 0), SOC_ENUM("DRC Compressor Slope R0", drc_slope_r0), SOC_ENUM("DRC Compressor Slope R1", drc_slope_r1), -SOC_SINGLE_TLV("DRC Compressor Threashold Volume", WM8903_DRC_3, 5, 124, 1, +SOC_SINGLE_TLV("DRC Compressor Threshold Volume", WM8903_DRC_3, 5, 124, 1, drc_tlv_thresh), SOC_SINGLE_TLV("DRC Volume", WM8903_DRC_3, 0, 30, 1, drc_tlv_amp), SOC_SINGLE_TLV("DRC 
Minimum Gain Volume", WM8903_DRC_1, 2, 3, 1, drc_tlv_min), @@ -617,11 +617,11 @@ SOC_ENUM("DRC Decay Rate", drc_decay), SOC_ENUM("DRC FF Delay", drc_ff_delay), SOC_SINGLE("DRC Anticlip Switch", WM8903_DRC_0, 1, 1, 0), SOC_SINGLE("DRC QR Switch", WM8903_DRC_0, 2, 1, 0), -SOC_SINGLE_TLV("DRC QR Threashold Volume", WM8903_DRC_0, 6, 3, 0, drc_tlv_max), +SOC_SINGLE_TLV("DRC QR Threshold Volume", WM8903_DRC_0, 6, 3, 0, drc_tlv_max), SOC_ENUM("DRC QR Decay Rate", drc_qr_decay), SOC_SINGLE("DRC Smoothing Switch", WM8903_DRC_0, 3, 1, 0), SOC_SINGLE("DRC Smoothing Hysteresis Switch", WM8903_DRC_0, 0, 1, 0), -SOC_ENUM("DRC Smoothing Threashold", drc_smoothing), +SOC_ENUM("DRC Smoothing Threshold", drc_smoothing), SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup), SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT, diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c index d9987999e92..bc033687b22 100644 --- a/sound/soc/codecs/wm8993.c +++ b/sound/soc/codecs/wm8993.c @@ -689,7 +689,7 @@ SOC_DOUBLE_TLV("Digital Sidetone Volume", WM8993_DIGITAL_SIDE_TONE, SOC_SINGLE("DRC Switch", WM8993_DRC_CONTROL_1, 15, 1, 0), SOC_ENUM("DRC Path", drc_path), -SOC_SINGLE_TLV("DRC Compressor Threashold Volume", WM8993_DRC_CONTROL_2, +SOC_SINGLE_TLV("DRC Compressor Threshold Volume", WM8993_DRC_CONTROL_2, 2, 60, 1, drc_comp_threash), SOC_SINGLE_TLV("DRC Compressor Amplitude Volume", WM8993_DRC_CONTROL_3, 11, 30, 1, drc_comp_amp), @@ -709,7 +709,7 @@ SOC_SINGLE_TLV("DRC Quick Release Volume", WM8993_DRC_CONTROL_3, 2, 3, 0, SOC_ENUM("DRC Quick Release Rate", drc_qr_rate), SOC_SINGLE("DRC Smoothing Switch", WM8993_DRC_CONTROL_1, 11, 1, 0), SOC_SINGLE("DRC Smoothing Hysteresis Switch", WM8993_DRC_CONTROL_1, 8, 1, 0), -SOC_ENUM("DRC Smoothing Hysteresis Threashold", drc_smooth), +SOC_ENUM("DRC Smoothing Hysteresis Threshold", drc_smooth), SOC_SINGLE_TLV("DRC Startup Volume", WM8993_DRC_CONTROL_4, 8, 18, 0, drc_startup_tlv), diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c index 1966e0d5652..3c7ccb78b6a 100644 --- a/sound/soc/s3c24xx/s3c24xx_simtec.c +++ b/sound/soc/s3c24xx/s3c24xx_simtec.c @@ -270,7 +270,7 @@ static int attach_gpio_amp(struct device *dev, gpio_direction_output(pd->amp_gain[1], 0); } - /* note, curently we assume GPA0 isn't valid amp */ + /* note, currently we assume GPA0 isn't valid amp */ if (pdata->amp_gpio > 0) { ret = gpio_request(pd->amp_gpio, "gpio-amp"); if (ret) { diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c index 83b8028e209..81d6f983f51 100644 --- a/sound/soc/s6000/s6000-pcm.c +++ b/sound/soc/s6000/s6000-pcm.c @@ -196,7 +196,7 @@ static int s6000_pcm_start(struct snd_pcm_substream *substream) 0 /* destination skip after chunk (impossible) */, 4 /* 16 byte burst size */, -1 /* don't conserve bandwidth */, - 0 /* low watermark irq descriptor theshold */, + 0 /* low watermark irq descriptor threshold */, 0 /* disable hardware timestamps */, 1 /* enable channel */); diff --git a/sound/sound_core.c b/sound/sound_core.c index 49c99818659..dbca7c909a3 100644 --- a/sound/sound_core.c +++ b/sound/sound_core.c @@ -353,7 +353,7 @@ static struct sound_unit *chains[SOUND_STEP]; * @dev: device pointer * * Allocate a special sound device by minor number from the sound - * subsystem. The allocated number is returned on succes. On failure + * subsystem. The allocated number is returned on success. On failure * a negative error code is returned. 
*/ -- cgit v1.2.3-70-g09d2 From 27a338a69ed9a8a672cd620f5fd7fa450209313c Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 8 Dec 2009 15:29:05 -0800 Subject: [IA64] Fix cut/paste detritus from unistd.h Build warning: :1523:2: warning: #warning syscall recvmmsg not implemented Because when recvmmesg was added, the previous syscall define was cut&pasted, and a spurious "rt_" left in the name of the define. Signed-off-by: Tony Luck --- arch/ia64/include/asm/unistd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 9c72e36c528..10a8f21ca9e 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -311,7 +311,7 @@ #define __NR_preadv 1319 #define __NR_pwritev 1320 #define __NR_rt_tgsigqueueinfo 1321 -#define __NR_rt_recvmmsg 1322 +#define __NR_recvmmsg 1322 #ifdef __KERNEL__ -- cgit v1.2.3-70-g09d2 From 559df2e0210352f83926d178c40c51142292a18c Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 19 Apr 2009 22:35:10 +0200 Subject: kbuild: move asm-offsets.h to include/generated The simplest method was to add an extra asm-offsets.h file in arch/$ARCH/include/asm that references the generated file. We can now migrate the architectures one-by-one to reference the generated file direct - and when done we can delete the temporary arch/$ARCH/include/asm/asm-offsets.h file. Signed-off-by: Sam Ravnborg Cc: Al Viro Signed-off-by: Michal Marek --- .gitignore | 1 - Kbuild | 2 +- Makefile | 1 - arch/alpha/include/asm/asm-offsets.h | 1 + arch/arm/include/asm/asm-offsets.h | 1 + arch/avr32/include/asm/asm-offsets.h | 1 + arch/blackfin/include/asm/asm-offsets.h | 1 + arch/cris/include/asm/asm-offsets.h | 1 + arch/frv/include/asm/asm-offsets.h | 1 + arch/h8300/include/asm/asm-offsets.h | 1 + arch/ia64/include/asm/asm-offsets.h | 1 + arch/m68k/include/asm/asm-offsets.h | 1 + arch/microblaze/include/asm/asm-offsets.h | 1 + arch/mips/include/asm/asm-offsets.h | 1 + arch/mn10300/include/asm/asm-offsets.h | 1 + arch/parisc/include/asm/asm-offsets.h | 1 + arch/powerpc/include/asm/asm-offsets.h | 1 + arch/s390/include/asm/asm-offsets.h | 1 + arch/sh/include/asm/asm-offsets.h | 1 + arch/sparc/include/asm/asm-offsets.h | 1 + arch/um/Makefile | 2 +- arch/um/include/asm/asm-offsets.h | 1 + arch/x86/include/asm/asm-offsets.h | 1 + arch/xtensa/include/asm/asm-offsets.h | 1 + 24 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 arch/alpha/include/asm/asm-offsets.h create mode 100644 arch/arm/include/asm/asm-offsets.h create mode 100644 arch/avr32/include/asm/asm-offsets.h create mode 100644 arch/blackfin/include/asm/asm-offsets.h create mode 100644 arch/cris/include/asm/asm-offsets.h create mode 100644 arch/frv/include/asm/asm-offsets.h create mode 100644 arch/h8300/include/asm/asm-offsets.h create mode 100644 arch/ia64/include/asm/asm-offsets.h create mode 100644 arch/m68k/include/asm/asm-offsets.h create mode 100644 arch/microblaze/include/asm/asm-offsets.h create mode 100644 arch/mips/include/asm/asm-offsets.h create mode 100644 arch/mn10300/include/asm/asm-offsets.h create mode 100644 arch/parisc/include/asm/asm-offsets.h create mode 100644 arch/powerpc/include/asm/asm-offsets.h create mode 100644 arch/s390/include/asm/asm-offsets.h create mode 100644 arch/sh/include/asm/asm-offsets.h create mode 100644 arch/sparc/include/asm/asm-offsets.h create mode 100644 arch/um/include/asm/asm-offsets.h create mode 100644 arch/x86/include/asm/asm-offsets.h create mode 
100644 arch/xtensa/include/asm/asm-offsets.h (limited to 'arch/ia64/include') diff --git a/.gitignore b/.gitignore index 36d9cd6d428..3582f422813 100644 --- a/.gitignore +++ b/.gitignore @@ -46,7 +46,6 @@ Module.symvers # Generated include files # include/asm -include/asm-*/asm-offsets.h include/config include/linux/autoconf.h include/linux/compile.h diff --git a/Kbuild b/Kbuild index 1165d7a5ca4..e3737ad72b5 100644 --- a/Kbuild +++ b/Kbuild @@ -43,7 +43,7 @@ $(obj)/$(bounds-file): kernel/bounds.s Kbuild # 2) Generate asm-offsets.h # -offsets-file := include/asm/asm-offsets.h +offsets-file := include/generated/asm-offsets.h always += $(offsets-file) targets += $(offsets-file) diff --git a/Makefile b/Makefile index b58e9312ce3..eb43b9fa30b 100644 --- a/Makefile +++ b/Makefile @@ -1197,7 +1197,6 @@ MRPROPER_DIRS += include/config include2 usr/include include/generated MRPROPER_FILES += .config .config.old include/asm .version .old_version \ include/linux/autoconf.h include/linux/version.h \ include/linux/utsrelease.h \ - include/asm*/asm-offsets.h \ Module.symvers Module.markers tags TAGS cscope* # clean - Delete most, but leave enough to build external modules diff --git a/arch/alpha/include/asm/asm-offsets.h b/arch/alpha/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/alpha/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/arm/include/asm/asm-offsets.h b/arch/arm/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/arm/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/avr32/include/asm/asm-offsets.h b/arch/avr32/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/avr32/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/blackfin/include/asm/asm-offsets.h b/arch/blackfin/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/blackfin/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/cris/include/asm/asm-offsets.h b/arch/cris/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/cris/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/frv/include/asm/asm-offsets.h b/arch/frv/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/frv/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/h8300/include/asm/asm-offsets.h b/arch/h8300/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/h8300/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/ia64/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/m68k/include/asm/asm-offsets.h b/arch/m68k/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/m68k/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/microblaze/include/asm/asm-offsets.h b/arch/microblaze/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/microblaze/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/mips/include/asm/asm-offsets.h b/arch/mips/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null 
+++ b/arch/mips/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/mn10300/include/asm/asm-offsets.h b/arch/mn10300/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/mn10300/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/parisc/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/powerpc/include/asm/asm-offsets.h b/arch/powerpc/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/powerpc/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/s390/include/asm/asm-offsets.h b/arch/s390/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/s390/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/sh/include/asm/asm-offsets.h b/arch/sh/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/sh/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/sparc/include/asm/asm-offsets.h b/arch/sparc/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/sparc/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/um/Makefile b/arch/um/Makefile index fc633dbacf8..fab8121d2b3 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -149,6 +149,6 @@ $(SHARED_HEADERS)/user_constants.h: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.s $(SHARED_HEADERS)/kern_constants.h: $(Q)mkdir -p $(dir $@) - $(Q)echo '#include "../../../../include/asm/asm-offsets.h"' >$@ + $(Q)echo '#include "../../../../include/generated/asm-offsets.h"' >$@ export SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS HEADER_ARCH DEV_NULL_PATH diff --git a/arch/um/include/asm/asm-offsets.h b/arch/um/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/um/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/x86/include/asm/asm-offsets.h b/arch/x86/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/x86/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/xtensa/include/asm/asm-offsets.h b/arch/xtensa/include/asm/asm-offsets.h new file mode 100644 index 00000000000..d370ee36a18 --- /dev/null +++ b/arch/xtensa/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include -- cgit v1.2.3-70-g09d2 From 4929d29c0dffd5fdc2df987254366c2e25c392d4 Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Mon, 20 Apr 2009 19:35:54 +0200 Subject: ia64: move nr-irqs.h to include/generated Avoid generating files in the now deprecated asm-ia64 dir Simplified the logic in the Makefile when editing stuff in the area Signed-off-by: Sam Ravnborg Cc: Al Viro Cc: Tony Luck Cc: Fenghua Yu Signed-off-by: Michal Marek --- arch/ia64/Makefile | 2 +- arch/ia64/include/asm/irq.h | 2 +- arch/ia64/kernel/Makefile | 7 ++----- 3 files changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile index e7cbaa02cd0..475e2725fbd 100644 --- a/arch/ia64/Makefile +++ b/arch/ia64/Makefile @@ -103,4 +103,4 @@ archprepare: make_nr_irqs_h FORCE PHONY += make_nr_irqs_h FORCE make_nr_irqs_h: FORCE - $(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h + $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h 
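A note on the two kbuild commits above: the angle-bracket include targets did not survive in this export, so the new one-line stub headers and the irq.h hunk that follows show a bare "#include". Going by the commit text and the Makefile hunks (Kbuild now emits include/generated/asm-offsets.h, and the ia64 Makefile now builds include/generated/nr-irqs.h), the redirects are presumably of this shape (reconstructed, illustrative only):

    /* arch/$ARCH/include/asm/asm-offsets.h -- the temporary per-arch stub */
    #include <generated/asm-offsets.h>

    /*
     * arch/ia64/include/asm/irq.h -- the nr-irqs include moves from the old
     * generated location under include/asm-ia64/ to:
     */
    #include <generated/nr-irqs.h>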
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h index 5282546cdf8..91b920fd7d5 100644 --- a/arch/ia64/include/asm/irq.h +++ b/arch/ia64/include/asm/irq.h @@ -13,7 +13,7 @@ #include #include -#include +#include static __inline__ int irq_canonicalize (int irq) diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 6b7edcab0cb..2a75e937ae8 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -81,17 +81,14 @@ define cmd_nr_irqs endef # We use internal kbuild rules to avoid the "is up to date" message from make -arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \ - $(wildcard $(srctree)/include/asm-ia64/*/irq.h) +arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c $(Q)mkdir -p $(dir $@) $(call if_changed_dep,cc_s_c) -include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s +include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s $(Q)mkdir -p $(dir $@) $(call cmd,nr_irqs) -clean-files += $(objtree)/include/asm-ia64/nr-irqs.h - # # native ivt.S, entry.S and fsys.S # -- cgit v1.2.3-70-g09d2 From 445c89514be242b1b0080056d50bdc1b72adeb5c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 19:49:50 +0100 Subject: locking: Convert raw_spinlock to arch_spinlock The raw_spin* namespace was taken by lockdep for the architecture specific implementations. raw_spin_* would be the ideal name space for the spinlocks which are not converted to sleeping locks in preempt-rt. Linus suggested to convert the raw_ to arch_ locks and cleanup the name space instead of using an artifical name like core_spin, atomic_spin or whatever No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 6 +++--- arch/alpha/include/asm/spinlock_types.h | 2 +- arch/arm/include/asm/spinlock.h | 6 +++--- arch/arm/include/asm/spinlock_types.h | 2 +- arch/blackfin/include/asm/spinlock.h | 10 +++++----- arch/blackfin/include/asm/spinlock_types.h | 2 +- arch/cris/include/arch-v32/arch/spinlock.h | 12 ++++++------ arch/ia64/include/asm/spinlock.h | 26 +++++++++++++------------- arch/ia64/include/asm/spinlock_types.h | 2 +- arch/m32r/include/asm/spinlock.h | 6 +++--- arch/m32r/include/asm/spinlock_types.h | 2 +- arch/mips/include/asm/spinlock.h | 10 +++++----- arch/mips/include/asm/spinlock_types.h | 2 +- arch/parisc/include/asm/atomic.h | 6 +++--- arch/parisc/include/asm/spinlock.h | 8 ++++---- arch/parisc/include/asm/spinlock_types.h | 4 ++-- arch/parisc/lib/bitops.c | 2 +- arch/powerpc/include/asm/rtas.h | 2 +- arch/powerpc/include/asm/spinlock.h | 14 +++++++------- arch/powerpc/include/asm/spinlock_types.h | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/lib/locks.c | 4 ++-- arch/powerpc/platforms/pasemi/setup.c | 2 +- arch/s390/include/asm/spinlock.h | 16 ++++++++-------- arch/s390/include/asm/spinlock_types.h | 2 +- arch/s390/lib/spinlock.c | 8 ++++---- arch/sh/include/asm/spinlock.h | 6 +++--- arch/sh/include/asm/spinlock_types.h | 2 +- arch/sparc/include/asm/spinlock_32.h | 6 +++--- arch/sparc/include/asm/spinlock_64.h | 8 ++++---- arch/sparc/include/asm/spinlock_types.h | 2 +- arch/x86/include/asm/paravirt.h | 12 ++++++------ arch/x86/include/asm/paravirt_types.h | 14 +++++++------- arch/x86/include/asm/spinlock.h | 30 +++++++++++++++--------------- arch/x86/include/asm/spinlock_types.h | 4 ++-- arch/x86/kernel/dumpstack.c | 2 +- 
arch/x86/kernel/paravirt-spinlocks.c | 2 +- arch/x86/kernel/tsc_sync.c | 2 +- arch/x86/xen/spinlock.c | 16 ++++++++-------- include/asm-generic/bitops/atomic.h | 6 +++--- include/linux/spinlock.h | 4 ++-- include/linux/spinlock_types.h | 2 +- include/linux/spinlock_types_up.h | 4 ++-- include/linux/spinlock_up.h | 8 ++++---- kernel/lockdep.c | 2 +- kernel/trace/ring_buffer.c | 4 ++-- kernel/trace/trace.c | 18 +++++++++--------- kernel/trace/trace_clock.c | 4 ++-- kernel/trace/trace_sched_wakeup.c | 4 ++-- kernel/trace/trace_stack.c | 4 ++-- lib/spinlock_debug.c | 2 +- 51 files changed, 164 insertions(+), 164 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index e38fb95cb33..bdb26a1940b 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -17,13 +17,13 @@ #define __raw_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) -static inline void __raw_spin_unlock(raw_spinlock_t * lock) +static inline void __raw_spin_unlock(arch_spinlock_t * lock) { mb(); lock->lock = 0; } -static inline void __raw_spin_lock(raw_spinlock_t * lock) +static inline void __raw_spin_lock(arch_spinlock_t * lock) { long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock) : "m"(lock->lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { return !test_and_set_bit(0, &lock->lock); } diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 8141eb5ebf0..bb94a51e53d 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c13681ac1ed..4e7712ee939 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -23,7 +23,7 @@ #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { smp_mb(); diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 43e83f6d2ee..5e9d3eadd16 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index b0c7f0ee4b0..fc16b4c5309 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -24,29 +24,29 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr); asmlinkage int __raw_write_trylock_asm(volatile int *ptr); asmlinkage void __raw_write_unlock_asm(volatile int *ptr); -static inline int __raw_spin_is_locked(raw_spinlock_t 
*lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __raw_spin_is_locked_asm(&lock->lock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { __raw_spin_lock_asm(&lock->lock); } #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __raw_spin_trylock_asm(&lock->lock); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __raw_spin_unlock_asm(&lock->lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index be75762c061..03b377abf5c 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -15,7 +15,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index 367a53ea10c..e253457765f 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val); extern void cris_spin_lock(void *l); extern int cris_spin_trylock(void *l); -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int __raw_spin_is_locked(arch_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __asm__ volatile ("move.d %1,%0" \ : "=m" (lock->slock) \ @@ -22,24 +22,24 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { return cris_spin_trylock((void *)&lock->slock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { cris_spin_lock((void *)&lock->slock); } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 239ecdc9516..9fbdf7e6108 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -38,7 +38,7 @@ #define TICKET_BITS 15 #define TICKET_MASK ((1 << TICKET_BITS) - 1) -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket, serve; @@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) } } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->lock); @@ -67,7 +67,7 @@ static __always_inline int 
__ticket_spin_trylock(raw_spinlock_t *lock) return 0; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; @@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } -static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket; @@ -89,53 +89,53 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) } } -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f1ab4..447ccc6ca7a 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index dded923883b..0c0164225bc 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -36,7 +36,7 @@ * __raw_spin_trylock() tries to get the lock and returns a result. * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 
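To make the ia64 ticket test renamed above concrete: the low bits of the lock word hold next_ticket (the tail) and the bits above TICKET_SHIFT hold now_serving (the head); the lock is held exactly when the two 15-bit fields differ, which is what ((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK checks. A stand-alone toy illustration in plain C; the SHIFT/MASK values here are demo placeholders, the real ones live in the ia64 header:

    #include <stdio.h>

    #define DEMO_SHIFT 16                    /* placeholder position of now_serving */
    #define DEMO_MASK  ((1 << 15) - 1)       /* 15-bit ticket numbers */

    static int is_locked(unsigned int lock)
    {
            /* head (now_serving) != tail (next_ticket) => someone holds it */
            return !!(((lock >> DEMO_SHIFT) ^ lock) & DEMO_MASK);
    }

    int main(void)
    {
            unsigned int lock = 0;              /* head == tail == 0: free    */
            printf("%d\n", is_locked(lock));    /* prints 0                   */

            lock += 1;                          /* take ticket 0, tail -> 1   */
            printf("%d\n", is_locked(lock));    /* prints 1: head 0, tail 1   */

            lock += 1u << DEMO_SHIFT;           /* unlock: head advances to 1 */
            printf("%d\n", is_locked(lock));    /* prints 0 again             */
            return 0;
    }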
*/ -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { int oldval; unsigned long tmp1, tmp2; @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (oldval > 0); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp0, tmp1; @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { mb(); lock->slock = 1; diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 83f52105c0e..17d15bd6322 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 5b60a09a0f0..0f16d0673b4 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -34,7 +34,7 @@ * becomes equal to the the initial value of the tail. */ -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); @@ -45,7 +45,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *lock) #define __raw_spin_unlock_wait(x) \ while (__raw_spin_is_locked(x)) { cpu_relax(); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); @@ -53,7 +53,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock) } #define __raw_spin_is_contended __raw_spin_is_contended -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { int my_ticket; int tmp; @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_llsc_mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { int tmp; @@ -174,7 +174,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) } } -static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) +static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) { int tmp, tmp2, tmp3; diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index adeedaa116c..2e1060892d3 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -12,7 +12,7 @@ typedef struct { * bits 15..28: ticket */ unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 8bc9e96699b..3a4ea778d4b 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -27,18 +27,18 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - 
raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ __raw_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ __raw_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index fae03e136fa..69e8dca2674 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -5,7 +5,7 @@ #include #include -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int __raw_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); return *a == 0; @@ -15,7 +15,7 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x) #define __raw_spin_unlock_wait(x) \ do { cpu_relax(); } while (__raw_spin_is_locked(x)) -static inline void __raw_spin_lock_flags(raw_spinlock_t *x, +static inline void __raw_spin_lock_flags(arch_spinlock_t *x, unsigned long flags) { volatile unsigned int *a; @@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x, mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *x) +static inline void __raw_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; mb(); @@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x) mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *x) +static inline int __raw_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; int ret; diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 3f72f47cf4b..735caafb81f 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -9,10 +9,10 @@ typedef struct { volatile unsigned int lock[4]; # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } #endif -} raw_spinlock_t; +} arch_spinlock_t; typedef struct { - raw_spinlock_t lock; + arch_spinlock_t lock; volatile int counter; } raw_rwlock_t; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index e3eb739fab1..fdd7f583de5 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -12,7 +12,7 @@ #include #ifdef CONFIG_SMP -raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { +arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 168fce72620..20de73c3668 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - raw_spinlock_t lock; + arch_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 198266cf9e2..c0d44c92ff0 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -54,7 +54,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. 
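The parisc atomic.h hunk further up renames the lock type used by its hashed-lock emulation of atomics: atomic operations are implemented by taking one of a small array of spinlocks, selected by hashing the variable's address (the ATOMIC_HASH macro visible in that hunk). A stripped-down illustration of the idea, not the parisc code itself; lock initialization is omitted and the names are invented:

    #define HASH_SIZE 4

    static spinlock_t hash_locks[HASH_SIZE];    /* each needs spin_lock_init() */

    static spinlock_t *lock_for(const void *addr)
    {
            /* hash on the address so unrelated variables rarely share a lock */
            return &hash_locks[((unsigned long)addr / 64) % HASH_SIZE];
    }

    static void emulated_atomic_inc(int *v)
    {
            unsigned long flags;
            spinlock_t *l = lock_for(v);

            spin_lock_irqsave(l, flags);
            (*v)++;
            spin_unlock_irqrestore(l, flags);
    }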
*/ -static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) +static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, token; @@ -73,7 +73,7 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) return tmp; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; return arch_spin_trylock(lock) == 0; @@ -96,7 +96,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) -extern void __spin_yield(raw_spinlock_t *lock); +extern void __spin_yield(arch_spinlock_t *lock); extern void __rw_yield(raw_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() @@ -104,7 +104,7 @@ extern void __rw_yield(raw_rwlock_t *lock); #define SHARED_PROCESSOR 0 #endif -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { @@ -120,7 +120,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) } static inline -void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; @@ -140,7 +140,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { SYNC_IO; __asm__ __volatile__("# __raw_spin_unlock\n\t" @@ -149,7 +149,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); +extern void __raw_spin_unlock_wait(arch_spinlock_t *lock); #else #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 74236c9f05b..4312e5baaf8 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index bf90361bb70..579069c1215 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node, return 1; } -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static u64 timebase = 0; void __cpuinit rtas_give_timebase(void) diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 79d0fa3a470..b06294cde49 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -25,7 +25,7 @@ #include #include -void __spin_yield(raw_spinlock_t *lock) +void __spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(raw_spinlock_t *lock) +void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index a4619347aa7..be36fece41d 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c 
@@ -71,7 +71,7 @@ static void pas_restart(char *cmd) } #ifdef CONFIG_SMP -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static unsigned long timebase; static void __devinit pas_give_timebase(void) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19c7a..6121fa4b83d 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -57,12 +57,12 @@ _raw_compare_and_swap(volatile unsigned int *lock, do { while (__raw_spin_is_locked(lock)) \ _raw_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(raw_spinlock_t *); -extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(raw_spinlock_t *); -extern void _raw_spin_relax(raw_spinlock_t *lock); +extern void _raw_spin_lock_wait(arch_spinlock_t *); +extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int _raw_spin_trylock_retry(arch_spinlock_t *); +extern void _raw_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(raw_spinlock_t *lp) +static inline void __raw_spin_lock(arch_spinlock_t *lp) { int old; @@ -72,7 +72,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lp) _raw_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, +static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -83,7 +83,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, _raw_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(raw_spinlock_t *lp) +static inline int __raw_spin_trylock(arch_spinlock_t *lp) { int old; @@ -93,7 +93,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lp) return _raw_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(raw_spinlock_t *lp) +static inline void __raw_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 654abc40de0..a93638eee3f 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int owner_cpu; -} __attribute__ ((aligned (4))) raw_spinlock_t; +} __attribute__ ((aligned (4))) arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f7e0d30250b..d4cbf71a607 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(raw_spinlock_t *lp) +void _raw_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -59,7 +59,7 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp) } EXPORT_SYMBOL(_raw_spin_lock_wait); -void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) +void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -82,7 +82,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) } EXPORT_SYMBOL(_raw_spin_lock_wait_flags); -int _raw_spin_trylock_retry(raw_spinlock_t *lp) +int _raw_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; @@ -97,7 +97,7 @@ int _raw_spin_trylock_retry(raw_spinlock_t *lp) } EXPORT_SYMBOL(_raw_spin_trylock_retry); -void _raw_spin_relax(raw_spinlock_t *lock) +void 
_raw_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0053f..5a05b3fcefb 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -34,7 +34,7 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; @@ -54,7 +54,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; @@ -67,7 +67,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e7b60..37712c32ba9 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 857630cff63..b2d8a67f727 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -15,7 +15,7 @@ #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 43e51478358..38e16c40efc 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -27,7 +27,7 @@ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { 
unsigned long tmp1, tmp2; diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 37cbe01c585..41d9a8fec13 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -7,7 +7,7 @@ typedef struct { volatile unsigned char lock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index efb38994859..5655f75f10b 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) -static inline int __raw_spin_is_locked(struct raw_spinlock *lock) +static inline int __raw_spin_is_locked(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); } -static inline int __raw_spin_is_contended(struct raw_spinlock *lock) +static inline int __raw_spin_is_contended(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) +static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_lock, lock); } -static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, +static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); } -static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) +static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); } -static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) +static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); } diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9357473c8da..b1e70d51e40 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -318,14 +318,14 @@ struct pv_mmu_ops { phys_addr_t phys, pgprot_t flags); }; -struct raw_spinlock; +struct arch_spinlock; struct pv_lock_ops { - int (*spin_is_locked)(struct raw_spinlock *lock); - int (*spin_is_contended)(struct raw_spinlock *lock); - void (*spin_lock)(struct raw_spinlock *lock); - void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); - int (*spin_trylock)(struct raw_spinlock *lock); - void (*spin_unlock)(struct raw_spinlock *lock); + int (*spin_is_locked)(struct arch_spinlock *lock); + int (*spin_is_contended)(struct arch_spinlock *lock); + void (*spin_lock)(struct arch_spinlock *lock); + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct arch_spinlock *lock); + void (*spin_unlock)(struct arch_spinlock *lock); }; /* This contains all the paravirt structures: we get a convenient diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853321d..204b524fcf5 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -58,7 +58,7 @@ #if (NR_CPUS < 256) #define TICKET_SHIFT 8 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { short inc = 0x0100; @@ 
-77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp, new; @@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incb %0" : "+m" (lock->slock) @@ -106,7 +106,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) #else #define TICKET_SHIFT 16 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int inc = 0x00010000; int tmp; @@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp; int new; @@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incw %0" : "+m" (lock->slock) @@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) } #endif -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); @@ -174,33 +174,33 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) #ifndef CONFIG_PARAVIRT_SPINLOCKS -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int __raw_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int __raw_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } #define __raw_spin_is_contended __raw_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); @@ -208,7 +208,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) { while (__raw_spin_is_locked(lock)) cpu_relax(); diff --git 
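/*
 * Editorial aside: a minimal sketch of the ticket-lock state test used by
 * __ticket_spin_is_locked() above, assuming the NR_CPUS < 256 layout in which
 * the low byte of slock is the ticket now being served and the next byte is
 * the next free ticket (lock adds 0x0100, unlock does incb).  The demo_*
 * names are illustrative only, not kernel API.
 */
#define DEMO_TICKET_SHIFT 8

static inline int demo_ticket_is_locked(unsigned int slock)
{
	/* locked whenever the next-free ticket differs from the one in service */
	return !!(((slock >> DEMO_TICKET_SHIFT) ^ slock) &
		  ((1 << DEMO_TICKET_SHIFT) - 1));
}

/*
 * Example: slock == 0x0503 means "now serving 3, next free ticket 5", so the
 * test yields non-zero (locked); slock == 0x0404 yields zero (unlocked).
 */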
a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 845f81c8709..2ae7637ed52 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -5,9 +5,9 @@ # error "please don't include this file directly" #endif -typedef struct raw_spinlock { +typedef struct arch_spinlock { unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b8ce165dde5..0862d9d89c9 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 3a7c5a44082..a0f39e09068 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -8,7 +8,7 @@ #include static inline void -default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { __raw_spin_lock(lock); } diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index eed156851f5..9f908b9d1ab 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 36a5141108d..24ded31b5ae 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -120,14 +120,14 @@ struct xen_spinlock { unsigned short spinners; /* count of waiting cpus */ }; -static int xen_spin_is_locked(struct raw_spinlock *lock) +static int xen_spin_is_locked(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; return xl->lock != 0; } -static int xen_spin_is_contended(struct raw_spinlock *lock) +static int xen_spin_is_contended(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; @@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock) return xl->spinners != 0; } -static int xen_spin_trylock(struct raw_spinlock *lock) +static int xen_spin_trylock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; u8 old = 1; @@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock __get_cpu_var(lock_spinners) = prev; } -static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) +static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; struct xen_spinlock *prev; @@ -254,7 +254,7 @@ out: return ret; } -static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) +static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; unsigned timeout; @@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock 
*lock, bool irq_enable) spin_time_accum_total(start_spin); } -static void xen_spin_lock(struct raw_spinlock *lock) +static void xen_spin_lock(struct arch_spinlock *lock) { __xen_spin_lock(lock, false); } -static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) +static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); } @@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) } } -static void xen_spin_unlock(struct raw_spinlock *lock) +static void xen_spin_unlock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index c8946465e63..dcf0afad4a7 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -15,18 +15,18 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ __raw_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ __raw_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a9aaa709fb9..5ef7a4c060b 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -8,7 +8,7 @@ * * on SMP builds: * - * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the + * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the * initializers * * linux/spinlock_types.h: @@ -75,7 +75,7 @@ #define __lockfunc __attribute__((section(".spinlock.text"))) /* - * Pull the raw_spinlock_t and raw_rwlock_t definitions: + * Pull the arch_spinlock_t and raw_rwlock_t definitions: */ #include diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index f979d5d8a16..d4af2d7a86e 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -18,7 +18,7 @@ #include typedef struct { - raw_spinlock_t raw_lock; + arch_spinlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0e198..34d36691c4e 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -16,13 +16,13 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 1 } #else -typedef struct { } raw_spinlock_t; +typedef struct { } arch_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { } diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index d4841ed8215..8ee2ac1bf63 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -20,19 +20,19 @@ #ifdef CONFIG_DEBUG_SPINLOCK #define __raw_spin_is_locked(x) ((x)->slock == 0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(raw_spinlock_t 
*lock, unsigned long flags) +__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return oldval > 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(arch_spinlock_t *lock) { lock->slock = 1; } diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 429540c70d3..7cc50c62af5 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... */ -static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index a1ca4956ab5..5ac8ee0a9e3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -423,7 +423,7 @@ struct ring_buffer_per_cpu { int cpu; struct ring_buffer *buffer; spinlock_t reader_lock; /* serialize readers */ - raw_spinlock_t lock; + arch_spinlock_t lock; struct lock_class_key lock_key; struct list_head *pages; struct buffer_page *head_page; /* read from head */ @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->buffer = buffer; spin_lock_init(&cpu_buffer->reader_lock); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); - cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), GFP_KERNEL, cpu_to_node(cpu)); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c82dfd92fdf..7d56cecc2c6 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) * protected by per_cpu spinlocks. But the action of the swap * needs its own lock. * - * This is defined as a raw_spinlock_t in order to help + * This is defined as a arch_spinlock_t in order to help * with performance when lockdep debugging is enabled. * * It is also used in other places outside the update_max_tr * so it needs to be defined outside of the * CONFIG_TRACER_MAX_TRACE. 
*/ -static raw_spinlock_t ftrace_max_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t ftrace_max_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE unsigned long __read_mostly tracing_max_latency; @@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static int cmdline_idx; -static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; /* temporary disable recording */ static atomic_t trace_record_cmdline_disabled __read_mostly; @@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { - static raw_spinlock_t trace_buf_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static u32 trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_bprint; @@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr, int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { - static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; static char trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_print; @@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s) static void __ftrace_dump(bool disable_tracing) { - static raw_spinlock_t ftrace_dump_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t ftrace_dump_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 878c03f386b..206ec3d4b3c 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -71,10 +71,10 @@ u64 notrace trace_clock(void) /* keep prev_time and lock in the same cacheline. 
*/ static struct { u64 prev_time; - raw_spinlock_t lock; + arch_spinlock_t lock; } trace_clock_struct ____cacheline_aligned_in_smp = { - .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, + .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, }; u64 notrace trace_clock_global(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 26185d72767..4cf7e83ec23 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -28,8 +28,8 @@ static int wakeup_current_cpu; static unsigned wakeup_prio = -1; static int wakeup_rt; -static raw_spinlock_t wakeup_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t wakeup_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static void __wakeup_reset(struct trace_array *tr); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 8504ac71e4e..9a82d568fde 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = { }; static unsigned long max_stack_size; -static raw_spinlock_t max_stack_lock = - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t max_stack_lock = + (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; static int stack_trace_disabled __read_mostly; static DEFINE_PER_CPU(int, trace_active); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 9c4b0256490..2acd501b382 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; -- cgit v1.2.3-70-g09d2 From edc35bd72e2079b25f99c5da7d7a65dbbffc4a26 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 12:38:57 +0100 Subject: locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED Further name space cleanup. No functional change Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock_types.h | 2 +- arch/arm/include/asm/spinlock_types.h | 2 +- arch/blackfin/include/asm/spinlock_types.h | 2 +- arch/ia64/include/asm/spinlock_types.h | 2 +- arch/m32r/include/asm/spinlock_types.h | 2 +- arch/mips/include/asm/spinlock_types.h | 2 +- arch/parisc/include/asm/spinlock_types.h | 6 +++--- arch/parisc/lib/bitops.c | 2 +- arch/powerpc/include/asm/spinlock_types.h | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/s390/include/asm/spinlock_types.h | 2 +- arch/sh/include/asm/spinlock_types.h | 2 +- arch/sparc/include/asm/spinlock_types.h | 2 +- arch/x86/include/asm/spinlock_types.h | 2 +- arch/x86/kernel/dumpstack.c | 2 +- arch/x86/kernel/tsc_sync.c | 2 +- include/linux/spinlock_types.h | 4 ++-- include/linux/spinlock_types_up.h | 4 ++-- kernel/lockdep.c | 2 +- kernel/trace/ring_buffer.c | 2 +- kernel/trace/trace.c | 10 +++++----- kernel/trace/trace_clock.c | 2 +- kernel/trace/trace_sched_wakeup.c | 2 +- kernel/trace/trace_stack.c | 2 +- lib/spinlock_debug.c | 2 +- 25 files changed, 33 insertions(+), 33 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index bb94a51e53d..08975ee0a10 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 5e9d3eadd16..9622e126a8d 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index 03b377abf5c..c8a3928a58c 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -17,7 +17,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 447ccc6ca7a..6a11b65fa66 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int read_counter : 31; diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 17d15bd6322..5873a870110 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile int lock; diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index 2e1060892d3..b4c5efaadb9 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -14,7 +14,7 @@ 
typedef struct { unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 735caafb81f..396d2746ca5 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -4,10 +4,10 @@ typedef struct { #ifdef CONFIG_PA20 volatile unsigned int slock; -# define __RAW_SPIN_LOCK_UNLOCKED { 1 } +# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else volatile unsigned int lock[4]; -# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } +# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } #endif } arch_spinlock_t; @@ -16,6 +16,6 @@ typedef struct { volatile int counter; } raw_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } +#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } #endif diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index fdd7f583de5..353963d4205 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -13,7 +13,7 @@ #ifdef CONFIG_SMP arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { - [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED + [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 4312e5baaf8..f5f39d82711 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile signed int lock; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 579069c1215..57dfa414cfb 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -42,7 +42,7 @@ #include struct rtas_t rtas = { - .lock = __RAW_SPIN_LOCK_UNLOCKED + .lock = __ARCH_SPIN_LOCK_UNLOCKED }; EXPORT_SYMBOL(rtas); diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index a93638eee3f..e25c0370f6c 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int owner_cpu; } __attribute__ ((aligned (4))) arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index 37712c32ba9..a3be2db960e 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned int lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 41d9a8fec13..c145e63a5d6 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct { volatile unsigned char lock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 2ae7637ed52..696f8364a4f 100644 --- 
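/*
 * Editorial aside: after this rename, a statically allocated arch-level lock
 * is initialized with __ARCH_SPIN_LOCK_UNLOCKED, exactly as the rtas.c and
 * parisc bitops.c hunks above do.  A minimal sketch in kernel code that
 * already pulls in linux/spinlock.h (demo_lock is an illustrative name):
 */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/*
 * The macro expands to the arch's own unlocked value ({ 0 } on most
 * architectures, { 1 } on sh/m32r, { { 1, 1, 1, 1 } } on parisc), so callers
 * never open-code the initializer.
 */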
a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -9,7 +9,7 @@ typedef struct arch_spinlock { unsigned int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { unsigned int lock; diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 0862d9d89c9..5b75afac8a3 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static arch_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 9f908b9d1ab..f1714697a09 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata arch_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index d4af2d7a86e..7dadce303eb 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -43,14 +43,14 @@ typedef struct { #ifdef CONFIG_DEBUG_SPINLOCK # define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ .magic = SPINLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ SPIN_DEP_MAP_INIT(lockname) } #else # define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ + (spinlock_t) { .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ SPIN_DEP_MAP_INIT(lockname) } #endif diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 34d36691c4e..10db021f487 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -18,13 +18,13 @@ typedef struct { volatile unsigned int slock; } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else typedef struct { } arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { } +#define __ARCH_SPIN_LOCK_UNLOCKED { } #endif diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 7cc50c62af5..2389e3f85cf 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644); * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... 
*/ -static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 5ac8ee0a9e3..fb7a0fa508b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) cpu_buffer->buffer = buffer; spin_lock_init(&cpu_buffer->reader_lock); lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); - cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), GFP_KERNEL, cpu_to_node(cpu)); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7d56cecc2c6..63bc1cc3821 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) * CONFIG_TRACER_MAX_TRACE. */ static arch_spinlock_t ftrace_max_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE unsigned long __read_mostly tracing_max_latency; @@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; static int cmdline_idx; -static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; /* temporary disable recording */ static atomic_t trace_record_cmdline_disabled __read_mostly; @@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { static arch_spinlock_t trace_buf_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static u32 trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_bprint; @@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr, int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { - static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; + static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; static char trace_buf[TRACE_BUF_SIZE]; struct ftrace_event_call *call = &event_print; @@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s) static void __ftrace_dump(bool disable_tracing) { static arch_spinlock_t ftrace_dump_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 206ec3d4b3c..433e2eda2d0 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -74,7 +74,7 @@ static struct { arch_spinlock_t lock; } trace_clock_struct ____cacheline_aligned_in_smp = { - .lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, + .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED, }; u64 notrace trace_clock_global(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 4cf7e83ec23..e347853564e 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -29,7 +29,7 @@ static 
unsigned wakeup_prio = -1; static int wakeup_rt; static arch_spinlock_t wakeup_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static void __wakeup_reset(struct trace_array *tr); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 9a82d568fde..728c3522148 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = { static unsigned long max_stack_size; static arch_spinlock_t max_stack_lock = - (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int stack_trace_disabled __read_mostly; static DEFINE_PER_CPU(int, trace_active); diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 2acd501b382..f7300413714 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -23,7 +23,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; + lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; -- cgit v1.2.3-70-g09d2 From 0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 2 Dec 2009 20:01:25 +0100 Subject: locking: Convert __raw_spin* functions to arch_spin* Name space cleanup. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 18 ++++++------ arch/arm/include/asm/spinlock.h | 20 ++++++------- arch/blackfin/include/asm/spinlock.h | 20 ++++++------- arch/cris/include/arch-v32/arch/spinlock.h | 46 +++++++++++++++--------------- arch/ia64/include/asm/bitops.h | 2 +- arch/ia64/include/asm/spinlock.h | 26 ++++++++--------- arch/m32r/include/asm/spinlock.h | 28 +++++++++--------- arch/mips/include/asm/spinlock.h | 36 +++++++++++------------ arch/parisc/include/asm/atomic.h | 4 +-- arch/parisc/include/asm/spinlock.h | 44 ++++++++++++++-------------- arch/powerpc/include/asm/spinlock.h | 32 ++++++++++----------- arch/powerpc/kernel/rtas.c | 12 ++++---- arch/powerpc/lib/locks.c | 4 +-- arch/powerpc/platforms/pasemi/setup.c | 8 +++--- arch/s390/include/asm/spinlock.h | 34 +++++++++++----------- arch/s390/lib/spinlock.c | 22 +++++++------- arch/sh/include/asm/spinlock.h | 26 ++++++++--------- arch/sparc/include/asm/spinlock_32.h | 20 ++++++------- arch/sparc/include/asm/spinlock_64.h | 18 ++++++------ arch/x86/include/asm/paravirt.h | 14 ++++----- arch/x86/include/asm/spinlock.h | 26 ++++++++--------- arch/x86/kernel/dumpstack.c | 6 ++-- arch/x86/kernel/paravirt-spinlocks.c | 2 +- arch/x86/kernel/tsc_sync.c | 8 +++--- include/asm-generic/bitops/atomic.h | 4 +-- include/linux/spinlock.h | 22 +++++++------- include/linux/spinlock_up.h | 26 ++++++++--------- kernel/lockdep.c | 18 ++++++------ kernel/mutex-debug.h | 4 +-- kernel/spinlock.c | 4 +-- kernel/trace/ring_buffer.c | 12 ++++---- kernel/trace/trace.c | 32 ++++++++++----------- kernel/trace/trace_clock.c | 4 +-- kernel/trace/trace_sched_wakeup.c | 12 ++++---- kernel/trace/trace_selftest.c | 4 +-- kernel/trace/trace_stack.c | 12 ++++---- lib/spinlock_debug.c | 8 +++--- 37 files changed, 319 insertions(+), 319 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/spinlock.h 
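/*
 * Editorial aside: a minimal sketch of what a call site looks like once this
 * patch is applied.  The arch_spin_* helpers operate directly on an
 * arch_spinlock_t and leave irq/preemption handling to the caller, as users
 * such as rtas.c in the diffstat above do.  demo_lock and demo_critical are
 * illustrative names, not kernel symbols.
 */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch locks do not mask irqs themselves */
	arch_spin_lock(&demo_lock);
	/* ... protected section ... */
	arch_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}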
b/arch/alpha/include/asm/spinlock.h index bdb26a1940b..4dac79f504c 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -12,18 +12,18 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(x) \ +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) -static inline void __raw_spin_unlock(arch_spinlock_t * lock) +static inline void arch_spin_unlock(arch_spinlock_t * lock) { mb(); lock->lock = 0; } -static inline void __raw_spin_lock(arch_spinlock_t * lock) +static inline void arch_spin_lock(arch_spinlock_t * lock) { long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t * lock) : "m"(lock->lock) : "memory"); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return !test_and_set_bit(0, &lock->lock); } @@ -169,8 +169,8 @@ static inline void __raw_write_unlock(raw_rwlock_t * lock) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ALPHA_SPINLOCK_H */ diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index 4e7712ee939..de62eb098f6 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -17,13 +17,13 @@ * Locked value: 1 */ -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) } } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); @@ -220,8 +220,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index fc16b4c5309..62d49540e02 100644 --- a/arch/blackfin/include/asm/spinlock.h 
+++ b/arch/blackfin/include/asm/spinlock.h @@ -24,31 +24,31 @@ asmlinkage void __raw_write_lock_asm(volatile int *ptr); asmlinkage int __raw_write_trylock_asm(volatile int *ptr); asmlinkage void __raw_write_unlock_asm(volatile int *ptr); -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __raw_spin_is_locked_asm(&lock->lock); } -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __raw_spin_lock_asm(&lock->lock); } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return __raw_spin_trylock_asm(&lock->lock); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __raw_spin_unlock_asm(&lock->lock); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } @@ -92,9 +92,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) __raw_write_unlock_asm(&rw->lock); } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index e253457765f..a2e8a394d55 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val); extern void cris_spin_lock(void *l); extern int cris_spin_trylock(void *l); -static inline int __raw_spin_is_locked(arch_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ volatile ("move.d %1,%0" \ : "=m" (lock->slock) \ @@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return cris_spin_trylock((void *)&lock->slock); } -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { cris_spin_lock((void *)&lock->slock); } static inline void -__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } /* @@ -68,64 +68,64 @@ static inline int __raw_write_can_lock(raw_rwlock_t *x) static inline void __raw_read_lock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock == 0); rw->lock--; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline void 
__raw_write_lock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = 0; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline void __raw_read_unlock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); rw->lock++; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = RW_LOCK_BIAS; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } static inline int __raw_read_trylock(raw_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock != 0) { rw->lock--; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return ret; } static inline int __raw_write_trylock(raw_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock == RW_LOCK_BIAS) { rw->lock = 0; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return 1; } #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_ARCH_SPINLOCK_H */ diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 57a2787bc9f..6ebc229a1c5 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr) * @addr: Address to start counting from * * Similarly to clear_bit_unlock, the implementation uses a store - * with release semantics. See also __raw_spin_unlock(). + * with release semantics. See also arch_spin_unlock(). 
*/ static __inline__ void __clear_bit_unlock(int nr, void *addr) diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 9fbdf7e6108..b06165f6352 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -17,7 +17,7 @@ #include #include -#define __raw_spin_lock_init(x) ((x)->lock = 0) +#define arch_spin_lock_init(x) ((x)->lock = 0) /* * Ticket locks are conceptually two parts, one indicating the current head of @@ -103,39 +103,39 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { __ticket_spin_unlock_wait(lock); } @@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x) return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 0c0164225bc..8acac950a43 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -24,19 +24,19 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) /** - * __raw_spin_trylock - Try spin lock and return a result + * arch_spin_trylock - Try spin lock and return a result * @lock: Pointer to the lock variable * - * __raw_spin_trylock() tries to get the lock and returns a result. + * arch_spin_trylock() tries to get the lock and returns a result. * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 
*/ -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { int oldval; unsigned long tmp1, tmp2; @@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_trylock \n\t" + "# arch_spin_trylock \n\t" "ldi %1, #0; \n\t" "mvfc %2, psw; \n\t" "clrpsw #0x40 -> nop; \n\t" @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return (oldval > 0); } -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp0, tmp1; @@ -84,7 +84,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_lock \n\t" + "# arch_spin_lock \n\t" ".fillinsn \n" "1: \n\t" "mvfc %1, psw; \n\t" @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { mb(); lock->slock = 1; @@ -319,8 +319,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_M32R_SPINLOCK_H */ diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 0f16d0673b4..95edebaaf22 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -34,33 +34,33 @@ * becomes equal to the the initial value of the tail. 
*/ -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return ((counters >> 14) ^ counters) & 0x1fff; } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - while (__raw_spin_is_locked(x)) { cpu_relax(); } +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + while (arch_spin_is_locked(x)) { cpu_relax(); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return (((counters >> 14) - counters) & 0x1fff) > 1; } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { int my_ticket; int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -94,7 +94,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) [my_ticket] "=&r" (my_ticket)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) smp_llsc_mb(); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { int tmp; @@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " # __raw_spin_unlock \n" + " # arch_spin_unlock \n" "1: ll %[ticket], %[ticket_ptr] \n" " addiu %[ticket], %[ticket], 1 \n" " ori %[ticket], %[ticket], 0x2000 \n" @@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) [ticket] "=&r" (tmp)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_unlock \n" + " .set push # arch_spin_unlock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) } } -static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) +static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) { int tmp, tmp2, tmp3; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(arch_spinlock_t *lock) [now_serving] "=&r" (tmp3)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -483,8 +483,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_SPINLOCK_H */ diff --git a/arch/parisc/include/asm/atomic.h 
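/*
 * Editorial aside: a minimal sketch of the hashed-lock scheme that the
 * asm-generic bitops and parisc atomic hunks in this series rely on -- an
 * address is hashed to one of a small array of arch_spinlock_t, and that lock
 * is taken with irqs off to emulate an atomic read-modify-write.  The demo_*
 * names are illustrative; kernel headers are assumed for arch_spinlock_t and
 * L1_CACHE_BYTES.
 */
#define DEMO_HASH_SIZE 4

static arch_spinlock_t demo_hash[DEMO_HASH_SIZE] = {
	[0 ... (DEMO_HASH_SIZE - 1)] = __ARCH_SPIN_LOCK_UNLOCKED
};

static inline arch_spinlock_t *demo_hash_lock(const void *addr)
{
	/* pick one lock per cache line's worth of addresses, as ATOMIC_HASH() does */
	return &demo_hash[((unsigned long)addr / L1_CACHE_BYTES) &
			  (DEMO_HASH_SIZE - 1)];
}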
b/arch/parisc/include/asm/atomic.h index 3a4ea778d4b..716634d1f54 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -34,12 +34,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; #define _atomic_spin_lock_irqsave(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 69e8dca2674..235e7e386e2 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -5,17 +5,17 @@ #include #include -static inline int __raw_spin_is_locked(arch_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); return *a == 0; } -#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) -static inline void __raw_spin_lock_flags(arch_spinlock_t *x, +static inline void arch_spin_lock_flags(arch_spinlock_t *x, unsigned long flags) { volatile unsigned int *a; @@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *x, mb(); } -static inline void __raw_spin_unlock(arch_spinlock_t *x) +static inline void arch_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; mb(); @@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *x) mb(); } -static inline int __raw_spin_trylock(arch_spinlock_t *x) +static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; int ret; @@ -73,9 +73,9 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } @@ -85,9 +85,9 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter--; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } @@ -98,9 +98,9 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) unsigned long flags; retry: local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); return 1; } @@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) return 0; /* Wait until we have a realistic chance at the lock */ - while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) + while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) cpu_relax(); goto retry; @@ -124,10 +124,10 @@ static __inline__ void __raw_write_lock(raw_rwlock_t *rw) unsigned long flags; retry: local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); if (rw->counter != 0) { - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); while (rw->counter != 0) @@ -144,7 
+144,7 @@ retry: static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) { rw->counter = 0; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } /* Note that we have to ensure interrupts are disabled in case we're @@ -155,13 +155,13 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) int result = 0; local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { if (rw->counter == 0) { rw->counter = -1; result = 1; } else { /* Read-locked. Oh well. */ - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } } local_irq_restore(flags); @@ -190,8 +190,8 @@ static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index c0d44c92ff0..cdcaf6b9708 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,7 +28,7 @@ #include #include -#define __raw_spin_is_locked(x) ((x)->slock != 0) +#define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -54,7 +54,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. */ -static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) +static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, token; @@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(arch_spinlock_t *lock) return tmp; } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; - return arch_spin_trylock(lock) == 0; + return __arch_spin_trylock(lock) == 0; } /* @@ -104,11 +104,11 @@ extern void __rw_yield(raw_rwlock_t *lock); #define SHARED_PROCESSOR 0 #endif -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; do { HMT_low(); @@ -120,13 +120,13 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) } static inline -void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; local_save_flags(flags_dis); local_irq_restore(flags); @@ -140,19 +140,19 @@ void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) } } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { SYNC_IO; - __asm__ __volatile__("# __raw_spin_unlock\n\t" + __asm__ __volatile__("# arch_spin_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); lock->slock = 0; } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(arch_spinlock_t *lock); +extern void arch_spin_unlock_wait(arch_spinlock_t *lock); #else -#define __raw_spin_unlock_wait(lock) \ - do { while 
(__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) #endif /* @@ -290,9 +290,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) __spin_yield(lock) -#define _raw_read_relax(lock) __rw_yield(lock) -#define _raw_write_relax(lock) __rw_yield(lock) +#define arch_spin_relax(lock) __spin_yield(lock) +#define arch_read_relax(lock) __rw_yield(lock) +#define arch_write_relax(lock) __rw_yield(lock) #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 57dfa414cfb..fd0d29493fd 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -80,13 +80,13 @@ static unsigned long lock_rtas(void) local_irq_save(flags); preempt_disable(); - __raw_spin_lock_flags(&rtas.lock, flags); + arch_spin_lock_flags(&rtas.lock, flags); return flags; } static void unlock_rtas(unsigned long flags) { - __raw_spin_unlock(&rtas.lock); + arch_spin_unlock(&rtas.lock); local_irq_restore(flags); preempt_enable(); } @@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void) { while (!timebase) barrier(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index b06294cde49..ee395e39211 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(arch_spinlock_t *lock) +void arch_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); @@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(arch_spinlock_t *lock) HMT_medium(); } -EXPORT_SYMBOL(__raw_spin_unlock_wait); +EXPORT_SYMBOL(arch_spin_unlock_wait); diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index be36fece41d..242f8095c2d 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -80,11 +80,11 @@ static void __devinit pas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); mtspr(SPRN_TBCTL, TBCTL_FREEZE); isync(); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void) while (!timebase) smp_rmb(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } struct smp_ops_t pas_smp_ops = { diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 6121fa4b83d..a94c146657a 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -52,27 +52,27 @@ 
_raw_compare_and_swap(volatile unsigned int *lock, * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) \ - _raw_spin_relax(lock); } while (0) +#define arch_spin_is_locked(x) ((x)->owner_cpu != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) \ + arch_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(arch_spinlock_t *); -extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(arch_spinlock_t *); -extern void _raw_spin_relax(arch_spinlock_t *lock); +extern void arch_spin_lock_wait(arch_spinlock_t *); +extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int arch_spin_trylock_retry(arch_spinlock_t *); +extern void arch_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(arch_spinlock_t *lp) +static inline void arch_spin_lock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait(lp); + arch_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, +static inline void arch_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *lp, old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait_flags(lp, flags); + arch_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(arch_spinlock_t *lp) +static inline int arch_spin_trylock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return 1; - return _raw_spin_trylock_retry(lp); + return arch_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(arch_spinlock_t *lp) +static inline void arch_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } @@ -188,7 +188,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return _raw_write_trylock_retry(rw); } -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index d4cbf71a607..f4596452f07 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(arch_spinlock_t *lp) +void arch_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; } } -EXPORT_SYMBOL(_raw_spin_lock_wait); +EXPORT_SYMBOL(arch_spin_lock_wait); -void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) +void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) 
+ if (arch_spin_is_locked(lp)) continue; local_irq_disable(); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) @@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) local_irq_restore(flags); } } -EXPORT_SYMBOL(_raw_spin_lock_wait_flags); +EXPORT_SYMBOL(arch_spin_lock_wait_flags); -int _raw_spin_trylock_retry(arch_spinlock_t *lp) +int arch_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; for (count = spin_retry; count > 0; count--) { - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return 1; } return 0; } -EXPORT_SYMBOL(_raw_spin_trylock_retry); +EXPORT_SYMBOL(arch_spin_trylock_retry); -void _raw_spin_relax(arch_spinlock_t *lock) +void arch_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) _raw_yield_cpu(~cpu); } -EXPORT_SYMBOL(_raw_spin_relax); +EXPORT_SYMBOL(arch_spin_relax); void _raw_read_lock_wait(raw_rwlock_t *rw) { diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index 5a05b3fcefb..da1c6491ed4 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -23,10 +23,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -#define __raw_spin_is_locked(x) ((x)->lock <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -34,14 +34,14 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_lock \n\t" + "movli.l @%2, %0 ! arch_spin_lock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -54,12 +54,12 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; __asm__ __volatile__ ( - "mov #1, %0 ! __raw_spin_unlock \n\t" + "mov #1, %0 ! arch_spin_unlock \n\t" "mov.l %0, @%1 \n\t" : "=&z" (tmp) : "r" (&lock->lock) @@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" + "movli.l @%2, %0 ! 
arch_spin_trylock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -219,8 +219,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SH_SPINLOCK_H */ diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index b2d8a67f727..9b0f2f53c81 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -10,12 +10,12 @@ #include -#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) +#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } @@ -176,13 +176,13 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) #define __raw_write_can_lock(rw) (!(rw)->lock) diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 38e16c40efc..7cf58a2fcda 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -21,13 +21,13 @@ * the spinner sections must be pre-V9 branches. 
*/ -#define __raw_spin_is_locked(lp) ((lp)->lock != 0) +#define arch_spin_is_locked(lp) ((lp)->lock != 0) -#define __raw_spin_unlock_wait(lp) \ +#define arch_spin_unlock_wait(lp) \ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(arch_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; @@ -222,9 +222,9 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) #define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) #define __raw_write_can_lock(rw) (!(rw)->lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* !(__ASSEMBLY__) */ diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 5655f75f10b..dd59a85a918 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) -static inline int __raw_spin_is_locked(struct arch_spinlock *lock) +static inline int arch_spin_is_locked(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); } -static inline int __raw_spin_is_contended(struct arch_spinlock *lock) +static inline int arch_spin_is_contended(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(struct arch_spinlock *lock) +static __always_inline void arch_spin_lock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_lock, lock); } -static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock, +static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); } -static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock) +static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); } -static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock) +static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); } diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 204b524fcf5..ab9055fd57d 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -174,43 +174,43 @@ static inline 
int __ticket_spin_is_contended(arch_spinlock_t *lock) #ifndef CONFIG_PARAVIRT_SPINLOCKS -static inline int __raw_spin_is_locked(arch_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(arch_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(arch_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } @@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() /* The {read|write|spin}_lock() on x86 are full memory barriers. */ static inline void smp_mb__after_lock(void) { } diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 5b75afac8a3..0a0aa1cec8f 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); - if (!__raw_spin_trylock(&die_lock)) { + if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else - __raw_spin_lock(&die_lock); + arch_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; @@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. 
*/ - __raw_spin_unlock(&die_lock); + arch_spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index a0f39e09068..676b8c77a97 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -10,7 +10,7 @@ static inline void default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } struct pv_lock_ops pv_lock_ops = { diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index f1714697a09..0aa5fed8b9e 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void) * previous TSC that was measured (possibly on * another CPU) and update the previous TSC timestamp. */ - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); prev = last_tsc; rdtsc_barrier(); now = get_cycles(); rdtsc_barrier(); last_tsc = now; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); /* * Be nice every now and then (and also check whether @@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void) * we saw a time-warp of the TSC going backwards: */ if (unlikely(prev > now)) { - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); max_warp = max(max_warp, prev - now); nr_warps++; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); } } WARN(!(now-start), diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index dcf0afad4a7..ecc44a8e2b4 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -22,12 +22,12 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; #define _atomic_spin_lock_irqsave(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ arch_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 5ef7a4c060b..de3a022489c 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -14,7 +14,7 @@ * linux/spinlock_types.h: * defines the generic type and initializers * - * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel + * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel * implementations, mostly inline assembly code * * (also included on UP-debug builds:) @@ -34,7 +34,7 @@ * defines the generic type and initializers * * linux/spinlock_up.h: - * contains the __raw_spin_*()/etc. version of UP + * contains the arch_spin_*()/etc. version of UP * builds. (which are NOPs on non-debug, non-preempt * builds) * @@ -103,17 +103,17 @@ do { \ do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) #endif -#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) +#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) #ifdef CONFIG_GENERIC_LOCKBREAK #define spin_is_contended(lock) ((lock)->break_lock) #else -#ifdef __raw_spin_is_contended -#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) +#ifdef arch_spin_is_contended +#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) #else #define spin_is_contended(lock) (((void)(lock), 0)) -#endif /*__raw_spin_is_contended*/ +#endif /*arch_spin_is_contended*/ #endif /* The lock does not imply full memory barrier. 
*/ @@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } * spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. */ -#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) +#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) #ifdef CONFIG_DEBUG_SPINLOCK extern void _raw_spin_lock(spinlock_t *lock); @@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } extern int _raw_spin_trylock(spinlock_t *lock); extern void _raw_spin_unlock(spinlock_t *lock); #else -# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) +# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock) # define _raw_spin_lock_flags(lock, flags) \ - __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) -# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) + arch_spin_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock) +# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock) #endif /* diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 8ee2ac1bf63..1d3bcc3cf7c 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -18,21 +18,21 @@ */ #ifdef CONFIG_DEBUG_SPINLOCK -#define __raw_spin_is_locked(x) ((x)->slock == 0) +#define arch_spin_is_locked(x) ((x)->slock == 0) -static inline void __raw_spin_lock(arch_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; } static inline void -__raw_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; } -static inline int __raw_spin_trylock(arch_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { char oldval = lock->slock; @@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(arch_spinlock_t *lock) return oldval > 0; } -static inline void __raw_spin_unlock(arch_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { lock->slock = 1; } @@ -57,20 +57,20 @@ static inline void __raw_spin_unlock(arch_spinlock_t *lock) #define __raw_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ -#define __raw_spin_is_locked(lock) ((void)(lock), 0) +#define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define __raw_spin_lock(lock) do { (void)(lock); } while (0) -# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) -# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) -# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) +# define arch_spin_lock(lock) do { (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ -#define __raw_spin_is_contended(lock) (((void)(lock), 0)) +#define arch_spin_is_contended(lock) (((void)(lock), 0)) #define __raw_read_can_lock(lock) (((void)(lock), 1)) #define __raw_write_can_lock(lock) (((void)(lock), 1)) -#define __raw_spin_unlock_wait(lock) \ - do { cpu_relax(); } while (__raw_spin_is_locked(lock)) +#define arch_spin_unlock_wait(lock) \ + do { cpu_relax(); } while (arch_spin_is_locked(lock)) #endif /* __LINUX_SPINLOCK_UP_H */ diff --git 
a/kernel/lockdep.c b/kernel/lockdep.c index 2389e3f85cf..5feaddcdbe4 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -77,7 +77,7 @@ static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED static int graph_lock(void) { - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); /* * Make sure that if another CPU detected a bug while * walking the graph we dont change it (while the other @@ -85,7 +85,7 @@ static int graph_lock(void) * dropped already) */ if (!debug_locks) { - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return 0; } /* prevent any recursions within lockdep from causing deadlocks */ @@ -95,11 +95,11 @@ static int graph_lock(void) static inline int graph_unlock(void) { - if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) + if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) return DEBUG_LOCKS_WARN_ON(1); current->lockdep_recursion--; - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return 0; } @@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void) { int ret = debug_locks_off(); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); return ret; } @@ -1170,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class) this.class = class; local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); ret = __lockdep_count_forward_deps(&this); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); local_irq_restore(flags); return ret; @@ -1197,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class) this.class = class; local_irq_save(flags); - __raw_spin_lock(&lockdep_lock); + arch_spin_lock(&lockdep_lock); ret = __lockdep_count_backward_deps(&this); - __raw_spin_unlock(&lockdep_lock); + arch_spin_unlock(&lockdep_lock); local_irq_restore(flags); return ret; diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h index 6b2d735846a..7bebbd15b34 100644 --- a/kernel/mutex-debug.h +++ b/kernel/mutex-debug.h @@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock) \ DEBUG_LOCKS_WARN_ON(in_interrupt()); \ local_irq_save(flags); \ - __raw_spin_lock(&(lock)->raw_lock); \ + arch_spin_lock(&(lock)->raw_lock); \ DEBUG_LOCKS_WARN_ON(l->magic != l); \ } while (0) #define spin_unlock_mutex(lock, flags) \ do { \ - __raw_spin_unlock(&(lock)->raw_lock); \ + arch_spin_unlock(&(lock)->raw_lock); \ local_irq_restore(flags); \ preempt_check_resched(); \ } while (0) diff --git a/kernel/spinlock.c b/kernel/spinlock.c index e6e13631843..fbb5f8b7835 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -53,7 +53,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ @@ -73,7 +73,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ - _raw_##op##_relax(&lock->raw_lock); \ + arch_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index fb7a0fa508b..f58c9ad1583 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) int ret; 
local_irq_save(flags); - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); again: /* @@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) goto again; out: - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); return reader; @@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) synchronize_sched(); spin_lock_irqsave(&cpu_buffer->reader_lock, flags); - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); rb_iter_reset(iter); - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return iter; @@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) goto out; - __raw_spin_lock(&cpu_buffer->lock); + arch_spin_lock(&cpu_buffer->lock); rb_reset_cpu(cpu_buffer); - __raw_spin_unlock(&cpu_buffer->lock); + arch_spin_unlock(&cpu_buffer->lock); out: spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 63bc1cc3821..bb6b5e7fa2a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); tr->buffer = max_tr.buffer; max_tr.buffer = buf; __update_max_tr(tr, tsk, cpu); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); } /** @@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) return; WARN_ON_ONCE(!irqs_disabled()); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); ftrace_disable_cpu(); @@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ @@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk) * nor do we want to disable interrupts, * so if we miss here, then better luck next time. 
*/ - if (!__raw_spin_trylock(&trace_cmdline_lock)) + if (!arch_spin_trylock(&trace_cmdline_lock)) return; idx = map_pid_to_cmdline[tsk->pid]; @@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk) memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); - __raw_spin_unlock(&trace_cmdline_lock); + arch_spin_unlock(&trace_cmdline_lock); } void trace_find_cmdline(int pid, char comm[]) @@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[]) } preempt_disable(); - __raw_spin_lock(&trace_cmdline_lock); + arch_spin_lock(&trace_cmdline_lock); map = map_pid_to_cmdline[pid]; if (map != NO_CMDLINE_MAP) strcpy(comm, saved_cmdlines[map]); else strcpy(comm, "<...>"); - __raw_spin_unlock(&trace_cmdline_lock); + arch_spin_unlock(&trace_cmdline_lock); preempt_enable(); } @@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) /* Lockdep uses trace_printk for lock tracing */ local_irq_save(flags); - __raw_spin_lock(&trace_buf_lock); + arch_spin_lock(&trace_buf_lock); len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); if (len > TRACE_BUF_SIZE || len < 0) @@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) ring_buffer_unlock_commit(buffer, event); out_unlock: - __raw_spin_unlock(&trace_buf_lock); + arch_spin_unlock(&trace_buf_lock); local_irq_restore(flags); out: @@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr, pause_graph_tracing(); raw_local_irq_save(irq_flags); - __raw_spin_lock(&trace_buf_lock); + arch_spin_lock(&trace_buf_lock); len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); size = sizeof(*entry) + len + 1; @@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr, ring_buffer_unlock_commit(buffer, event); out_unlock: - __raw_spin_unlock(&trace_buf_lock); + arch_spin_unlock(&trace_buf_lock); raw_local_irq_restore(irq_flags); unpause_graph_tracing(); out: @@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, mutex_lock(&tracing_cpumask_update_lock); local_irq_disable(); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are @@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, atomic_dec(&global_trace.data[cpu]->disabled); } } - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); local_irq_enable(); cpumask_copy(tracing_cpumask, tracing_cpumask_new); @@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing) /* only one dump */ local_irq_save(flags); - __raw_spin_lock(&ftrace_dump_lock); + arch_spin_lock(&ftrace_dump_lock); if (dump_ran) goto out; @@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing) } out: - __raw_spin_unlock(&ftrace_dump_lock); + arch_spin_unlock(&ftrace_dump_lock); local_irq_restore(flags); } diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 433e2eda2d0..84a3a7ba072 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void) if (unlikely(in_nmi())) goto out; - __raw_spin_lock(&trace_clock_struct.lock); + arch_spin_lock(&trace_clock_struct.lock); /* * TODO: if this happens often then maybe we should reset @@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void) trace_clock_struct.prev_time = now; - __raw_spin_unlock(&trace_clock_struct.lock); + arch_spin_unlock(&trace_clock_struct.lock); out: raw_local_irq_restore(flags); diff --git 
a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index e347853564e..0271742abb8 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, goto out; local_irq_save(flags); - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); /* We could race with grabbing wakeup_lock */ if (unlikely(!tracer_enabled || next != wakeup_task)) @@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, out_unlock: __wakeup_reset(wakeup_trace); - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); @@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr) tracing_reset_online_cpus(tr); local_irq_save(flags); - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); __wakeup_reset(tr); - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); local_irq_restore(flags); } @@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) goto out; /* interrupts should be off from try_to_wake_up */ - __raw_spin_lock(&wakeup_lock); + arch_spin_lock(&wakeup_lock); /* check for races. */ if (!tracer_enabled || p->prio >= wakeup_prio) @@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); out_locked: - __raw_spin_unlock(&wakeup_lock); + arch_spin_unlock(&wakeup_lock); out: atomic_dec(&wakeup_trace->data[cpu]->disabled); } diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index dc98309e839..280fea470d6 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) /* Don't allow flipping of max traces now */ local_irq_save(flags); - __raw_spin_lock(&ftrace_max_lock); + arch_spin_lock(&ftrace_max_lock); cnt = ring_buffer_entries(tr->buffer); @@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) break; } tracing_on(); - __raw_spin_unlock(&ftrace_max_lock); + arch_spin_unlock(&ftrace_max_lock); local_irq_restore(flags); if (count) diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 728c3522148..678a5120ee3 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -54,7 +54,7 @@ static inline void check_stack(void) return; local_irq_save(flags); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); /* a race could have already updated it */ if (this_size <= max_stack_size) @@ -103,7 +103,7 @@ static inline void check_stack(void) } out: - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); } @@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, return ret; local_irq_save(flags); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); *ptr = val; - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); return count; @@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) static void *t_start(struct seq_file *m, loff_t *pos) { local_irq_disable(); - __raw_spin_lock(&max_stack_lock); + arch_spin_lock(&max_stack_lock); if (*pos == 0) return SEQ_START_TOKEN; @@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos) static void 
t_stop(struct seq_file *m, void *p) { - __raw_spin_unlock(&max_stack_lock); + arch_spin_unlock(&max_stack_lock); local_irq_enable(); } diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index f7300413714..1304fe09454 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_spin_trylock(&lock->raw_lock)) + if (arch_spin_trylock(&lock->raw_lock)) return; __delay(1); } @@ -128,14 +128,14 @@ static void __spin_lock_debug(spinlock_t *lock) void _raw_spin_lock(spinlock_t *lock) { debug_spin_lock_before(lock); - if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) + if (unlikely(!arch_spin_trylock(&lock->raw_lock))) __spin_lock_debug(lock); debug_spin_lock_after(lock); } int _raw_spin_trylock(spinlock_t *lock) { - int ret = __raw_spin_trylock(&lock->raw_lock); + int ret = arch_spin_trylock(&lock->raw_lock); if (ret) debug_spin_lock_after(lock); @@ -151,7 +151,7 @@ int _raw_spin_trylock(spinlock_t *lock) void _raw_spin_unlock(spinlock_t *lock) { debug_spin_unlock(lock); - __raw_spin_unlock(&lock->raw_lock); + arch_spin_unlock(&lock->raw_lock); } static void rwlock_bug(rwlock_t *lock, const char *msg) -- cgit v1.2.3-70-g09d2 From fb3a6bbc912b12347614e5742c7c61416cdb0ca0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:01:19 +0100 Subject: locking: Convert raw_rwlock to arch_rwlock Not strictly necessary for -rt as -rt does not have non sleeping rwlocks, but it's odd to not have a consistent naming convention. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 16 ++++++++-------- arch/alpha/include/asm/spinlock_types.h | 4 ++-- arch/arm/include/asm/spinlock.h | 12 ++++++------ arch/arm/include/asm/spinlock_types.h | 4 ++-- arch/blackfin/include/asm/spinlock.h | 16 ++++++++-------- arch/blackfin/include/asm/spinlock_types.h | 4 ++-- arch/cris/include/arch-v32/arch/spinlock.h | 16 ++++++++-------- arch/ia64/include/asm/spinlock.h | 16 ++++++++-------- arch/ia64/include/asm/spinlock_types.h | 4 ++-- arch/m32r/include/asm/spinlock.h | 12 ++++++------ arch/m32r/include/asm/spinlock_types.h | 4 ++-- arch/mips/include/asm/spinlock.h | 12 ++++++------ arch/mips/include/asm/spinlock_types.h | 4 ++-- arch/parisc/include/asm/spinlock.h | 16 ++++++++-------- arch/parisc/include/asm/spinlock_types.h | 4 ++-- arch/powerpc/include/asm/spinlock.h | 18 +++++++++--------- arch/powerpc/include/asm/spinlock_types.h | 4 ++-- arch/powerpc/lib/locks.c | 2 +- arch/s390/include/asm/spinlock.h | 30 +++++++++++++++--------------- arch/s390/include/asm/spinlock_types.h | 4 ++-- arch/s390/lib/spinlock.c | 12 ++++++------ arch/sh/include/asm/spinlock.h | 12 ++++++------ arch/sh/include/asm/spinlock_types.h | 4 ++-- arch/sparc/include/asm/spinlock_32.h | 20 ++++++++++---------- arch/sparc/include/asm/spinlock_64.h | 12 ++++++------ arch/sparc/include/asm/spinlock_types.h | 4 ++-- arch/x86/include/asm/spinlock.h | 16 ++++++++-------- arch/x86/include/asm/spinlock_types.h | 4 ++-- include/linux/rwlock_types.h | 6 +++--- include/linux/spinlock.h | 4 ++-- include/linux/spinlock_types_up.h | 4 ++-- lib/spinlock_debug.c | 2 +- 32 files changed, 151 insertions(+), 151 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index 4dac79f504c..e8b2970f037 100644 --- 
a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) /***********************************************************/ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int __raw_read_can_lock(arch_rwlock_t *lock) { return (lock->lock & 1) == 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int __raw_write_can_lock(arch_rwlock_t *lock) { return lock->lock == 0; } -static inline void __raw_read_lock(raw_rwlock_t *lock) +static inline void __raw_read_lock(arch_rwlock_t *lock) { long regx; @@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *lock) +static inline void __raw_write_lock(arch_rwlock_t *lock) { long regx; @@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t * lock) +static inline int __raw_read_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock) return success; } -static inline int __raw_write_trylock(raw_rwlock_t * lock) +static inline int __raw_write_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock) return success; } -static inline void __raw_read_unlock(raw_rwlock_t * lock) +static inline void __raw_read_unlock(arch_rwlock_t * lock) { long regx; __asm__ __volatile__( @@ -160,7 +160,7 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t * lock) +static inline void __raw_write_unlock(arch_rwlock_t * lock) { mb(); lock->lock = 0; diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 08975ee0a10..54c2afce0a1 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index de62eb098f6..a8671d8bc7d 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) } } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. 
*/ -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cc"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 9622e126a8d..d14d197ae04 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index 62d49540e02..7e1c56b0a57 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -52,42 +52,42 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) cpu_relax(); } -static inline int __raw_read_can_lock(raw_rwlock_t *rw) +static inline int __raw_read_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *rw) +static inline int __raw_write_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { __raw_read_lock_asm(&rw->lock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { return __raw_read_trylock_asm(&rw->lock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { __raw_read_unlock_asm(&rw->lock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { __raw_write_lock_asm(&rw->lock); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { return __raw_write_trylock_asm(&rw->lock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { __raw_write_unlock_asm(&rw->lock); } diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index c8a3928a58c..1a33608c958 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -21,8 +21,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index a2e8a394d55..1d7d3a8046c 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) * */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) +static inline int __raw_read_can_lock(arch_rwlock_t *x) { return (int)(x)->lock > 0; } -static inline int 
__raw_write_can_lock(raw_rwlock_t *x) +static inline int __raw_write_can_lock(arch_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock == 0); @@ -74,7 +74,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -82,14 +82,14 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); rw->lock++; arch_spin_unlock(&rw->slock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -97,7 +97,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); @@ -109,7 +109,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index b06165f6352..6715b6a8ebc 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -146,7 +146,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) +__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -177,7 +177,7 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) #define __raw_read_lock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -190,14 +190,14 @@ do { \ #define __raw_read_unlock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) +__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void __raw_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(raw_rwlock_t *x) +static inline int __raw_read_trylock(arch_rwlock_t *x) { 
union { - raw_rwlock_t lock; + arch_rwlock_t lock; __u32 word; } old, new; old.lock = new.lock = *x; diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 6a11b65fa66..e2b42a52a6d 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -14,8 +14,8 @@ typedef struct { typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } #endif diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 8acac950a43..1c76af8c8e1 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -148,7 +148,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) */ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t*)lock; if (atomic_dec_return(count) >= 0) @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 5873a870110..92e27672661 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -13,11 +13,11 @@ typedef struct { typedef struct { volatile int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 95edebaaf22..7bf27c8a336 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -256,7 +256,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) */ #define __raw_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned int tmp; @@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. 
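   A rough C-level view of the decrement that follows; ll()/sc() here are
   only stand-ins for the load-linked/store-conditional pair in the real
   asm, not existing helpers:

       do {
               old = ll(&rw->lock);          load the current reader count
               new = old - 1;                'sub' traps on signed overflow,
                                             where 'subu' would silently wrap
       } while (!sc(&rw->lock, new));        store lost the reservation, retry

   so the broken cases named above blow up at the offending instruction
   instead of quietly corrupting the reader count.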
*/ -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -335,7 +335,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned int tmp; @@ -377,7 +377,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_llsc_mb(); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -389,7 +389,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; @@ -433,7 +433,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index b4c5efaadb9..ee197c2f9c9 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -18,8 +18,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 235e7e386e2..1ff3a0a94a4 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_lock(raw_rwlock_t *rw) +static __inline__ void __raw_read_lock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) +static __inline__ void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) +static __inline__ int __raw_read_trylock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void __raw_write_lock(raw_rwlock_t *rw) +static __inline__ void __raw_write_lock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -141,7 +141,7 @@ retry: local_irq_restore(flags); } -static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) +static __inline__ void __raw_write_unlock(arch_rwlock_t *rw) { rw->counter = 0; arch_spin_unlock(&rw->lock); @@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by 
some other code that wants to read_trylock() this lock */ -static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) +static __inline__ int __raw_write_trylock(arch_rwlock_t *rw) { unsigned long flags; int result = 0; @@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) +static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw) { return rw->counter >= 0; } @@ -182,7 +182,7 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) +static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw) { return !rw->counter; } diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 396d2746ca5..8c373aa28a8 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -14,8 +14,8 @@ typedef struct { typedef struct { arch_spinlock_t lock; volatile int counter; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } #endif diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index cdcaf6b9708..2fad2c07c59 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -97,7 +97,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) extern void __spin_yield(arch_spinlock_t *lock); -extern void __rw_yield(raw_rwlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() #define __rw_yield(x) barrier() @@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock); * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static inline long arch_read_trylock(raw_rwlock_t *rw) +static inline long arch_read_trylock(arch_rwlock_t *rw) { long tmp; @@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw) * This returns the old value in the lock, * so we got the write lock if the return value is 0. 
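 * As a reading aid, the trylock wrappers further down in this file test
 * exactly these two conventions (a sketch only, nothing new added here):
 *
 *     read side:   the helper returns old + 1, so a result > 0 means the
 *                  reader count was bumped with no writer present
 *     write side:  the helper returns the old value, so a result of 0
 *                  means the word was completely free and our token
 *                  went in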
*/ -static inline long arch_write_trylock(raw_rwlock_t *rw) +static inline long arch_write_trylock(arch_rwlock_t *rw) { long tmp, token; @@ -225,7 +225,7 @@ static inline long arch_write_trylock(raw_rwlock_t *rw) return tmp; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { while (1) { if (likely(arch_read_trylock(rw) > 0)) @@ -239,7 +239,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { while (1) { if (likely(arch_write_trylock(rw) == 0)) @@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) } } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { return arch_read_trylock(rw) > 0; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { return arch_write_trylock(rw) == 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { long tmp; @@ -280,7 +280,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cr0", "xer", "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index f5f39d82711..2351adc4fdc 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile signed int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index ee395e39211..58e14fba11b 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -55,7 +55,7 @@ void __spin_yield(arch_spinlock_t *lock) * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. 
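 * (A hedged sketch of the flow below, pieced together from the
 *  surrounding declarations rather than the elided body; the decode of
 *  the holder is paraphrased, not quoted:
 *
 *      lock_value = rw->lock;
 *      if (lock_value >= 0)
 *              return;                     only readers, holder unknown
 *      holder_cpu = <low bits of the write holder's token in lock_value>;
 *      if that vcpu's lppaca yield count says it has been preempted,
 *              confer the rest of our time slice to it via the hypervisor;
 *
 *  a positive reader count names nobody, which is why one routine can
 *  serve both lock types.)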
*/ -void __rw_yield(raw_rwlock_t *rw) +void __rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index a94c146657a..7f98f0e48ac 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -121,14 +121,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) */ #define __raw_write_can_lock(x) ((x)->lock == 0) -extern void _raw_read_lock_wait(raw_rwlock_t *lp); -extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_read_trylock_retry(raw_rwlock_t *lp); -extern void _raw_write_lock_wait(raw_rwlock_t *lp); -extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_write_trylock_retry(raw_rwlock_t *lp); - -static inline void __raw_read_lock(raw_rwlock_t *rw) +extern void _raw_read_lock_wait(arch_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_read_trylock_retry(arch_rwlock_t *lp); +extern void _raw_write_lock_wait(arch_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_write_trylock_retry(arch_rwlock_t *lp); + +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,7 +181,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index e25c0370f6c..9c76656a0af 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define 
__RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f4596452f07..09fee9a1aa1 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -105,7 +105,7 @@ void arch_spin_relax(arch_spinlock_t *lock) } EXPORT_SYMBOL(arch_spin_relax); -void _raw_read_lock_wait(raw_rwlock_t *rw) +void _raw_read_lock_wait(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); -void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; int count = spin_retry; @@ -145,7 +145,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_read_lock_wait_flags); -int _raw_read_trylock_retry(raw_rwlock_t *rw) +int _raw_read_trylock_retry(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_trylock_retry); -void _raw_write_lock_wait(raw_rwlock_t *rw) +void _raw_write_lock_wait(arch_rwlock_t *rw) { int count = spin_retry; @@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); -void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { int count = spin_retry; @@ -197,7 +197,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_write_lock_wait_flags); -int _raw_write_trylock_retry(raw_rwlock_t *rw) +int _raw_write_trylock_retry(arch_rwlock_t *rw) { int count = spin_retry; diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index da1c6491ed4..7f3626aac86 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -108,7 +108,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) */ #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; @@ -142,7 +142,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -160,7 +160,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( "mov.l %1, @%0 ! 
__raw_write_unlock \n\t" @@ -170,7 +170,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int __raw_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; @@ -193,7 +193,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index a3be2db960e..9b7560db06c 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -13,9 +13,9 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 9b0f2f53c81..06d37e588fd 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -65,7 +65,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * Sort of like atomic_t's on Sparc, but even more clever. * * ------------------------------------ - * | 24-bit counter | wlock | raw_rwlock_t + * | 24-bit counter | wlock | arch_rwlock_t * ------------------------------------ * 31 8 7 0 * @@ -76,9 +76,9 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. */ -static inline void arch_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -96,9 +96,9 @@ do { unsigned long flags; \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -116,9 +116,9 @@ do { unsigned long flags; \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int __raw_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); lp = rw; __asm__ __volatile__( diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 7cf58a2fcda..2b22d7f2c2f 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -92,7 +92,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... 
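 * (Orientation note, inferred from the hunks around this point: the
 *  32-bit code above packs a byte-wide write flag and a 24-bit reader
 *  count into one word, hence the "~16,000,000 cpus" remark, and jumps
 *  to out-of-line helpers with the lock pointer pre-loaded into %g1;
 *  the 64-bit code below keeps a plain 32-bit word and inlines its
 *  atomic sequences, which is why arch_write_unlock() can simply store
 *  zero.)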
*/ -static void inline arch_read_lock(raw_rwlock_t *lock) +static void inline arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_read_trylock(raw_rwlock_t *lock) +static int inline arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock) return tmp1; } -static void inline arch_read_unlock(raw_rwlock_t *lock) +static void inline arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_lock(raw_rwlock_t *lock) +static void inline arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_unlock(raw_rwlock_t *lock) +static void inline arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_write_trylock(raw_rwlock_t *lock) +static int inline arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index c145e63a5d6..9c454fdeaad 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct { typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index ab9055fd57d..99cb86e843a 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int __raw_read_can_lock(arch_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
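 * A short summary of the bias scheme the routines below rely on (just a
 * sketch; RW_LOCK_BIAS is the arch-defined "unlocked" value):
 *
 *     lock == RW_LOCK_BIAS          unlocked
 *     RW_LOCK_BIAS - n, n > 0       n readers, each did "lock subl $1"
 *     lock <= 0                     a writer subtracted the whole bias
 *
 * which is why read_can_lock() only has to check for a positive word,
 * write_can_lock() compares against the bias itself, and the unlock
 * paths simply add the same amount back.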
*/ -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int __raw_write_can_lock(arch_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void __raw_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void __raw_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int __raw_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int __raw_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,12 +284,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void __raw_read_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void __raw_write_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 696f8364a4f..dcb48b2edc1 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -13,8 +13,8 @@ typedef struct arch_spinlock { typedef struct { unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_X86_SPINLOCK_TYPES_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index f8c935206a4..bd31808c7d8 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -9,7 +9,7 @@ * Released under the General Public License (GPL). 
*/ typedef struct { - raw_rwlock_t raw_lock; + arch_rwlock_t raw_lock; #ifdef CONFIG_GENERIC_LOCKBREAK unsigned int break_lock; #endif @@ -32,14 +32,14 @@ typedef struct { #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ .magic = RWLOCK_MAGIC, \ .owner = SPINLOCK_OWNER_INIT, \ .owner_cpu = -1, \ RW_DEP_MAP_INIT(lockname) } #else #define __RW_LOCK_UNLOCKED(lockname) \ - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ RW_DEP_MAP_INIT(lockname) } #endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index de3a022489c..53bc2213b41 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -8,7 +8,7 @@ * * on SMP builds: * - * asm/spinlock_types.h: contains the arch_spinlock_t/raw_rwlock_t and the + * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * * linux/spinlock_types.h: @@ -75,7 +75,7 @@ #define __lockfunc __attribute__((section(".spinlock.text"))) /* - * Pull the arch_spinlock_t and raw_rwlock_t definitions: + * Pull the arch_spinlock_t and arch_rwlock_t definitions: */ #include diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 10db021f487..c09b6407ae1 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -30,8 +30,8 @@ typedef struct { } arch_spinlock_t; typedef struct { /* no debug version on UP */ -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { } +#define __ARCH_RW_LOCK_UNLOCKED { } #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 1304fe09454..3f72f10d9cb 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map(&lock->dep_map, name, key, 0); #endif - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; + lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; lock->magic = RWLOCK_MAGIC; lock->owner = SPINLOCK_OWNER_INIT; lock->owner_cpu = -1; -- cgit v1.2.3-70-g09d2 From e5931943d02bf751b1ec849c0d2ade23d76a8d41 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 3 Dec 2009 20:08:46 +0100 Subject: locking: Convert raw_rwlock functions to arch_rwlock Name space cleanup for rwlock functions. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Acked-by: David S. 
Miller Acked-by: Ingo Molnar Cc: linux-arch@vger.kernel.org --- arch/alpha/include/asm/spinlock.h | 20 +++++++------- arch/arm/include/asm/spinlock.h | 20 +++++++------- arch/blackfin/include/asm/spinlock.h | 40 ++++++++++++++-------------- arch/cris/include/arch-v32/arch/spinlock.h | 16 ++++++------ arch/ia64/include/asm/spinlock.h | 32 +++++++++++------------ arch/m32r/include/asm/spinlock.h | 20 +++++++------- arch/mips/include/asm/spinlock.h | 42 +++++++++++++++--------------- arch/parisc/include/asm/spinlock.h | 20 +++++++------- arch/powerpc/include/asm/spinlock.h | 32 +++++++++++------------ arch/s390/include/asm/spinlock.h | 20 +++++++------- arch/s390/lib/spinlock.c | 12 ++++----- arch/sh/include/asm/spinlock.h | 32 +++++++++++------------ arch/sparc/include/asm/spinlock_32.h | 32 +++++++++++------------ arch/sparc/include/asm/spinlock_64.h | 22 ++++++++-------- arch/x86/include/asm/spinlock.h | 20 +++++++------- include/linux/rwlock.h | 20 +++++++------- include/linux/spinlock_up.h | 16 ++++++------ lib/spinlock_debug.c | 16 ++++++------ 18 files changed, 216 insertions(+), 216 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index e8b2970f037..d0faca1e992 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) /***********************************************************/ -static inline int __raw_read_can_lock(arch_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (lock->lock & 1) == 0; } -static inline int __raw_write_can_lock(arch_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return lock->lock == 0; } -static inline void __raw_read_lock(arch_rwlock_t *lock) +static inline void arch_read_lock(arch_rwlock_t *lock) { long regx; @@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_lock(arch_rwlock_t *lock) +static inline void arch_write_lock(arch_rwlock_t *lock) { long regx; @@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline int __raw_read_trylock(arch_rwlock_t * lock) +static inline int arch_read_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arch_rwlock_t * lock) return success; } -static inline int __raw_write_trylock(arch_rwlock_t * lock) +static inline int arch_write_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(arch_rwlock_t * lock) return success; } -static inline void __raw_read_unlock(arch_rwlock_t * lock) +static inline void arch_read_unlock(arch_rwlock_t * lock) { long regx; __asm__ __volatile__( @@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arch_rwlock_t * lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_unlock(arch_rwlock_t * lock) +static inline void arch_write_unlock(arch_rwlock_t * lock) { mb(); lock->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/arm/include/asm/spinlock.h 
b/arch/arm/include/asm/spinlock.h index a8671d8bc7d..c91c64cab92 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) } } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -142,7 +142,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) } /* write_can_lock - would write_trylock() succeed? */ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) /* * Read locks are a bit more hairy: @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. */ -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) : "cc"); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; @@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? 
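 * A compact sketch of the word encoding, read off the write_can_lock
 * test above and the read_can_lock test just below; only the exact
 * writer value is assumed here:
 *
 *     0                    free
 *     1 .. 0x7fffffff      that many readers holding it
 *     0x80000000           held by a writer, written back to zero on
 *                          write_unlock as noted above
 *
 * so a writer may enter only when the word is 0, and a reader whenever
 * the top bit is clear.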
*/ -#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index 7e1c56b0a57..1942ccfedbe 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr); asmlinkage void __raw_spin_lock_asm(volatile int *ptr); asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); -asmlinkage void __raw_read_lock_asm(volatile int *ptr); -asmlinkage int __raw_read_trylock_asm(volatile int *ptr); -asmlinkage void __raw_read_unlock_asm(volatile int *ptr); -asmlinkage void __raw_write_lock_asm(volatile int *ptr); -asmlinkage int __raw_write_trylock_asm(volatile int *ptr); -asmlinkage void __raw_write_unlock_asm(volatile int *ptr); +asmlinkage void arch_read_lock_asm(volatile int *ptr); +asmlinkage int arch_read_trylock_asm(volatile int *ptr); +asmlinkage void arch_read_unlock_asm(volatile int *ptr); +asmlinkage void arch_write_lock_asm(volatile int *ptr); +asmlinkage int arch_write_trylock_asm(volatile int *ptr); +asmlinkage void arch_write_unlock_asm(volatile int *ptr); static inline int arch_spin_is_locked(arch_spinlock_t *lock) { @@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) cpu_relax(); } -static inline int __raw_read_can_lock(arch_rwlock_t *rw) +static inline int arch_read_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) > 0; } -static inline int __raw_write_can_lock(arch_rwlock_t *rw) +static inline int arch_write_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - __raw_read_lock_asm(&rw->lock); + arch_read_lock_asm(&rw->lock); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return __raw_read_trylock_asm(&rw->lock); + return arch_read_trylock_asm(&rw->lock); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - __raw_read_unlock_asm(&rw->lock); + arch_read_unlock_asm(&rw->lock); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - __raw_write_lock_asm(&rw->lock); + arch_write_lock_asm(&rw->lock); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return __raw_write_trylock_asm(&rw->lock); + return arch_write_trylock_asm(&rw->lock); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { - __raw_write_unlock_asm(&rw->lock); + arch_write_unlock_asm(&rw->lock); } #define arch_spin_relax(lock) cpu_relax() diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index 1d7d3a8046c..f171a6600fb 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ 
b/arch/cris/include/arch-v32/arch/spinlock.h @@ -56,17 +56,17 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) * */ -static inline int __raw_read_can_lock(arch_rwlock_t *x) +static inline int arch_read_can_lock(arch_rwlock_t *x) { return (int)(x)->lock > 0; } -static inline int __raw_write_can_lock(arch_rwlock_t *x) +static inline int arch_write_can_lock(arch_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock == 0); @@ -74,7 +74,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -82,14 +82,14 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); rw->lock++; arch_spin_unlock(&rw->slock); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); @@ -97,7 +97,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) arch_spin_unlock(&rw->slock); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); @@ -109,7 +109,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { int ret = 0; arch_spin_lock(&rw->slock); diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 6715b6a8ebc..1a91c9121d1 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -140,13 +140,13 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) __ticket_spin_unlock_wait(lock); } -#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) +#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) +arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -169,13 +169,13 @@ __raw_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) : "p6", "p7", "r2", "memory"); } -#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) +#define arch_read_lock(lock) arch_read_lock_flags(lock, 0) #else /* !ASM_SUPPORTED */ -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) -#define __raw_read_lock(rw) \ +#define arch_read_lock(rw) \ do { \ arch_rwlock_t *__read_lock_ptr = (rw); \ \ @@ -188,7 +188,7 @@ do { \ #endif /* !ASM_SUPPORTED */ -#define __raw_read_unlock(rw) \ +#define arch_read_unlock(rw) \ do { \ arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -197,7 +197,7 @@ do { \ #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) +arch_write_lock_flags(arch_rwlock_t 
*lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -221,9 +221,9 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); } -#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) +#define arch_write_lock(rw) arch_write_lock_flags(rw, 0) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ register long result; \ \ @@ -235,7 +235,7 @@ __raw_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(arch_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -244,9 +244,9 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) #else /* !ASM_SUPPORTED */ -#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) +#define arch_write_lock_flags(l, flags) arch_write_lock(l) -#define __raw_write_lock(l) \ +#define arch_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ @@ -257,7 +257,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) } while (ia64_val); \ }) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ __u64 ia64_val; \ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(arch_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,7 +273,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(arch_rwlock_t *x) +static inline int arch_read_trylock(arch_rwlock_t *x) { union { arch_rwlock_t lock; diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index 1c76af8c8e1..179a06489b1 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -140,15 +140,15 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) +#define arch_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
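 * The trylock paths further down treat the lock word as an atomic_t and
 * follow the same RW_LOCK_BIAS convention as the sh/x86 variants;
 * roughly (a sketch, the undo branches are elided in this hunk):
 *
 *     read:   atomic_dec_return(count) >= 0 means we got a read lock,
 *             otherwise the decrement is put back and we fail
 *     write:  atomic_sub_and_test(RW_LOCK_BIAS, count) succeeds only if
 *             the word was exactly the bias, i.e. no readers and no
 *             writer were in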
*/ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -199,7 +199,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) ); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -252,7 +252,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) ); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) ); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw) ); } -static inline int __raw_read_trylock(arch_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t*)lock; if (atomic_dec_return(count) >= 0) @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(arch_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) @@ -316,8 +316,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock) return 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 7bf27c8a336..21ef9efbde4 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -248,21 +248,21 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) /* * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 1b \n" " addu %1, 1 \n" @@ -275,7 +275,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 2f \n" " addu %1, 1 \n" @@ -301,7 +301,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. 
*/ -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -309,7 +309,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) if (R10000_LLSC_WAR) { __asm__ __volatile__( - "1: ll %1, %2 # __raw_read_unlock \n" + "1: ll %1, %2 # arch_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" @@ -318,7 +318,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_unlock \n" + " .set noreorder # arch_read_unlock \n" "1: ll %1, %2 \n" " sub %1, 1 \n" " sc %1, %0 \n" @@ -335,13 +335,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) } } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " lui %1, 0x8000 \n" @@ -354,7 +354,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 2f \n" " lui %1, 0x8000 \n" @@ -377,26 +377,26 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) smp_llsc_mb(); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( - " # __raw_write_unlock \n" + " # arch_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) : "memory"); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -413,7 +413,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -433,14 +433,14 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -457,7 +457,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -480,8 +480,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) return ret; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 1ff3a0a94a4..74036f436a3 100644 --- a/arch/parisc/include/asm/spinlock.h +++ 
b/arch/parisc/include/asm/spinlock.h @@ -69,7 +69,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *x) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_lock(arch_rwlock_t *rw) +static __inline__ void arch_read_lock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -81,7 +81,7 @@ static __inline__ void __raw_read_lock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_unlock(arch_rwlock_t *rw) +static __inline__ void arch_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); @@ -93,7 +93,7 @@ static __inline__ void __raw_read_unlock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int __raw_read_trylock(arch_rwlock_t *rw) +static __inline__ int arch_read_trylock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -119,7 +119,7 @@ static __inline__ int __raw_read_trylock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void __raw_write_lock(arch_rwlock_t *rw) +static __inline__ void arch_write_lock(arch_rwlock_t *rw) { unsigned long flags; retry: @@ -141,7 +141,7 @@ retry: local_irq_restore(flags); } -static __inline__ void __raw_write_unlock(arch_rwlock_t *rw) +static __inline__ void arch_write_unlock(arch_rwlock_t *rw) { rw->counter = 0; arch_spin_unlock(&rw->lock); @@ -149,7 +149,7 @@ static __inline__ void __raw_write_unlock(arch_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ int __raw_write_trylock(arch_rwlock_t *rw) +static __inline__ int arch_write_trylock(arch_rwlock_t *rw) { unsigned long flags; int result = 0; @@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(arch_rwlock_t *rw) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw) +static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) { return rw->counter >= 0; } @@ -182,13 +182,13 @@ static __inline__ int __raw_read_can_lock(arch_rwlock_t *rw) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_write_can_lock(arch_rwlock_t *rw) +static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) { return !rw->counter; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 2fad2c07c59..764094cff68 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -166,8 +166,8 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock); * read-locks. 
*/ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_write_can_lock(rw) (!(rw)->lock) #ifdef CONFIG_PPC64 #define __DO_SIGN_EXTEND "extsw %0,%0\n" @@ -181,7 +181,7 @@ extern void arch_spin_unlock_wait(arch_spinlock_t *lock); * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static inline long arch_read_trylock(arch_rwlock_t *rw) +static inline long __arch_read_trylock(arch_rwlock_t *rw) { long tmp; @@ -205,7 +205,7 @@ static inline long arch_read_trylock(arch_rwlock_t *rw) * This returns the old value in the lock, * so we got the write lock if the return value is 0. */ -static inline long arch_write_trylock(arch_rwlock_t *rw) +static inline long __arch_write_trylock(arch_rwlock_t *rw) { long tmp, token; @@ -225,10 +225,10 @@ static inline long arch_write_trylock(arch_rwlock_t *rw) return tmp; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_read_trylock(rw) > 0)) + if (likely(__arch_read_trylock(rw) > 0)) break; do { HMT_low(); @@ -239,10 +239,10 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) } } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_write_trylock(rw) == 0)) + if (likely(__arch_write_trylock(rw) == 0)) break; do { HMT_low(); @@ -253,17 +253,17 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) } } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return arch_read_trylock(rw) > 0; + return __arch_read_trylock(rw) > 0; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return arch_write_trylock(rw) == 0; + return __arch_write_trylock(rw) == 0; } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { long tmp; @@ -280,15 +280,15 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) : "cr0", "xer", "memory"); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); rw->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) __spin_yield(lock) #define arch_read_relax(lock) __rw_yield(lock) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 7f98f0e48ac..a587907d77f 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -113,13 +113,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock >= 0) +#define arch_read_can_lock(x) ((int)(x)->lock >= 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
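 * A summary of the word layout that the compare-and-swap sequences
 * below depend on (a sketch, read off the visible cs arguments):
 *
 *     bit 31         writer flag: write_lock swaps 0 -> 0x80000000 and
 *                    write_unlock swaps it back
 *     bits 0..30     reader count: readers swap "old" -> "old + 1" with
 *                    old pre-masked by 0x7fffffffU, so the attempt
 *                    fails and falls back to the _wait path once a
 *                    writer owns the top bit
 *
 * hence read_can_lock() is a signed "is it non-negative" test and
 * write_can_lock() a plain comparison with zero.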
*/ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) extern void _raw_read_lock_wait(arch_rwlock_t *lp); extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); @@ -128,7 +128,7 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp); extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); extern int _raw_write_trylock_retry(arch_rwlock_t *lp); -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) +static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) +static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,7 +181,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 09fee9a1aa1..10754a37566 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -115,7 +115,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; local_irq_disable(); @@ -151,7 +151,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) int count = spin_retry; while (count-- > 0) { - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -170,7 +170,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if 
(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return; @@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; local_irq_disable(); if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) @@ -202,7 +202,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw) int count = spin_retry; while (count-- > 0) { - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return 1; diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index 7f3626aac86..bdc0f3b6c56 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -100,21 +100,21 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((x)->lock > 0) +#define arch_read_can_lock(x) ((x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_lock \n\t" + "movli.l @%1, %0 ! arch_read_lock \n\t" "cmp/pl %0 \n\t" "bf 1b \n\t" "add #-1, %0 \n\t" @@ -126,13 +126,13 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) ); } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_unlock \n\t" + "movli.l @%1, %0 ! arch_read_unlock \n\t" "add #1, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" @@ -142,13 +142,13 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw) ); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_write_lock \n\t" + "movli.l @%1, %0 ! arch_write_lock \n\t" "cmp/hs %2, %0 \n\t" "bf 1b \n\t" "sub %2, %0 \n\t" @@ -160,23 +160,23 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) ); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( - "mov.l %1, @%0 ! __raw_write_unlock \n\t" + "mov.l %1, @%0 ! arch_write_unlock \n\t" : : "r" (&rw->lock), "r" (RW_LOCK_BIAS) : "t", "memory" ); } -static inline int __raw_read_trylock(arch_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_read_trylock \n\t" + "movli.l @%2, %0 ! arch_read_trylock \n\t" "mov %0, %1 \n\t" "cmp/pl %0 \n\t" "bf 2f \n\t" @@ -193,13 +193,13 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_write_trylock \n\t" + "movli.l @%2, %0 ! 
arch_write_trylock \n\t" "mov %0, %1 \n\t" "cmp/hs %3, %0 \n\t" "bf 2f \n\t" @@ -216,8 +216,8 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) return (oldval > (RW_LOCK_BIAS - 1)); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 06d37e588fd..7f9b9dba38a 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -76,7 +76,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. */ -static inline void arch_read_lock(arch_rwlock_t *rw) +static inline void __arch_read_lock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; @@ -89,14 +89,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_lock(lock) \ +#define arch_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_lock(lock); \ + __arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(arch_rwlock_t *rw) +static inline void __arch_read_unlock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; @@ -109,14 +109,14 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_unlock(lock) \ +#define arch_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_unlock(lock); \ + __arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); lp = rw; @@ -130,7 +130,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(arch_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,7 +150,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(arch_rwlock_t *rw) +static inline int __arch_read_trylock(arch_rwlock_t *rw) { register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); @@ -165,27 +165,27 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) return res; } -#define __raw_read_trylock(lock) \ +#define arch_read_trylock(lock) \ ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ - res = arch_read_trylock(lock); \ + res = __arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) -#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) +#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) -#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) +#define arch_write_lock_flags(rw, flags) arch_write_lock(rw) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() #define arch_write_relax(lock) cpu_relax() -#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) -#define __raw_write_can_lock(rw) (!(rw)->lock) 
+#define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) +#define arch_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 2b22d7f2c2f..073936a8b27 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -210,17 +210,17 @@ static int inline arch_write_trylock(arch_rwlock_t *lock) return result; } -#define __raw_read_lock(p) arch_read_lock(p) -#define __raw_read_lock_flags(p, f) arch_read_lock(p) -#define __raw_read_trylock(p) arch_read_trylock(p) -#define __raw_read_unlock(p) arch_read_unlock(p) -#define __raw_write_lock(p) arch_write_lock(p) -#define __raw_write_lock_flags(p, f) arch_write_lock(p) -#define __raw_write_unlock(p) arch_write_unlock(p) -#define __raw_write_trylock(p) arch_write_trylock(p) - -#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_lock(p) arch_read_lock(p) +#define arch_read_lock_flags(p, f) arch_read_lock(p) +#define arch_read_trylock(p) arch_read_trylock(p) +#define arch_read_unlock(p) arch_read_unlock(p) +#define arch_write_lock(p) arch_write_lock(p) +#define arch_write_lock_flags(p, f) arch_write_lock(p) +#define arch_write_unlock(p) arch_write_unlock(p) +#define arch_write_trylock(p) arch_write_trylock(p) + +#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) +#define arch_write_can_lock(rw) (!(rw)->lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 99cb86e843a..3089f70c0c5 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -232,7 +232,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(arch_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(arch_rwlock_t *lock) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -static inline int __raw_write_can_lock(arch_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(arch_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw) ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(arch_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw) ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(arch_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(arch_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,19 +284,19 @@ static inline int __raw_write_trylock(arch_rwlock_t *lock) return 0; } -static inline void __raw_read_unlock(arch_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(arch_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) #define arch_spin_relax(lock) cpu_relax() #define arch_read_relax(lock) cpu_relax() diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index 73785b0bd6b..5725b034def 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -38,20 +38,20 @@ do { \ extern int _raw_write_trylock(rwlock_t *lock); extern void _raw_write_unlock(rwlock_t *lock); #else -# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) +# define _raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) # define _raw_read_lock_flags(lock, flags) \ - __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) -# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) -# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) + arch_read_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) +# define _raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) +# define _raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) # define _raw_write_lock_flags(lock, flags) \ - __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) -# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) -# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) + arch_write_lock_flags(&(lock)->raw_lock, *(flags)) +# define _raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) +# define _raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) #endif -#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) -#define 
write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) +#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) +#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) /* * Define the various rw_lock methods. Note we define these diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 1d3bcc3cf7c..b14f6a91e19 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -49,12 +49,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) /* * Read-write spinlocks. No debug version. */ -#define __raw_read_lock(lock) do { (void)(lock); } while (0) -#define __raw_write_lock(lock) do { (void)(lock); } while (0) -#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) -#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) -#define __raw_read_unlock(lock) do { (void)(lock); } while (0) -#define __raw_write_unlock(lock) do { (void)(lock); } while (0) +#define arch_read_lock(lock) do { (void)(lock); } while (0) +#define arch_write_lock(lock) do { (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ (void)(lock); 1; }) +#define arch_read_unlock(lock) do { (void)(lock); } while (0) +#define arch_write_unlock(lock) do { (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) @@ -67,8 +67,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) #define arch_spin_is_contended(lock) (((void)(lock), 0)) -#define __raw_read_can_lock(lock) (((void)(lock), 1)) -#define __raw_write_can_lock(lock) (((void)(lock), 1)) +#define arch_read_can_lock(lock) (((void)(lock), 1)) +#define arch_write_can_lock(lock) (((void)(lock), 1)) #define arch_spin_unlock_wait(lock) \ do { cpu_relax(); } while (arch_spin_is_locked(lock)) diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c index 3f72f10d9cb..0cea0bf6114 100644 --- a/lib/spinlock_debug.c +++ b/lib/spinlock_debug.c @@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_read_trylock(&lock->raw_lock)) + if (arch_read_trylock(&lock->raw_lock)) return; __delay(1); } @@ -196,12 +196,12 @@ static void __read_lock_debug(rwlock_t *lock) void _raw_read_lock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_lock(&lock->raw_lock); + arch_read_lock(&lock->raw_lock); } int _raw_read_trylock(rwlock_t *lock) { - int ret = __raw_read_trylock(&lock->raw_lock); + int ret = arch_read_trylock(&lock->raw_lock); #ifndef CONFIG_SMP /* @@ -215,7 +215,7 @@ int _raw_read_trylock(rwlock_t *lock) void _raw_read_unlock(rwlock_t *lock) { RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); - __raw_read_unlock(&lock->raw_lock); + arch_read_unlock(&lock->raw_lock); } static inline void debug_write_lock_before(rwlock_t *lock) @@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock) for (;;) { for (i = 0; i < loops; i++) { - if (__raw_write_trylock(&lock->raw_lock)) + if (arch_write_trylock(&lock->raw_lock)) return; __delay(1); } @@ -271,13 +271,13 @@ static void __write_lock_debug(rwlock_t *lock) void _raw_write_lock(rwlock_t *lock) { debug_write_lock_before(lock); - __raw_write_lock(&lock->raw_lock); + arch_write_lock(&lock->raw_lock); debug_write_lock_after(lock); } int _raw_write_trylock(rwlock_t *lock) { - int ret = __raw_write_trylock(&lock->raw_lock); + int ret = arch_write_trylock(&lock->raw_lock); if (ret) debug_write_lock_after(lock); @@ -293,5 +293,5 @@ 
int _raw_write_trylock(rwlock_t *lock) void _raw_write_unlock(rwlock_t *lock) { debug_write_unlock(lock); - __raw_write_unlock(&lock->raw_lock); + arch_write_unlock(&lock->raw_lock); } -- cgit v1.2.3-70-g09d2 From 09b366b78c9602f53344c269eac608fd6e24d670 Mon Sep 17 00:00:00 2001 From: Kenji Kaneshige Date: Wed, 9 Dec 2009 10:56:41 +0900 Subject: [IA64] preallocate IA64_IRQ_MOVE_VECTOR Previously, we tried to use IA64_DEF_FIRST_DEVICE_VECTOR (0x30) as the IA64_IRQ_MOVE_VECTOR. However, we allocate other IRQs from the device vector range, so there's no guarantee that IA64_DEF_FIRST_DEVICE_VECTOR will still be available when we register IA64_IRQ_MOVE_VECTOR. This patch statically allocates 0x30 for IA64_IRQ_MOVE_VECTOR and removes it from the device vector range. Without this patch, we crash on machines like the HP rx3600 that use vector 48 (0x30) as the ACPI SCI interrupt: kernel BUG at arch/ia64/kernel/irq_ia64.c:647! swapper[0]: bugcheck! 0 [1] Modules linked in: Pid: 0, CPU 0, comm: swapper psr : 00001010084a2018 ifs : 800000000000030e ip : [] Not tainted (2.6.32-rc8-00184-gd5d4ec8) ip is at ia64_native_register_percpu_irq+0x110/0x1e0 Signed-off-by: Bjorn Helgaas Signed-off-by: Kenji Kaneshige Tested-by: Kenji Kaneshige Tested-by: Bjorn Helgaas Signed-off-by: Tony Luck --- arch/ia64/include/asm/hw_irq.h | 6 ++++++ arch/ia64/kernel/irq_ia64.c | 6 +----- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h index 91619b31dbf..bf2e37493e0 100644 --- a/arch/ia64/include/asm/hw_irq.h +++ b/arch/ia64/include/asm/hw_irq.h @@ -59,7 +59,13 @@ typedef u16 ia64_vector; extern int ia64_first_device_vector; extern int ia64_last_device_vector; +#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG)) +/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */ +#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */ +#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31 +#else #define IA64_DEF_FIRST_DEVICE_VECTOR 0x30 +#endif #define IA64_DEF_LAST_DEVICE_VECTOR 0xe7 #define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector #define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b54f1a..181a9344e67 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu) } #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) -#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR static enum vector_domain_type { VECTOR_DOMAIN_NONE, @@ -659,11 +658,8 @@ init_IRQ (void) register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); #ifdef CONFIG_SMP #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) - if (vector_domain_type != VECTOR_DOMAIN_NONE) { - BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR); - IA64_FIRST_DEVICE_VECTOR++; + if (vector_domain_type != VECTOR_DOMAIN_NONE) register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); - } #endif #endif #ifdef CONFIG_PERFMON -- cgit v1.2.3-70-g09d2 From 9ee27c76393394c7fb1ddeca3f1622d4537185a0 Mon Sep 17 00:00:00 2001 From: Takao Indoh Date: Thu, 19 Nov 2009 16:39:22 -0500 Subject: [IA64] Save I-resources to ia64_sal_os_state This is a patch related to this discussion. 
http://www.spinics.net/lists/linux-ia64/msg07605.html When INIT is sent, the ip/psr/pfs registers are stored into the I-resources (iip/ipsr/ifs registers), and they are copied into the min-state save area (pmsa_{iip,ipsr,ifs}). Therefore, in creating pt_regs at ia64_mca_modify_original_stack(), cr_{iip,ipsr,ifs} should be derived from pmsa_{iip,ipsr,ifs}. But the current code copies pmsa_{xip,xpsr,xfs} to cr_{iip,ipsr,ifs} when PSR.ic is 0. finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, unsigned long *nat) { (snip) if (ia64_psr(regs)->ic) { regs->cr_iip = ms->pmsa_iip; regs->cr_ipsr = ms->pmsa_ipsr; regs->cr_ifs = ms->pmsa_ifs; } else { regs->cr_iip = ms->pmsa_xip; regs->cr_ipsr = ms->pmsa_xpsr; regs->cr_ifs = ms->pmsa_xfs; } It's OK when PSR.ic is not 0. But when PSR.ic is 0, this can be a problem when we investigate the kernel, as the value of regs->cr_iip does not point to where INIT really interrupted. At first I tried to change finish_pt_regs() so that it always uses pmsa_{iip,ipsr,ifs} for cr_{iip,ipsr,ifs}, but Keith Owens pointed out it could cause another problem if I changed it. >The only problem I can think of is an MCA/INIT >arriving while code like SAVE_MIN or SAVE_REST is executing. Back >tracing at that point using pmsa_iip is going to be a problem, you have >no idea what state the registers or stack are in. I confirmed he was right, so I decided to keep it as-is and to save pmsa_{iip,ipsr,ifs} to ia64_sal_os_state for debugging. The attached patch just adds new members to ia64_sal_os_state to save pmsa_{iip,ipsr,ifs}. Signed-off-by: Takao Indoh Signed-off-by: Tony Luck --- arch/ia64/include/asm/mca.h | 5 +++++ arch/ia64/kernel/mca.c | 11 ++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index c171cdf0a78..43f96ab18fa 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -106,6 +106,11 @@ struct ia64_sal_os_state { unsigned long os_status; /* OS status to SAL, enum below */ unsigned long context; /* 0 if return to same context 1 if return to new context */ + + /* I-resources */ + unsigned long iip; + unsigned long ipsr; + unsigned long ifs; }; enum { diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 496ac7a9948..32f2639e9b0 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -888,9 +888,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current) } static void -finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, +finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos, unsigned long *nat) { + const pal_min_state_area_t *ms = sos->pal_min_state; const u64 *bank; /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use @@ -904,6 +905,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, regs->cr_iip = ms->pmsa_xip; regs->cr_ipsr = ms->pmsa_xpsr; regs->cr_ifs = ms->pmsa_xfs; + + sos->iip = ms->pmsa_iip; + sos->ipsr = ms->pmsa_ipsr; + sos->ifs = ms->pmsa_ifs; } regs->pr = ms->pmsa_pr; regs->b0 = ms->pmsa_br0; @@ -1079,7 +1084,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, memcpy(old_regs, regs, sizeof(*regs)); old_regs->loadrs = loadrs; old_unat = old_regs->ar_unat; - finish_pt_regs(old_regs, ms, &old_unat); + finish_pt_regs(old_regs, sos, &old_unat); /* Next stack a struct switch_stack.
mca_asm.S built a partial * switch_stack, copy it and fill in the blanks using pt_regs and @@ -1150,7 +1155,7 @@ no_mod: mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", smp_processor_id(), type, msg); old_unat = regs->ar_unat; - finish_pt_regs(regs, ms, &old_unat); + finish_pt_regs(regs, sos, &old_unat); return previous_current; } -- cgit v1.2.3-70-g09d2 From 21fc3fded7095f6018e770412b6881ced4c09d6f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 6 Nov 2009 22:42:01 +0000 Subject: [IA64] Replace old style lock initializer SPIN_LOCK_UNLOCKED is deprecated. Use __SPIN_LOCK_UNLOCKED instead. Signed-off-by: Thomas Gleixner Cc: linux-ia64@vger.kernel.org Signed-off-by: Tony Luck --- arch/ia64/include/asm/rwsem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h index fbee74b1578..e8762688e8e 100644 --- a/arch/ia64/include/asm/rwsem.h +++ b/arch/ia64/include/asm/rwsem.h @@ -47,7 +47,7 @@ struct rw_semaphore { #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) #define __RWSEM_INITIALIZER(name) \ - { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ + { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ LIST_HEAD_INIT((name).wait_list) } #define DECLARE_RWSEM(name) \ -- cgit v1.2.3-70-g09d2 From 430677133fd5a2033222b3b5016bb461fe8fabf2 Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Mon, 14 Dec 2009 15:00:37 -0500 Subject: [IA64] implement early_io{re,un}map for ia64 drivers/pci/dmar.c uses these functions, so provide them for ia64 Signed-off-by: Tony Luck --- arch/ia64/include/asm/io.h | 2 ++ arch/ia64/mm/ioremap.c | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 0d9d16e2d94..cc8335eb311 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr) extern void __iomem * ioremap(unsigned long offset, unsigned long size); extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap (volatile void __iomem *addr); +extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); +extern void early_iounmap (volatile void __iomem *addr, unsigned long size); /* * String version of IO memory access ops: diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 2a140627dfd..3dccdd8eb27 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c @@ -21,6 +21,12 @@ __ioremap (unsigned long phys_addr) return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } +void __iomem * +early_ioremap (unsigned long phys_addr, unsigned long size) +{ + return __ioremap(phys_addr); +} + void __iomem * ioremap (unsigned long phys_addr, unsigned long size) { @@ -101,6 +107,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) } EXPORT_SYMBOL(ioremap_nocache); +void +early_iounmap (volatile void __iomem *addr, unsigned long size) +{ +} + void iounmap (volatile void __iomem *addr) { -- cgit v1.2.3-70-g09d2 From 4e25b2576efda24c02e2d6b9bcb5965a3f865f33 Mon Sep 17 00:00:00 2001 From: Lee Schermerhorn Date: Mon, 14 Dec 2009 17:58:23 -0800 Subject: hugetlb: add generic definition of NUMA_NO_NODE Move definition of NUMA_NO_NODE from ia64 and x86_64 arch specific headers to generic header 'linux/numa.h' for use in generic code. 
NUMA_NO_NODE replaces bare '-1' where it's used in this series to indicate "no node id specified". Ultimately, it can be used to replace the -1 elsewhere where it is used similarly. Signed-off-by: Lee Schermerhorn Acked-by: David Rientjes Acked-by: Mel Gorman Reviewed-by: Andi Kleen Cc: KAMEZAWA Hiroyuki Cc: Randy Dunlap Cc: Nishanth Aravamudan Cc: Adam Litke Cc: Andy Whitcroft Cc: Eric Whitney Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/numa.h | 2 -- arch/x86/include/asm/topology.h | 9 +++++++-- include/linux/numa.h | 2 ++ 3 files changed, 9 insertions(+), 4 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index 3499ff57bf4..6a8a27cfae3 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h @@ -22,8 +22,6 @@ #include -#define NUMA_NO_NODE -1 - extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; extern pg_data_t *pgdat_list[MAX_NUMNODES]; diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 40e37b10c6c..c5087d79658 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -35,11 +35,16 @@ # endif #endif -/* Node not present */ -#define NUMA_NO_NODE (-1) +/* + * to preserve the visibility of NUMA_NO_NODE definition, + * moved to there from here. May be used independent of + * CONFIG_NUMA. + */ +#include #ifdef CONFIG_NUMA #include + #include #ifdef CONFIG_X86_32 diff --git a/include/linux/numa.h b/include/linux/numa.h index a31a7301b15..3aaa31603a8 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -10,4 +10,6 @@ #define MAX_NUMNODES (1 << NODES_SHIFT) +#define NUMA_NO_NODE (-1) + #endif /* _LINUX_NUMA_H */ -- cgit v1.2.3-70-g09d2 From cd7bcf32d42b15891620b3f1387a00178b54291a Mon Sep 17 00:00:00 2001 From: "Luck, Tony" Date: Mon, 14 Dec 2009 20:00:36 +0000 Subject: implement early_io{re,un}map for ia64 Needed for commit 2c992208 ("intel-iommu: Detect DMAR in hyperspace at probe time.) to build on IA64. 
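As a rough illustration of how the new pair is meant to be used (a sketch only, not part of the patch: the helper name, table size and contents below are made up, only early_ioremap()/early_iounmap() come from this commit), an early boot path that wants to peek at a firmware table before the normal ioremap machinery is available could do:

	/* Hypothetical early-boot consumer of the new helpers. */
	static void __init peek_table_header(unsigned long phys_addr)
	{
		void __iomem *va;
		u32 sig;

		va = early_ioremap(phys_addr, 16);	/* on ia64: uncached identity mapping */
		if (!va)
			return;
		sig = readl(va);			/* inspect the first word of the table */
		early_iounmap(va, 16);			/* a no-op on ia64, kept for symmetry */
	}

On ia64 early_ioremap() simply returns the address in the uncached region, so it can be called arbitrarily early; early_iounmap() exists only so that generic callers such as drivers/pci/dmar.c can pair the calls portably.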
Signed-off-by: Tony Luck Signed-off-by: David Woodhouse --- arch/ia64/include/asm/io.h | 2 ++ arch/ia64/mm/ioremap.c | 11 +++++++++++ 2 files changed, 13 insertions(+) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 0d9d16e2d94..cc8335eb311 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr) extern void __iomem * ioremap(unsigned long offset, unsigned long size); extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); extern void iounmap (volatile void __iomem *addr); +extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); +extern void early_iounmap (volatile void __iomem *addr, unsigned long size); /* * String version of IO memory access ops: diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c index 2a140627dfd..3dccdd8eb27 100644 --- a/arch/ia64/mm/ioremap.c +++ b/arch/ia64/mm/ioremap.c @@ -21,6 +21,12 @@ __ioremap (unsigned long phys_addr) return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr); } +void __iomem * +early_ioremap (unsigned long phys_addr, unsigned long size) +{ + return __ioremap(phys_addr); +} + void __iomem * ioremap (unsigned long phys_addr, unsigned long size) { @@ -101,6 +107,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size) } EXPORT_SYMBOL(ioremap_nocache); +void +early_iounmap (volatile void __iomem *addr, unsigned long size) +{ +} + void iounmap (volatile void __iomem *addr) { -- cgit v1.2.3-70-g09d2 From 698ba7b5a3a7be772922340fade365c675b8243f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 15 Dec 2009 16:47:37 -0800 Subject: elf: kill USE_ELF_CORE_DUMP Currently all architectures but microblaze unconditionally define USE_ELF_CORE_DUMP. The microblaze omission seems like an error to me, so let's kill this ifdef and make sure we are the same everywhere. 
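In effect every core-dump guard collapses from a two-level test to a single Kconfig test. Condensed from the fs/binfmt_elf.c hunk further below, shown here only to make the shape of the change obvious:

	/* before: per-arch opt-in AND the Kconfig option */
	#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
	static int elf_core_dump(long signr, struct pt_regs *regs,
				 struct file *file, unsigned long limit);
	#else
	#define elf_core_dump	NULL
	#endif

	/* after: CONFIG_ELF_CORE alone decides, identically on every architecture */
	#ifdef CONFIG_ELF_CORE
	static int elf_core_dump(long signr, struct pt_regs *regs,
				 struct file *file, unsigned long limit);
	#else
	#define elf_core_dump	NULL
	#endif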
Signed-off-by: Christoph Hellwig Acked-by: Hugh Dickins Cc: Cc: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/elf.h | 1 - arch/arm/include/asm/elf.h | 1 - arch/avr32/include/asm/elf.h | 1 - arch/blackfin/include/asm/elf.h | 1 - arch/cris/include/asm/elf.h | 2 -- arch/frv/include/asm/elf.h | 1 - arch/h8300/include/asm/elf.h | 1 - arch/ia64/ia32/elfcore32.h | 2 -- arch/ia64/include/asm/elf.h | 1 - arch/m32r/include/asm/elf.h | 1 - arch/m68k/include/asm/elf.h | 1 - arch/microblaze/include/asm/elf.h | 1 - arch/mips/include/asm/elf.h | 1 - arch/mn10300/include/asm/elf.h | 1 - arch/parisc/include/asm/elf.h | 1 - arch/powerpc/include/asm/elf.h | 1 - arch/s390/include/asm/elf.h | 1 - arch/score/include/asm/elf.h | 1 - arch/sh/include/asm/elf.h | 1 - arch/sparc/include/asm/elf_32.h | 2 -- arch/sparc/include/asm/elf_64.h | 1 - arch/um/sys-i386/asm/elf.h | 1 - arch/um/sys-ppc/asm/elf.h | 2 -- arch/um/sys-x86_64/asm/elf.h | 1 - arch/x86/include/asm/elf.h | 1 - arch/xtensa/include/asm/elf.h | 1 - fs/binfmt_elf.c | 11 +++-------- fs/binfmt_elf_fdpic.c | 8 ++++---- fs/proc/base.c | 4 ++-- 29 files changed, 9 insertions(+), 44 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index 5c75c1b2352..9baae8afe8a 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h @@ -81,7 +81,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_ALPHA -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 8192 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 6aac3f5bb2f..a399bb5730f 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -101,7 +101,6 @@ extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define ELF_CORE_COPY_TASK_REGS dump_task_regs -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h index d5d1d41c600..3b3159b710d 100644 --- a/arch/avr32/include/asm/elf.h +++ b/arch/avr32/include/asm/elf.h @@ -77,7 +77,6 @@ typedef struct user_fpu_struct elf_fpregset_t; #endif #define ELF_ARCH EM_AVR32 -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/blackfin/include/asm/elf.h b/arch/blackfin/include/asm/elf.h index 8e0764c81ea..5b50f0ecacf 100644 --- a/arch/blackfin/include/asm/elf.h +++ b/arch/blackfin/include/asm/elf.h @@ -55,7 +55,6 @@ do { \ _regs->p2 = _dynamic_addr; \ } while(0) -#define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_BFIN_FDPIC #define ELF_EXEC_PAGESIZE 4096 diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h index 0f51b10b9f4..8a3d8e2b33c 100644 --- a/arch/cris/include/asm/elf.h +++ b/arch/cris/include/asm/elf.h @@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t; #define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004 /* End of excerpt from {binutils}/include/elf/cris.h. */ -#define USE_ELF_CORE_DUMP - #define ELF_EXEC_PAGESIZE 8192 /* This is the location that an ET_DYN program is loaded if exec'ed. 
Typical diff --git a/arch/frv/include/asm/elf.h b/arch/frv/include/asm/elf.h index 7bbf6e47f8c..c3819804a74 100644 --- a/arch/frv/include/asm/elf.h +++ b/arch/frv/include/asm/elf.h @@ -115,7 +115,6 @@ do { \ __kernel_frame0_ptr->gr29 = 0; \ } while(0) -#define USE_ELF_CORE_DUMP #define CORE_DUMP_USE_REGSET #define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC #define ELF_EXEC_PAGESIZE 16384 diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h index 94e2284c881..c24fa250d65 100644 --- a/arch/h8300/include/asm/elf.h +++ b/arch/h8300/include/asm/elf.h @@ -34,7 +34,6 @@ typedef unsigned long elf_fpregset_t; #define ELF_PLAT_INIT(_r) _r->er1 = 0 -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h index 9a3abf58cea..65772574261 100644 --- a/arch/ia64/ia32/elfcore32.h +++ b/arch/ia64/ia32/elfcore32.h @@ -11,8 +11,6 @@ #include #include -#define USE_ELF_CORE_DUMP 1 - /* Override elfcore.h */ #define _LINUX_ELFCORE_H 1 typedef unsigned int elf_greg_t; diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h index 86eddee029c..e14108b19c0 100644 --- a/arch/ia64/include/asm/elf.h +++ b/arch/ia64/include/asm/elf.h @@ -25,7 +25,6 @@ #define ELF_DATA ELFDATA2LSB #define ELF_ARCH EM_IA_64 -#define USE_ELF_CORE_DUMP #define CORE_DUMP_USE_REGSET /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are diff --git a/arch/m32r/include/asm/elf.h b/arch/m32r/include/asm/elf.h index 0cc34c94bf2..2f85412ef73 100644 --- a/arch/m32r/include/asm/elf.h +++ b/arch/m32r/include/asm/elf.h @@ -102,7 +102,6 @@ typedef elf_fpreg_t elf_fpregset_t; */ #define ELF_PLAT_INIT(_r, load_addr) (_r)->r0 = 0 -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE /* diff --git a/arch/m68k/include/asm/elf.h b/arch/m68k/include/asm/elf.h index 0b0f49eb876..01c193d9141 100644 --- a/arch/m68k/include/asm/elf.h +++ b/arch/m68k/include/asm/elf.h @@ -59,7 +59,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t; is actually used on ASV. 
*/ #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0 -#define USE_ELF_CORE_DUMP #ifndef CONFIG_SUN3 #define ELF_EXEC_PAGESIZE 4096 #else diff --git a/arch/microblaze/include/asm/elf.h b/arch/microblaze/include/asm/elf.h index f92fc0dda00..7d4acf2b278 100644 --- a/arch/microblaze/include/asm/elf.h +++ b/arch/microblaze/include/asm/elf.h @@ -77,7 +77,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; #define ELF_DATA ELFDATA2MSB #endif -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index 7990694cda2..7a6a35dbe52 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -326,7 +326,6 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \ dump_task_fpu(tsk, elf_fpregs) -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE /* This yields a mask that user programs can use to figure out what diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h index 75a70aa9fd6..e5fa97cd9a1 100644 --- a/arch/mn10300/include/asm/elf.h +++ b/arch/mn10300/include/asm/elf.h @@ -77,7 +77,6 @@ do { \ _ur->a1 = 0; _ur->a0 = 0; _ur->d1 = 0; _ur->d0 = 0; \ } while (0) -#define USE_ELF_CORE_DUMP #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 9c802eb4be8..19f6cb1a4a1 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */ such function. */ #define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0 -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 014a624f4c8..17828ad411e 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -170,7 +170,6 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG]; #define elf_check_arch(x) ((x)->e_machine == ELF_ARCH) #define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC) -#define USE_ELF_CORE_DUMP #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index e885442c1df..354d42616c7 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -155,7 +155,6 @@ extern unsigned int vdso_enabled; } while (0) #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. 
Typical diff --git a/arch/score/include/asm/elf.h b/arch/score/include/asm/elf.h index 43526d9fda9..f478ce94181 100644 --- a/arch/score/include/asm/elf.h +++ b/arch/score/include/asm/elf.h @@ -61,7 +61,6 @@ struct task_struct; struct pt_regs; #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE /* This yields a mask that user programs can use to figure out what diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index ccb1d93bb04..ac04255022b 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -114,7 +114,6 @@ typedef struct user_fpu_struct elf_fpregset_t; */ #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_FDPIC_CORE_EFLAGS EF_SH_FDPIC #define ELF_EXEC_PAGESIZE PAGE_SIZE diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h index 381a1b5256d..4269ca6ad18 100644 --- a/arch/sparc/include/asm/elf_32.h +++ b/arch/sparc/include/asm/elf_32.h @@ -104,8 +104,6 @@ typedef struct { #define ELF_CLASS ELFCLASS32 #define ELF_DATA ELFDATA2MSB -#define USE_ELF_CORE_DUMP - #define ELF_EXEC_PAGESIZE 4096 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h index d42e393078c..ff66bb88537 100644 --- a/arch/sparc/include/asm/elf_64.h +++ b/arch/sparc/include/asm/elf_64.h @@ -152,7 +152,6 @@ typedef struct { (x)->e_machine == EM_SPARC32PLUS) #define compat_start_thread start_thread32 -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE /* This is the location that an ET_DYN program is loaded if exec'ed. Typical diff --git a/arch/um/sys-i386/asm/elf.h b/arch/um/sys-i386/asm/elf.h index d0da9d7c537..770885472ed 100644 --- a/arch/um/sys-i386/asm/elf.h +++ b/arch/um/sys-i386/asm/elf.h @@ -48,7 +48,6 @@ typedef struct user_i387_struct elf_fpregset_t; PT_REGS_EAX(regs) = 0; \ } while (0) -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) diff --git a/arch/um/sys-ppc/asm/elf.h b/arch/um/sys-ppc/asm/elf.h index af9463cd8ce..8aacaf56508 100644 --- a/arch/um/sys-ppc/asm/elf.h +++ b/arch/um/sys-ppc/asm/elf.h @@ -17,8 +17,6 @@ extern long elf_aux_hwcap; #define ELF_CLASS ELFCLASS32 #endif -#define USE_ELF_CORE_DUMP - #define R_386_NONE 0 #define R_386_32 1 #define R_386_PC32 2 diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h index 04b9e87c8da..49655c83efd 100644 --- a/arch/um/sys-x86_64/asm/elf.h +++ b/arch/um/sys-x86_64/asm/elf.h @@ -104,7 +104,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu); clear_thread_flag(TIF_IA32); #endif -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 8a024babe5e..b4501ee223a 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -239,7 +239,6 @@ extern int force_personality32; #endif /* !CONFIG_X86_32 */ #define CORE_DUMP_USE_REGSET -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 /* This is the location that an ET_DYN program is loaded if exec'ed. 
Typical diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h index c3f53e755ca..5eb6d695e98 100644 --- a/arch/xtensa/include/asm/elf.h +++ b/arch/xtensa/include/asm/elf.h @@ -123,7 +123,6 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *); #define ELF_CLASS ELFCLASS32 #define ELF_ARCH EM_XTENSA -#define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE PAGE_SIZE /* diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index d15ea1790bf..97b6e9efeb7 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -44,7 +44,7 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, * If we don't support core dumping, then supply a NULL so we * don't even try. */ -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) +#ifdef CONFIG_ELF_CORE static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); #else #define elf_core_dump NULL @@ -1101,12 +1101,7 @@ out: return error; } -/* - * Note that some platforms still use traditional core dumps and not - * the ELF core dump. Each platform can select it as appropriate. - */ -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) - +#ifdef CONFIG_ELF_CORE /* * ELF core dumper * @@ -2063,7 +2058,7 @@ out: return has_dumped; } -#endif /* USE_ELF_CORE_DUMP */ +#endif /* CONFIG_ELF_CORE */ static int __init init_elf_binfmt(void) { diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 79d2b1aa389..7b055385db8 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -75,14 +75,14 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *, static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *, struct file *, struct mm_struct *); -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) +#ifdef CONFIG_ELF_CORE static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit); #endif static struct linux_binfmt elf_fdpic_format = { .module = THIS_MODULE, .load_binary = load_elf_fdpic_binary, -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) +#ifdef CONFIG_ELF_CORE .core_dump = elf_fdpic_core_dump, #endif .min_coredump = ELF_EXEC_PAGESIZE, @@ -1201,7 +1201,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, * * Modelled on fs/binfmt_elf.c core dumper */ -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) +#ifdef CONFIG_ELF_CORE /* * These are the only things you should do on a core-file: use only these @@ -1826,4 +1826,4 @@ cleanup: #undef NUM_NOTES } -#endif /* USE_ELF_CORE_DUMP */ +#endif /* CONFIG_ELF_CORE */ diff --git a/fs/proc/base.c b/fs/proc/base.c index 4df4a464a91..18d5cc62d8e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2266,7 +2266,7 @@ static const struct inode_operations proc_attr_dir_inode_operations = { #endif -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) +#ifdef CONFIG_ELF_CORE static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -2623,7 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_FAULT_INJECTION REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), #endif -#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) +#ifdef CONFIG_ELF_CORE REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), #endif #ifdef CONFIG_TASK_IO_ACCOUNTING -- cgit v1.2.3-70-g09d2 From ac2b3e67dd59b8c6ef8c199641444c6ea03535a6 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 15 Dec 2009 
16:47:43 -0800 Subject: dma-mapping: fix off-by-one error in dma_capable() dma_mask is, when interpreted as an address, the last valid byte, and hence the comparison must also be done using the last valid byte of the buffer in question (for example, with a 32-bit mask of 0xffffffff, a buffer at 0xfffff000 of size 0x1000 is fully addressable, yet the old check rejects it because addr + size = 0x100000000 exceeds the mask while addr + size - 1 = 0xffffffff does not). Also fix the open-coded instances in lib/swiotlb.c. Signed-off-by: Jan Beulich Cc: FUJITA Tomonori Cc: Becky Bruce Cc: "Luck, Tony" Cc: Benjamin Herrenschmidt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ia64/include/asm/dma-mapping.h | 2 +- arch/powerpc/include/asm/dma-mapping.h | 2 +- arch/x86/include/asm/dma-mapping.h | 2 +- lib/swiotlb.c | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h index 8d3c79cd81e..7d09a09cdaa 100644 --- a/arch/ia64/include/asm/dma-mapping.h +++ b/arch/ia64/include/asm/dma-mapping.h @@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) if (!dev->dma_mask) return 0; - return addr + size <= *dev->dma_mask; + return addr + size - 1 <= *dev->dma_mask; } static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index e281daebddc..80a973bb9e7 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -197,7 +197,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) if (!dev->dma_mask) return 0; - return addr + size <= *dev->dma_mask; + return addr + size - 1 <= *dev->dma_mask; } static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 0f6c02f3b7d..ac91eed2106 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -67,7 +67,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) if (!dev->dma_mask) return 0; - return addr + size <= *dev->dma_mask; + return addr + size - 1 <= *dev->dma_mask; } static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 5bc01803f8f..437eedb5a53 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -549,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_mask = hwdev->coherent_dma_mask; ret = (void *)__get_free_pages(flags, order); - if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { + if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) { /* * The allocated memory isn't reachable by the device. */ @@ -571,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, dev_addr = swiotlb_virt_to_bus(hwdev, ret); /* Confirm address can be DMA'd by device */ - if (dev_addr + size > dma_mask) { + if (dev_addr + size - 1 > dma_mask) { printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", (unsigned long long)dma_mask, (unsigned long long)dev_addr); -- cgit v1.2.3-70-g09d2 From 1d9cb470a755409ce97c3376174b1e234bd20371 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:14 -0700 Subject: ACPI: processor: introduce arch_has_acpi_pdc Add an arch-dependent helper function that tells us whether we should attempt to evaluate _PDC on this machine or not. The x86 implementation assumes that the CPUs in the machine must be homogeneous, and that you cannot mix CPUs of different vendors.
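The intended call pattern is just an early bail-out before any _PDC work is attempted. A sketch of the consumer, equivalent to the acpi_processor_set_pdc() hunk in the diff below, with the arch helper deciding up front:

	void acpi_processor_set_pdc(struct acpi_processor *pr)
	{
		if (!arch_has_acpi_pdc())	/* e.g. a non-Intel, non-Centaur x86 box */
			return;

		arch_acpi_processor_init_pdc(pr);
		acpi_processor_eval_pdc(pr);
		arch_acpi_processor_cleanup_pdc(pr);
	}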
Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/include/asm/acpi.h | 2 ++ arch/x86/include/asm/acpi.h | 7 +++++++ arch/x86/kernel/acpi/processor.c | 4 +--- drivers/acpi/processor_pdc.c | 3 +++ 4 files changed, 13 insertions(+), 3 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 91df9686a0d..3b788829464 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -132,6 +132,8 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS]; extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; #endif +static inline bool arch_has_acpi_pdc(void) { return true; } + #define acpi_unlazy_tlb(x) #ifdef CONFIG_ACPI_NUMA diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 60d2b2db0bc..d787e6e92bd 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -142,6 +142,13 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) return max_cstate; } +static inline bool arch_has_acpi_pdc(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + return (c->x86_vendor == X86_VENDOR_INTEL || + c->x86_vendor == X86_VENDOR_CENTAUR); +} + #else /* !CONFIG_ACPI */ #define acpi_lapic 0 diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c index d85d1b2432b..bcb6efe08c5 100644 --- a/arch/x86/kernel/acpi/processor.c +++ b/arch/x86/kernel/acpi/processor.c @@ -79,9 +79,7 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr) struct cpuinfo_x86 *c = &cpu_data(pr->id); pr->pdc = NULL; - if (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR) - init_intel_pdc(pr, c); + init_intel_pdc(pr, c); return; } diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index b416c32dda0..931e735e9e3 100644 --- a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -69,6 +69,9 @@ static int acpi_processor_eval_pdc(struct acpi_processor *pr) void acpi_processor_set_pdc(struct acpi_processor *pr) { + if (arch_has_acpi_pdc() == false) + return; + arch_acpi_processor_init_pdc(pr); acpi_processor_eval_pdc(pr); arch_acpi_processor_cleanup_pdc(pr); -- cgit v1.2.3-70-g09d2 From 6c5807d7bc7d051fce00863ffb98d36325501eb2 Mon Sep 17 00:00:00 2001 From: Alex Chiang Date: Sun, 20 Dec 2009 12:19:29 -0700 Subject: ACPI: processor: finish unifying arch_acpi_processor_init_pdc() The only thing arch-specific about calling _PDC is what bits get set in the input obj_list buffer. There's no need for several levels of indirection to twiddle those bits. Additionally, since we're just messing around with a buffer, we can simplify the interface; no need to pass around the entire struct acpi_processor * just to get at the buffer. Cc: Tony Luck Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Signed-off-by: Alex Chiang Signed-off-by: Len Brown --- arch/ia64/include/asm/acpi.h | 4 ++++ arch/ia64/kernel/acpi-processor.c | 18 ------------------ arch/x86/include/asm/acpi.h | 19 +++++++++++++++++++ arch/x86/kernel/acpi/processor.c | 34 ---------------------------------- drivers/acpi/processor_pdc.c | 6 +++--- include/acpi/processor.h | 1 - 6 files changed, 26 insertions(+), 56 deletions(-) (limited to 'arch/ia64/include') diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 3b788829464..7ae58892ba8 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -133,6 +133,10 @@ extern int __initdata nid_to_pxm_map[MAX_NUMNODES]; #endif static inline bool arch_has_acpi_pdc(void) { return true; } +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ + buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP; +} #define acpi_unlazy_tlb(x) diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c index ebe23f58bd6..7ba5accebf6 100644 --- a/arch/ia64/kernel/acpi-processor.c +++ b/arch/ia64/kernel/acpi-processor.c @@ -14,24 +14,6 @@ #include #include -static void init_intel_pdc(struct acpi_processor *pr) -{ - u32 *buf = (u32 *)pr->pdc->pointer->buffer.pointer; - - buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP; - - return; -} - -/* Initialize _PDC data based on the CPU vendor */ -void arch_acpi_processor_init_pdc(struct acpi_processor *pr) -{ - init_intel_pdc(pr); - return; -} - -EXPORT_SYMBOL(arch_acpi_processor_init_pdc); - void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr) { if (pr->pdc) { diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index d787e6e92bd..56f462cf22d 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -149,6 +149,25 @@ static inline bool arch_has_acpi_pdc(void) c->x86_vendor == X86_VENDOR_CENTAUR); } +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + buf[2] |= ACPI_PDC_C_CAPABILITY_SMP; + + if (cpu_has(c, X86_FEATURE_EST)) + buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP; + + if (cpu_has(c, X86_FEATURE_ACPI)) + buf[2] |= ACPI_PDC_T_FFH; + + /* + * If mwait/monitor is unsupported, C2/C3_FFH will be disabled + */ + if (!cpu_has(c, X86_FEATURE_MWAIT)) + buf[2] &= ~(ACPI_PDC_C_C2C3_FFH); +} + #else /* !CONFIG_ACPI */ #define acpi_lapic 0 diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c index d722ca8cb4c..0f57307f822 100644 --- a/arch/x86/kernel/acpi/processor.c +++ b/arch/x86/kernel/acpi/processor.c @@ -12,40 +12,6 @@ #include #include -static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c) -{ - u32 *buf = (u32 *)pr->pdc->pointer->buffer.pointer; - - buf[2] |= ACPI_PDC_C_CAPABILITY_SMP; - - if (cpu_has(c, X86_FEATURE_EST)) - buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP; - - if (cpu_has(c, X86_FEATURE_ACPI)) - buf[2] |= ACPI_PDC_T_FFH; - - /* - * If mwait/monitor is unsupported, C2/C3_FFH will be disabled - */ - if (!cpu_has(c, X86_FEATURE_MWAIT)) - buf[2] &= ~(ACPI_PDC_C_C2C3_FFH); - - return; -} - - -/* Initialize _PDC data based on the CPU vendor */ -void arch_acpi_processor_init_pdc(struct acpi_processor *pr) -{ - struct cpuinfo_x86 *c = &cpu_data(pr->id); - - init_intel_pdc(pr, c); - - return; -} - -EXPORT_SYMBOL(arch_acpi_processor_init_pdc); - void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr) { if (pr->pdc) { diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c index ccda7c95d07..48df08ebcec 100644 --- 
a/drivers/acpi/processor_pdc.c +++ b/drivers/acpi/processor_pdc.c @@ -40,6 +40,9 @@ static void acpi_set_pdc_bits(u32 *buf) /* Enable coordination with firmware's _TSD info */ buf[2] = ACPI_PDC_SMP_T_SWCOORD; + + /* Twiddle arch-specific bits needed for _PDC */ + arch_acpi_set_pdc_bits(buf); } static void acpi_processor_init_pdc(struct acpi_processor *pr) @@ -81,9 +84,6 @@ static void acpi_processor_init_pdc(struct acpi_processor *pr) obj_list->pointer = obj; pr->pdc = obj_list; - /* Now let the arch do the bit-twiddling to buf[] */ - arch_acpi_processor_init_pdc(pr); - return; } diff --git a/include/acpi/processor.h b/include/acpi/processor.h index a1b748a7a76..50edd734aec 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -257,7 +257,6 @@ int acpi_processor_notify_smm(struct module *calling_module); DECLARE_PER_CPU(struct acpi_processor *, processors); extern struct acpi_processor_errata errata; -void arch_acpi_processor_init_pdc(struct acpi_processor *pr); void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr); #ifdef ARCH_HAS_POWER_INIT -- cgit v1.2.3-70-g09d2
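Put together, the split after this last patch is that generic code owns the _PDC capability buffer and its common bits, while each arch only ORs in its own bits. A condensed sketch assembled from the hunks above (ia64 shown; the revision/word-count setup at the top of acpi_set_pdc_bits() is omitted here):

	/* drivers/acpi/processor_pdc.c: common bits, then hand the buffer to the arch */
	static void acpi_set_pdc_bits(u32 *buf)
	{
		/* Enable coordination with firmware's _TSD info */
		buf[2] = ACPI_PDC_SMP_T_SWCOORD;

		/* Twiddle arch-specific bits needed for _PDC */
		arch_acpi_set_pdc_bits(buf);
	}

	/* arch/ia64/include/asm/acpi.h: the entire per-arch contribution */
	static inline void arch_acpi_set_pdc_bits(u32 *buf)
	{
		buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
	}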