Diffstat (limited to 'net')
-rw-r--r--  net/atm/common.c                  | 22
-rw-r--r--  net/bluetooth/l2cap.c             |  5
-rw-r--r--  net/core/dev.c                    | 26
-rw-r--r--  net/core/skbuff.c                 |  2
-rw-r--r--  net/core/sock.c                   | 50
-rw-r--r--  net/core/stream.c                 | 10
-rw-r--r--  net/dccp/output.c                 | 10
-rw-r--r--  net/ethernet/eth.c                |  2
-rw-r--r--  net/ipv4/inet_connection_sock.c   | 16
-rw-r--r--  net/ipv6/addrconf.c               | 16
-rw-r--r--  net/ipv6/inet6_connection_sock.c  | 15
-rw-r--r--  net/iucv/af_iucv.c                | 11
-rw-r--r--  net/phonet/pep.c                  |  8
-rw-r--r--  net/phonet/socket.c               |  2
-rw-r--r--  net/rxrpc/af_rxrpc.c              | 10
-rw-r--r--  net/sched/sch_generic.c           |  2
-rw-r--r--  net/sctp/associola.c              |  6
-rw-r--r--  net/sctp/endpointola.c            |  1
-rw-r--r--  net/sctp/probe.c                  |  1
-rw-r--r--  net/sctp/sm_make_chunk.c          | 94
-rw-r--r--  net/sctp/sm_sideeffect.c          | 26
-rw-r--r--  net/sctp/sm_statefuns.c           |  8
-rw-r--r--  net/sctp/socket.c                 | 19
-rw-r--r--  net/socket.c                      | 47
-rw-r--r--  net/unix/af_unix.c                | 17
-rw-r--r--  net/unix/garbage.c                | 13
26 files changed, 281 insertions(+), 158 deletions(-)
diff --git a/net/atm/common.c b/net/atm/common.c
index e3e10e6f862..b43feb1a399 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -90,10 +90,13 @@ static void vcc_sock_destruct(struct sock *sk)
static void vcc_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up(sk_sleep(sk));
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up(&wq->wait);
+ rcu_read_unlock();
}
static inline int vcc_writable(struct sock *sk)
@@ -106,16 +109,19 @@ static inline int vcc_writable(struct sock *sk)
static void vcc_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (vcc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk_sleep(sk));
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static struct proto vcc_proto = {
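
Every wakeup-callback conversion in this series follows the shape above: the
read_lock(&sk->sk_callback_lock) section becomes an RCU read-side section, and
the wait queue moves behind a new struct socket_wq that can be freed after a
grace period. A minimal sketch of the supporting definitions, assuming the
include/linux/net.h and include/net/sock.h side of the series (not covered by
this net/-only diffstat):

struct socket_wq {
	wait_queue_head_t	wait;
	struct fasync_struct	*fasync_list;
	struct rcu_head		rcu;	/* freed via call_rcu(), see net/socket.c below */
} ____cacheline_aligned_in_smp;

static inline int wq_has_sleeper(struct socket_wq *wq)
{
	/* Paired with the barrier on the wait side (sock_poll_wait()):
	 * either the waker sees the wait-queue addition, or the sleeper
	 * sees the state change it is about to test.
	 */
	smp_mb();
	return wq && waitqueue_active(&wq->wait);
}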
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c1e60eed5a9..864c76f4a67 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1626,7 +1626,10 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
/* Connectionless channel */
if (sk->sk_type == SOCK_DGRAM) {
skb = l2cap_create_connless_pdu(sk, msg, len);
- err = l2cap_do_send(sk, skb);
+ if (IS_ERR(skb))
+ err = PTR_ERR(skb);
+ else
+ err = l2cap_do_send(sk, skb);
goto done;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 100dcbd2973..36d53be4fca 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2205,8 +2205,6 @@ int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64; /* old backlog weight */
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
@@ -2366,7 +2364,7 @@ static void rps_trigger_softirq(void *data)
struct softnet_data *sd = data;
__napi_schedule(&sd->backlog);
- __get_cpu_var(netdev_rx_stat).received_rps++;
+ sd->received_rps++;
}
#endif /* CONFIG_RPS */
@@ -2405,7 +2403,6 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
sd = &per_cpu(softnet_data, cpu);
local_irq_save(flags);
- __get_cpu_var(netdev_rx_stat).total++;
rps_lock(sd);
if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
@@ -2429,9 +2426,9 @@ enqueue:
goto enqueue;
}
+ sd->dropped++;
rps_unlock(sd);
- __get_cpu_var(netdev_rx_stat).dropped++;
local_irq_restore(flags);
kfree_skb(skb);
@@ -2806,7 +2803,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
skb->dev = master;
}
- __get_cpu_var(netdev_rx_stat).total++;
+ __get_cpu_var(softnet_data).processed++;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -3490,7 +3487,7 @@ out:
return;
softnet_break:
- __get_cpu_var(netdev_rx_stat).time_squeeze++;
+ sd->time_squeeze++;
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
goto out;
}
@@ -3691,17 +3688,17 @@ static int dev_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+static struct softnet_data *softnet_get_online(loff_t *pos)
{
- struct netif_rx_stats *rc = NULL;
+ struct softnet_data *sd = NULL;
while (*pos < nr_cpu_ids)
if (cpu_online(*pos)) {
- rc = &per_cpu(netdev_rx_stat, *pos);
+ sd = &per_cpu(softnet_data, *pos);
break;
} else
++*pos;
- return rc;
+ return sd;
}
static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
@@ -3721,12 +3718,12 @@ static void softnet_seq_stop(struct seq_file *seq, void *v)
static int softnet_seq_show(struct seq_file *seq, void *v)
{
- struct netif_rx_stats *s = v;
+ struct softnet_data *sd = v;
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- s->total, s->dropped, s->time_squeeze, 0,
+ sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
- s->cpu_collision, s->received_rps);
+ sd->cpu_collision, sd->received_rps);
return 0;
}
@@ -5869,6 +5866,7 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
+ memset(sd, 0, sizeof(*sd));
skb_queue_head_init(&sd->input_pkt_queue);
skb_queue_head_init(&sd->process_queue);
sd->completion_queue = NULL;
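
With netdev_rx_stat gone, each CPU's RX counters now live in its softnet_data
next to the queues they describe. A hypothetical helper summing one field
across CPUs, mirroring how softnet_get_online()/softnet_seq_show() walk the
per-cpu data above:

static unsigned int softnet_sum_dropped(void)
{
	unsigned int total = 0;
	int cpu;

	/* Same traversal as softnet_get_online(), collapsed into a sum. */
	for_each_online_cpu(cpu)
		total += per_cpu(softnet_data, cpu).dropped;
	return total;
}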
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4218ff49bf1..8b9c109166a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1051,7 +1051,7 @@ EXPORT_SYMBOL(skb_push);
*/
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
- return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+ return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
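
skb_pull() keeps its out-of-line export, but the length check now lives in an
inline that callers on hot paths can use directly (see eth_type_trans() in the
net/ethernet/eth.c hunk below). Presumably the include/linux/skbuff.h side of
the change carries the old body verbatim:

static inline unsigned char *skb_pull_inline(struct sk_buff *skb,
					     unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}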
diff --git a/net/core/sock.c b/net/core/sock.c
index 51041759517..94c4affdda9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1211,7 +1211,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
- newsk->sk_sleep = NULL;
+ newsk->sk_wq = NULL;
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
@@ -1800,41 +1800,53 @@ EXPORT_SYMBOL(sock_no_sendpage);
static void sock_def_wakeup(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk_sleep(sk));
- read_unlock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
+ rcu_read_unlock();
}
static void sock_def_error_report(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_poll(sk_sleep(sk), POLLERR);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_readable(struct sock *sk, int len)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN |
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
/* Should agree with poll, otherwise some programs break */
@@ -1842,7 +1854,7 @@ static void sock_def_write_space(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
static void sock_def_destruct(struct sock *sk)
@@ -1896,10 +1908,10 @@ void sock_init_data(struct socket *sock, struct sock *sk)
if (sock) {
sk->sk_type = sock->type;
- sk->sk_sleep = &sock->wait;
+ sk->sk_wq = sock->wq;
sock->sk = sk;
} else
- sk->sk_sleep = NULL;
+ sk->sk_wq = NULL;
spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
diff --git a/net/core/stream.c b/net/core/stream.c
index 7b3c3f30b10..cc196f42b8d 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -28,15 +28,19 @@
void sk_stream_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
+ struct socket_wq *wq;
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
- if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
- wake_up_interruptible_poll(sk_sleep(sk), POLLOUT |
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_poll(&wq->wait, POLLOUT |
POLLWRNORM | POLLWRBAND);
- if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+ if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
+ rcu_read_unlock();
}
}
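
Note the explicit NULL test on wq above: an orphaned socket keeps no wait
queue, so every waker must tolerate sk->sk_wq being cleared. Presumably
sock_orphan() in include/net/sock.h now does the clearing under the callback
lock, along these lines:

static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;	/* RCU readers see NULL and bail out */
	write_unlock_bh(&sk->sk_callback_lock);
}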
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 2d3dcb39851..aadbdb58758 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -195,15 +195,17 @@ EXPORT_SYMBOL_GPL(dccp_sync_mss);
void dccp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk_sleep(sk));
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/**
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 0c0d272a988..61ec0329316 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -162,7 +162,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- skb_pull(skb, ETH_HLEN);
+ skb_pull_inline(skb, ETH_HLEN);
eth = eth_hdr(skb);
if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 78cbc39f56c..e0a3e3537b1 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -70,17 +70,13 @@ int inet_csk_bind_conflict(const struct sock *sk,
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
+ const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr ||
sk2_rcv_saddr == sk_rcv_saddr)
break;
- } else if (reuse && sk2->sk_reuse &&
- sk2_rcv_saddr &&
- sk2_rcv_saddr == sk_rcv_saddr)
- break;
+ }
}
}
return node != NULL;
@@ -124,11 +120,9 @@ again:
smallest_size = tb->num_owners;
smallest_rover = rover;
if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
- if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
- spin_unlock(&head->lock);
- snum = smallest_rover;
- goto have_snum;
- }
+ spin_unlock(&head->lock);
+ snum = smallest_rover;
+ goto have_snum;
}
}
goto next;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 34d2d649e39..3984f52181f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1346,7 +1346,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
struct hlist_node *node;
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, node, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr)) {
@@ -2959,7 +2959,7 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
struct hlist_node *n;
- hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket],
+ hlist_for_each_entry_rcu_bh(ifa, n, &inet6_addr_lst[state->bucket],
addr_lst)
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
@@ -2974,12 +2974,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct hlist_node *n = &ifa->addr_lst;
- hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
+ hlist_for_each_entry_continue_rcu_bh(ifa, n, addr_lst)
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
while (++state->bucket < IN6_ADDR_HSIZE) {
- hlist_for_each_entry(ifa, n,
+ hlist_for_each_entry_rcu_bh(ifa, n,
&inet6_addr_lst[state->bucket], addr_lst) {
if (net_eq(dev_net(ifa->idev->dev), net))
return ifa;
@@ -3000,7 +3000,7 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
}
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(rcu)
+ __acquires(rcu_bh)
{
rcu_read_lock_bh();
return if6_get_idx(seq, *pos);
@@ -3016,7 +3016,7 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void if6_seq_stop(struct seq_file *seq, void *v)
- __releases(rcu)
+ __releases(rcu_bh)
{
rcu_read_unlock_bh();
}
@@ -3093,7 +3093,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
unsigned int hash = ipv6_addr_hash(addr);
rcu_read_lock_bh();
- hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) {
+ hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
if (!net_eq(dev_net(ifp->idev->dev), net))
continue;
if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3127,7 +3127,7 @@ static void addrconf_verify(unsigned long foo)
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
- hlist_for_each_entry_rcu(ifp, node,
+ hlist_for_each_entry_rcu_bh(ifp, node,
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9ca1efc923a..0c5e3c3b7fd 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -42,16 +42,11 @@ int inet6_csk_bind_conflict(const struct sock *sk,
if (sk != sk2 &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
- sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if ((!sk->sk_reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
- ipv6_rcv_saddr_equal(sk, sk2))
- break;
- else if (sk->sk_reuse && sk2->sk_reuse &&
- !ipv6_addr_any(inet6_rcv_saddr(sk)) &&
- ipv6_rcv_saddr_equal(sk, sk2))
- break;
- }
+ sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
+ (!sk->sk_reuse || !sk2->sk_reuse ||
+ sk2->sk_state == TCP_LISTEN) &&
+ ipv6_rcv_saddr_equal(sk, sk2))
+ break;
}
return node != NULL;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 9636b7d27b4..8be324fe08b 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -305,11 +305,14 @@ static inline int iucv_below_msglim(struct sock *sk)
*/
static void iucv_sock_wake_msglim(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
- if (sk_has_sleeper(sk))
- wake_up_interruptible_all(sk_sleep(sk));
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* Timers */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e2a95762abd..af4d38bc3b2 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -664,12 +664,12 @@ static int pep_wait_connreq(struct sock *sk, int noblock)
if (signal_pending(tsk))
return sock_intr_errno(timeo);
- prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
}
return 0;
@@ -910,10 +910,10 @@ disabled:
goto out;
}
- prepare_to_wait(&sk->sk_socket->wait, &wait,
+ prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
- finish_wait(&sk->sk_socket->wait, &wait);
+ finish_wait(sk_sleep(sk), &wait);
if (sk->sk_state != TCP_ESTABLISHED)
goto disabled;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c785bfd0744..6e9848bf037 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -265,7 +265,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
struct pep_sock *pn = pep_sk(sk);
unsigned int mask = 0;
- poll_wait(file, &sock->wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
switch (sk->sk_state) {
case TCP_LISTEN:
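
The phonet hunks above stop reaching through sk->sk_socket->wait and use the
sk_sleep() accessor instead, which is what keeps them working once the wait
queue moves into socket_wq. A sketch of that accessor as assumed here:

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	/* Relies on wait being the first member of struct socket_wq. */
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}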
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c432d76f415..0b9bb2085ce 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -62,13 +62,15 @@ static inline int rxrpc_writable(struct sock *sk)
static void rxrpc_write_space(struct sock *sk)
{
_enter("%p", sk);
- read_lock(&sk->sk_callback_lock);
+ rcu_read_lock();
if (rxrpc_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible(sk_sleep(sk));
+ struct socket_wq *wq = rcu_dereference(sk->sk_wq);
+
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/*
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index aeddabfb8e4..a969b111bd7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -94,7 +94,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* Another cpu is holding lock, requeue & delay xmits for
* some time.
*/
- __get_cpu_var(netdev_rx_stat).cpu_collision++;
+ __get_cpu_var(softnet_data).cpu_collision++;
ret = dev_requeue_skb(skb, q);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 65f9a7cdf46..3912420cedc 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1192,8 +1192,10 @@ void sctp_assoc_update(struct sctp_association *asoc,
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
- if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
- sctp_assoc_del_peer(asoc, &trans->ipaddr);
+ if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+ sctp_assoc_rm_peer(asoc, trans);
+ continue;
+ }
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2f8763bae9e..e10acc01c75 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -142,6 +142,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Use SCTP specific send buffer space queues. */
ep->sndbuf_policy = sctp_sndbuf_policy;
+ sk->sk_data_ready = sctp_data_ready;
sk->sk_write_space = sctp_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 8f025d5831a..db3a42b8b34 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -27,6 +27,7 @@
#include <linux/socket.h>
#include <linux/sctp.h>
#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kfifo.h>
#include <linux/time.h>
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 24effdf471e..d8261f3d771 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -108,7 +108,7 @@ static const struct sctp_paramhdr prsctp_param = {
cpu_to_be16(sizeof(struct sctp_paramhdr)),
};
-/* A helper to initialize to initialize an op error inside a
+/* A helper to initialize an op error inside a
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
@@ -125,6 +125,29 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
}
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk. Differs from sctp_init_cause in that it won't oops
+ * if there isn't enough space in the op error chunk
+ */
+int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+ size_t paylen)
+{
+ sctp_errhdr_t err;
+ __u16 len;
+
+ /* Cause code constants are now defined in network order. */
+ err.cause = cause_code;
+ len = sizeof(sctp_errhdr_t) + paylen;
+ err.length = htons(len);
+
+ if (skb_tailroom(chunk->skb) < len)
+ return -ENOSPC;
+ chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk,
+ sizeof(sctp_errhdr_t),
+ &err);
+ return 0;
+}
/* 3.3.2 Initiation (INIT) (1)
*
* This chunk is used to initiate a SCTP association between two
@@ -208,7 +231,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
sp = sctp_sk(asoc->base.sk);
num_types = sp->pf->supported_addrs(sp, types);
- chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types);
+ chunksize = sizeof(init) + addrs_len;
+ chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (sctp_prsctp_enable)
@@ -238,14 +262,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += ntohs(auth_hmacs->length);
+ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += ntohs(auth_chunks->length);
+ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -255,7 +279,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
@@ -397,13 +422,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += ntohs(auth_hmacs->length);
+ chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += ntohs(auth_chunks->length);
+ chunksize += WORD_ROUND(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -412,7 +437,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
- chunksize += sizeof(sctp_supported_ext_param_t) + num_ext;
+ chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
@@ -1124,6 +1150,24 @@ nodata:
return retval;
}
+/* Create an Operation Error chunk of a fixed size, specifically
+ * the association's path MTU if known, else SCTP_DEFAULT_MAXSEGMENT.
+ * This is a helper function to allocate an error chunk for those
+ * invalid parameter codes in which we may not want to report all
+ * the errors, if the incoming chunk is large.
+ */
+static inline struct sctp_chunk *sctp_make_op_error_fixed(
+ const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk)
+{
+ size_t size = asoc ? asoc->pathmtu : 0;
+
+ if (!size)
+ size = SCTP_DEFAULT_MAXSEGMENT;
+
+ return sctp_make_op_error_space(asoc, chunk, size);
+}
+
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
@@ -1365,6 +1409,18 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
return target;
}
+/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient
+ * space in the chunk
+ */
+void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk,
+ int len, const void *data)
+{
+ if (skb_tailroom(chunk->skb) >= len)
+ return sctp_addto_chunk(chunk, len, data);
+ else
+ return NULL;
+}
+
/* Append bytes from user space to the end of a chunk. Will panic if
* chunk is not big enough.
* Returns a kernel err value.
@@ -1968,13 +2024,12 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
* returning multiple unknown parameters.
*/
if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
+ *errp = sctp_make_op_error_fixed(asoc, chunk);
if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
+ sctp_addto_chunk_fixed(*errp,
WORD_ROUND(ntohs(param.p->length)),
param.v);
} else {
@@ -3309,21 +3364,6 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
sctp_chunk_free(asconf);
asoc->addip_last_asconf = NULL;
- /* Send the next asconf chunk from the addip chunk queue. */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- asconf = list_entry(entry, struct sctp_chunk, list);
-
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-
return retval;
}
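
The recurring WORD_ROUND() additions account for the fact that SCTP parameters
are padded to 32-bit boundaries on the wire, so sizing a chunk from unrounded
parameter lengths can leave it short of tailroom. The macro assumed throughout
(defined in include/net/sctp/sctp.h) is just a 4-byte round-up:

/* Round an SCTP length up to the next 4-byte boundary. */
#define WORD_ROUND(s)	(((s) + 3) & ~3)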
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 49fb9acece6..3b7230ef77c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -966,6 +966,29 @@ static int sctp_cmd_send_msg(struct sctp_association *asoc,
}
+/* Send the next ASCONF chunk currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+ /* Send the next asconf chunk from the addip chunk
+ * queue.
+ */
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ struct sctp_chunk *asconf = list_entry(entry,
+ struct sctp_chunk, list);
+ list_del_init(entry);
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(asconf);
+ if (sctp_primitive_ASCONF(asoc, asconf))
+ sctp_chunk_free(asconf);
+ else
+ asoc->addip_last_asconf = asconf;
+ }
+}
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
@@ -1621,6 +1644,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
}
error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
break;
+ case SCTP_CMD_SEND_NEXT_ASCONF:
+ sctp_cmd_send_asconf(asoc);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
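
SCTP_CMD_SEND_NEXT_ASCONF is a new side-effect verb, so this patch also needs
a hunk outside net/ extending the command enum; presumably
include/net/sctp/command.h grows something like:

typedef enum {
	/* ... existing verbs ... */
	SCTP_CMD_SEND_MSG,		/* Send the whole use message. */
	SCTP_CMD_SEND_NEXT_ASCONF,	/* Send next ASCONF after ACK. */
	SCTP_CMD_LAST
} sctp_verb_t;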
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index abf601a1b84..24b2cd55563 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3676,8 +3676,14 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack))
+ asconf_ack)) {
+ /* Successfully processed ASCONF_ACK. We can
+ * release the next asconf if we have one.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
+ SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
+ }
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1282a0ed855..ba1add0b13c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3719,9 +3719,9 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
- percpu_counter_inc(&sctp_sockets_allocated);
local_bh_disable();
+ percpu_counter_inc(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
@@ -3738,8 +3738,8 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
/* Release our hold on the endpoint. */
ep = sctp_sk(sk)->ep;
sctp_endpoint_free(ep);
- percpu_counter_dec(&sctp_sockets_allocated);
local_bh_disable();
+ percpu_counter_dec(&sctp_sockets_allocated);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
}
@@ -6065,7 +6065,7 @@ static void __sctp_write_space(struct sctp_association *asoc)
* here by modeling from the current TCP/UDP code.
* We have not tested with it yet.
*/
- if (sock->fasync_list &&
+ if (sock->wq->fasync_list &&
!(sk->sk_shutdown & SEND_SHUTDOWN))
sock_wake_async(sock,
SOCK_WAKE_SPACE, POLL_OUT);
@@ -6185,6 +6185,19 @@ do_nonblock:
goto out;
}
+void sctp_data_ready(struct sock *sk, int len)
+{
+ struct socket_wq *wq;
+
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ rcu_read_unlock();
+}
+
/* If socket sndbuf has changed, wake up all per association waiters. */
void sctp_write_space(struct sock *sk)
{
diff --git a/net/socket.c b/net/socket.c
index cb7c1f6c0d6..dae8c6b84a0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -252,9 +252,14 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
- init_waitqueue_head(&ei->socket.wait);
+ ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
+ if (!ei->socket.wq) {
+ kmem_cache_free(sock_inode_cachep, ei);
+ return NULL;
+ }
+ init_waitqueue_head(&ei->socket.wq->wait);
+ ei->socket.wq->fasync_list = NULL;
- ei->socket.fasync_list = NULL;
ei->socket.state = SS_UNCONNECTED;
ei->socket.flags = 0;
ei->socket.ops = NULL;
@@ -264,10 +269,21 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
+
+static void wq_free_rcu(struct rcu_head *head)
+{
+ struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
+
+ kfree(wq);
+}
+
static void sock_destroy_inode(struct inode *inode)
{
- kmem_cache_free(sock_inode_cachep,
- container_of(inode, struct socket_alloc, vfs_inode));
+ struct socket_alloc *ei;
+
+ ei = container_of(inode, struct socket_alloc, vfs_inode);
+ call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+ kmem_cache_free(sock_inode_cachep, ei);
}
static void init_once(void *foo)
@@ -513,7 +529,7 @@ void sock_release(struct socket *sock)
module_put(owner);
}
- if (sock->fasync_list)
+ if (sock->wq->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
percpu_sub(sockets_in_use, 1);
@@ -1080,9 +1096,9 @@ static int sock_fasync(int fd, struct file *filp, int on)
lock_sock(sk);
- fasync_helper(fd, filp, on, &sock->fasync_list);
+ fasync_helper(fd, filp, on, &sock->wq->fasync_list);
- if (!sock->fasync_list)
+ if (!sock->wq->fasync_list)
sock_reset_flag(sk, SOCK_FASYNC);
else
sock_set_flag(sk, SOCK_FASYNC);
@@ -1091,12 +1107,20 @@ static int sock_fasync(int fd, struct file *filp, int on)
return 0;
}
-/* This function may be called only under socket lock or callback_lock */
+/* This function may be called only under socket lock, callback_lock or rcu_read_lock */
int sock_wake_async(struct socket *sock, int how, int band)
{
- if (!sock || !sock->fasync_list)
+ struct socket_wq *wq;
+
+ if (!sock)
return -1;
+ rcu_read_lock();
+ wq = rcu_dereference(sock->wq);
+ if (!wq || !wq->fasync_list) {
+ rcu_read_unlock();
+ return -1;
+ }
switch (how) {
case SOCK_WAKE_WAITD:
if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
@@ -1108,11 +1132,12 @@ int sock_wake_async(struct socket *sock, int how, int band)
/* fall through */
case SOCK_WAKE_IO:
call_kill:
- kill_fasync(&sock->fasync_list, SIGIO, band);
+ kill_fasync(&wq->fasync_list, SIGIO, band);
break;
case SOCK_WAKE_URG:
- kill_fasync(&sock->fasync_list, SIGURG, band);
+ kill_fasync(&wq->fasync_list, SIGURG, band);
}
+ rcu_read_unlock();
return 0;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 87c0360eaa2..fef2cc5e9d2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -313,13 +313,16 @@ static inline int unix_writable(struct sock *sk)
static void unix_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ struct socket_wq *wq;
+
+ rcu_read_lock();
if (unix_writable(sk)) {
- if (sk_has_sleeper(sk))
- wake_up_interruptible_sync(sk_sleep(sk));
+ wq = rcu_dereference(sk->sk_wq);
+ if (wq_has_sleeper(wq))
+ wake_up_interruptible_sync(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
- read_unlock(&sk->sk_callback_lock);
+ rcu_read_unlock();
}
/* When dgram socket disconnects (or changes its peer), we clear its receive
@@ -406,9 +409,7 @@ static int unix_release_sock(struct sock *sk, int embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
- read_lock(&skpair->sk_callback_lock);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
- read_unlock(&skpair->sk_callback_lock);
}
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
@@ -1142,7 +1143,7 @@ restart:
newsk->sk_peercred.pid = task_tgid_vnr(current);
current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
newu = unix_sk(newsk);
- newsk->sk_sleep = &newu->peer_wait;
+ newsk->sk_wq = &newu->peer_wq;
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
@@ -1931,12 +1932,10 @@ static int unix_shutdown(struct socket *sock, int mode)
other->sk_shutdown |= peer_mode;
unix_state_unlock(other);
other->sk_state_change(other);
- read_lock(&other->sk_callback_lock);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- read_unlock(&other->sk_callback_lock);
}
if (other)
sock_put(other);
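
The peer wait queue in af_unix is embedded rather than allocated, which is why
unix_stream_connect() above can point newsk->sk_wq at it directly. Presumably
struct unix_sock in include/net/af_unix.h now reads, in part:

struct unix_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	/* ... other members unchanged ... */
	struct socket_wq	peer_wq;	/* was wait_queue_head_t peer_wait */
};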
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 14c22c3768d..c8df6fda0b1 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -153,15 +153,6 @@ void unix_notinflight(struct file *fp)
}
}
-static inline struct sk_buff *sock_queue_head(struct sock *sk)
-{
- return (struct sk_buff *)&sk->sk_receive_queue;
-}
-
-#define receive_queue_for_each_skb(sk, next, skb) \
- for (skb = sock_queue_head(sk)->next, next = skb->next; \
- skb != sock_queue_head(sk); skb = next, next = skb->next)
-
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
@@ -169,7 +160,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/*
* Do we have file descriptors ?
*/
@@ -225,7 +216,7 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
- receive_queue_for_each_skb(x, next, skb) {
+ skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/*