author | Eric Dumazet <eric.dumazet@gmail.com> | 2009-10-14 20:40:11 -0700
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2009-10-14 20:40:11 -0700
commit | 766e9037cc139ee25ed93ee5ad11e1450c4b99f6 (patch) |
tree | 062702b8edf203a6e91d1e6853ab24989617d758 /net/core |
parent | 48bccd25df71f4f8177cb800f4b288222eb57761 (diff) |
net: sk_drops consolidation
sock_queue_rcv_skb() can update sk_drops itself, removing the need for
callers to take care of it. This is more consistent, since
sock_queue_rcv_skb() also reads sk_drops when queueing a skb.
This adds sk_drops management to many protocols that did not care about it yet.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
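As a caller-side illustration (hypothetical code, not part of this patch: the proto_rcv_old()/proto_rcv_new() helpers and their error handling are made up), a protocol receive handler can now drop its own sk_drops bookkeeping for the queueing-failure case, because sock_queue_rcv_skb() increments the counter on its -ENOMEM and -ENOBUFS paths:

```c
/*
 * Hypothetical caller, for illustration only -- proto_rcv_old() and
 * proto_rcv_new() are not taken from this patch or any real protocol.
 */
#include <linux/netdevice.h>	/* NET_RX_SUCCESS, NET_RX_DROP */
#include <linux/skbuff.h>	/* kfree_skb() */
#include <net/sock.h>		/* sock_queue_rcv_skb(), sk->sk_drops */

/* Before: each protocol charged the drop itself on queueing failure. */
static int proto_rcv_old(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		atomic_inc(&sk->sk_drops);	/* per-caller accounting */
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}

/* After: sock_queue_rcv_skb() bumps sk_drops on -ENOMEM/-ENOBUFS itself. */
static int proto_rcv_new(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}
```

Note that the sk_filter() failure path still returns the filter's error without touching sk_drops; only the receive-buffer overflow and sk_rmem_schedule() failures are counted inside sock_queue_rcv_skb().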
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/sock.c | 15 |
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 43ca2c99539..38713aa3faf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -274,7 +274,7 @@ static void sock_disable_timestamp(struct sock *sk, int flag)
 
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-	int err = 0;
+	int err;
 	int skb_len;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
@@ -284,17 +284,17 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOMEM;
 	}
 
 	err = sk_filter(sk, skb);
 	if (err)
-		goto out;
+		return err;
 
 	if (!sk_rmem_schedule(sk, skb->truesize)) {
-		err = -ENOBUFS;
-		goto out;
+		atomic_inc(&sk->sk_drops);
+		return -ENOBUFS;
 	}
 
 	skb->dev = NULL;
@@ -314,8 +314,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_data_ready(sk, skb_len);
 
-out:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
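The changelog's point that sock_queue_rcv_skb() "also reads sk_drops when queueing a skb" refers to the dropcount snapshot taken under the receive-queue lock, which lies outside the hunks shown above. A rough sketch of that unchanged portion of the function (reconstructed rather than quoted from the tree at this commit) looks like:

```c
	/*
	 * Unchanged by this patch (sketch, not quoted from the tree): the skb
	 * records a snapshot of sk_drops under the receive-queue lock, which
	 * is the "read" the changelog refers to.
	 */
	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
```

Keeping both that read and the increments of sk_drops inside sock_queue_rcv_skb() is what makes the accounting consistent, as the changelog notes.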