Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 253
1 file changed, 158 insertions(+), 95 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 91c1c8baf02..4a452169956 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -46,6 +46,7 @@
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
+#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
@@ -55,7 +56,9 @@
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
+#include <linux/aio.h>
+#include <linux/sched.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
@@ -68,16 +71,16 @@
struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
-int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
#else
static inline
-int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
return 0;
}
static inline
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
}
#endif
@@ -95,7 +98,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
-void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
+void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
@@ -244,6 +247,7 @@ struct cg_proto;
* @sk_user_data: RPC layer private data
* @sk_sndmsg_page: cached page for sendmsg
* @sk_sndmsg_off: cached offset for sendmsg
+ * @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @sk_security: used by security modules
* @sk_mark: generic packet mark
@@ -357,6 +361,7 @@ struct sock {
struct page *sk_sndmsg_page;
struct sk_buff *sk_send_head;
__u32 sk_sndmsg_off;
+ __s32 sk_peek_off;
int sk_write_pending;
#ifdef CONFIG_SECURITY
void *sk_security;
@@ -368,12 +373,47 @@ struct sock {
void (*sk_data_ready)(struct sock *sk, int bytes);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
- int (*sk_backlog_rcv)(struct sock *sk,
- struct sk_buff *skb);
+ int (*sk_backlog_rcv)(struct sock *sk,
+ struct sk_buff *skb);
void (*sk_destruct)(struct sock *sk);
};
/*
+ * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
+ * or not, respectively, with its port being reused by someone else.
+ * SK_FORCE_REUSE on a socket means that the socket will reuse everybody
+ * else's port without looking at the other socket's sk_reuse value.
+ */
+
+#define SK_NO_REUSE 0
+#define SK_CAN_REUSE 1
+#define SK_FORCE_REUSE 2
+
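Not part of the patch: a hedged sketch of how a bind-time conflict check might interpret the tri-state introduced above. The function name is hypothetical and `other` stands for a socket already bound to the contested port.

```c
/* Hypothetical sketch, not from this patch: a bind-time conflict test
 * over the tri-state sk_reuse values defined above.
 */
static bool reuse_conflict_sketch(const struct sock *sk,
				  const struct sock *other)
{
	/* SK_FORCE_REUSE ignores the other socket's preference entirely. */
	if (sk->sk_reuse == SK_FORCE_REUSE)
		return false;

	/* Otherwise the port is shareable only if both sides opted in. */
	return !(sk->sk_reuse == SK_CAN_REUSE &&
		 other->sk_reuse == SK_CAN_REUSE);
}
```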
+static inline int sk_peek_offset(struct sock *sk, int flags)
+{
+ if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
+ return sk->sk_peek_off;
+ else
+ return 0;
+}
+
+static inline void sk_peek_offset_bwd(struct sock *sk, int val)
+{
+ if (sk->sk_peek_off >= 0) {
+ if (sk->sk_peek_off >= val)
+ sk->sk_peek_off -= val;
+ else
+ sk->sk_peek_off = 0;
+ }
+}
+
+static inline void sk_peek_offset_fwd(struct sock *sk, int val)
+{
+ if (sk->sk_peek_off >= 0)
+ sk->sk_peek_off += val;
+}
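The three helpers combine roughly as follows in a datagram receive path. This is an illustrative sketch only; `skb_at_offset()` is a hypothetical stand-in for the queue walk a real `recvmsg` implementation performs.

```c
/* Illustrative only: how a datagram recvmsg path might drive the
 * peek-offset helpers. skb_at_offset() is hypothetical.
 */
static void recv_one_sketch(struct sock *sk, int flags)
{
	int off = sk_peek_offset(sk, flags);	/* 0 unless MSG_PEEK is set */
	struct sk_buff *skb = skb_at_offset(sk, &off);

	if (!skb)
		return;
	if (flags & MSG_PEEK)
		sk_peek_offset_fwd(sk, skb->len);  /* next peek sees the next dgram */
	else
		sk_peek_offset_bwd(sk, skb->len);  /* consumed data shifts the offset back */
}
```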
+
+/*
* Hashed lists helper routines
*/
static inline struct sock *sk_entry(const struct hlist_node *node)
@@ -415,40 +455,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk)
NULL;
}
-static inline int sk_unhashed(const struct sock *sk)
+static inline bool sk_unhashed(const struct sock *sk)
{
return hlist_unhashed(&sk->sk_node);
}
-static inline int sk_hashed(const struct sock *sk)
+static inline bool sk_hashed(const struct sock *sk)
{
return !sk_unhashed(sk);
}
-static __inline__ void sk_node_init(struct hlist_node *node)
+static inline void sk_node_init(struct hlist_node *node)
{
node->pprev = NULL;
}
-static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
node->pprev = NULL;
}
-static __inline__ void __sk_del_node(struct sock *sk)
+static inline void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->sk_node);
}
/* NB: equivalent to hlist_del_init_rcu */
-static __inline__ int __sk_del_node_init(struct sock *sk)
+static inline bool __sk_del_node_init(struct sock *sk)
{
if (sk_hashed(sk)) {
__sk_del_node(sk);
sk_node_init(&sk->sk_node);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/* Grab socket reference count. This operation is valid only
@@ -470,9 +510,9 @@ static inline void __sock_put(struct sock *sk)
atomic_dec(&sk->sk_refcnt);
}
-static __inline__ int sk_del_node_init(struct sock *sk)
+static inline bool sk_del_node_init(struct sock *sk)
{
- int rc = __sk_del_node_init(sk);
+ bool rc = __sk_del_node_init(sk);
if (rc) {
/* paranoid for a while -acme */
@@ -483,18 +523,18 @@ static __inline__ int sk_del_node_init(struct sock *sk)
}
#define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
-static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
if (sk_hashed(sk)) {
hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
-static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
+static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
- int rc = __sk_nulls_del_node_init_rcu(sk);
+ bool rc = __sk_nulls_del_node_init_rcu(sk);
if (rc) {
/* paranoid for a while -acme */
@@ -504,40 +544,40 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
return rc;
}
-static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
hlist_add_head(&sk->sk_node, list);
}
-static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
__sk_add_node(sk, list);
}
-static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
hlist_add_head_rcu(&sk->sk_node, list);
}
-static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
-static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
__sk_nulls_add_node_rcu(sk, list);
}
-static __inline__ void __sk_del_bind_node(struct sock *sk)
+static inline void __sk_del_bind_node(struct sock *sk)
{
__hlist_del(&sk->sk_bind_node);
}
-static __inline__ void sk_add_bind_node(struct sock *sk,
+static inline void sk_add_bind_node(struct sock *sk,
struct hlist_head *list)
{
hlist_add_head(&sk->sk_bind_node, list);
@@ -590,6 +630,10 @@ enum sock_flags {
SOCK_RXQ_OVFL,
SOCK_ZEROCOPY, /* buffers from userspace */
SOCK_WIFI_STATUS, /* push wifi status to userspace */
+ SOCK_NOFCS, /* Tell NIC not to append the Ethernet FCS.
+ * The last 4 bytes of the packet sent from
+ * user space are used as the FCS instead.
+ */
};
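From user space the flag is reachable through a socket option; assuming the `SO_NOFCS` constant from the same patch series (value 43 in asm-generic at the time, worth verifying locally), enabling it might look like this:

```c
#include <sys/socket.h>

#ifndef SO_NOFCS
#define SO_NOFCS 43	/* value used by the same patch series; verify locally */
#endif

/* Ask the kernel/NIC to skip FCS generation; the last 4 bytes of each
 * packet we send are then transmitted in place of the computed FCS. */
static int enable_nofcs(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one));
}
```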
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
@@ -607,7 +651,7 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
__clear_bit(flag, &sk->sk_flags);
}
-static inline int sock_flag(struct sock *sk, enum sock_flags flag)
+static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
return test_bit(flag, &sk->sk_flags);
}
@@ -622,7 +666,7 @@ static inline void sk_acceptq_added(struct sock *sk)
sk->sk_ack_backlog++;
}
-static inline int sk_acceptq_is_full(struct sock *sk)
+static inline bool sk_acceptq_is_full(const struct sock *sk)
{
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
@@ -630,19 +674,19 @@ static inline int sk_acceptq_is_full(struct sock *sk)
/*
* Compute minimal free write space needed to queue new packets.
*/
-static inline int sk_stream_min_wspace(struct sock *sk)
+static inline int sk_stream_min_wspace(const struct sock *sk)
{
return sk->sk_wmem_queued >> 1;
}
-static inline int sk_stream_wspace(struct sock *sk)
+static inline int sk_stream_wspace(const struct sock *sk)
{
return sk->sk_sndbuf - sk->sk_wmem_queued;
}
extern void sk_stream_write_space(struct sock *sk);
-static inline int sk_stream_memory_free(struct sock *sk)
+static inline bool sk_stream_memory_free(const struct sock *sk)
{
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
@@ -667,17 +711,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
* Do not take into account this skb truesize,
* to allow even a single big packet to come.
*/
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+ unsigned int limit)
{
unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
- return qsize > sk->sk_rcvbuf;
+ return qsize > limit;
}
/* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+ unsigned int limit)
{
- if (sk_rcvqueues_full(sk, skb))
+ if (sk_rcvqueues_full(sk, skb, limit))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
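A sketch of a caller after this signature change. Callers converted in the same series pass a limit derived from the receive buffer, so `sk->sk_rcvbuf` is used here as a representative, not authoritative, choice; exact limits vary per protocol.

```c
/* Sketch of a protocol receive path using the new limit argument. */
static int proto_rcv_sketch(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk_backlog_rcv(sk, skb);	/* process in softirq context */
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
		rc = -ENOBUFS;			/* over limit: caller drops skb */
	bh_unlock_sock(sk);
	return rc;
}
```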
@@ -764,26 +810,26 @@ struct module;
* transport -> network interface is defined by struct inet_proto
*/
struct proto {
- void (*close)(struct sock *sk,
+ void (*close)(struct sock *sk,
long timeout);
int (*connect)(struct sock *sk,
- struct sockaddr *uaddr,
+ struct sockaddr *uaddr,
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
- struct sock * (*accept) (struct sock *sk, int flags, int *err);
+ struct sock * (*accept)(struct sock *sk, int flags, int *err);
int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);
int (*init)(struct sock *sk);
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
- int (*setsockopt)(struct sock *sk, int level,
+ int (*setsockopt)(struct sock *sk, int level,
int optname, char __user *optval,
unsigned int optlen);
- int (*getsockopt)(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *option);
+ int (*getsockopt)(struct sock *sk, int level,
+ int optname, char __user *optval,
+ int __user *option);
#ifdef CONFIG_COMPAT
int (*compat_setsockopt)(struct sock *sk,
int level,
@@ -800,14 +846,14 @@ struct proto {
struct msghdr *msg, size_t len);
int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg,
- size_t len, int noblock, int flags,
- int *addr_len);
+ size_t len, int noblock, int flags,
+ int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
- int (*bind)(struct sock *sk,
+ int (*bind)(struct sock *sk,
struct sockaddr *uaddr, int addr_len);
- int (*backlog_rcv) (struct sock *sk,
+ int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
/* Keeping track of sk's, looking them up, and port selection methods. */
@@ -869,20 +915,30 @@ struct proto {
* This function has to setup any files the protocol want to
* appear in the kmem cgroup filesystem.
*/
- int (*init_cgroup)(struct cgroup *cgrp,
+ int (*init_cgroup)(struct mem_cgroup *memcg,
struct cgroup_subsys *ss);
- void (*destroy_cgroup)(struct cgroup *cgrp,
- struct cgroup_subsys *ss);
+ void (*destroy_cgroup)(struct mem_cgroup *memcg);
struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
struct cg_proto {
void (*enter_memory_pressure)(struct sock *sk);
struct res_counter *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
int *memory_pressure;
long *sysctl_mem;
+ unsigned long flags;
/*
* memcg field is used to find which memcg we belong directly
* Each memcg struct can hold more than one cg_proto, so container_of
@@ -898,6 +954,16 @@ struct cg_proto {
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
+static inline bool memcg_proto_active(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+}
+
+static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
+}
+
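The two bits exist so that the static key bumped at first activation is released exactly once. A hedged sketch of the set side (the real logic lives in the memcg/tcp_memcontrol code, not in this header):

```c
/* Hedged sketch of the activation side of these bits. ACTIVE may be
 * cleared again when the limit is removed; ACTIVATED stays set so the
 * static key incremented on first activation is decremented exactly
 * once at destruction.
 */
static void cg_proto_activate_sketch(struct cg_proto *cg_proto)
{
	if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
		static_key_slow_inc(&memcg_socket_limit_enabled);
	set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}
```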
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
@@ -924,13 +990,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
#endif /* SOCK_REFCNT_DEBUG */
#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
-extern struct jump_label_key memcg_socket_limit_enabled;
+extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
struct cg_proto *cg_proto)
{
return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
@@ -1098,9 +1164,9 @@ sk_sockets_allocated_read_positive(struct sock *sk)
struct proto *prot = sk->sk_prot;
if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
- return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);
+ return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
- return percpu_counter_sum_positive(prot->sockets_allocated);
+ return percpu_counter_read_positive(prot->sockets_allocated);
}
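The swap above trades precision for cost: `percpu_counter_sum_positive()` folds every CPU's local delta under a lock, while `percpu_counter_read_positive()` returns the cached global count in O(1). For a heuristic such as a socket-pressure check the approximate read suffices; a minimal sketch:

```c
#include <linux/percpu_counter.h>

/* Approximate, lock-free check; adequate for a pressure heuristic. */
static bool sockets_under_limit_sketch(struct percpu_counter *allocated,
				       s64 limit)
{
	return percpu_counter_read_positive(allocated) < limit;
}
```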
static inline int
@@ -1129,7 +1195,7 @@ proto_memory_pressure(struct proto *prot)
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
int inc)
{
}
@@ -1216,24 +1282,24 @@ static inline int sk_mem_pages(int amt)
return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
-static inline int sk_has_account(struct sock *sk)
+static inline bool sk_has_account(struct sock *sk)
{
/* return true if protocol supports memory accounting */
return !!sk->sk_prot->memory_allocated;
}
-static inline int sk_wmem_schedule(struct sock *sk, int size)
+static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
if (!sk_has_account(sk))
- return 1;
+ return true;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, SK_MEM_SEND);
}
-static inline int sk_rmem_schedule(struct sock *sk, int size)
+static inline bool sk_rmem_schedule(struct sock *sk, int size)
{
if (!sk_has_account(sk))
- return 1;
+ return true;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, SK_MEM_RECV);
}
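These predicates support the usual charge-before-queue pattern; a minimal sketch of a receive path that respects the accounting (function name hypothetical):

```c
/* Minimal charge-before-queue sketch built on sk_rmem_schedule(). */
static int queue_rx_sketch(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_rmem_schedule(sk, skb->truesize))
		return -ENOBUFS;	/* no forward allocation available */

	skb_set_owner_r(skb, sk);	/* charge sk_rmem_alloc / forward_alloc */
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	return 0;
}
```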
@@ -1298,7 +1364,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
* Mark both the sk_lock and the sk_lock.slock as a
* per-address-family lock class.
*/
-#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
do { \
sk->sk_lock.owned = 0; \
init_waitqueue_head(&sk->sk_lock.wq); \
@@ -1306,7 +1372,7 @@ do { \
debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
sizeof((sk)->sk_lock)); \
lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
- (skey), (sname)); \
+ (skey), (sname)); \
lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
} while (0)
@@ -1366,13 +1432,13 @@ extern int sock_setsockopt(struct socket *sock, int level,
unsigned int optlen);
extern int sock_getsockopt(struct socket *sock, int level,
- int op, char __user *optval,
+ int op, char __user *optval,
int __user *optlen);
-extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
+extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
unsigned long size,
int noblock,
int *errcode);
-extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
+extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
unsigned long header_len,
unsigned long data_len,
int noblock,
@@ -1394,7 +1460,7 @@ static inline void sock_update_classid(struct sock *sk)
* Functions to fill in entries in struct proto_ops when a protocol
* does not implement a particular function.
*/
-extern int sock_no_bind(struct socket *,
+extern int sock_no_bind(struct socket *,
struct sockaddr *, int);
extern int sock_no_connect(struct socket *,
struct sockaddr *, int, int);
@@ -1423,7 +1489,7 @@ extern int sock_no_mmap(struct file *file,
struct vm_area_struct *vma);
extern ssize_t sock_no_sendpage(struct socket *sock,
struct page *page,
- int offset, size_t size,
+ int offset, size_t size,
int flags);
/*
@@ -1446,7 +1512,7 @@ extern void sk_common_release(struct sock *sk);
/*
* Default socket callbacks and setup code
*/
-
+
/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
@@ -1646,7 +1712,7 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
-static inline int sk_can_gso(const struct sock *sk)
+static inline bool sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}
@@ -1763,7 +1829,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk)
*
* Returns true if socket has write or read allocations
*/
-static inline int sk_has_allocations(const struct sock *sk)
+static inline bool sk_has_allocations(const struct sock *sk)
{
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
@@ -1802,9 +1868,7 @@ static inline int sk_has_allocations(const struct sock *sk)
*/
static inline bool wq_has_sleeper(struct socket_wq *wq)
{
-
- /*
- * We need to be sure we are in sync with the
+ /* We need to be sure we are in sync with the
* add_wait_queue modifications to the wait queue.
*
* This memory barrier is paired in the sock_poll_wait.
@@ -1824,24 +1888,23 @@ static inline bool wq_has_sleeper(struct socket_wq *wq)
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
{
- if (p && wait_address) {
+ if (!poll_does_not_wait(p) && wait_address) {
poll_wait(filp, wait_address, p);
- /*
- * We need to be sure we are in sync with the
+ /* We need to be sure we are in sync with the
* socket flags modification.
*
* This memory barrier is paired in the wq_has_sleeper.
- */
+ */
smp_mb();
}
}
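The pairing these two comments describe, reduced to a sketch: each side writes its state, executes a full barrier, then reads the other side's state, so at least one of them observes the other. `sock_readable_sketch()` is a hypothetical condition re-check.

```c
/* Waker side: publish the event, then (inside wq_has_sleeper) smp_mb(),
 * then look for sleepers. */
static void waker_sketch(struct socket_wq *wq)
{
	if (wq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
}

/* Sleeper side: register on the wait queue, then (inside sock_poll_wait)
 * smp_mb(), then re-check the condition the waker may have changed. */
static unsigned int poll_sketch(struct file *filp, struct sock *sk,
				poll_table *p)
{
	sock_poll_wait(filp, sk_sleep(sk), p);
	return sock_readable_sketch(sk);	/* hypothetical re-check */
}
```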
/*
- * Queue a received datagram if it will fit. Stream and sequenced
+ * Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in
* and play with them.
*
- * Inlined as it's very short and called for pretty much every
+ * Inlined as it's very short and called for pretty much every
* packet ever received.
*/
@@ -1867,10 +1930,10 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk_mem_charge(sk, skb->truesize);
}
-extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
unsigned long expires);
-extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
+extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
@@ -1879,7 +1942,7 @@ extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
/*
* Recover an error report and clear atomically
*/
-
+
static inline int sock_error(struct sock *sk)
{
int err;
@@ -1895,7 +1958,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
- if (amt < 0)
+ if (amt < 0)
amt = 0;
}
return amt;
@@ -1939,7 +2002,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
-static inline int sock_writeable(const struct sock *sk)
+static inline bool sock_writeable(const struct sock *sk)
{
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}
@@ -1949,12 +2012,12 @@ static inline gfp_t gfp_any(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
-static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
+static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
}
-static inline long sock_sndtimeo(const struct sock *sk, int noblock)
+static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : sk->sk_sndtimeo;
}
@@ -1977,7 +2040,7 @@ extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
-static __inline__ void
+static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
ktime_t kt = skb->tstamp;
@@ -2018,7 +2081,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
(1UL << SOCK_RCVTSTAMP) | \
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
(1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
- (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
+ (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
(1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
if (sk->sk_flags & FLAGS_TS_OR_DROPS)
@@ -2047,7 +2110,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
* locked so that the sk_buff queue operation is ok.
*/
#ifdef CONFIG_NET_DMA
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
if (!copied_early)
@@ -2056,7 +2119,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
-static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
@@ -2103,8 +2166,8 @@ extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);
-/*
- * Enable debug/info messages
+/*
+ * Enable debug/info messages
*/
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \