Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 282
1 file changed, 137 insertions, 145 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 1d37a8086be..e3a18ff0c38 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
  */
 struct sock_common {
 	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
-	 * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
+	 * address on 64bit arches : cf INET_MATCH()
 	 */
 	union {
 		__addrpair	skc_addrpair;
@@ -191,6 +191,12 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
 	struct net	*skc_net;
 #endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+	struct in6_addr		skc_v6_daddr;
+	struct in6_addr		skc_v6_rcv_saddr;
+#endif
+
 	/*
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
@@ -218,7 +224,7 @@ struct cg_proto;
  * @sk_lock: synchronizer
  * @sk_rcvbuf: size of receive buffer in bytes
  * @sk_wq: sock wait queue and async head
- * @sk_rx_dst: receive input route used by early tcp demux
+ * @sk_rx_dst: receive input route used by early demux
  * @sk_dst_cache: destination cache
  * @sk_dst_lock: destination cache lock
  * @sk_policy: flow policy
@@ -233,6 +239,7 @@ struct cg_proto;
  * @sk_ll_usec: usecs to busypoll when there is no data
  * @sk_allocation: allocation mode
  * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+ * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
  * @sk_sndbuf: size of send buffer in bytes
  * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *	      %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +306,12 @@ struct sock {
 #define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
 #define sk_dontcopy_end		__sk_common.skc_dontcopy_end
 #define sk_hash			__sk_common.skc_hash
+#define sk_portpair		__sk_common.skc_portpair
+#define sk_num			__sk_common.skc_num
+#define sk_dport		__sk_common.skc_dport
+#define sk_addrpair		__sk_common.skc_addrpair
+#define sk_daddr		__sk_common.skc_daddr
+#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
 #define sk_family		__sk_common.skc_family
 #define sk_state		__sk_common.skc_state
 #define sk_reuse		__sk_common.skc_reuse
@@ -307,6 +320,9 @@ struct sock {
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
+#define sk_v6_daddr		__sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
+
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
 	/*
@@ -363,6 +379,7 @@ struct sock {
 	int			sk_wmem_queued;
 	gfp_t			sk_allocation;
 	u32			sk_pacing_rate; /* bytes per second */
+	u32			sk_max_pacing_rate;
 	netdev_features_t	sk_route_caps;
 	netdev_features_t	sk_route_nocaps;
 	int			sk_gso_type;
@@ -751,7 +768,7 @@ static inline int sk_stream_wspace(const struct sock *sk)
 	return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
-extern void sk_stream_write_space(struct sock *sk);
+void sk_stream_write_space(struct sock *sk);
 
 /* OOB backlog add */
 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -793,7 +810,7 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
 	return 0;
 }
 
-extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
@@ -858,15 +875,15 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
 	__rc;							\
 })
 
-extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
-extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
-extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
-extern int sk_stream_error(struct sock *sk, int flags, int err);
-extern void sk_stream_kill_queues(struct sock *sk);
-extern void sk_set_memalloc(struct sock *sk);
-extern void sk_clear_memalloc(struct sock *sk);
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
 
-extern int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo);
 
 struct request_sock_ops;
 struct timewait_sock_ops;
@@ -1019,10 +1036,10 @@ enum cg_proto_flags {
 
 struct cg_proto {
 	void			(*enter_memory_pressure)(struct sock *sk);
-	struct res_counter	*memory_allocated;	/* Current allocated memory. */
-	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
-	int			*memory_pressure;
-	long			*sysctl_mem;
+	struct res_counter	memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	sockets_allocated;	/* Current number of sockets. */
+	int			memory_pressure;
+	long			sysctl_mem[3];
 	unsigned long		flags;
 	/*
 	 * memcg field is used to find which memcg we belong directly
@@ -1036,8 +1053,8 @@ struct cg_proto {
 	struct mem_cgroup	*memcg;
 };
 
-extern int proto_register(struct proto *prot, int alloc_slab);
-extern void proto_unregister(struct proto *prot);
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
 
 static inline bool memcg_proto_active(struct cg_proto *cg_proto)
 {
@@ -1118,7 +1135,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 		return false;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!*sk->sk_cgrp->memory_pressure;
+		return !!sk->sk_cgrp->memory_pressure;
 
 	return !!*sk->sk_prot->memory_pressure;
 }
@@ -1138,8 +1155,8 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
 		struct proto *prot = sk->sk_prot;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			if (*cg_proto->memory_pressure)
-				*cg_proto->memory_pressure = 0;
+			if (cg_proto->memory_pressure)
+				cg_proto->memory_pressure = 0;
 	}
 }
 
@@ -1175,7 +1192,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 	struct res_counter *fail;
 	int ret;
 
-	ret = res_counter_charge_nofail(prot->memory_allocated,
+	ret = res_counter_charge_nofail(&prot->memory_allocated,
 					amt << PAGE_SHIFT, &fail);
 	if (ret < 0)
 		*parent_status = OVER_LIMIT;
@@ -1184,13 +1201,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
 					      unsigned long amt)
 {
-	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+	res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
 }
 
 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
 {
 	u64 ret;
-	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+	ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
 	return ret >> PAGE_SHIFT;
 }
 
@@ -1238,7 +1255,7 @@ static inline void sk_sockets_allocated_dec(struct sock *sk)
 		struct cg_proto *cg_proto = sk->sk_cgrp;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			percpu_counter_dec(cg_proto->sockets_allocated);
+			percpu_counter_dec(&cg_proto->sockets_allocated);
 	}
 
 	percpu_counter_dec(prot->sockets_allocated);
@@ -1252,7 +1269,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
 		struct cg_proto *cg_proto = sk->sk_cgrp;
 
 		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-			percpu_counter_inc(cg_proto->sockets_allocated);
+			percpu_counter_inc(&cg_proto->sockets_allocated);
 	}
 
 	percpu_counter_inc(prot->sockets_allocated);
@@ -1264,7 +1281,7 @@ sk_sockets_allocated_read_positive(struct sock *sk)
 	struct proto *prot = sk->sk_prot;
 
 	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+		return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
 
 	return percpu_counter_read_positive(prot->sockets_allocated);
 }
@@ -1292,8 +1309,8 @@ proto_memory_pressure(struct proto *prot)
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
 		int inc)
@@ -1369,8 +1386,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 /*
  * Functions for memory accounting
  */
-extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reclaim(struct sock *sk);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1478,14 +1495,14 @@ do { \
 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
 } while (0)
 
-extern void lock_sock_nested(struct sock *sk, int subclass);
+void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
 {
 	lock_sock_nested(sk, 0);
 }
 
-extern void release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
@@ -1494,7 +1511,7 @@ extern void release_sock(struct sock *sk);
 				SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
-extern bool lock_sock_fast(struct sock *sk);
+bool lock_sock_fast(struct sock *sk);
 /**
  * unlock_sock_fast - complement of lock_sock_fast
  * @sk: socket
@@ -1512,108 +1529,84 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 }
 
 
-extern struct sock		*sk_alloc(struct net *net, int family,
-					  gfp_t priority,
-					  struct proto *prot);
-extern void			sk_free(struct sock *sk);
-extern void			sk_release_kernel(struct sock *sk);
-extern struct sock		*sk_clone_lock(const struct sock *sk,
-					       const gfp_t priority);
-
-extern struct sk_buff		*sock_wmalloc(struct sock *sk,
-					      unsigned long size, int force,
-					      gfp_t priority);
-extern struct sk_buff		*sock_rmalloc(struct sock *sk,
-					      unsigned long size, int force,
-					      gfp_t priority);
-extern void			sock_wfree(struct sk_buff *skb);
-extern void			skb_orphan_partial(struct sk_buff *skb);
-extern void			sock_rfree(struct sk_buff *skb);
-extern void			sock_edemux(struct sk_buff *skb);
-
-extern int			sock_setsockopt(struct socket *sock, int level,
-						int op, char __user *optval,
-						unsigned int optlen);
-
-extern int			sock_getsockopt(struct socket *sock, int level,
-						int op, char __user *optval,
-						int __user *optlen);
-extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
-						     unsigned long size,
-						     int noblock,
-						     int *errcode);
-extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
-						      unsigned long header_len,
-						      unsigned long data_len,
-						      int noblock,
-						      int *errcode,
-						      int max_page_order);
-extern void *sock_kmalloc(struct sock *sk, int size,
-			  gfp_t priority);
-extern void sock_kfree_s(struct sock *sk, void *mem, int size);
-extern void sk_send_sigurg(struct sock *sk);
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+		      struct proto *prot);
+void sk_free(struct sock *sk);
+void sk_release_kernel(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority);
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+			     gfp_t priority);
+void sock_wfree(struct sk_buff *skb);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_edemux(struct sk_buff *skb);
+
+int sock_setsockopt(struct socket *sock, int level, int op,
+		    char __user *optval, unsigned int optlen);
+
+int sock_getsockopt(struct socket *sock, int level, int op,
+		    char __user *optval, int __user *optlen);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+				    int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+				     unsigned long data_len, int noblock,
+				     int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
  */
-extern int			sock_no_bind(struct socket *,
-					     struct sockaddr *, int);
-extern int			sock_no_connect(struct socket *,
-						struct sockaddr *, int, int);
-extern int			sock_no_socketpair(struct socket *,
-						   struct socket *);
-extern int			sock_no_accept(struct socket *,
-					       struct socket *, int);
-extern int			sock_no_getname(struct socket *,
-						struct sockaddr *, int *, int);
-extern unsigned int		sock_no_poll(struct file *, struct socket *,
-					     struct poll_table_struct *);
-extern int			sock_no_ioctl(struct socket *, unsigned int,
-					      unsigned long);
-extern int			sock_no_listen(struct socket *, int);
-extern int			sock_no_shutdown(struct socket *, int);
-extern int			sock_no_getsockopt(struct socket *, int , int,
-						   char __user *, int __user *);
-extern int			sock_no_setsockopt(struct socket *, int, int,
-						   char __user *, unsigned int);
-extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
-						struct msghdr *, size_t);
-extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
-						struct msghdr *, size_t, int);
-extern int			sock_no_mmap(struct file *file,
-					     struct socket *sock,
-					     struct vm_area_struct *vma);
-extern ssize_t			sock_no_sendpage(struct socket *sock,
-						 struct page *page,
-						 int offset, size_t size,
-						 int flags);
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
+unsigned int sock_no_poll(struct file *, struct socket *,
+			  struct poll_table_struct *);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+		    int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+		 struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+			 size_t size, int flags);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * uses the inet style.
  */
-extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, int __user *optlen);
-extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
 			       struct msghdr *msg, size_t size, int flags);
-extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
 				  char __user *optval, unsigned int optlen);
-extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+int compat_sock_common_getsockopt(struct socket *sock, int level,
 		int optname, char __user *optval, int __user *optlen);
-extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+int compat_sock_common_setsockopt(struct socket *sock, int level,
 		int optname, char __user *optval, unsigned int optlen);
-extern void sk_common_release(struct sock *sk);
+void sk_common_release(struct sock *sk);
 
 /*
  * Default socket callbacks and setup code
  */
 
 /* Initialise core socket variables */
-extern void sock_init_data(struct socket *sock, struct sock *sk);
+void sock_init_data(struct socket *sock, struct sock *sk);
 
-extern void sk_filter_release_rcu(struct rcu_head *rcu);
+void sk_filter_release_rcu(struct rcu_head *rcu);
 
 /**
  * sk_filter_release - release a socket filter
@@ -1630,16 +1623,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-	unsigned int size = sk_filter_len(fp);
-
-	atomic_sub(size, &sk->sk_omem_alloc);
+	atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 	sk_filter_release(fp);
 }
 
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
 	atomic_inc(&fp->refcnt);
-	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+	atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 }
 
 /*
@@ -1673,9 +1664,12 @@ static inline void sock_put(struct sock *sk)
 	if (atomic_dec_and_test(&sk->sk_refcnt))
 		sk_free(sk);
 }
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
 
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
-			  const int nested);
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
@@ -1729,8 +1723,8 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
 	write_unlock_bh(&sk->sk_callback_lock);
 }
 
-extern kuid_t sock_i_uid(struct sock *sk);
-extern unsigned long sock_i_ino(struct sock *sk);
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
 
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
@@ -1752,8 +1746,6 @@ sk_dst_get(struct sock *sk)
 	return dst;
 }
 
-extern void sk_reset_txq(struct sock *sk);
-
 static inline void dst_negative_advice(struct sock *sk)
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1763,7 +1755,7 @@ static inline void dst_negative_advice(struct sock *sk)
 
 		if (ndst != dst) {
 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
-			sk_reset_txq(sk);
+			sk_tx_queue_clear(sk);
 		}
 	}
 }
@@ -1805,16 +1797,16 @@ sk_dst_reset(struct sock *sk)
 	spin_unlock(&sk->sk_dst_lock);
 }
 
-extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
-extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
 static inline bool sk_can_gso(const struct sock *sk)
 {
 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
 
-extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
@@ -2027,14 +2019,14 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	sk_mem_charge(sk, skb->truesize);
 }
 
-extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
-			   unsigned long expires);
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+		    unsigned long expires);
 
-extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
-extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  * Recover an error report and clear atomically
@@ -2102,7 +2094,7 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
 	return &sk->sk_frag;
 }
 
-extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
 /*
  * Default write policy as shown to user space via poll/select/SIGIO
@@ -2140,10 +2132,10 @@ static inline int sock_intr_errno(long timeo)
 	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
-extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
-				  struct sk_buff *skb);
-extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
-				    struct sk_buff *skb);
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+			   struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+			     struct sk_buff *skb);
 
 static inline void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -2176,8 +2168,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
 		__sock_recv_wifi_status(msg, sk, skb);
 }
 
-extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
-				     struct sk_buff *skb);
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+			      struct sk_buff *skb);
 
 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					   struct sk_buff *skb)
@@ -2202,7 +2194,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
  *
  * Currently only depends on SOCK_TIMESTAMPING* flags.
  */
-extern void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
 
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
@@ -2266,11 +2258,11 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
 	return NULL;
 }
 
-extern void sock_enable_timestamp(struct sock *sk, int flag);
-extern int sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int sock_get_timestampns(struct sock *, struct timespec __user *);
-extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
-			      int level, int type);
+void sock_enable_timestamp(struct sock *sk, int flag);
+int sock_get_timestamp(struct sock *, struct timeval __user *);
+int sock_get_timestampns(struct sock *, struct timespec __user *);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+		       int type);
 
 /*
  * Enable debug/info messages
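
The first hunks above move the IPv6 addresses into struct sock_common and then expose them through the sk_v6_daddr/sk_v6_rcv_saddr defines, so protocol code can keep writing sk->sk_v6_daddr while the storage actually lives in __sk_common. The stand-alone sketch below illustrates that aliasing pattern with simplified stand-in types (in6_addr_stub and sock_stub are hypothetical, not the kernel's structures):

/* Sketch of the field-aliasing pattern used above: the addresses live in
 * the shared "common" struct, and #defines expose them as if they were
 * direct members of the outer struct.  Stand-in types, not the kernel's.
 */
#include <stdio.h>

struct in6_addr_stub {
	unsigned char s6_addr[16];
};

struct sock_common_stub {
	struct in6_addr_stub skc_v6_daddr;
	struct in6_addr_stub skc_v6_rcv_saddr;
};

struct sock_stub {
	struct sock_common_stub __sk_common;
#define sk_v6_daddr	__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
};

int main(void)
{
	static struct sock_stub sk;	/* zero-initialised */

	/* sk.sk_v6_daddr expands to sk.__sk_common.skc_v6_daddr */
	sk.sk_v6_daddr.s6_addr[0] = 0xfe;
	printf("%02x\n", sk.__sk_common.skc_v6_daddr.s6_addr[0]);
	return 0;
}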
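The new sk_max_pacing_rate field and its %SO_MAX_PACING_RATE kerneldoc entry back a per-socket cap on the pacing rate already tracked in sk_pacing_rate (bytes per second). Assuming a kernel that carries this change and headers that define SO_MAX_PACING_RATE, userspace would set the cap roughly like this (the fallback value 47 and the 1 MB/s figure are illustrative assumptions):

/* Sketch: capping a TCP socket's pacing rate with SO_MAX_PACING_RATE
 * (bytes per second).  Assumes a kernel with this patch.
 */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* assumed asm-generic value */
#endif

int main(void)
{
	unsigned int rate = 1000000;	/* illustrative 1 MB/s cap */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
				 &rate, sizeof(rate)) < 0) {
		perror("SO_MAX_PACING_RATE");
		return 1;
	}
	return 0;
}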
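Most of the remaining churn is mechanical: the extern keyword is dropped from function declarations throughout the header. In C, a file-scope function declaration has external linkage whether or not extern is spelled out, so the old and new prototypes are equivalent. The hypothetical example below (not from sock.h) shows both forms declaring the same function:

/* Hypothetical demo: 'extern' is implicit on function declarations, so both
 * prototypes declare the same function with external linkage; dropping the
 * keyword changes nothing for the compiler.
 */
#include <stdio.h>

extern int add(int a, int b);	/* style removed by this patch */
int add(int a, int b);		/* style used after this patch */

int add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d\n", add(2, 3));	/* prints 5 either way */
	return 0;
}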
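The struct cg_proto change replaces pointers to separately allocated counters with embedded members, which is why the memcg_* helpers above now pass &prot->memory_allocated and test the memory_pressure int directly. A generic sketch of the two layouts, using stand-in types rather than the kernel's res_counter/percpu_counter:

/* Generic sketch of the cg_proto layout change: members embedded in the
 * struct instead of pointed to, so callers take the member's address and
 * test the int directly.  Stand-in types, not the kernel's counters.
 */
#include <stdio.h>

struct counter { long usage; };

struct cg_old {			/* before: pointers to external objects */
	struct counter *memory_allocated;
	int *memory_pressure;
};

struct cg_new {			/* after: members embedded in the struct */
	struct counter memory_allocated;
	int memory_pressure;
};

static void charge(struct counter *c, long amt)
{
	c->usage += amt;
}

int main(void)
{
	static struct counter shared;
	static int pressure;
	struct cg_old old = { &shared, &pressure };
	struct cg_new new = { { 0 }, 0 };

	charge(old.memory_allocated, 4096);	/* pointer: pass as-is */
	charge(&new.memory_allocated, 4096);	/* embedded: take the address */

	if (*old.memory_pressure)		/* old: dereference, then test */
		*old.memory_pressure = 0;
	if (new.memory_pressure)		/* new: test the int directly */
		new.memory_pressure = 0;

	printf("%ld %ld\n", shared.usage, new.memory_allocated.usage);
	return 0;
}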