Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  56
1 file changed, 43 insertions(+), 13 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index bb972d254df..04bc0b30e9e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -55,6 +55,9 @@
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>
+#include <linux/static_key.h>
+#include <linux/aio.h>
+#include <linux/sched.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
@@ -68,7 +71,7 @@ struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp);
#else
static inline
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
@@ -76,7 +79,7 @@ int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
return 0;
}
static inline
-void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
{
}
#endif
@@ -226,6 +229,7 @@ struct cg_proto;
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_priority: %SO_PRIORITY setting
+ * @sk_cgrp_prioidx: socket group's priority map index
* @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family
* @sk_peer_pid: &struct pid for this socket's peer
@@ -355,6 +359,7 @@ struct sock {
struct page *sk_sndmsg_page;
struct sk_buff *sk_send_head;
__u32 sk_sndmsg_off;
+ __s32 sk_peek_off;
int sk_write_pending;
#ifdef CONFIG_SECURITY
void *sk_security;
@@ -371,6 +376,30 @@ struct sock {
void (*sk_destruct)(struct sock *sk);
};
+static inline int sk_peek_offset(struct sock *sk, int flags)
+{
+ if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
+ return sk->sk_peek_off;
+ else
+ return 0;
+}
+
+static inline void sk_peek_offset_bwd(struct sock *sk, int val)
+{
+ if (sk->sk_peek_off >= 0) {
+ if (sk->sk_peek_off >= val)
+ sk->sk_peek_off -= val;
+ else
+ sk->sk_peek_off = 0;
+ }
+}
+
+static inline void sk_peek_offset_fwd(struct sock *sk, int val)
+{
+ if (sk->sk_peek_off >= 0)
+ sk->sk_peek_off += val;
+}
+
/*
* Hashed lists helper routines
*/
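
The three helpers added above back the new SO_PEEK_OFF behaviour: sk_peek_off starts out disabled (negative) and, once enabled, tracks how far successive MSG_PEEK reads have advanced into the queued data. A minimal sketch, not part of this patch and with illustrative names, of how a datagram recvmsg() path might use them (assumes the usual <net/sock.h> / <linux/skbuff.h> context):

/* Sketch only: copy from an skb honouring the stored peek offset. */
static int example_peek_aware_recv(struct sock *sk, struct sk_buff *skb,
				   struct msghdr *msg, size_t len, int flags)
{
	int off = sk_peek_offset(sk, flags);	/* 0 unless MSG_PEEK + SO_PEEK_OFF */
	int copied = min_t(int, len, skb->len - off);
	int err;

	err = skb_copy_datagram_iovec(skb, off, msg->msg_iov, copied);
	if (err)
		return err;

	if (flags & MSG_PEEK)
		sk_peek_offset_fwd(sk, copied);	/* next peek starts past this data */
	else
		sk_peek_offset_bwd(sk, copied);	/* data consumed, pull offset back */

	return copied;
}
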
@@ -588,6 +617,10 @@ enum sock_flags {
SOCK_RXQ_OVFL,
SOCK_ZEROCOPY, /* buffers from userspace */
SOCK_WIFI_STATUS, /* push wifi status to userspace */
+ SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
+ * Will use last 4 bytes of packet sent from
+ * user-space instead.
+ */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
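
SOCK_NOFCS pairs with a new SO_NOFCS socket option introduced elsewhere in this series. A rough user-space sketch of enabling it on a socket; the fallback value of 43 is an assumption, prefer the definition from your installed headers:

#include <sys/socket.h>
#include <stdio.h>

#ifndef SO_NOFCS
#define SO_NOFCS 43	/* assumption: value from the matching uapi change */
#endif

/* Ask the NIC to skip FCS generation; the last 4 bytes of each
 * transmitted frame are then taken from user space as-is.
 */
static int enable_nofcs(int fd)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_NOFCS)");
		return -1;
	}
	return 0;
}
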
@@ -869,8 +902,7 @@ struct proto {
*/
int (*init_cgroup)(struct cgroup *cgrp,
struct cgroup_subsys *ss);
- void (*destroy_cgroup)(struct cgroup *cgrp,
- struct cgroup_subsys *ss);
+ void (*destroy_cgroup)(struct cgroup *cgrp);
struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};
@@ -921,14 +953,14 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
-extern struct jump_label_key memcg_socket_limit_enabled;
+#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
+extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
struct cg_proto *cg_proto)
{
return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
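
This hunk tracks the jump-label API rework: struct jump_label_key becomes struct static_key, and the fast-path test that was static_branch() is now static_key_false(). A short sketch of the pattern with illustrative names, not taken from this patch:

#include <linux/static_key.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

/* Fast path: compiles to a straight-line no-op until the key is enabled. */
static inline bool example_feature_enabled(void)
{
	return static_key_false(&example_key);
}

/* Slow path: flipping the key patches the branch sites at runtime. */
static void example_feature_set(bool on)
{
	if (on)
		static_key_slow_inc(&example_key);
	else
		static_key_slow_dec(&example_key);
}
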
@@ -1007,9 +1039,8 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
struct res_counter *fail;
int ret;
- ret = res_counter_charge(prot->memory_allocated,
- amt << PAGE_SHIFT, &fail);
-
+ ret = res_counter_charge_nofail(prot->memory_allocated,
+ amt << PAGE_SHIFT, &fail);
if (ret < 0)
*parent_status = OVER_LIMIT;
}
@@ -1053,12 +1084,11 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
}
static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+sk_memory_allocated_sub(struct sock *sk, int amt)
{
struct proto *prot = sk->sk_prot;
- if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
- parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+ if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
memcg_memory_allocated_sub(sk->sk_cgrp, amt);
atomic_long_sub(amt, prot->memory_allocated);