author	Eric Dumazet <eric.dumazet@gmail.com>	2009-07-08 19:36:05 +0000
committer	David S. Miller <davem@davemloft.net>	2009-07-11 20:26:19 -0700
commit	e912b1142be8f1e2c71c71001dc992c6e5eb2ec1 (patch)
tree	9812c7d3e5431852d25bc15860830413ff15dc51 /net
parent	e594e96e8a14101a6decabf6746bd5186287debc (diff)
net: sk_prot_alloc() should not blindly overwrite memory
Some sockets use SLAB_DESTROY_BY_RCU, and our RCU code correctness depends on
sk->sk_nulls_node.next always being valid. A NULL value is not allowed, as it
might fault a lockless reader.

The current sk_prot_alloc() implementation doesn't respect this invariant: it
calls kmem_cache_alloc() with __GFP_ZERO, which zeroes the whole object,
including the next pointer.

Instead, call memset() around the forbidden field.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
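To make the "memset() around the forbidden field" trick concrete, here is a
minimal user-space sketch of the same offsetof()-based pattern: clear
everything before the preserved field, then everything after it. struct obj
and its members are invented for illustration; only the pattern mirrors the
patch below.

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct obj {
	int   a;
	void *keep;		/* must survive the clear, like sk_node.next */
	int   b;
	char  buf[16];
};

static void clear_around_keep(struct obj *o)
{
	/* zero everything that precedes the preserved field ... */
	memset(o, 0, offsetof(struct obj, keep));
	/* ... and everything that follows it, leaving ->keep untouched */
	memset((char *)o + offsetof(struct obj, b), 0,
	       sizeof(*o) - offsetof(struct obj, b));
}

int main(void)
{
	struct obj o = { .a = 1, .keep = &o, .b = 2, .buf = "x" };

	clear_around_keep(&o);
	assert(o.a == 0 && o.b == 0 && o.buf[0] == 0);
	assert(o.keep == &o);	/* survived the "zeroing" */
	return 0;
}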
Diffstat (limited to 'net')
-rw-r--r--	net/core/sock.c	19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 6354863b1c6..ba5d2116aea 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -939,8 +939,23 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 	struct kmem_cache *slab;
 	slab = prot->slab;
-	if (slab != NULL)
-		sk = kmem_cache_alloc(slab, priority);
+	if (slab != NULL) {
+		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
+		if (!sk)
+			return sk;
+		if (priority & __GFP_ZERO) {
+			/*
+			 * caches using SLAB_DESTROY_BY_RCU should let
+			 * sk_node.next un-modified. Special care is taken
+			 * when initializing object to zero.
+			 */
+			if (offsetof(struct sock, sk_node.next) != 0)
+				memset(sk, 0, offsetof(struct sock, sk_node.next));
+			memset(&sk->sk_node.pprev, 0,
+			       prot->obj_size - offsetof(struct sock,
+							 sk_node.pprev));
+		}
+	}
 	else
 		sk = kmalloc(prot->obj_size, priority);
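One detail worth spelling out: the changelog speaks of sk->sk_nulls_node.next
while the code clears around sk_node.next. The reading assumed here (not
stated in this commit, but consistent with struct sock_common in
include/net/sock.h) is that sk_node and sk_nulls_node share storage in a
union, so their next pointers sit at the same offset and preserving one
preserves the other. A simplified, compilable sketch of that aliasing, with
stand-in types rather than the real kernel definitions:

/*
 * Illustrative only: simplified stand-ins for the kernel's hlist_node and
 * hlist_nulls_node, plus the assumed union inside struct sock_common.
 * The real definitions live in the kernel's list and socket headers.
 */
#include <stddef.h>

struct hlist_node_sketch {
	struct hlist_node_sketch *next, **pprev;
};

struct hlist_nulls_node_sketch {
	struct hlist_nulls_node_sketch *next, **pprev;
};

struct sock_common_sketch {
	union {
		struct hlist_node_sketch	skc_node;	/* sk_node */
		struct hlist_nulls_node_sketch	skc_nulls_node;	/* sk_nulls_node */
	};
};

/* Because of the union, preserving sk_node.next also preserves
 * sk_nulls_node.next, which is what the lockless RCU readers chase. */
_Static_assert(offsetof(struct sock_common_sketch, skc_node.next) ==
	       offsetof(struct sock_common_sketch, skc_nulls_node.next),
	       "next pointers must alias");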