Diffstat (limited to 'net/core')
-rw-r--r--  net/core/neighbour.c   5
-rw-r--r--  net/core/netpoll.c     7
-rw-r--r--  net/core/skbuff.c     55
3 files changed, 11 insertions, 56 deletions
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index cfc60019cf9..841e3f32cab 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1331,6 +1331,8 @@ void neigh_parms_destroy(struct neigh_parms *parms)
kfree(parms);
}
+static struct lock_class_key neigh_table_proxy_queue_class;
+
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
unsigned long now = jiffies;
@@ -1379,7 +1381,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
init_timer(&tbl->proxy_timer);
tbl->proxy_timer.data = (unsigned long)tbl;
tbl->proxy_timer.function = neigh_proxy_process;
- skb_queue_head_init(&tbl->proxy_queue);
+ skb_queue_head_init_class(&tbl->proxy_queue,
+ &neigh_table_proxy_queue_class);
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
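The neighbour.c hunk gives the proxy queue's lock its own lockdep class: skb_queue_head_init() places every sk_buff_head spinlock in one shared class, so lockdep can report false recursive locking when one queue is touched while another queue's lock is held. A minimal sketch of the same pattern follows, with the hypothetical names example_queue_class and example_queue_setup(); it assumes skb_queue_head_init_class() is just the normal init paired with lockdep_set_class(), as in the skbuff.h of this era.

#include <linux/skbuff.h>
#include <linux/lockdep.h>

/* One key per queue that lockdep should treat as its own class. */
static struct lock_class_key example_queue_class;

static void example_queue_setup(struct sk_buff_head *q)
{
	/* Initialise the queue as usual... */
	skb_queue_head_init(q);

	/* ...then move its spinlock out of the shared sk_buff_head
	 * class, so taking it while another queue's lock is held is
	 * not flagged as recursion. */
	lockdep_set_class(&q->lock, &example_queue_class);
}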
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index da1019451cc..4581ece48bb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -471,6 +471,13 @@ int __netpoll_rx(struct sk_buff *skb)
if (skb->len < len || len < iph->ihl*4)
goto out;
+ /*
+ * Our transport medium may have padded the buffer out.
+ * Now we trim to the true length of the frame.
+ */
+ if (pskb_trim_rcsum(skb, len))
+ goto out;
+
if (iph->protocol != IPPROTO_UDP)
goto out;
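The netpoll.c hunk trims link-layer padding before the UDP checks: len is the total length taken from the IP header, and pskb_trim_rcsum() both shortens the skb and invalidates a hardware checksum that covered the padding bytes. A hedged sketch of the same pattern outside netpoll, using the hypothetical helper example_trim_to_ip_len() and assuming skb->data points at the IP header:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/errno.h>

/* Hypothetical helper: cut an skb back to the length advertised in its
 * IP header so trailing link-layer padding cannot confuse later
 * length or checksum handling.  Returns 0 on success. */
static int example_trim_to_ip_len(struct sk_buff *skb)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	unsigned int len = ntohs(iph->tot_len);

	/* Reject frames shorter than the header claims, or with an
	 * impossibly small total length. */
	if (skb->len < len || len < iph->ihl * 4)
		return -EINVAL;

	/* pskb_trim_rcsum() handles nonlinear skbs and drops a
	 * hardware checksum that included the removed padding. */
	return pskb_trim_rcsum(skb, len);
}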
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 87573ae35b0..336958fbbcb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -197,61 +197,6 @@ nodata:
}
/**
- * alloc_skb_from_cache - allocate a network buffer
- * @cp: kmem_cache from which to allocate the data area
- * (object size must be big enough for @size bytes + skb overheads)
- * @size: size to allocate
- * @gfp_mask: allocation mask
- *
- * Allocate a new &sk_buff. The returned buffer has no headroom and
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
- *
- * Buffers may only be allocated from interrupts using a @gfp_mask of
- * %GFP_ATOMIC.
- */
-struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
- unsigned int size,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u8 *data;
-
- /* Get the HEAD */
- skb = kmem_cache_alloc(skbuff_head_cache,
- gfp_mask & ~__GFP_DMA);
- if (!skb)
- goto out;
-
- /* Get the DATA. */
- size = SKB_DATA_ALIGN(size);
- data = kmem_cache_alloc(cp, gfp_mask);
- if (!data)
- goto nodata;
-
- memset(skb, 0, offsetof(struct sk_buff, truesize));
- skb->truesize = size + sizeof(struct sk_buff);
- atomic_set(&skb->users, 1);
- skb->head = data;
- skb->data = data;
- skb->tail = data;
- skb->end = data + size;
-
- atomic_set(&(skb_shinfo(skb)->dataref), 1);
- skb_shinfo(skb)->nr_frags = 0;
- skb_shinfo(skb)->gso_size = 0;
- skb_shinfo(skb)->gso_segs = 0;
- skb_shinfo(skb)->gso_type = 0;
- skb_shinfo(skb)->frag_list = NULL;
-out:
- return skb;
-nodata:
- kmem_cache_free(skbuff_head_cache, skb);
- skb = NULL;
- goto out;
-}
-
-/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
* @length: length to allocate