path: root/net/core/skbuff.c
author    Herbert Xu <herbert@gondor.apana.org.au>    2007-10-14 00:37:52 -0700
committer David S. Miller <davem@sunset.davemloft.net>    2007-10-15 12:26:24 -0700
commit e0053ec07e32ec94535c47b10af3377255f00836 (patch)
tree   05d74698e2be2ac052fca3132e830c524887a5fd /net/core/skbuff.c
parent dec18810c52ed564c1aedc7f93dbf278b7fdf6d5 (diff)
[SKBUFF]: Add skb_morph
This patch creates a new function skb_morph that's just like skb_clone except that it lets the user provide the spare skb that will be overwritten by the one that's to be cloned. This will be used by IP fragment reassembly so that we get back the same skb that went in last (rather than the head skb that we get now, which requires us to carry double pointers around all over the place).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
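As a rough sketch of the call pattern (not taken from this patch): a caller that wants a specific skb handed back can morph that skb into a clone of another one. Only skb_morph() itself comes from this change; the helper name and the "keep"/"donor" context below are made up for illustration.

#include <linux/skbuff.h>

/*
 * Hypothetical illustration only. After the call, "keep" has been
 * overwritten with a clone of "donor" (both now share donor's packet
 * data), and "keep" is what the caller gets back, so no double pointer
 * is needed to report which skb survived.
 */
static struct sk_buff *reuse_skb_head(struct sk_buff *keep,
				      struct sk_buff *donor)
{
	return skb_morph(keep, donor);
}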
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--    net/core/skbuff.c    83
1 files changed, 53 insertions, 30 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 758bbef506d..c3aa68ceed6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -400,37 +400,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_copy_secmark(new, old);
}
-/**
- * skb_clone - duplicate an sk_buff
- * @skb: buffer to clone
- * @gfp_mask: allocation priority
- *
- * Duplicate an &sk_buff. The new one is not owned by a socket. Both
- * copies share the same packet data but not structure. The new
- * buffer has a reference count of 1. If the allocation fails the
- * function returns %NULL otherwise the new buffer is returned.
- *
- * If this function is called from an interrupt gfp_mask() must be
- * %GFP_ATOMIC.
- */
-
-struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
+static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
- struct sk_buff *n;
-
- n = skb + 1;
- if (skb->fclone == SKB_FCLONE_ORIG &&
- n->fclone == SKB_FCLONE_UNAVAILABLE) {
- atomic_t *fclone_ref = (atomic_t *) (n + 1);
- n->fclone = SKB_FCLONE_CLONE;
- atomic_inc(fclone_ref);
- } else {
- n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
- if (!n)
- return NULL;
- n->fclone = SKB_FCLONE_UNAVAILABLE;
- }
-
#define C(x) n->x = skb->x
n->next = n->prev = NULL;
@@ -462,6 +433,58 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
skb->cloned = 1;
return n;
+#undef C
+}
+
+/**
+ * skb_morph - morph one skb into another
+ * @dst: the skb to receive the contents
+ * @src: the skb to supply the contents
+ *
+ * This is identical to skb_clone except that the target skb is
+ * supplied by the user.
+ *
+ * The target skb is returned upon exit.
+ */
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
+{
+ skb_release_data(dst);
+ return __skb_clone(dst, src);
+}
+EXPORT_SYMBOL_GPL(skb_morph);
+
+/**
+ * skb_clone - duplicate an sk_buff
+ * @skb: buffer to clone
+ * @gfp_mask: allocation priority
+ *
+ * Duplicate an &sk_buff. The new one is not owned by a socket. Both
+ * copies share the same packet data but not structure. The new
+ * buffer has a reference count of 1. If the allocation fails the
+ * function returns %NULL otherwise the new buffer is returned.
+ *
+ * If this function is called from an interrupt gfp_mask() must be
+ * %GFP_ATOMIC.
+ */
+
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ struct sk_buff *n;
+
+ n = skb + 1;
+ if (skb->fclone == SKB_FCLONE_ORIG &&
+ n->fclone == SKB_FCLONE_UNAVAILABLE) {
+ atomic_t *fclone_ref = (atomic_t *) (n + 1);
+ n->fclone = SKB_FCLONE_CLONE;
+ atomic_inc(fclone_ref);
+ } else {
+ n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+ if (!n)
+ return NULL;
+ n->fclone = SKB_FCLONE_UNAVAILABLE;
+ }
+
+ return __skb_clone(n, skb);
}
static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
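For readers unfamiliar with the C(x) idiom that __skb_clone() keeps using above, here is a minimal standalone sketch of the same field-copy macro pattern; the struct and field names are invented for illustration and do not appear in skbuff.c.

#include <stdio.h>

struct pkt {
	int len;
	int mark;
	int priority;
};

/* Copy selected fields from src into dst, one C(field) line per field. */
static void pkt_copy_fields(struct pkt *dst, const struct pkt *src)
{
#define C(x) dst->x = src->x
	C(len);
	C(mark);
	C(priority);
#undef C
}

int main(void)
{
	struct pkt a = { .len = 100, .mark = 1, .priority = 7 };
	struct pkt b = { 0 };

	pkt_copy_fields(&b, &a);
	printf("len=%d mark=%d priority=%d\n", b.len, b.mark, b.priority);
	return 0;
}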