author    Jörn Engel <joern@wohnheim.fh-wedel.de>    2006-03-20 21:28:35 -0800
committer David S. Miller <davem@davemloft.net>      2006-03-20 21:28:35 -0800
commit    231d06ae826664b83369166449144304859a62fa (patch)
tree      1c761b91405573f4e787454b454ead8354c3ba23
parent    2e1f47c74c26a591fc490eb339f1b3167361158b (diff)
[NET]: Uninline kfree_skb and allow NULL argument
o Uninline kfree_skb, which saves some 15k of object code on my notebook.

o Allow kfree_skb to be called with a NULL argument.  Subsequent patches
  can remove the conditional from drivers and further reduce source and
  object size.

Signed-off-by: Jörn Engel <joern@wohnheim.fh-wedel.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--    include/linux/skbuff.h    17
-rw-r--r--    net/core/skbuff.c         19
2 files changed, 20 insertions, 16 deletions
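To illustrate the follow-up cleanups the commit message refers to: with kfree_skb() accepting NULL, callers no longer need to guard the call themselves. A hypothetical driver teardown (field and variable names invented for illustration, not taken from this patch) could shrink from

	if (priv->rx_skb)
		kfree_skb(priv->rx_skb);
	priv->rx_skb = NULL;

to simply

	kfree_skb(priv->rx_skb);
	priv->rx_skb = NULL;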
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 1a2611030d3..75c963103b9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -304,6 +304,7 @@ struct sk_buff {
#include <asm/system.h>
+extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
gfp_t priority, int fclone);
@@ -404,22 +405,6 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
*/
/**
- * kfree_skb - free an sk_buff
- * @skb: buffer to free
- *
- * Drop a reference to the buffer and free it if the usage count has
- * hit zero.
- */
-static inline void kfree_skb(struct sk_buff *skb)
-{
- if (likely(atomic_read(&skb->users) == 1))
- smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
- return;
- __kfree_skb(skb);
-}
-
-/**
* skb_cloned - is the buffer a clone
* @skb: buffer to check
*
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2144952d1c6..01abf1e8990 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -356,6 +356,24 @@ void __kfree_skb(struct sk_buff *skb)
}
/**
+ * kfree_skb - free an sk_buff
+ * @skb: buffer to free
+ *
+ * Drop a reference to the buffer and free it if the usage count has
+ * hit zero.
+ */
+void kfree_skb(struct sk_buff *skb)
+{
+ if (unlikely(!skb))
+ return;
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return;
+ __kfree_skb(skb);
+}
+
+/**
* skb_clone - duplicate an sk_buff
* @skb: buffer to clone
* @gfp_mask: allocation priority
@@ -1799,6 +1817,7 @@ void __init skb_init(void)
EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
+EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(pskb_copy);
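A rough sketch of the resulting caller-side semantics (the function and variable names below are invented, and it assumes the caller starts out holding the only reference): skb_get() takes an extra reference, each kfree_skb() drops one, the buffer is destroyed only when the count reaches zero, and a NULL pointer is now ignored.

	#include <linux/skbuff.h>

	static void example_hold_and_release(struct sk_buff *skb)
	{
		/* Take an additional reference so the buffer outlives the first free. */
		struct sk_buff *held = skb_get(skb);

		kfree_skb(skb);		/* drops one reference; buffer still alive */
		kfree_skb(held);	/* drops the last reference, __kfree_skb() runs */
		kfree_skb(NULL);	/* harmless no-op after this patch */
	}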