Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h  62
1 file changed, 48 insertions(+), 14 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e6ba898de61..16c9c091555 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -122,8 +122,14 @@ struct sk_buff_head {
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list */
+/* To allow 64K frame to be packed as single skb without frag_list. Since
+ * GRO uses frags we allocate at least 16 regardless of page size.
+ */
+#if (65536/PAGE_SIZE + 2) < 16
+#define MAX_SKB_FRAGS 16UL
+#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+#endif
typedef struct skb_frag_struct skb_frag_t;
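
As a side note, a minimal userspace sketch of how the new lower bound behaves for different page sizes (max_skb_frags() and the hard-coded page sizes below are illustrative stand-ins, not kernel code):

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

/* Mirror the MAX_SKB_FRAGS selection above for a given page size. */
static unsigned long max_skb_frags(unsigned long page_size)
{
	unsigned long frags = 65536 / page_size + 2;

	/* GRO packs segments into frags, so never advertise fewer than 16 slots. */
	return frags < 16 ? 16UL : frags;
}

int main(void)
{
	printf("4K pages:  %lu\n", max_skb_frags(4096));   /* 18, unchanged */
	printf("64K pages: %lu\n", max_skb_frags(65536));  /* 16 instead of 3 */
	return 0;
}
/* --- end of sketch --- */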
@@ -255,6 +261,11 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
+#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
+ defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
+#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
+#endif
+
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
@@ -362,6 +373,8 @@ struct sk_buff {
void (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack *nfct;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
struct sk_buff *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
@@ -378,17 +391,15 @@ struct sk_buff {
__u32 rxhash;
+ __u16 queue_mapping;
kmemcheck_bitfield_begin(flags2);
- __u16 queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
- __u8 ndisc_nodetype:2,
- deliver_no_wcard:1;
-#else
- __u8 deliver_no_wcard:1;
+ __u8 ndisc_nodetype:2;
#endif
+ __u8 ooo_okay:1;
kmemcheck_bitfield_end(flags2);
- /* 0/14 bit hole */
+ /* 0/13 bit hole */
#ifdef CONFIG_NET_DMA
dma_cookie_t dma_cookie;
@@ -463,7 +474,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);
/**
- * skb_dst_is_noref - Test if skb dst isnt refcounted
+ * skb_dst_is_noref - Test if skb dst isn't refcounted
* @skb: buffer
*/
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
@@ -1354,6 +1365,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+
static inline int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
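
For reference, csum_start is stored relative to skb->head while most callers want an offset from skb->data, which is what the new helper computes. A rough userspace analogue of the arithmetic (the toy_skb struct and its values are made up for illustration):

/* --- illustrative sketch, not part of the patch --- */
#include <stdio.h>

/* Toy stand-in for the few sk_buff fields involved; not the kernel layout. */
struct toy_skb {
	unsigned char *head;     /* start of the allocated buffer        */
	unsigned char *data;     /* start of the packet data             */
	unsigned int csum_start; /* checksum start, measured from head   */
};

static unsigned int toy_headroom(const struct toy_skb *skb)
{
	return skb->data - skb->head;
}

/* Same arithmetic as skb_checksum_start_offset(): convert a
 * head-relative start into a data-relative offset. */
static int toy_checksum_start_offset(const struct toy_skb *skb)
{
	return skb->csum_start - toy_headroom(skb);
}

int main(void)
{
	unsigned char buf[256];
	struct toy_skb skb = { .head = buf, .data = buf + 64, .csum_start = 98 };

	/* 98 bytes from head minus 64 bytes of headroom = 34 bytes into data. */
	printf("offset from data: %d\n", toy_checksum_start_offset(&skb));
	return 0;
}
/* --- end of sketch --- */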
@@ -1766,7 +1782,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
- prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
+ skb != (struct sk_buff *)(queue); \
skb = skb->next)
#define skb_queue_walk_safe(queue, skb, tmp) \
@@ -1775,7 +1791,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
skb = tmp, tmp = skb->next)
#define skb_queue_walk_from(queue, skb) \
- for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
+ for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
#define skb_queue_walk_from_safe(queue, skb, tmp) \
@@ -1785,9 +1801,18 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
#define skb_queue_reverse_walk(queue, skb) \
for (skb = (queue)->prev; \
- prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
+ skb != (struct sk_buff *)(queue); \
skb = skb->prev)
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
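
A possible use of the new reverse safe walker, sketched under the assumption of a hypothetical entry_expired() predicate and with the queue lock assumed held by the caller (only the macro, __skb_unlink() and kfree_skb() are existing kernel APIs):

/* --- illustrative sketch, not part of the patch --- */
/* Walk a queue from the tail and drop expired entries; the "safe" variant
 * caches ->prev, so the current skb may be unlinked and freed mid-walk. */
static void prune_expired_tail(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;

	skb_queue_reverse_walk_safe(queue, skb, tmp) {
		if (!entry_expired(skb))	/* hypothetical predicate */
			break;
		__skb_unlink(skb, queue);
		kfree_skb(skb);
	}
}
/* --- end of sketch --- */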
@@ -1855,7 +1880,7 @@ extern void skb_split(struct sk_buff *skb,
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
@@ -2051,6 +2076,8 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
if (nfct)
atomic_inc(&nfct->use);
}
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
if (skb)
@@ -2079,6 +2106,8 @@ static inline void nf_reset(struct sk_buff *skb)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
nf_conntrack_put_reasm(skb->nfct_reasm);
skb->nfct_reasm = NULL;
#endif
@@ -2095,6 +2124,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
dst->nfct = src->nfct;
nf_conntrack_get(src->nfct);
dst->nfctinfo = src->nfctinfo;
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
dst->nfct_reasm = src->nfct_reasm;
nf_conntrack_get_reasm(src->nfct_reasm);
#endif
@@ -2108,6 +2139,8 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(dst->nfct);
+#endif
+#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
@@ -2164,8 +2197,9 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
return skb->queue_mapping != 0;
}
-extern u16 skb_tx_hash(const struct net_device *dev,
- const struct sk_buff *skb);
+extern u16 __skb_tx_hash(const struct net_device *dev,
+ const struct sk_buff *skb,
+ unsigned int num_tx_queues);
#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
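
Finally, a rough sketch of how a driver might use the widened __skb_tx_hash() above to hash only across its currently active TX queues (toy_select_queue() and real_tx_queues are hypothetical, not part of this patch):

/* --- illustrative sketch, not part of the patch --- */
/* Pick a TX queue among the first real_tx_queues queues of the device. */
static u16 toy_select_queue(struct net_device *dev, struct sk_buff *skb,
			    unsigned int real_tx_queues)
{
	/* Reuse the recorded RX queue if there is one, folded into range. */
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb) % real_tx_queues;

	return __skb_tx_hash(dev, skb, real_tx_queues);
}
/* --- end of sketch --- */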