Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 95
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b8292d8cc9f..2e0ced1af3b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,6 +32,7 @@
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
+#include <net/flow_keys.h>
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
@@ -316,6 +317,8 @@ enum {
SKB_GSO_FCOE = 1 << 5,
SKB_GSO_GRE = 1 << 6,
+
+ SKB_GSO_UDP_TUNNEL = 1 << 7,
};
#if BITS_PER_LONG > 32
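As a minimal sketch of how the new flag is meant to be consumed, a UDP-based encapsulation transmit path could tag offloaded skbs as below; the function name is hypothetical, only skb_is_gso(), skb_shinfo() and the flag itself come from skbuff.h:

#include <linux/skbuff.h>

/* Hypothetical helper: mark a GSO skb as UDP-tunnel encapsulated so
 * the segmentation code knows an outer UDP header wraps the payload.
 */
static void example_mark_udp_tunnel_gso(struct sk_buff *skb)
{
	if (skb_is_gso(skb))
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
}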
@@ -384,9 +387,11 @@ typedef unsigned char *sk_buff_data_t;
* @secmark: security marking
* @mark: Generic packet mark
* @dropcount: total number of sk_receive_queue overflows
+ * @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_transport_header: Inner transport layer header (encapsulation)
* @inner_network_header: Network layer header (encapsulation)
+ * @inner_mac_header: Link layer header (encapsulation)
* @transport_header: Transport layer header
* @network_header: Network layer header
* @mac_header: Link layer header
@@ -461,6 +466,7 @@ struct sk_buff {
__u32 rxhash;
+ __be16 vlan_proto;
__u16 vlan_tci;
#ifdef CONFIG_NET_SCHED
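A minimal sketch of what the new field enables, assuming the VLAN tagging helpers gain a matching protocol argument alongside this change; the ETH_P_8021AD constant and the function below are illustrative, not confirmed by this hunk:

#include <linux/if_vlan.h>

/* Hypothetical helper: tag an skb as 802.1ad (S-VLAN) rather than the
 * previously implicit 802.1Q, recording the protocol in skb->vlan_proto.
 */
static void example_tag_svlan(struct sk_buff *skb, u16 tci)
{
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), tci);
}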
@@ -505,6 +511,7 @@ struct sk_buff {
sk_buff_data_t inner_transport_header;
sk_buff_data_t inner_network_header;
+ sk_buff_data_t inner_mac_header;
sk_buff_data_t transport_header;
sk_buff_data_t network_header;
sk_buff_data_t mac_header;
@@ -570,7 +577,40 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
skb->_skb_refdst = (unsigned long)dst;
}
-extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);
+extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+ bool force);
+
+/**
+ * skb_dst_set_noref - sets skb dst, hopefully, without taking a reference
+ * @skb: buffer
+ * @dst: dst entry
+ *
+ * Sets skb dst, assuming a reference was not taken on dst.
+ * If the dst entry is cached, we do not take a reference and dst_release()
+ * will be avoided by refdst_drop(). If the dst entry is not cached, we take
+ * a reference so that the last dst_release() can destroy the dst immediately.
+ */
+static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
+{
+ __skb_dst_set_noref(skb, dst, false);
+}
+
+/**
+ * skb_dst_set_noref_force - sets skb dst, without taking a reference
+ * @skb: buffer
+ * @dst: dst entry
+ *
+ * Sets skb dst, assuming a reference was not taken on dst.
+ * No reference is taken and no dst_release() will be called. While for
+ * cached dsts deferred reclaim is a basic feature, for entries that are
+ * not cached it is the caller's job to guarantee that the last dst_release()
+ * for the provided dst happens when nobody uses it, e.g. after an RCU
+ * grace period.
+ */
+static inline void skb_dst_set_noref_force(struct sk_buff *skb,
+ struct dst_entry *dst)
+{
+ __skb_dst_set_noref(skb, dst, true);
+}
/**
* skb_dst_is_noref - Test if skb dst isn't refcounted
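A minimal sketch contrasting the two setters; both callers are hypothetical, the lifetime rules are exactly those documented above:

#include <linux/skbuff.h>
#include <net/dst.h>

/* Cached-route case: refdst_drop() can skip dst_release() later. */
static void example_set_cached_dst(struct sk_buff *skb, struct dst_entry *dst)
{
	skb_dst_set_noref(skb, dst);
}

/* Forced case: no reference is ever taken, so the caller must keep
 * dst alive itself, e.g. by freeing it only after an RCU grace period.
 */
static void example_set_rcu_dst(struct sk_buff *skb, struct dst_entry *dst)
{
	skb_dst_set_noref_force(skb, dst);
}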
@@ -611,6 +651,12 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
+extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+static inline struct sk_buff *alloc_skb_head(gfp_t priority)
+{
+ return __alloc_skb_head(priority, NUMA_NO_NODE);
+}
+
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
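A minimal sketch of the intended use: alloc_skb_head() returns an sk_buff with no data buffer, for callers that attach externally managed memory. The wrapper below and its buffer-ownership convention are assumptions, not an API:

#include <linux/skbuff.h>

/* Hypothetical helper: wrap a caller-owned buffer in a fresh skb head.
 * The buffer must outlive the skb; nothing here frees it.
 */
static struct sk_buff *example_wrap_ext_buffer(void *buf, unsigned int size)
{
	struct sk_buff *skb = alloc_skb_head(GFP_KERNEL);

	if (!skb)
		return NULL;
	skb->head = buf;
	skb->data = buf;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	return skb;
}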
@@ -1471,6 +1517,7 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
+ skb->inner_mac_header = skb->mac_header;
skb->inner_network_header = skb->network_header;
skb->inner_transport_header = skb->transport_header;
}
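A minimal sketch of the call order this change supports in an encapsulating transmit path; the outer-header step is only indicated by a comment:

#include <linux/skbuff.h>

static void example_encap_xmit(struct sk_buff *skb)
{
	/* Snapshot the current MAC/network/transport offsets as the
	 * inner headers, now including inner_mac_header, before the
	 * outer encapsulation headers overwrite the outer offsets.
	 */
	skb_reset_inner_headers(skb);

	/* ... push outer Ethernet/IP/UDP headers and reset the outer
	 * mac/network/transport headers as usual ...
	 */
}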
@@ -1516,6 +1563,22 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
+{
+ return skb->head + skb->inner_mac_header;
+}
+
+static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
+{
+ skb->inner_mac_header = skb->data - skb->head;
+}
+
+static inline void skb_set_inner_mac_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb_reset_inner_mac_header(skb);
+ skb->inner_mac_header += offset;
+}
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
return skb->transport_header != ~0U;
@@ -1609,6 +1672,21 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header = skb->data + offset;
}
+static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
+{
+ return skb->inner_mac_header;
+}
+
+static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
+{
+ skb->inner_mac_header = skb->data;
+}
+
+static inline void skb_set_inner_mac_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb->inner_mac_header = skb->data + offset;
+}
static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
return skb->transport_header != NULL;
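Both variants above implement the same contract for offset-based and pointer-based sk_buff_data_t builds; a minimal check of the shared invariant, purely for illustration:

#include <linux/skbuff.h>

/* After a reset, the inner MAC header must alias skb->data in either
 * representation: head + (data - head) in the offset build, data
 * itself in the pointer build.
 */
static bool example_inner_mac_invariant(struct sk_buff *skb)
{
	skb_reset_inner_mac_header(skb);
	return skb_inner_mac_header(skb) == skb->data;
}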
@@ -1666,6 +1744,19 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline void skb_probe_transport_header(struct sk_buff *skb,
+ const int offset_hint)
+{
+ struct flow_keys keys;
+
+ if (skb_transport_header_was_set(skb))
+ return;
+ else if (skb_flow_dissect(skb, &keys))
+ skb_set_transport_header(skb, keys.thoff);
+ else
+ skb_set_transport_header(skb, offset_hint);
+}
+
static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
if (skb_mac_header_was_set(skb)) {
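A minimal sketch of a caller: paths that inject raw frames (packet sockets, tap-style drivers) have no parsed headers yet, so they probe before handing the skb to the stack; the hint value here is an assumption:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_raw_xmit_fixup(struct sk_buff *skb,
				   struct net_device *dev)
{
	/* Prefer the flow dissector's result; fall back to the hint. */
	skb_probe_transport_header(skb, dev->hard_header_len);
}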
@@ -2811,6 +2902,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+u32 __skb_get_poff(const struct sk_buff *skb);
+
/**
* skb_head_is_locked - Determine if the skb->head is locked down
* @skb: skb to check
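__skb_get_poff() builds on the flow dissector to report where the transport payload begins. A minimal, hypothetical consumer, using only helpers already declared in this header:

#include <linux/skbuff.h>

/* Return a pointer to the first `len` payload bytes, copying into
 * `buf` if they are not linear; NULL if the skb is too short.
 */
static void *example_payload_ptr(const struct sk_buff *skb,
				 void *buf, unsigned int len)
{
	u32 poff = __skb_get_poff(skb);

	return skb_header_pointer(skb, poff, len, buf);
}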