author     Eric Leblond <eric@regit.org>  2013-12-06 00:24:12 +0100
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2013-12-07 23:20:45 +0100
commit     97a2d41c47a2246c3387a937c62126c9faefe875 (patch)
tree       0c295ac10edb37c0e441666a84b6f3ff37fc077e /include/net/netfilter
parent     e569bdab35fd0d31cecb6b072e95af1834991f9d (diff)
netfilter: xt_NFQUEUE: separate reusable code
This patch prepares the addition of the nft_queue module by moving reusable
code into a header file. It also converts NFQUEUE to use prandom_u32 to
initialize the random jhash seed, as suggested by Florian Westphal.

Signed-off-by: Eric Leblond <eric@regit.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
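For context, a minimal sketch of how a target module can consume the helpers
once they live in nf_queue.h. The hook names, the xt_NFQ_info_v1 targinfo
layout (queuenum, queues_total) and the module-local seed variable are
assumptions for illustration only, not code taken from this patch:

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFQUEUE.h>
#include <net/netfilter/nf_queue.h>

static u32 jhash_initval __read_mostly;

static int nfqueue_tg_check(const struct xt_tgchk_param *par)
{
	/* Pick a non-zero random seed once; init_hashrandom() keeps
	 * retrying prandom_u32() until the value is non-zero.
	 */
	init_hashrandom(&jhash_initval);
	return 0;
}

static unsigned int
nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
	/* Sketch only: assumes the NFQUEUE v1 targinfo with queuenum and
	 * queues_total; not copied verbatim from this commit.
	 */
	const struct xt_NFQ_info_v1 *info = par->targinfo;
	u32 queue = info->queuenum;

	/* Balance flows over queues [queuenum, queuenum + queues_total). */
	if (info->queues_total > 1)
		queue = nfqueue_hash(skb, queue, info->queues_total,
				     par->family, jhash_initval);

	return NF_QUEUE_NR(queue);
}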
Diffstat (limited to 'include/net/netfilter')
-rw-r--r--  include/net/netfilter/nf_queue.h  62
1 file changed, 62 insertions, 0 deletions
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index c1d5b3e34a2..84a53d78030 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -1,6 +1,10 @@
#ifndef _NF_QUEUE_H
#define _NF_QUEUE_H
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/jhash.h>
+
/* Each queued (to userspace) skbuff has one of these. */
struct nf_queue_entry {
	struct list_head list;
@@ -33,4 +37,62 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+static inline void init_hashrandom(u32 *jhash_initval)
+{
+	while (*jhash_initval == 0)
+		*jhash_initval = prandom_u32();
+}
+
+static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+
+	/* packets in either direction go into same queue */
+	if ((__force u32)iph->saddr < (__force u32)iph->daddr)
+		return jhash_3words((__force u32)iph->saddr,
+			(__force u32)iph->daddr, iph->protocol, jhash_initval);
+
+	return jhash_3words((__force u32)iph->daddr,
+		(__force u32)iph->saddr, iph->protocol, jhash_initval);
+}
+
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+{
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	u32 a, b, c;
+
+	if ((__force u32)ip6h->saddr.s6_addr32[3] <
+	    (__force u32)ip6h->daddr.s6_addr32[3]) {
+		a = (__force u32) ip6h->saddr.s6_addr32[3];
+		b = (__force u32) ip6h->daddr.s6_addr32[3];
+	} else {
+		b = (__force u32) ip6h->saddr.s6_addr32[3];
+		a = (__force u32) ip6h->daddr.s6_addr32[3];
+	}
+
+	if ((__force u32)ip6h->saddr.s6_addr32[1] <
+	    (__force u32)ip6h->daddr.s6_addr32[1])
+		c = (__force u32) ip6h->saddr.s6_addr32[1];
+	else
+		c = (__force u32) ip6h->daddr.s6_addr32[1];
+
+	return jhash_3words(a, b, c, jhash_initval);
+}
+#endif
+
+static inline u32
+nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
+	     u32 jhash_initval)
+{
+	if (family == NFPROTO_IPV4)
+		queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+	else if (family == NFPROTO_IPV6)
+		queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
+#endif
+
+	return queue;
+}
+
#endif /* _NF_QUEUE_H */
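A note on the queue selection in nfqueue_hash(): the 32-bit hash is mapped
onto the configured number of queues with a 64-bit multiply and shift rather
than a modulo, so the offset always falls in [0, queues_total). A small
illustrative snippet of that arithmetic; example_queue_offset is a
hypothetical helper name and the values are chosen only as an example:

#include <linux/types.h>

/* Example only: the multiply-and-shift used by nfqueue_hash() to derive
 * a queue offset in [0, queues_total) from a 32-bit hash.
 */
static inline u32 example_queue_offset(u32 hash, u16 queues_total)
{
	return ((u64) hash * queues_total) >> 32;
}

/* example_queue_offset(0xC0000000, 8) == 6, so a flow hashing to three
 * quarters of the 32-bit range would be steered to queue (queuenum + 6).
 */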