Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/arp.c | 21
-rw-r--r--  net/ipv4/devinet.c | 22
-rw-r--r--  net/ipv4/esp4.c | 17
-rw-r--r--  net/ipv4/fib_frontend.c | 4
-rw-r--r--  net/ipv4/fib_semantics.c | 4
-rw-r--r--  net/ipv4/fib_trie.c | 72
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/igmp.c | 4
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 7
-rw-r--r--  net/ipv4/ip_gre.c | 4
-rw-r--r--  net/ipv4/ipmr.c | 6
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 43
-rw-r--r--  net/ipv4/ipvs/ip_vs_core.c | 16
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 20
-rw-r--r--  net/ipv4/netfilter/Kconfig | 55
-rw-r--r--  net/ipv4/netfilter/Makefile | 12
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 14
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_amanda.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c | 51
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_helper_pptp.c | 806
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c | 52
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_gre.c | 328
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_icmp.c | 3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_sctp.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 30
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_standalone.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_nat_core.c | 37
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper_pptp.c | 401
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_gre.c | 214
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c | 25
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 17
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 223
-rw-r--r--  net/ipv4/netfilter/ipt_REDIRECT.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 4
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 6
-rw-r--r--  net/ipv4/tcp_bic.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 18
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 34
46 files changed, 2300 insertions(+), 316 deletions(-)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8bf312bdea1..b425748f02d 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -241,7 +241,7 @@ static int arp_constructor(struct neighbour *neigh)
neigh->type = inet_addr_type(addr);
rcu_read_lock();
- in_dev = rcu_dereference(__in_dev_get(dev));
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev == NULL) {
rcu_read_unlock();
return -EINVAL;
@@ -697,12 +697,6 @@ void arp_send(int type, int ptype, u32 dest_ip,
arp_xmit(skb);
}
-static void parp_redo(struct sk_buff *skb)
-{
- nf_reset(skb);
- arp_rcv(skb, skb->dev, NULL, skb->dev);
-}
-
/*
* Process an arp request.
*/
@@ -922,6 +916,11 @@ out:
return 0;
}
+static void parp_redo(struct sk_buff *skb)
+{
+ arp_process(skb);
+}
+
/*
* Receive an arp request from the device layer.
@@ -990,8 +989,8 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev)
ipv4_devconf.proxy_arp = 1;
return 0;
}
- if (__in_dev_get(dev)) {
- __in_dev_get(dev)->cnf.proxy_arp = 1;
+ if (__in_dev_get_rtnl(dev)) {
+ __in_dev_get_rtnl(dev)->cnf.proxy_arp = 1;
return 0;
}
return -ENXIO;
@@ -1096,8 +1095,8 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev)
ipv4_devconf.proxy_arp = 0;
return 0;
}
- if (__in_dev_get(dev)) {
- __in_dev_get(dev)->cnf.proxy_arp = 0;
+ if (__in_dev_get_rtnl(dev)) {
+ __in_dev_get_rtnl(dev)->cnf.proxy_arp = 0;
return 0;
}
return -ENXIO;
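
The arp.c hunks above are part of a tree-wide switch from the old __in_dev_get() to two lock-specific accessors: __in_dev_get_rcu() for callers inside an RCU read-side critical section (arp_constructor already takes rcu_read_lock()) and __in_dev_get_rtnl() for ioctl/configuration paths that hold the RTNL. A usage sketch, not part of the patch (the function name is illustrative):

static int example_set_proxy_arp(struct net_device *dev)
{
	struct in_device *in_dev;

	/* fast path: pointer is only valid under rcu_read_lock() */
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev)
		/* read-only use of in_dev members here */;
	rcu_read_unlock();

	/* configuration path: caller must hold the RTNL */
	ASSERT_RTNL();
	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return -ENXIO;
	in_dev->cnf.proxy_arp = 1;
	return 0;
}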
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ba2895ae815..74f2207e131 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -351,7 +351,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa)
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
ASSERT_RTNL();
@@ -449,7 +449,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
goto out;
rc = -ENOBUFS;
- if ((in_dev = __in_dev_get(dev)) == NULL) {
+ if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
in_dev = inetdev_init(dev);
if (!in_dev)
goto out;
@@ -584,7 +584,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
if (colon)
*colon = ':';
- if ((in_dev = __in_dev_get(dev)) != NULL) {
+ if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
if (tryaddrmatch) {
/* Matthias Andree */
/* compare label and address (4.4BSD style) */
@@ -748,7 +748,7 @@ rarok:
static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
{
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
struct in_ifaddr *ifa;
struct ifreq ifr;
int done = 0;
@@ -791,7 +791,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
struct in_device *in_dev;
rcu_read_lock();
- in_dev = __in_dev_get(dev);
+ in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto no_in_dev;
@@ -818,7 +818,7 @@ no_in_dev:
read_lock(&dev_base_lock);
rcu_read_lock();
for (dev = dev_base; dev; dev = dev->next) {
- if ((in_dev = __in_dev_get(dev)) == NULL)
+ if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
continue;
for_primary_ifa(in_dev) {
@@ -887,7 +887,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
if (dev) {
rcu_read_lock();
- if ((in_dev = __in_dev_get(dev)))
+ if ((in_dev = __in_dev_get_rcu(dev)))
addr = confirm_addr_indev(in_dev, dst, local, scope);
rcu_read_unlock();
@@ -897,7 +897,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
read_lock(&dev_base_lock);
rcu_read_lock();
for (dev = dev_base; dev; dev = dev->next) {
- if ((in_dev = __in_dev_get(dev))) {
+ if ((in_dev = __in_dev_get_rcu(dev))) {
addr = confirm_addr_indev(in_dev, dst, local, scope);
if (addr)
break;
@@ -957,7 +957,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
ASSERT_RTNL();
@@ -1078,7 +1078,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
if (idx > s_idx)
s_ip_idx = 0;
rcu_read_lock();
- if ((in_dev = __in_dev_get(dev)) == NULL) {
+ if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
rcu_read_unlock();
continue;
}
@@ -1149,7 +1149,7 @@ void inet_forward_change(void)
for (dev = dev_base; dev; dev = dev->next) {
struct in_device *in_dev;
rcu_read_lock();
- in_dev = __in_dev_get(dev);
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev)
in_dev->cnf.forwarding = on;
rcu_read_unlock();
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1b5a09d1b90..1b18ce66e7b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -5,6 +5,7 @@
#include <net/esp.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>
+#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <net/icmp.h>
@@ -42,10 +43,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
- blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
- clen = (clen + 2 + blksize-1)&~(blksize-1);
+ blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+ clen = ALIGN(clen + 2, blksize);
if (esp->conf.padlen)
- clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);
+ clen = ALIGN(clen, esp->conf.padlen);
if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
goto error;
@@ -143,7 +144,7 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
struct ip_esp_hdr *esph;
struct esp_data *esp = x->data;
struct sk_buff *trailer;
- int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
+ int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
int alen = esp->auth.icv_trunc_len;
int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
int nfrags;
@@ -304,16 +305,16 @@ static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap,
static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
{
struct esp_data *esp = x->data;
- u32 blksize = crypto_tfm_alg_blocksize(esp->conf.tfm);
+ u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
if (x->props.mode) {
- mtu = (mtu + 2 + blksize-1)&~(blksize-1);
+ mtu = ALIGN(mtu + 2, blksize);
} else {
/* The worst case. */
- mtu += 2 + blksize;
+ mtu = ALIGN(mtu + 2, 4) + blksize - 4;
}
if (esp->conf.padlen)
- mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1);
+ mtu = ALIGN(mtu, esp->conf.padlen);
return mtu + x->props.header_len + esp->auth.icv_trunc_len;
}
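
The esp4.c changes replace open-coded round-up masks with the ALIGN() macro from linux/kernel.h (hence the new include). For a power-of-two alignment the two spellings are equivalent; a small worked example, with the macro body as it reads in kernels of this era:

/* from linux/kernel.h; requires a power-of-two alignment */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* old:  clen = (clen + 2 + blksize - 1) & ~(blksize - 1);
 * new:  clen = ALIGN(clen + 2, blksize);
 * e.g. clen = 37, blksize = 8  ->  ALIGN(39, 8) == 40
 */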
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4e1379f7126..e61bc7177eb 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -173,7 +173,7 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
no_addr = rpf = 0;
rcu_read_lock();
- in_dev = __in_dev_get(dev);
+ in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
no_addr = in_dev->ifa_list == NULL;
rpf = IN_DEV_RPFILTER(in_dev);
@@ -607,7 +607,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (event == NETDEV_UNREGISTER) {
fib_disable_ip(dev, 2);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d41219e8037..186f20c4a45 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1087,7 +1087,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
rta->rta_oif = &dev->ifindex;
if (colon) {
struct in_ifaddr *ifa;
- struct in_device *in_dev = __in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (!in_dev)
return -ENODEV;
*colon = ':';
@@ -1268,7 +1268,7 @@ int fib_sync_up(struct net_device *dev)
}
if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
continue;
- if (nh->nh_dev != dev || __in_dev_get(dev) == NULL)
+ if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
continue;
alive++;
spin_lock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1b63b482416..0093ea08c7f 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -43,7 +43,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#define VERSION "0.403"
+#define VERSION "0.404"
#include <linux/config.h>
#include <asm/uaccess.h>
@@ -224,7 +224,7 @@ static inline int tkey_mismatch(t_key a, int offset, t_key b)
Consider a node 'n' and its parent 'tp'.
If n is a leaf, every bit in its key is significant. Its presence is
- necessitaded by path compression, since during a tree traversal (when
+ necessitated by path compression, since during a tree traversal (when
searching for a leaf - unless we are doing an insertion) we will completely
ignore all skipped bits we encounter. Thus we need to verify, at the end of
a potentially successful search, that we have indeed been walking the
@@ -286,6 +286,8 @@ static inline void check_tnode(const struct tnode *tn)
static int halve_threshold = 25;
static int inflate_threshold = 50;
+static int halve_threshold_root = 15;
+static int inflate_threshold_root = 25;
static void __alias_free_mem(struct rcu_head *head)
@@ -449,6 +451,8 @@ static struct node *resize(struct trie *t, struct tnode *tn)
int i;
int err = 0;
struct tnode *old_tn;
+ int inflate_threshold_use;
+ int halve_threshold_use;
if (!tn)
return NULL;
@@ -541,10 +545,17 @@ static struct node *resize(struct trie *t, struct tnode *tn)
check_tnode(tn);
+ /* Keep root node larger */
+
+ if(!tn->parent)
+ inflate_threshold_use = inflate_threshold_root;
+ else
+ inflate_threshold_use = inflate_threshold;
+
err = 0;
while ((tn->full_children > 0 &&
50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
- inflate_threshold * tnode_child_length(tn))) {
+ inflate_threshold_use * tnode_child_length(tn))) {
old_tn = tn;
tn = inflate(t, tn);
@@ -564,10 +575,18 @@ static struct node *resize(struct trie *t, struct tnode *tn)
* node is above threshold.
*/
+
+ /* Keep root node larger */
+
+ if(!tn->parent)
+ halve_threshold_use = halve_threshold_root;
+ else
+ halve_threshold_use = halve_threshold;
+
err = 0;
while (tn->bits > 1 &&
100 * (tnode_child_length(tn) - tn->empty_children) <
- halve_threshold * tnode_child_length(tn)) {
+ halve_threshold_use * tnode_child_length(tn)) {
old_tn = tn;
tn = halve(t, tn);
@@ -836,11 +855,12 @@ static void trie_init(struct trie *t)
#endif
}
-/* readside most use rcu_read_lock currently dump routines
+/* readside must use rcu_read_lock currently dump routines
via get_fa_head and dump */
-static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen)
+static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
{
+ struct hlist_head *head = &l->list;
struct hlist_node *node;
struct leaf_info *li;
@@ -853,7 +873,7 @@ static struct leaf_info *find_leaf_info(struct hlist_head *head, int plen)
static inline struct list_head * get_fa_head(struct leaf *l, int plen)
{
- struct leaf_info *li = find_leaf_info(&l->list, plen);
+ struct leaf_info *li = find_leaf_info(l, plen);
if (!li)
return NULL;
@@ -1085,7 +1105,7 @@ fib_insert_node(struct trie *t, int *err, u32 key, int plen)
}
if (tp && tp->pos + tp->bits > 32)
- printk("ERROR tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
+ printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
tp, tp->pos, tp->bits, key, plen);
/* Rebalance the trie */
@@ -1248,7 +1268,7 @@ err:
}
-/* should be clalled with rcu_read_lock */
+/* should be called with rcu_read_lock */
static inline int check_leaf(struct trie *t, struct leaf *l,
t_key key, int *plen, const struct flowi *flp,
struct fib_result *res)
@@ -1590,7 +1610,7 @@ fn_trie_delete(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta,
rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id, nlhdr, req);
l = fib_find_node(t, key);
- li = find_leaf_info(&l->list, plen);
+ li = find_leaf_info(l, plen);
list_del_rcu(&fa->fa_list);
@@ -1714,7 +1734,6 @@ static int fn_trie_flush(struct fib_table *tb)
t->revision++;
- rcu_read_lock();
for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
found += trie_flush_leaf(t, l);
@@ -1722,7 +1741,6 @@ static int fn_trie_flush(struct fib_table *tb)
trie_leaf_remove(t, ll->key);
ll = l;
}
- rcu_read_unlock();
if (ll && hlist_empty(&ll->list))
trie_leaf_remove(t, ll->key);
@@ -1833,16 +1851,7 @@ static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fi
i++;
continue;
}
- if (fa->fa_info->fib_nh == NULL) {
- printk("Trie error _fib_nh=NULL in fa[%d] k=%08x plen=%d\n", i, key, plen);
- i++;
- continue;
- }
- if (fa->fa_info == NULL) {
- printk("Trie error fa_info=NULL in fa[%d] k=%08x plen=%d\n", i, key, plen);
- i++;
- continue;
- }
+ BUG_ON(!fa->fa_info);
if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
@@ -1965,7 +1974,7 @@ struct fib_table * __init fib_hash_init(int id)
trie_main = t;
if (id == RT_TABLE_LOCAL)
- printk("IPv4 FIB: Using LC-trie version %s\n", VERSION);
+ printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
return tb;
}
@@ -2029,7 +2038,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
iter->tnode = (struct tnode *) n;
iter->trie = t;
iter->index = 0;
- iter->depth = 0;
+ iter->depth = 1;
return n;
}
return NULL;
@@ -2274,11 +2283,12 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "<local>:\n");
else
seq_puts(seq, "<main>:\n");
- } else {
- seq_indent(seq, iter->depth-1);
- seq_printf(seq, " +-- %d.%d.%d.%d/%d\n",
- NIPQUAD(prf), tn->pos);
- }
+ }
+ seq_indent(seq, iter->depth-1);
+ seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
+ NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
+ tn->empty_children);
+
} else {
struct leaf *l = (struct leaf *) n;
int i;
@@ -2287,7 +2297,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
seq_indent(seq, iter->depth);
seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
for (i = 32; i >= 0; i--) {
- struct leaf_info *li = find_leaf_info(&l->list, i);
+ struct leaf_info *li = find_leaf_info(l, i);
if (li) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -2383,7 +2393,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
return 0;
for (i=32; i>=0; i--) {
- struct leaf_info *li = find_leaf_info(&l->list, i);
+ struct leaf_info *li = find_leaf_info(l, i);
struct fib_alias *fa;
u32 mask, prefix;
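
The functional change in fib_trie.c is the pair of root-only resize thresholds. Writing F = tn->full_children, E = tn->empty_children and L = tnode_child_length(tn), the inflate loop runs while 50*(F + L - E) >= threshold*L: with the default inflate_threshold of 50 this reduces to F >= E, while the new inflate_threshold_root of 25 relaxes it to F >= E - L/2, so the root keeps inflating even when many of its slots are empty. Likewise the halve test 100*(L - E) < threshold*L fires for a non-root node once more than 75% of its children are empty (threshold 25), but for the root only beyond 85% (threshold 15). The net effect is a wider, shallower root node.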
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 24eb56ae1b5..90dca711ac9 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -188,7 +188,7 @@ struct icmp_err icmp_err_convert[] = {
/* Control parameters for ECHO replies. */
int sysctl_icmp_echo_ignore_all;
-int sysctl_icmp_echo_ignore_broadcasts;
+int sysctl_icmp_echo_ignore_broadcasts = 1;
/* Control parameter - ignore bogus broadcast responses? */
int sysctl_icmp_ignore_bogus_error_responses;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 44607f4767b..8b6d3939e1e 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1323,7 +1323,7 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
}
if (dev) {
imr->imr_ifindex = dev->ifindex;
- idev = __in_dev_get(dev);
+ idev = __in_dev_get_rtnl(dev);
}
return idev;
}
@@ -1603,7 +1603,7 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
}
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
- pmc->sfcount[MCAST_EXCLUDE] = 0;
+ pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index fe3c6d3d0c9..94468a76c5b 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -494,7 +494,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
- const unsigned int __nocast priority)
+ const gfp_t priority)
{
struct sock *newsk = sk_clone(sk, priority);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 4d1502a4985..a010e9a6881 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,7 +20,7 @@ void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashi
struct inet_bind_hashbucket *bhead;
struct inet_bind_bucket *tb;
/* Unlink from established hashes. */
- struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent];
+ struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);
write_lock(&ehead->lock);
if (hlist_unhashed(&tw->tw_node)) {
@@ -60,7 +60,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
+ struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
struct inet_bind_hashbucket *bhead;
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet->num != 0 MUST be bound in
@@ -106,11 +106,12 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
tw->tw_dport = inet->dport;
tw->tw_family = sk->sk_family;
tw->tw_reuse = sk->sk_reuse;
- tw->tw_hashent = sk->sk_hashent;
+ tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
tw->tw_prot = sk->sk_prot_creator;
atomic_set(&tw->tw_refcnt, 1);
inet_twsk_dead_node_init(tw);
+ __module_get(tw->tw_prot->owner);
}
return tw;
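
Two related cleanups here: the timewait socket now stores the full sk_hash value (tw_hash) instead of the pre-masked tw_hashent, with bucket selection going through inet_ehash_bucket(), and the new __module_get() pins the protocol module for as long as a timewait socket still points at tw_prot. The helper itself lives in include/net/inet_hashtables.h, outside this diff; presumably it is along these lines (an assumption: the hash masked down to the table size):

static inline struct inet_ehash_bucket *inet_ehash_bucket(struct inet_hashinfo *hashinfo,
							   unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}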
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f0d5740d7e2..896ce3f8f53 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1104,10 +1104,10 @@ static int ipgre_open(struct net_device *dev)
return -EADDRNOTAVAIL;
dev = rt->u.dst.dev;
ip_rt_put(rt);
- if (__in_dev_get(dev) == NULL)
+ if (__in_dev_get_rtnl(dev) == NULL)
return -EADDRNOTAVAIL;
t->mlink = dev->ifindex;
- ip_mc_inc_group(__in_dev_get(dev), t->parms.iph.daddr);
+ ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
}
return 0;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9dbf5909f3a..302b7eb507c 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -149,7 +149,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
dev->flags |= IFF_MULTICAST;
- in_dev = __in_dev_get(dev);
+ in_dev = __in_dev_get_rtnl(dev);
if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
goto failure;
in_dev->cnf.rp_filter = 0;
@@ -278,7 +278,7 @@ static int vif_delete(int vifi)
dev_set_allmulti(dev, -1);
- if ((in_dev = __in_dev_get(dev)) != NULL) {
+ if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
in_dev->cnf.mc_forwarding--;
ip_rt_multicast_event(in_dev);
}
@@ -421,7 +421,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
return -EINVAL;
}
- if ((in_dev = __in_dev_get(dev)) == NULL)
+ if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
return -EADDRNOTAVAIL;
in_dev->cnf.mc_forwarding++;
dev_set_allmulti(dev, +1);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 6e092dadb38..fc6f95aaa96 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -604,7 +604,7 @@ static struct file_operations ip_vs_app_fops = {
/*
* Replace a segment of data with a new segment
*/
-int ip_vs_skb_replace(struct sk_buff *skb, int pri,
+int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
char *o_buf, int o_len, char *n_buf, int n_len)
{
struct iphdr *iph;
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index e11952ea17a..f828fa2eb7d 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -196,6 +196,7 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (s_addr==cp->caddr && s_port==cp->cport &&
d_port==cp->vport && d_addr==cp->vaddr &&
+ ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
protocol==cp->protocol) {
/* HIT */
atomic_inc(&cp->refcnt);
@@ -227,6 +228,40 @@ struct ip_vs_conn *ip_vs_conn_in_get
return cp;
}
+/* Get reference to connection template */
+struct ip_vs_conn *ip_vs_ct_in_get
+(int protocol, __u32 s_addr, __u16 s_port, __u32 d_addr, __u16 d_port)
+{
+ unsigned hash;
+ struct ip_vs_conn *cp;
+
+ hash = ip_vs_conn_hashkey(protocol, s_addr, s_port);
+
+ ct_read_lock(hash);
+
+ list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ if (s_addr==cp->caddr && s_port==cp->cport &&
+ d_port==cp->vport && d_addr==cp->vaddr &&
+ cp->flags & IP_VS_CONN_F_TEMPLATE &&
+ protocol==cp->protocol) {
+ /* HIT */
+ atomic_inc(&cp->refcnt);
+ goto out;
+ }
+ }
+ cp = NULL;
+
+ out:
+ ct_read_unlock(hash);
+
+ IP_VS_DBG(7, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n",
+ ip_vs_proto_name(protocol),
+ NIPQUAD(s_addr), ntohs(s_port),
+ NIPQUAD(d_addr), ntohs(d_port),
+ cp?"hit":"not hit");
+
+ return cp;
+}
/*
* Gets ip_vs_conn associated with supplied parameters in the ip_vs_conn_tab.
@@ -367,7 +402,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
atomic_read(&dest->refcnt));
/* Update the connection counters */
- if (cp->cport || (cp->flags & IP_VS_CONN_F_NO_CPORT)) {
+ if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so increase the inactive
connection counter because it is in TCP SYNRECV
state (inactive) or other protocol inactive state */
@@ -406,7 +441,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
atomic_read(&dest->refcnt));
/* Update the connection counters */
- if (cp->cport || (cp->flags & IP_VS_CONN_F_NO_CPORT)) {
+ if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so decrease the inactconns
or activeconns counter */
if (cp->flags & IP_VS_CONN_F_INACTIVE) {
@@ -467,7 +502,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
/*
* Invalidate the connection template
*/
- if (ct->cport) {
+ if (ct->vport != 65535) {
if (ip_vs_conn_unhash(ct)) {
ct->dport = 65535;
ct->vport = 65535;
@@ -776,7 +811,7 @@ void ip_vs_random_dropentry(void)
ct_write_lock_bh(hash);
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
- if (!cp->cport && !(cp->flags & IP_VS_CONN_F_NO_CPORT))
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
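
The new clause in __ip_vs_conn_in_get(), ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))), is true only when exactly one operand is set:

	s_port == 0, NO_CPORT set   -> match
	s_port == 0, NO_CPORT clear -> no match
	s_port != 0, NO_CPORT set   -> no match
	s_port != 0, NO_CPORT clear -> match

In other words, a lookup with a zero client port now matches only entries flagged IP_VS_CONN_F_NO_CPORT, and a lookup with a real port only matches entries without that flag. Persistence templates are no longer recognised by a zero client port at all; they carry the new IP_VS_CONN_F_TEMPLATE flag and are found via the dedicated ip_vs_ct_in_get() added above, which ip_vs_core.c and ip_vs_sync.c below are converted to use.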
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 3ac7eeca04a..981cc3244ef 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -243,10 +243,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
if (ports[1] == svc->port) {
/* Check if a template already exists */
if (svc->port != FTPPORT)
- ct = ip_vs_conn_in_get(iph->protocol, snet, 0,
+ ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
iph->daddr, ports[1]);
else
- ct = ip_vs_conn_in_get(iph->protocol, snet, 0,
+ ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
iph->daddr, 0);
if (!ct || !ip_vs_check_template(ct)) {
@@ -272,14 +272,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
iph->daddr,
ports[1],
dest->addr, dest->port,
- 0,
+ IP_VS_CONN_F_TEMPLATE,
dest);
else
ct = ip_vs_conn_new(iph->protocol,
snet, 0,
iph->daddr, 0,
dest->addr, 0,
- 0,
+ IP_VS_CONN_F_TEMPLATE,
dest);
if (ct == NULL)
return NULL;
@@ -298,10 +298,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* port zero template: <protocol,caddr,0,vaddr,0,daddr,0>
*/
if (svc->fwmark)
- ct = ip_vs_conn_in_get(IPPROTO_IP, snet, 0,
+ ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0,
htonl(svc->fwmark), 0);
else
- ct = ip_vs_conn_in_get(iph->protocol, snet, 0,
+ ct = ip_vs_ct_in_get(iph->protocol, snet, 0,
iph->daddr, 0);
if (!ct || !ip_vs_check_template(ct)) {
@@ -326,14 +326,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
snet, 0,
htonl(svc->fwmark), 0,
dest->addr, 0,
- 0,
+ IP_VS_CONN_F_TEMPLATE,
dest);
else
ct = ip_vs_conn_new(iph->protocol,
snet, 0,
iph->daddr, 0,
dest->addr, 0,
- 0,
+ IP_VS_CONN_F_TEMPLATE,
dest);
if (ct == NULL)
return NULL;
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 574d1f509b4..2e5ced3d806 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -297,16 +297,24 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
for (i=0; i<m->nr_conns; i++) {
+ unsigned flags;
+
s = (struct ip_vs_sync_conn *)p;
- cp = ip_vs_conn_in_get(s->protocol,
- s->caddr, s->cport,
- s->vaddr, s->vport);
+ flags = ntohs(s->flags);
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
+ cp = ip_vs_conn_in_get(s->protocol,
+ s->caddr, s->cport,
+ s->vaddr, s->vport);
+ else
+ cp = ip_vs_ct_in_get(s->protocol,
+ s->caddr, s->cport,
+ s->vaddr, s->vport);
if (!cp) {
cp = ip_vs_conn_new(s->protocol,
s->caddr, s->cport,
s->vaddr, s->vport,
s->daddr, s->dport,
- ntohs(s->flags), NULL);
+ flags, NULL);
if (!cp) {
IP_VS_ERR("ip_vs_conn_new failed\n");
return;
@@ -315,11 +323,11 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
} else if (!cp->dest) {
/* it is an entry created by the synchronization */
cp->state = ntohs(s->state);
- cp->flags = ntohs(s->flags) | IP_VS_CONN_F_HASHED;
+ cp->flags = flags | IP_VS_CONN_F_HASHED;
} /* Note that we don't touch its state and flags
if it is a normal entry. */
- if (ntohs(s->flags) & IP_VS_CONN_F_SEQ_MASK) {
+ if (flags & IP_VS_CONN_F_SEQ_MASK) {
opt = (struct ip_vs_sync_conn_options *)&s[1];
memcpy(&cp->in_seq, opt, sizeof(*opt));
p += FULL_CONN_SIZE;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 30aa8e2ee21..7d917e4ce1d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -51,6 +51,14 @@ config IP_NF_CONNTRACK_EVENTS
IF unsure, say `N'.
+config IP_NF_CONNTRACK_NETLINK
+ tristate 'Connection tracking netlink interface'
+ depends on IP_NF_CONNTRACK && NETFILTER_NETLINK
+ depends on IP_NF_CONNTRACK!=y || NETFILTER_NETLINK!=m
+ help
+ This option enables support for a netlink-based userspace interface
+
+
config IP_NF_CT_PROTO_SCTP
tristate 'SCTP protocol connection tracking support (EXPERIMENTAL)'
depends on IP_NF_CONNTRACK && EXPERIMENTAL
@@ -129,6 +137,23 @@ config IP_NF_AMANDA
To compile it as a module, choose M here. If unsure, say Y.
+config IP_NF_PPTP
+ tristate 'PPTP protocol support'
+ depends on IP_NF_CONNTRACK
+ help
+ This module adds support for PPTP (Point to Point Tunnelling
+ Protocol, RFC2637) connection tracking and NAT.
+
+ If you are running PPTP sessions over a stateful firewall or NAT
+ box, you may want to enable this feature.
+
+ Please note that not all PPTP modes of operation are supported yet.
+ For more info, read top of the file
+ net/ipv4/netfilter/ip_conntrack_pptp.c
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
config IP_NF_QUEUE
tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
help
@@ -474,9 +499,14 @@ config IP_NF_TARGET_LOG
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_ULOG
- tristate "ULOG target support"
+ tristate "ULOG target support (OBSOLETE)"
depends on IP_NF_IPTABLES
---help---
+
+ This option enables the old IPv4-only "ipt_ULOG" implementation
+ which has been obsoleted by the new "nfnetlink_log" code (see
+ CONFIG_NETFILTER_NETLINK_LOG).
+
This option adds a `ULOG' target, which allows you to create rules in
any iptables table. The packet is passed to a userspace logging
daemon using netlink multicast sockets; unlike the LOG target
@@ -513,6 +543,17 @@ config IP_NF_TARGET_TCPMSS
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_NFQUEUE
+ tristate "NFQUEUE Target Support"
+ depends on IP_NF_IPTABLES
+ help
+ This Target replaced the old obsolete QUEUE target.
+
+ As opposed to QUEUE, it supports 65535 different queues,
+ not just one.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
# NAT + specific targets
config IP_NF_NAT
tristate "Full NAT"
@@ -613,6 +654,12 @@ config IP_NF_NAT_AMANDA
default IP_NF_NAT if IP_NF_AMANDA=y
default m if IP_NF_AMANDA=m
+config IP_NF_NAT_PPTP
+ tristate
+ depends on IP_NF_NAT!=n && IP_NF_PPTP!=n
+ default IP_NF_NAT if IP_NF_PPTP=y
+ default m if IP_NF_PPTP=m
+
# mangle + specific targets
config IP_NF_MANGLE
tristate "Packet mangling"
@@ -774,11 +821,5 @@ config IP_NF_ARP_MANGLE
Allows altering the ARP packet payload: source and destination
hardware and network addresses.
-config IP_NF_CONNTRACK_NETLINK
- tristate 'Connection tracking netlink interface'
- depends on IP_NF_CONNTRACK && NETFILTER_NETLINK
- help
- This option enables support for a netlink-based userspace interface
-
endmenu
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 1ba0db74681..dab4b58dd31 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -4,7 +4,11 @@
# objects for the standalone - connection tracking / NAT
ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o
-iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
+ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o
+iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o
+
+ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o
+ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o
# connection tracking
obj-$(CONFIG_IP_NF_CONNTRACK) += ip_conntrack.o
@@ -17,6 +21,7 @@ obj-$(CONFIG_IP_NF_CONNTRACK_NETLINK) += ip_conntrack_netlink.o
obj-$(CONFIG_IP_NF_CT_PROTO_SCTP) += ip_conntrack_proto_sctp.o
# connection tracking helpers
+obj-$(CONFIG_IP_NF_PPTP) += ip_conntrack_pptp.o
obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o
obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o
obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o
@@ -24,6 +29,7 @@ obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o
obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o
# NAT helpers
+obj-$(CONFIG_IP_NF_NAT_PPTP) += ip_nat_pptp.o
obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o
obj-$(CONFIG_IP_NF_NAT_TFTP) += ip_nat_tftp.o
obj-$(CONFIG_IP_NF_NAT_FTP) += ip_nat_ftp.o
@@ -35,7 +41,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
# the three instances of ip_tables
obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o
obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o
-obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
+obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o ip_nat.o
obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
# matches
@@ -87,6 +93,7 @@ obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
+obj-$(CONFIG_IP_NF_TARGET_NFQUEUE) += ipt_NFQUEUE.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
@@ -96,4 +103,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
-obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ipt_NFQUEUE.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fa163425668..a7969286e6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -716,8 +716,10 @@ static int translate_table(const char *name,
}
/* And one copy for every other CPU */
- for (i = 1; i < num_possible_cpus(); i++) {
- memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+ for_each_cpu(i) {
+ if (i == 0)
+ continue;
+ memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
}
@@ -767,7 +769,7 @@ static void get_counters(const struct arpt_table_info *t,
unsigned int cpu;
unsigned int i;
- for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ for_each_cpu(cpu) {
i = 0;
ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
@@ -885,7 +887,8 @@ static int do_replace(void __user *user, unsigned int len)
return -ENOMEM;
newinfo = vmalloc(sizeof(struct arpt_table_info)
- + SMP_ALIGN(tmp.size) * num_possible_cpus());
+ + SMP_ALIGN(tmp.size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
@@ -1158,7 +1161,8 @@ int arpt_register_table(struct arpt_table *table,
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct arpt_table_info)
- + SMP_ALIGN(repl->size) * num_possible_cpus());
+ + SMP_ALIGN(repl->size) *
+ (highest_possible_processor_id()+1));
if (!newinfo) {
ret = -ENOMEM;
return ret;
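
The arp_tables.c changes account for sparse CPU numbering: num_possible_cpus() counts CPUs but is not a valid upper bound for their indices, so per-CPU table copies are now sized by highest_possible_processor_id() + 1 and iterated with for_each_cpu() (the 2.6.14-era one-argument form, which walks cpu_possible_map). A sketch of the difference, assuming a machine whose possible CPUs are {0, 2}; entries_size stands in for tmp.size / repl->size from the hunks:

	/* num_possible_cpus() == 2, but the largest valid CPU index is 2 */
	size = sizeof(struct arpt_table_info)
	       + SMP_ALIGN(entries_size) * (highest_possible_processor_id() + 1);

	for_each_cpu(cpu) {
		/* visits cpu == 0 and cpu == 2; a 0..num_possible_cpus()-1 loop
		 * would wrongly touch the nonexistent cpu == 1 and skip cpu == 2 */
	}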
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index dc20881004b..fa3f914117e 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -65,7 +65,7 @@ static int help(struct sk_buff **pskb,
/* increase the UDP timeout of the master connection as replies from
* Amanda clients to the server can be quite delayed */
- ip_ct_refresh_acct(ct, ctinfo, NULL, master_timeout * HZ);
+ ip_ct_refresh(ct, *pskb, master_timeout * HZ);
/* No data? */
dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 19cba16e6e1..07a80b56e8d 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -233,7 +233,7 @@ __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
/* Just find a expectation corresponding to a tuple. */
struct ip_conntrack_expect *
-ip_conntrack_expect_find_get(const struct ip_conntrack_tuple *tuple)
+ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
{
struct ip_conntrack_expect *i;
@@ -1112,42 +1112,49 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
synchronize_net();
}
-static inline void ct_add_counters(struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
- const struct sk_buff *skb)
-{
-#ifdef CONFIG_IP_NF_CT_ACCT
- if (skb) {
- ct->counters[CTINFO2DIR(ctinfo)].packets++;
- ct->counters[CTINFO2DIR(ctinfo)].bytes +=
- ntohs(skb->nh.iph->tot_len);
- }
-#endif
-}
-
-/* Refresh conntrack for this many jiffies and do accounting (if skb != NULL) */
-void ip_ct_refresh_acct(struct ip_conntrack *ct,
+/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
+void __ip_ct_refresh_acct(struct ip_conntrack *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
- unsigned long extra_jiffies)
+ unsigned long extra_jiffies,
+ int do_acct)
{
+ int event = 0;
+
IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
+ IP_NF_ASSERT(skb);
+
+ write_lock_bh(&ip_conntrack_lock);
/* If not in hash table, timer will not be active yet */
if (!is_confirmed(ct)) {
ct->timeout.expires = extra_jiffies;
- ct_add_counters(ct, ctinfo, skb);
+ event = IPCT_REFRESH;
} else {
- write_lock_bh(&ip_conntrack_lock);
/* Need del_timer for race avoidance (may already be dying). */
if (del_timer(&ct->timeout)) {
ct->timeout.expires = jiffies + extra_jiffies;
add_timer(&ct->timeout);
- ip_conntrack_event_cache(IPCT_REFRESH, skb);
+ event = IPCT_REFRESH;
}
- ct_add_counters(ct, ctinfo, skb);
- write_unlock_bh(&ip_conntrack_lock);
}
+
+#ifdef CONFIG_IP_NF_CT_ACCT
+ if (do_acct) {
+ ct->counters[CTINFO2DIR(ctinfo)].packets++;
+ ct->counters[CTINFO2DIR(ctinfo)].bytes +=
+ ntohs(skb->nh.iph->tot_len);
+ if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
+ || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
+ event |= IPCT_COUNTER_FILLING;
+ }
+#endif
+
+ write_unlock_bh(&ip_conntrack_lock);
+
+ /* must be unlocked when calling event cache */
+ if (event)
+ ip_conntrack_event_cache(event, skb);
}
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
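
__ip_ct_refresh_acct() now takes an explicit do_acct flag, folds the old ct_add_counters() into itself, and defers ip_conntrack_event_cache() until after ip_conntrack_lock is dropped (also signalling IPCT_COUNTER_FILLING when a counter is about to wrap). Callers keep higher-level names: the Amanda helper above, for instance, switches to ip_ct_refresh(), which skips accounting. Those wrappers live in the ip_conntrack header rather than in this file; presumably they look something like this (an assumption based on the new parameter):

static inline void ip_ct_refresh_acct(struct ip_conntrack *ct,
				      enum ip_conntrack_info ctinfo,
				      const struct sk_buff *skb,
				      unsigned long extra_jiffies)
{
	__ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
}

static inline void ip_ct_refresh(struct ip_conntrack *ct,
				 const struct sk_buff *skb,
				 unsigned long extra_jiffies)
{
	__ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}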
diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
new file mode 100644
index 00000000000..926a6684643
--- /dev/null
+++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c
@@ -0,0 +1,806 @@
+/*
+ * ip_conntrack_pptp.c - Version 3.0
+ *
+ * Connection tracking support for PPTP (Point to Point Tunneling Protocol).
+ * PPTP is a protocol for creating virtual private networks.
+ * It is a specification defined by Microsoft and some vendors
+ * working with Microsoft. PPTP is built on top of a modified
+ * version of the Internet Generic Routing Encapsulation Protocol.
+ * GRE is defined in RFC 1701 and RFC 1702. Documentation of
+ * PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ * Limitations:
+ * - We blindly assume that control connections are always
+ * established in PNS->PAC direction. This is a violation
+ * of RFC 2637
+ * - We can only support one single call within each session
+ *
+ * TODO:
+ * - testing of incoming PPTP calls
+ *
+ * Changes:
+ * 2002-02-05 - Version 1.3
+ * - Call ip_conntrack_unexpect_related() from
+ * pptp_destroy_siblings() to destroy expectations in case
+ * CALL_DISCONNECT_NOTIFY or tcp fin packet was seen
+ * (Philip Craig <philipc@snapgear.com>)
+ * - Add Version information at module loadtime
+ * 2002-02-10 - Version 1.6
+ * - move to C99 style initializers
+ * - remove second expectation if first arrives
+ * 2004-10-22 - Version 2.0
+ * - merge Mandrake's 2.6.x port with recent 2.6.x API changes
+ * - fix lots of linear skb assumptions from Mandrake's port
+ * 2005-06-10 - Version 2.1
+ * - use ip_conntrack_expect_free() instead of kfree() on the
+ * expect's (which are from the slab for quite some time)
+ * 2005-06-10 - Version 3.0
+ * - port helper to post-2.6.11 API changes,
+ * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/)
+ * 2005-07-30 - Version 3.1
+ * - port helper to 2.6.13 API changes
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
+
+#define IP_CT_PPTP_VERSION "3.1"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter connection tracking helper module for PPTP");
+
+static DEFINE_SPINLOCK(ip_pptp_lock);
+
+int
+(*ip_nat_pptp_hook_outbound)(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+int
+(*ip_nat_pptp_hook_inbound)(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+int
+(*ip_nat_pptp_hook_exp_gre)(struct ip_conntrack_expect *expect_orig,
+ struct ip_conntrack_expect *expect_reply);
+
+void
+(*ip_nat_pptp_hook_expectfn)(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp);
+
+#if 0
+/* PptpControlMessageType names */
+const char *pptp_msg_name[] = {
+ "UNKNOWN_MESSAGE",
+ "START_SESSION_REQUEST",
+ "START_SESSION_REPLY",
+ "STOP_SESSION_REQUEST",
+ "STOP_SESSION_REPLY",
+ "ECHO_REQUEST",
+ "ECHO_REPLY",
+ "OUT_CALL_REQUEST",
+ "OUT_CALL_REPLY",
+ "IN_CALL_REQUEST",
+ "IN_CALL_REPLY",
+ "IN_CALL_CONNECT",
+ "CALL_CLEAR_REQUEST",
+ "CALL_DISCONNECT_NOTIFY",
+ "WAN_ERROR_NOTIFY",
+ "SET_LINK_INFO"
+};
+EXPORT_SYMBOL(pptp_msg_name);
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define SECS *HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+
+#define PPTP_GRE_TIMEOUT (10 MINS)
+#define PPTP_GRE_STREAM_TIMEOUT (5 HOURS)
+
+static void pptp_expectfn(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp)
+{
+ DEBUGP("increasing timeouts\n");
+
+ /* increase timeout of GRE data channel conntrack entry */
+ ct->proto.gre.timeout = PPTP_GRE_TIMEOUT;
+ ct->proto.gre.stream_timeout = PPTP_GRE_STREAM_TIMEOUT;
+
+ /* Can you see how rusty this code is, compared with the pre-2.6.11
+ * one? That's what happened to my shiny newnat of 2002 ;( -HW */
+
+ if (!ip_nat_pptp_hook_expectfn) {
+ struct ip_conntrack_tuple inv_t;
+ struct ip_conntrack_expect *exp_other;
+
+ /* obviously this tuple inversion only works until you do NAT */
+ invert_tuplepr(&inv_t, &exp->tuple);
+ DEBUGP("trying to unexpect other dir: ");
+ DUMP_TUPLE(&inv_t);
+
+ exp_other = ip_conntrack_expect_find(&inv_t);
+ if (exp_other) {
+ /* delete other expectation. */
+ DEBUGP("found\n");
+ ip_conntrack_unexpect_related(exp_other);
+ ip_conntrack_expect_put(exp_other);
+ } else {
+ DEBUGP("not found\n");
+ }
+ } else {
+ /* we need more than simple inversion */
+ ip_nat_pptp_hook_expectfn(ct, exp);
+ }
+}
+
+static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t)
+{
+ struct ip_conntrack_tuple_hash *h;
+ struct ip_conntrack_expect *exp;
+
+ DEBUGP("trying to timeout ct or exp for tuple ");
+ DUMP_TUPLE(t);
+
+ h = ip_conntrack_find_get(t, NULL);
+ if (h) {
+ struct ip_conntrack *sibling = tuplehash_to_ctrack(h);
+ DEBUGP("setting timeout of conntrack %p to 0\n", sibling);
+ sibling->proto.gre.timeout = 0;
+ sibling->proto.gre.stream_timeout = 0;
+ if (del_timer(&sibling->timeout))
+ sibling->timeout.function((unsigned long)sibling);
+ ip_conntrack_put(sibling);
+ return 1;
+ } else {
+ exp = ip_conntrack_expect_find(t);
+ if (exp) {
+ DEBUGP("unexpect_related of expect %p\n", exp);
+ ip_conntrack_unexpect_related(exp);
+ ip_conntrack_expect_put(exp);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+
+/* timeout GRE data connections */
+static void pptp_destroy_siblings(struct ip_conntrack *ct)
+{
+ struct ip_conntrack_tuple t;
+
+ /* Since ct->sibling_list has literally rusted away in 2.6.11,
+ * we now need another way to find out about our sibling
+ * contrack and expects... -HW */
+
+ /* try original (pns->pac) tuple */
+ memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
+ t.dst.protonum = IPPROTO_GRE;
+ t.src.u.gre.key = htons(ct->help.ct_pptp_info.pns_call_id);
+ t.dst.u.gre.key = htons(ct->help.ct_pptp_info.pac_call_id);
+
+ if (!destroy_sibling_or_exp(&t))
+ DEBUGP("failed to timeout original pns->pac ct/exp\n");
+
+ /* try reply (pac->pns) tuple */
+ memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
+ t.dst.protonum = IPPROTO_GRE;
+ t.src.u.gre.key = htons(ct->help.ct_pptp_info.pac_call_id);
+ t.dst.u.gre.key = htons(ct->help.ct_pptp_info.pns_call_id);
+
+ if (!destroy_sibling_or_exp(&t))
+ DEBUGP("failed to timeout reply pac->pns ct/exp\n");
+}
+
+/* expect GRE connections (PNS->PAC and PAC->PNS direction) */
+static inline int
+exp_gre(struct ip_conntrack *master,
+ u_int32_t seq,
+ __be16 callid,
+ __be16 peer_callid)
+{
+ struct ip_conntrack_tuple inv_tuple;
+ struct ip_conntrack_tuple exp_tuples[] = {
+ /* tuple in original direction, PNS->PAC */
+ { .src = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip,
+ .u = { .gre = { .key = peer_callid } }
+ },
+ .dst = { .ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip,
+ .u = { .gre = { .key = callid } },
+ .protonum = IPPROTO_GRE
+ },
+ },
+ /* tuple in reply direction, PAC->PNS */
+ { .src = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip,
+ .u = { .gre = { .key = callid } }
+ },
+ .dst = { .ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip,
+ .u = { .gre = { .key = peer_callid } },
+ .protonum = IPPROTO_GRE
+ },
+ }
+ };
+ struct ip_conntrack_expect *exp_orig, *exp_reply;
+ int ret = 1;
+
+ exp_orig = ip_conntrack_expect_alloc(master);
+ if (exp_orig == NULL)
+ goto out;
+
+ exp_reply = ip_conntrack_expect_alloc(master);
+ if (exp_reply == NULL)
+ goto out_put_orig;
+
+ memcpy(&exp_orig->tuple, &exp_tuples[0], sizeof(exp_orig->tuple));
+
+ exp_orig->mask.src.ip = 0xffffffff;
+ exp_orig->mask.src.u.all = 0;
+ exp_orig->mask.dst.u.all = 0;
+ exp_orig->mask.dst.u.gre.key = htons(0xffff);
+ exp_orig->mask.dst.ip = 0xffffffff;
+ exp_orig->mask.dst.protonum = 0xff;
+
+ exp_orig->master = master;
+ exp_orig->expectfn = pptp_expectfn;
+ exp_orig->flags = 0;
+
+ exp_orig->dir = IP_CT_DIR_ORIGINAL;
+
+ /* both expectations are identical apart from tuple */
+ memcpy(exp_reply, exp_orig, sizeof(*exp_reply));
+ memcpy(&exp_reply->tuple, &exp_tuples[1], sizeof(exp_reply->tuple));
+
+ exp_reply->dir = !exp_orig->dir;
+
+ if (ip_nat_pptp_hook_exp_gre)
+ ret = ip_nat_pptp_hook_exp_gre(exp_orig, exp_reply);
+ else {
+
+ DEBUGP("calling expect_related PNS->PAC");
+ DUMP_TUPLE(&exp_orig->tuple);
+
+ if (ip_conntrack_expect_related(exp_orig) != 0) {
+ DEBUGP("cannot expect_related()\n");
+ goto out_put_both;
+ }
+
+ DEBUGP("calling expect_related PAC->PNS");
+ DUMP_TUPLE(&exp_reply->tuple);
+
+ if (ip_conntrack_expect_related(exp_reply) != 0) {
+ DEBUGP("cannot expect_related()\n");
+ goto out_unexpect_orig;
+ }
+
+ /* Add GRE keymap entries */
+ if (ip_ct_gre_keymap_add(master, &exp_reply->tuple, 0) != 0) {
+ DEBUGP("cannot keymap_add() exp\n");
+ goto out_unexpect_both;
+ }
+
+ invert_tuplepr(&inv_tuple, &exp_reply->tuple);
+ if (ip_ct_gre_keymap_add(master, &inv_tuple, 1) != 0) {
+ ip_ct_gre_keymap_destroy(master);
+ DEBUGP("cannot keymap_add() exp_inv\n");
+ goto out_unexpect_both;
+ }
+ ret = 0;
+ }
+
+out_put_both:
+ ip_conntrack_expect_put(exp_reply);
+out_put_orig:
+ ip_conntrack_expect_put(exp_orig);
+out:
+ return ret;
+
+out_unexpect_both:
+ ip_conntrack_unexpect_related(exp_reply);
+out_unexpect_orig:
+ ip_conntrack_unexpect_related(exp_orig);
+ goto out_put_both;
+}
+
+static inline int
+pptp_inbound_pkt(struct sk_buff **pskb,
+ struct tcphdr *tcph,
+ unsigned int nexthdr_off,
+ unsigned int datalen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct PptpControlHeader _ctlh, *ctlh;
+ unsigned int reqlen;
+ union pptp_ctrl_union _pptpReq, *pptpReq;
+ struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
+ u_int16_t msg;
+ __be16 *cid, *pcid;
+ u_int32_t seq;
+
+ ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
+ if (!ctlh) {
+ DEBUGP("error during skb_header_pointer\n");
+ return NF_ACCEPT;
+ }
+ nexthdr_off += sizeof(_ctlh);
+ datalen -= sizeof(_ctlh);
+
+ reqlen = datalen;
+ if (reqlen > sizeof(*pptpReq))
+ reqlen = sizeof(*pptpReq);
+ pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq);
+ if (!pptpReq) {
+ DEBUGP("error during skb_header_pointer\n");
+ return NF_ACCEPT;
+ }
+
+ msg = ntohs(ctlh->messageType);
+ DEBUGP("inbound control message %s\n", pptp_msg_name[msg]);
+
+ switch (msg) {
+ case PPTP_START_SESSION_REPLY:
+ if (reqlen < sizeof(_pptpReq.srep)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server confirms new control session */
+ if (info->sstate < PPTP_SESSION_REQUESTED) {
+ DEBUGP("%s without START_SESS_REQUEST\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->srep.resultCode == PPTP_START_OK)
+ info->sstate = PPTP_SESSION_CONFIRMED;
+ else
+ info->sstate = PPTP_SESSION_ERROR;
+ break;
+
+ case PPTP_STOP_SESSION_REPLY:
+ if (reqlen < sizeof(_pptpReq.strep)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server confirms end of control session */
+ if (info->sstate > PPTP_SESSION_STOPREQ) {
+ DEBUGP("%s without STOP_SESS_REQUEST\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->strep.resultCode == PPTP_STOP_OK)
+ info->sstate = PPTP_SESSION_NONE;
+ else
+ info->sstate = PPTP_SESSION_ERROR;
+ break;
+
+ case PPTP_OUT_CALL_REPLY:
+ if (reqlen < sizeof(_pptpReq.ocack)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server accepted call, we now expect GRE frames */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n", pptp_msg_name[msg]);
+ break;
+ }
+ if (info->cstate != PPTP_CALL_OUT_REQ &&
+ info->cstate != PPTP_CALL_OUT_CONF) {
+ DEBUGP("%s without OUTCALL_REQ\n", pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->ocack.resultCode != PPTP_OUTCALL_CONNECT) {
+ info->cstate = PPTP_CALL_NONE;
+ break;
+ }
+
+ cid = &pptpReq->ocack.callID;
+ pcid = &pptpReq->ocack.peersCallID;
+
+ info->pac_call_id = ntohs(*cid);
+
+ if (htons(info->pns_call_id) != *pcid) {
+ DEBUGP("%s for unknown callid %u\n",
+ pptp_msg_name[msg], ntohs(*pcid));
+ break;
+ }
+
+ DEBUGP("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+ ntohs(*cid), ntohs(*pcid));
+
+ info->cstate = PPTP_CALL_OUT_CONF;
+
+ seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
+ + sizeof(struct PptpControlHeader)
+ + ((void *)pcid - (void *)pptpReq);
+
+ if (exp_gre(ct, seq, *cid, *pcid) != 0)
+ printk("ip_conntrack_pptp: error during exp_gre\n");
+ break;
+
+ case PPTP_IN_CALL_REQUEST:
+ if (reqlen < sizeof(_pptpReq.icack)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server tells us about incoming call request */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n", pptp_msg_name[msg]);
+ break;
+ }
+ pcid = &pptpReq->icack.peersCallID;
+ DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(*pcid));
+ info->cstate = PPTP_CALL_IN_REQ;
+ info->pac_call_id = ntohs(*pcid);
+ break;
+
+ case PPTP_IN_CALL_CONNECT:
+ if (reqlen < sizeof(_pptpReq.iccon)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server tells us about incoming call established */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n", pptp_msg_name[msg]);
+ break;
+ }
+ if (info->sstate != PPTP_CALL_IN_REP
+ && info->sstate != PPTP_CALL_IN_CONF) {
+ DEBUGP("%s but never sent IN_CALL_REPLY\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+
+ pcid = &pptpReq->iccon.peersCallID;
+ cid = &info->pac_call_id;
+
+ if (info->pns_call_id != ntohs(*pcid)) {
+ DEBUGP("%s for unknown CallID %u\n",
+ pptp_msg_name[msg], ntohs(*pcid));
+ break;
+ }
+
+ DEBUGP("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(*pcid));
+ info->cstate = PPTP_CALL_IN_CONF;
+
+ /* we expect a GRE connection from PAC to PNS */
+ seq = ntohl(tcph->seq) + sizeof(struct pptp_pkt_hdr)
+ + sizeof(struct PptpControlHeader)
+ + ((void *)pcid - (void *)pptpReq);
+
+ if (exp_gre(ct, seq, *cid, *pcid) != 0)
+ printk("ip_conntrack_pptp: error during exp_gre\n");
+
+ break;
+
+ case PPTP_CALL_DISCONNECT_NOTIFY:
+ if (reqlen < sizeof(_pptpReq.disc)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* server confirms disconnect */
+ cid = &pptpReq->disc.callID;
+ DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*cid));
+ info->cstate = PPTP_CALL_NONE;
+
+ /* untrack this call id, unexpect GRE packets */
+ pptp_destroy_siblings(ct);
+ break;
+
+ case PPTP_WAN_ERROR_NOTIFY:
+ break;
+
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* I don't have to explain these ;) */
+ break;
+ default:
+ DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)
+ ? pptp_msg_name[msg]:pptp_msg_name[0], msg);
+ break;
+ }
+
+
+ if (ip_nat_pptp_hook_inbound)
+ return ip_nat_pptp_hook_inbound(pskb, ct, ctinfo, ctlh,
+ pptpReq);
+
+ return NF_ACCEPT;
+
+}
+
+static inline int
+pptp_outbound_pkt(struct sk_buff **pskb,
+ struct tcphdr *tcph,
+ unsigned int nexthdr_off,
+ unsigned int datalen,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct PptpControlHeader _ctlh, *ctlh;
+ unsigned int reqlen;
+ union pptp_ctrl_union _pptpReq, *pptpReq;
+ struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
+ u_int16_t msg;
+ __be16 *cid, *pcid;
+
+ ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh);
+ if (!ctlh)
+ return NF_ACCEPT;
+ nexthdr_off += sizeof(_ctlh);
+ datalen -= sizeof(_ctlh);
+
+ reqlen = datalen;
+ if (reqlen > sizeof(*pptpReq))
+ reqlen = sizeof(*pptpReq);
+ pptpReq = skb_header_pointer(*pskb, nexthdr_off, reqlen, &_pptpReq);
+ if (!pptpReq)
+ return NF_ACCEPT;
+
+ msg = ntohs(ctlh->messageType);
+ DEBUGP("outbound control message %s\n", pptp_msg_name[msg]);
+
+ switch (msg) {
+ case PPTP_START_SESSION_REQUEST:
+ /* client requests for new control session */
+ if (info->sstate != PPTP_SESSION_NONE) {
+ DEBUGP("%s but we already have one",
+ pptp_msg_name[msg]);
+ }
+ info->sstate = PPTP_SESSION_REQUESTED;
+ break;
+ case PPTP_STOP_SESSION_REQUEST:
+ /* client requests end of control session */
+ info->sstate = PPTP_SESSION_STOPREQ;
+ break;
+
+ case PPTP_OUT_CALL_REQUEST:
+ if (reqlen < sizeof(_pptpReq.ocreq)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ /* FIXME: break; */
+ }
+
+ /* client initiating connection to server */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("%s but no session\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ info->cstate = PPTP_CALL_OUT_REQ;
+ /* track PNS call id */
+ cid = &pptpReq->ocreq.callID;
+ DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*cid));
+ info->pns_call_id = ntohs(*cid);
+ break;
+ case PPTP_IN_CALL_REPLY:
+ if (reqlen < sizeof(_pptpReq.icack)) {
+ DEBUGP("%s: short packet\n", pptp_msg_name[msg]);
+ break;
+ }
+
+ /* client answers incoming call */
+ if (info->cstate != PPTP_CALL_IN_REQ
+ && info->cstate != PPTP_CALL_IN_REP) {
+ DEBUGP("%s without incall_req\n",
+ pptp_msg_name[msg]);
+ break;
+ }
+ if (pptpReq->icack.resultCode != PPTP_INCALL_ACCEPT) {
+ info->cstate = PPTP_CALL_NONE;
+ break;
+ }
+ pcid = &pptpReq->icack.peersCallID;
+ if (info->pac_call_id != ntohs(*pcid)) {
+ DEBUGP("%s for unknown call %u\n",
+ pptp_msg_name[msg], ntohs(*pcid));
+ break;
+ }
+ DEBUGP("%s, CID=%X\n", pptp_msg_name[msg], ntohs(*pcid));
+ /* part two of the three-way handshake */
+ info->cstate = PPTP_CALL_IN_REP;
+ info->pns_call_id = ntohs(pptpReq->icack.callID);
+ break;
+
+ case PPTP_CALL_CLEAR_REQUEST:
+ /* client requests hangup of call */
+ if (info->sstate != PPTP_SESSION_CONFIRMED) {
+ DEBUGP("CLEAR_CALL but no session\n");
+ break;
+ }
+ /* FUTURE: iterate over all calls and check if
+ * call ID is valid. We don't do this without newnat,
+ * because we only know about last call */
+ info->cstate = PPTP_CALL_CLEAR_REQ;
+ break;
+ case PPTP_SET_LINK_INFO:
+ break;
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* I don't have to explain these ;) */
+ break;
+ default:
+ DEBUGP("invalid %s (TY=%d)\n", (msg <= PPTP_MSG_MAX)?
+ pptp_msg_name[msg]:pptp_msg_name[0], msg);
+ /* unknown: no need to create GRE masq table entry */
+ break;
+ }
+
+ if (ip_nat_pptp_hook_outbound)
+ return ip_nat_pptp_hook_outbound(pskb, ct, ctinfo, ctlh,
+ pptpReq);
+
+ return NF_ACCEPT;
+}
+
+
+/* track caller id inside control connection, call expect_related */
+static int
+conntrack_pptp_help(struct sk_buff **pskb,
+ struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
+
+{
+ struct pptp_pkt_hdr _pptph, *pptph;
+ struct tcphdr _tcph, *tcph;
+ u_int32_t tcplen = (*pskb)->len - (*pskb)->nh.iph->ihl * 4;
+ u_int32_t datalen;
+ int dir = CTINFO2DIR(ctinfo);
+ struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info;
+ unsigned int nexthdr_off;
+
+ int oldsstate, oldcstate;
+ int ret;
+
+ /* don't do any tracking before tcp handshake complete */
+ if (ctinfo != IP_CT_ESTABLISHED
+ && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+ DEBUGP("ctinfo = %u, skipping\n", ctinfo);
+ return NF_ACCEPT;
+ }
+
+ nexthdr_off = (*pskb)->nh.iph->ihl*4;
+ tcph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_tcph), &_tcph);
+ BUG_ON(!tcph);
+ nexthdr_off += tcph->doff * 4;
+ datalen = tcplen - tcph->doff * 4;
+
+ if (tcph->fin || tcph->rst) {
+ DEBUGP("RST/FIN received, timeouting GRE\n");
+ /* can't do this after real newnat */
+ info->cstate = PPTP_CALL_NONE;
+
+ /* untrack this call id, unexpect GRE packets */
+ pptp_destroy_siblings(ct);
+ }
+
+ pptph = skb_header_pointer(*pskb, nexthdr_off, sizeof(_pptph), &_pptph);
+ if (!pptph) {
+ DEBUGP("no full PPTP header, can't track\n");
+ return NF_ACCEPT;
+ }
+ nexthdr_off += sizeof(_pptph);
+ datalen -= sizeof(_pptph);
+
+ /* if it's not a control message we can't do anything with it */
+ if (ntohs(pptph->packetType) != PPTP_PACKET_CONTROL ||
+ ntohl(pptph->magicCookie) != PPTP_MAGIC_COOKIE) {
+ DEBUGP("not a control packet\n");
+ return NF_ACCEPT;
+ }
+
+ oldsstate = info->sstate;
+ oldcstate = info->cstate;
+
+ spin_lock_bh(&ip_pptp_lock);
+
+ /* FIXME: We just blindly assume that the control connection is always
+	 * established from PNS->PAC. However, the RFC makes no such guarantee */
+ if (dir == IP_CT_DIR_ORIGINAL)
+ /* client -> server (PNS -> PAC) */
+ ret = pptp_outbound_pkt(pskb, tcph, nexthdr_off, datalen, ct,
+ ctinfo);
+ else
+ /* server -> client (PAC -> PNS) */
+ ret = pptp_inbound_pkt(pskb, tcph, nexthdr_off, datalen, ct,
+ ctinfo);
+ DEBUGP("sstate: %d->%d, cstate: %d->%d\n",
+ oldsstate, info->sstate, oldcstate, info->cstate);
+ spin_unlock_bh(&ip_pptp_lock);
+
+ return ret;
+}
+
+/* control protocol helper */
+static struct ip_conntrack_helper pptp = {
+ .list = { NULL, NULL },
+ .name = "pptp",
+ .me = THIS_MODULE,
+ .max_expected = 2,
+ .timeout = 5 * 60,
+ .tuple = { .src = { .ip = 0,
+ .u = { .tcp = { .port =
+ __constant_htons(PPTP_CONTROL_PORT) } }
+ },
+ .dst = { .ip = 0,
+ .u = { .all = 0 },
+ .protonum = IPPROTO_TCP
+ }
+ },
+ .mask = { .src = { .ip = 0,
+ .u = { .tcp = { .port = __constant_htons(0xffff) } }
+ },
+ .dst = { .ip = 0,
+ .u = { .all = 0 },
+ .protonum = 0xff
+ }
+ },
+ .help = conntrack_pptp_help
+};
+
+extern void __exit ip_ct_proto_gre_fini(void);
+extern int __init ip_ct_proto_gre_init(void);
+
+/* ip_conntrack_pptp initialization */
+static int __init init(void)
+{
+ int retcode;
+
+ retcode = ip_ct_proto_gre_init();
+ if (retcode < 0)
+ return retcode;
+
+ DEBUGP(" registering helper\n");
+ if ((retcode = ip_conntrack_helper_register(&pptp))) {
+ printk(KERN_ERR "Unable to register conntrack application "
+ "helper for pptp: %d\n", retcode);
+ ip_ct_proto_gre_fini();
+ return retcode;
+ }
+
+ printk("ip_conntrack_pptp version %s loaded\n", IP_CT_PPTP_VERSION);
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ ip_conntrack_helper_unregister(&pptp);
+ ip_ct_proto_gre_fini();
+ printk("ip_conntrack_pptp version %s unloaded\n", IP_CT_PPTP_VERSION);
+}
+
+module_init(init);
+module_exit(fini);
+
+EXPORT_SYMBOL(ip_nat_pptp_hook_outbound);
+EXPORT_SYMBOL(ip_nat_pptp_hook_inbound);
+EXPORT_SYMBOL(ip_nat_pptp_hook_exp_gre);
+EXPORT_SYMBOL(ip_nat_pptp_hook_expectfn);
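For orientation, the two fixed-size headers that conntrack_pptp_help() and pptp_outbound_pkt() pull out with skb_header_pointer() look roughly like the sketch below. The field names mirror the ones used in the code (packetLength, packetType, magicCookie, messageType), but the authoritative definitions live in <linux/netfilter_ipv4/ip_conntrack_pptp.h>; treat this as an illustrative sketch, not the real layout.

/* Illustrative sketch only -- not the definitions from ip_conntrack_pptp.h.
 * All fields are big-endian on the wire, hence the ntohs()/ntohl() calls
 * in the helper above. */
#include <linux/types.h>

struct pptp_pkt_hdr_sketch {
	__be16 packetLength;	/* total length of the PPTP message */
	__be16 packetType;	/* PPTP_PACKET_CONTROL for control messages */
	__be32 magicCookie;	/* fixed cookie, 0x1a2b3c4d per RFC 2637 */
};

struct PptpControlHeader_sketch {
	__be16 messageType;	/* e.g. PPTP_OUT_CALL_REQUEST, PPTP_ECHO_REQUEST */
	__be16 reserved;
};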
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
index 71ef19d126d..186646eb249 100644
--- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
+++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
@@ -58,7 +58,7 @@ static int help(struct sk_buff **pskb,
goto out;
rcu_read_lock();
- in_dev = __in_dev_get(rt->u.dst.dev);
+ in_dev = __in_dev_get_rcu(rt->u.dst.dev);
if (in_dev != NULL) {
for_primary_ifa(in_dev) {
if (ifa->ifa_broadcast == iph->daddr) {
@@ -91,7 +91,7 @@ static int help(struct sk_buff **pskb,
ip_conntrack_expect_related(exp);
ip_conntrack_expect_put(exp);
- ip_ct_refresh_acct(ct, ctinfo, NULL, timeout * HZ);
+ ip_ct_refresh(ct, *pskb, timeout * HZ);
out:
return NF_ACCEPT;
}
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 15aef356474..166e6069f12 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -177,11 +177,11 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct,
struct nfattr *nest_count = NFA_NEST(skb, type);
u_int64_t tmp;
- tmp = cpu_to_be64(ct->counters[dir].packets);
- NFA_PUT(skb, CTA_COUNTERS_PACKETS, sizeof(u_int64_t), &tmp);
+ tmp = htonl(ct->counters[dir].packets);
+ NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp);
- tmp = cpu_to_be64(ct->counters[dir].bytes);
- NFA_PUT(skb, CTA_COUNTERS_BYTES, sizeof(u_int64_t), &tmp);
+ tmp = htonl(ct->counters[dir].bytes);
+ NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp);
NFA_NEST_END(skb, nest_count);
@@ -833,7 +833,8 @@ out:
static inline int
ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[])
{
- unsigned long d, status = *(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]);
+ unsigned long d;
+ unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]));
d = ct->status ^ status;
if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
@@ -948,6 +949,31 @@ ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[])
return 0;
}
+static inline int
+ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])
+{
+ struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1];
+ struct ip_conntrack_protocol *proto;
+ u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
+ int err = 0;
+
+ if (nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr) < 0)
+ goto nfattr_failure;
+
+ proto = ip_conntrack_proto_find_get(npt);
+ if (!proto)
+ return -EINVAL;
+
+ if (proto->from_nfattr)
+ err = proto->from_nfattr(tb, ct);
+ ip_conntrack_proto_put(proto);
+
+ return err;
+
+nfattr_failure:
+ return -ENOMEM;
+}
+
static int
ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
{
@@ -973,6 +999,12 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
return err;
}
+ if (cda[CTA_PROTOINFO-1]) {
+ err = ctnetlink_change_protoinfo(ct, cda);
+ if (err < 0)
+ return err;
+ }
+
DEBUGP("all done\n");
return 0;
}
@@ -1002,6 +1034,12 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
if (err < 0)
goto err;
+ if (cda[CTA_PROTOINFO-1]) {
+ err = ctnetlink_change_protoinfo(ct, cda);
+ if (err < 0)
+ return err;
+ }
+
ct->helper = ip_conntrack_helper_find_get(rtuple);
add_timer(&ct->timeout);
@@ -1270,7 +1308,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- exp = ip_conntrack_expect_find_get(&tuple);
+ exp = ip_conntrack_expect_find(&tuple);
if (!exp)
return -ENOENT;
@@ -1318,7 +1356,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
return err;
/* bump usage count to 2 */
- exp = ip_conntrack_expect_find_get(&tuple);
+ exp = ip_conntrack_expect_find(&tuple);
if (!exp)
return -ENOENT;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
new file mode 100644
index 00000000000..744abb9d377
--- /dev/null
+++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c
@@ -0,0 +1,328 @@
+/*
+ * ip_conntrack_proto_gre.c - Version 3.0
+ *
+ * Connection tracking protocol helper module for GRE.
+ *
+ * GRE is a generic encapsulation protocol, which is generally not very
+ * well suited for NAT, as it has no protocol-specific part such as port numbers.
+ *
+ * It has an optional key field, which may help us distinguish two
+ * connections between the same two hosts.
+ *
+ * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
+ *
+ * PPTP is built on top of a modified version of GRE, and has a mandatory
+ * field called "CallID", which serves the same purpose for us as the key
+ * field in plain GRE.
+ *
+ * Documentation about PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/list.h>
+
+static DEFINE_RWLOCK(ip_ct_gre_lock);
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+
+#include <linux/netfilter_ipv4/listhelp.h>
+#include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("netfilter connection tracking protocol helper for GRE");
+
+/* shamelessly stolen from ip_conntrack_proto_udp.c */
+#define GRE_TIMEOUT (30*HZ)
+#define GRE_STREAM_TIMEOUT (180*HZ)
+
+#if 0
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, __FUNCTION__, ## args)
+#define DUMP_TUPLE_GRE(x) printk("%u.%u.%u.%u:0x%x -> %u.%u.%u.%u:0x%x\n", \
+ NIPQUAD((x)->src.ip), ntohs((x)->src.u.gre.key), \
+ NIPQUAD((x)->dst.ip), ntohs((x)->dst.u.gre.key))
+#else
+#define DEBUGP(x, args...)
+#define DUMP_TUPLE_GRE(x)
+#endif
+
+/* GRE KEYMAP HANDLING FUNCTIONS */
+static LIST_HEAD(gre_keymap_list);
+
+static inline int gre_key_cmpfn(const struct ip_ct_gre_keymap *km,
+ const struct ip_conntrack_tuple *t)
+{
+ return ((km->tuple.src.ip == t->src.ip) &&
+ (km->tuple.dst.ip == t->dst.ip) &&
+ (km->tuple.dst.protonum == t->dst.protonum) &&
+ (km->tuple.dst.u.all == t->dst.u.all));
+}
+
+/* look up the source key for a given tuple */
+static u_int32_t gre_keymap_lookup(struct ip_conntrack_tuple *t)
+{
+ struct ip_ct_gre_keymap *km;
+ u_int32_t key = 0;
+
+ read_lock_bh(&ip_ct_gre_lock);
+ km = LIST_FIND(&gre_keymap_list, gre_key_cmpfn,
+ struct ip_ct_gre_keymap *, t);
+ if (km)
+ key = km->tuple.src.u.gre.key;
+ read_unlock_bh(&ip_ct_gre_lock);
+
+ DEBUGP("lookup src key 0x%x up key for ", key);
+ DUMP_TUPLE_GRE(t);
+
+ return key;
+}
+
+/* add a single keymap entry, associate with specified master ct */
+int
+ip_ct_gre_keymap_add(struct ip_conntrack *ct,
+ struct ip_conntrack_tuple *t, int reply)
+{
+ struct ip_ct_gre_keymap **exist_km, *km, *old;
+
+ if (!ct->helper || strcmp(ct->helper->name, "pptp")) {
+ DEBUGP("refusing to add GRE keymap to non-pptp session\n");
+ return -1;
+ }
+
+ if (!reply)
+ exist_km = &ct->help.ct_pptp_info.keymap_orig;
+ else
+ exist_km = &ct->help.ct_pptp_info.keymap_reply;
+
+ if (*exist_km) {
+ /* check whether it's a retransmission */
+ old = LIST_FIND(&gre_keymap_list, gre_key_cmpfn,
+ struct ip_ct_gre_keymap *, t);
+ if (old == *exist_km) {
+ DEBUGP("retransmission\n");
+ return 0;
+ }
+
+ DEBUGP("trying to override keymap_%s for ct %p\n",
+ reply? "reply":"orig", ct);
+ return -EEXIST;
+ }
+
+ km = kmalloc(sizeof(*km), GFP_ATOMIC);
+ if (!km)
+ return -ENOMEM;
+
+ memcpy(&km->tuple, t, sizeof(*t));
+ *exist_km = km;
+
+ DEBUGP("adding new entry %p: ", km);
+ DUMP_TUPLE_GRE(&km->tuple);
+
+ write_lock_bh(&ip_ct_gre_lock);
+ list_append(&gre_keymap_list, km);
+ write_unlock_bh(&ip_ct_gre_lock);
+
+ return 0;
+}
+
+/* destroy the keymap entries associated with specified master ct */
+void ip_ct_gre_keymap_destroy(struct ip_conntrack *ct)
+{
+ DEBUGP("entering for ct %p\n", ct);
+
+ if (!ct->helper || strcmp(ct->helper->name, "pptp")) {
+ DEBUGP("refusing to destroy GRE keymap to non-pptp session\n");
+ return;
+ }
+
+ write_lock_bh(&ip_ct_gre_lock);
+ if (ct->help.ct_pptp_info.keymap_orig) {
+ DEBUGP("removing %p from list\n",
+ ct->help.ct_pptp_info.keymap_orig);
+ list_del(&ct->help.ct_pptp_info.keymap_orig->list);
+ kfree(ct->help.ct_pptp_info.keymap_orig);
+ ct->help.ct_pptp_info.keymap_orig = NULL;
+ }
+ if (ct->help.ct_pptp_info.keymap_reply) {
+ DEBUGP("removing %p from list\n",
+ ct->help.ct_pptp_info.keymap_reply);
+ list_del(&ct->help.ct_pptp_info.keymap_reply->list);
+ kfree(ct->help.ct_pptp_info.keymap_reply);
+ ct->help.ct_pptp_info.keymap_reply = NULL;
+ }
+ write_unlock_bh(&ip_ct_gre_lock);
+}
+
+
+/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */
+
+/* invert gre part of tuple */
+static int gre_invert_tuple(struct ip_conntrack_tuple *tuple,
+ const struct ip_conntrack_tuple *orig)
+{
+ tuple->dst.u.gre.key = orig->src.u.gre.key;
+ tuple->src.u.gre.key = orig->dst.u.gre.key;
+
+ return 1;
+}
+
+/* gre hdr info to tuple */
+static int gre_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct ip_conntrack_tuple *tuple)
+{
+ struct gre_hdr_pptp _pgrehdr, *pgrehdr;
+ u_int32_t srckey;
+ struct gre_hdr _grehdr, *grehdr;
+
+ /* first only delinearize old RFC1701 GRE header */
+ grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
+ if (!grehdr || grehdr->version != GRE_VERSION_PPTP) {
+ /* try to behave like "ip_conntrack_proto_generic" */
+ tuple->src.u.all = 0;
+ tuple->dst.u.all = 0;
+ return 1;
+ }
+
+ /* PPTP header is variable length, only need up to the call_id field */
+ pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
+ if (!pgrehdr)
+ return 1;
+
+ if (ntohs(grehdr->protocol) != GRE_PROTOCOL_PPTP) {
+ DEBUGP("GRE_VERSION_PPTP but unknown proto\n");
+ return 0;
+ }
+
+ tuple->dst.u.gre.key = pgrehdr->call_id;
+ srckey = gre_keymap_lookup(tuple);
+ tuple->src.u.gre.key = srckey;
+
+ return 1;
+}
+
+/* print gre part of tuple */
+static int gre_print_tuple(struct seq_file *s,
+ const struct ip_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "srckey=0x%x dstkey=0x%x ",
+ ntohs(tuple->src.u.gre.key),
+ ntohs(tuple->dst.u.gre.key));
+}
+
+/* print private data for conntrack */
+static int gre_print_conntrack(struct seq_file *s,
+ const struct ip_conntrack *ct)
+{
+ return seq_printf(s, "timeout=%u, stream_timeout=%u ",
+ (ct->proto.gre.timeout / HZ),
+ (ct->proto.gre.stream_timeout / HZ));
+}
+
+/* Returns verdict for packet, and may modify conntrack */
+static int gre_packet(struct ip_conntrack *ct,
+ const struct sk_buff *skb,
+ enum ip_conntrack_info conntrackinfo)
+{
+ /* If we've seen traffic both ways, this is a GRE connection.
+ * Extend timeout. */
+ if (ct->status & IPS_SEEN_REPLY) {
+ ip_ct_refresh_acct(ct, conntrackinfo, skb,
+ ct->proto.gre.stream_timeout);
+ /* Also, more likely to be important, and not a probe. */
+ set_bit(IPS_ASSURED_BIT, &ct->status);
+ ip_conntrack_event_cache(IPCT_STATUS, skb);
+ } else
+ ip_ct_refresh_acct(ct, conntrackinfo, skb,
+ ct->proto.gre.timeout);
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int gre_new(struct ip_conntrack *ct,
+ const struct sk_buff *skb)
+{
+ DEBUGP(": ");
+ DUMP_TUPLE_GRE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+
+	/* initialize to sane values. Ideally a conntrack helper
+	 * (e.g. in the case of pptp) will increase them */
+ ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
+ ct->proto.gre.timeout = GRE_TIMEOUT;
+
+ return 1;
+}
+
+/* Called when a conntrack entry has already been removed from the hashes
+ * and is about to be deleted from memory */
+static void gre_destroy(struct ip_conntrack *ct)
+{
+ struct ip_conntrack *master = ct->master;
+ DEBUGP(" entering\n");
+
+ if (!master)
+ DEBUGP("no master !?!\n");
+ else
+ ip_ct_gre_keymap_destroy(master);
+}
+
+/* protocol helper struct */
+static struct ip_conntrack_protocol gre = {
+ .proto = IPPROTO_GRE,
+ .name = "gre",
+ .pkt_to_tuple = gre_pkt_to_tuple,
+ .invert_tuple = gre_invert_tuple,
+ .print_tuple = gre_print_tuple,
+ .print_conntrack = gre_print_conntrack,
+ .packet = gre_packet,
+ .new = gre_new,
+ .destroy = gre_destroy,
+ .me = THIS_MODULE,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+ .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
+ .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
+#endif
+};
+
+/* ip_conntrack_proto_gre initialization */
+int __init ip_ct_proto_gre_init(void)
+{
+ return ip_conntrack_protocol_register(&gre);
+}
+
+void __exit ip_ct_proto_gre_fini(void)
+{
+ struct list_head *pos, *n;
+
+ /* delete all keymap entries */
+ write_lock_bh(&ip_ct_gre_lock);
+ list_for_each_safe(pos, n, &gre_keymap_list) {
+ DEBUGP("deleting keymap %p at module unload time\n", pos);
+ list_del(pos);
+ kfree(pos);
+ }
+ write_unlock_bh(&ip_ct_gre_lock);
+
+ ip_conntrack_protocol_unregister(&gre);
+}
+
+EXPORT_SYMBOL(ip_ct_gre_keymap_add);
+EXPORT_SYMBOL(ip_ct_gre_keymap_destroy);
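As a rough picture of what gre_pkt_to_tuple() reads: PPTP's enhanced GRE (version 1) carries the peer's call ID within the first 8 bytes of the header, which is why only that much is fetched with skb_header_pointer() above. A simplified sketch of that layout follows; the field names are illustrative, and the real struct gre_hdr_pptp in ip_conntrack_proto_gre.h also declares the optional sequence/ack words.

/* Simplified sketch of the version-1 (PPTP) GRE header; illustrative only. */
#include <linux/types.h>

struct gre_hdr_pptp_sketch {
	__be16 flags_version;	/* K bit set, version field == GRE_VERSION_PPTP */
	__be16 protocol;	/* 0x880B (PPP over GRE), i.e. GRE_PROTOCOL_PPTP */
	__be16 payload_len;	/* length of the PPP payload */
	__be16 call_id;		/* ends up in tuple->dst.u.gre.key above */
	/* optional sequence/ack numbers follow; not needed for tracking */
};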
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 838d1d69b36..98f0015dd25 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -296,8 +296,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
struct ip_conntrack_tuple *tuple)
{
if (!tb[CTA_PROTO_ICMP_TYPE-1]
- || !tb[CTA_PROTO_ICMP_CODE-1]
- || !tb[CTA_PROTO_ICMP_ID-1])
+ || !tb[CTA_PROTO_ICMP_CODE-1])
return -1;
tuple->dst.u.icmp.type =
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
index a875f35e576..59a4a0111dd 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c
@@ -416,6 +416,7 @@ static int sctp_packet(struct ip_conntrack *conntrack,
&& newconntrack == SCTP_CONNTRACK_ESTABLISHED) {
DEBUGP("Setting assured bit\n");
set_bit(IPS_ASSURED_BIT, &conntrack->status);
+ ip_conntrack_event_cache(IPCT_STATUS, skb);
}
return NF_ACCEPT;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 1985abc59d2..d6701cafbcc 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -341,17 +341,43 @@ static int tcp_print_conntrack(struct seq_file *s,
static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
const struct ip_conntrack *ct)
{
+ struct nfattr *nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
+
read_lock_bh(&tcp_lock);
NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
&ct->proto.tcp.state);
read_unlock_bh(&tcp_lock);
+ NFA_NEST_END(skb, nest_parms);
+
return 0;
nfattr_failure:
read_unlock_bh(&tcp_lock);
return -1;
}
+
+static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
+{
+ struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1];
+ struct nfattr *tb[CTA_PROTOINFO_TCP_MAX];
+
+ if (nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr) < 0)
+ goto nfattr_failure;
+
+ if (!tb[CTA_PROTOINFO_TCP_STATE-1])
+ return -EINVAL;
+
+ write_lock_bh(&tcp_lock);
+ ct->proto.tcp.state =
+ *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]);
+ write_unlock_bh(&tcp_lock);
+
+ return 0;
+
+nfattr_failure:
+ return -1;
+}
#endif
static unsigned int get_conntrack_index(const struct tcphdr *tcph)
@@ -1014,7 +1040,8 @@ static int tcp_packet(struct ip_conntrack *conntrack,
	/* Set ASSURED if we see a valid ack in ESTABLISHED
after SYN_RECV or a valid answer for a picked up
connection. */
- set_bit(IPS_ASSURED_BIT, &conntrack->status);
+ set_bit(IPS_ASSURED_BIT, &conntrack->status);
+ ip_conntrack_event_cache(IPCT_STATUS, skb);
}
ip_ct_refresh_acct(conntrack, ctinfo, skb, timeout);
@@ -1122,6 +1149,7 @@ struct ip_conntrack_protocol ip_conntrack_protocol_tcp =
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
.to_nfattr = tcp_to_nfattr,
+ .from_nfattr = nfattr_to_tcp,
.tuple_to_nfattr = ip_ct_port_tuple_to_nfattr,
.nfattr_to_tuple = ip_ct_port_nfattr_to_tuple,
#endif
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index ae3e3e655db..dd476b191f4 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -989,15 +989,15 @@ EXPORT_SYMBOL(need_ip_conntrack);
EXPORT_SYMBOL(ip_conntrack_helper_register);
EXPORT_SYMBOL(ip_conntrack_helper_unregister);
EXPORT_SYMBOL(ip_ct_iterate_cleanup);
-EXPORT_SYMBOL(ip_ct_refresh_acct);
+EXPORT_SYMBOL(__ip_ct_refresh_acct);
EXPORT_SYMBOL(ip_conntrack_expect_alloc);
EXPORT_SYMBOL(ip_conntrack_expect_put);
-EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
+EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
+EXPORT_SYMBOL_GPL(ip_conntrack_expect_find);
EXPORT_SYMBOL(ip_conntrack_expect_related);
EXPORT_SYMBOL(ip_conntrack_unexpect_related);
EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
-EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
EXPORT_SYMBOL_GPL(ip_ct_unlink_expect);
EXPORT_SYMBOL(ip_conntrack_tuple_taken);
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 1adedb743f6..c5e3abd2467 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -74,12 +74,14 @@ ip_nat_proto_find_get(u_int8_t protonum)
return p;
}
+EXPORT_SYMBOL_GPL(ip_nat_proto_find_get);
void
ip_nat_proto_put(struct ip_nat_protocol *p)
{
module_put(p->me);
}
+EXPORT_SYMBOL_GPL(ip_nat_proto_put);
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
@@ -111,6 +113,7 @@ ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck)
return csum_fold(csum_partial((char *)diffs, sizeof(diffs),
oldcheck^0xFFFF));
}
+EXPORT_SYMBOL(ip_nat_cheat_check);
/* Is this tuple already taken? (not by us) */
int
@@ -127,6 +130,7 @@ ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
invert_tuplepr(&reply, tuple);
return ip_conntrack_tuple_taken(&reply, ignored_conntrack);
}
+EXPORT_SYMBOL(ip_nat_used_tuple);
/* If we source map this tuple so reply looks like reply_tuple, will
* that meet the constraints of range. */
@@ -347,6 +351,7 @@ ip_nat_setup_info(struct ip_conntrack *conntrack,
return NF_ACCEPT;
}
+EXPORT_SYMBOL(ip_nat_setup_info);
/* Returns true if succeeded. */
static int
@@ -387,10 +392,10 @@ manip_pkt(u_int16_t proto,
}
/* Do packet manipulations according to ip_nat_setup_info. */
-unsigned int nat_packet(struct ip_conntrack *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int hooknum,
- struct sk_buff **pskb)
+unsigned int ip_nat_packet(struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int hooknum,
+ struct sk_buff **pskb)
{
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned long statusbit;
@@ -417,12 +422,13 @@ unsigned int nat_packet(struct ip_conntrack *ct,
}
return NF_ACCEPT;
}
+EXPORT_SYMBOL_GPL(ip_nat_packet);
/* Dir is direction ICMP is coming from (opposite to packet it contains) */
-int icmp_reply_translation(struct sk_buff **pskb,
- struct ip_conntrack *ct,
- enum ip_nat_manip_type manip,
- enum ip_conntrack_dir dir)
+int ip_nat_icmp_reply_translation(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_nat_manip_type manip,
+ enum ip_conntrack_dir dir)
{
struct {
struct icmphdr icmp;
@@ -509,6 +515,7 @@ int icmp_reply_translation(struct sk_buff **pskb,
return 1;
}
+EXPORT_SYMBOL_GPL(ip_nat_icmp_reply_translation);
/* Protocol registration. */
int ip_nat_protocol_register(struct ip_nat_protocol *proto)
@@ -525,6 +532,7 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto)
write_unlock_bh(&ip_nat_lock);
return ret;
}
+EXPORT_SYMBOL(ip_nat_protocol_register);
/* Noone stores the protocol anywhere; simply delete it. */
void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
@@ -536,6 +544,7 @@ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto)
/* Someone could be still looking at the proto in a bh. */
synchronize_net();
}
+EXPORT_SYMBOL(ip_nat_protocol_unregister);
#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
@@ -578,9 +587,11 @@ ip_nat_port_nfattr_to_range(struct nfattr *tb[], struct ip_nat_range *range)
return ret;
}
+EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range);
+EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr);
#endif
-int __init ip_nat_init(void)
+static int __init ip_nat_init(void)
{
size_t i;
@@ -622,10 +633,14 @@ static int clean_nat(struct ip_conntrack *i, void *data)
return 0;
}
-/* Not __exit: called from ip_nat_standalone.c:init_or_cleanup() --RR */
-void ip_nat_cleanup(void)
+static void __exit ip_nat_cleanup(void)
{
ip_ct_iterate_cleanup(&clean_nat, NULL);
ip_conntrack_destroyed = NULL;
vfree(bysource);
}
+
+MODULE_LICENSE("GPL");
+
+module_init(ip_nat_init);
+module_exit(ip_nat_cleanup);
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index d2dd5d31355..5d506e0564d 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -199,6 +199,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
}
return 1;
}
+EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
/* Generic function for mangling variable-length address changes inside
* NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
@@ -256,6 +257,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb,
return 1;
}
+EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
/* Adjust one found SACK option including checksum correction */
static void
@@ -399,6 +401,7 @@ ip_nat_seq_adjust(struct sk_buff **pskb,
return 1;
}
+EXPORT_SYMBOL(ip_nat_seq_adjust);
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
@@ -425,3 +428,4 @@ void ip_nat_follow_master(struct ip_conntrack *ct,
/* hook doesn't matter, but it has to do destination manip */
ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
}
+EXPORT_SYMBOL(ip_nat_follow_master);
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
new file mode 100644
index 00000000000..3cdd0684d30
--- /dev/null
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -0,0 +1,401 @@
+/*
+ * ip_nat_pptp.c - Version 3.0
+ *
+ * NAT support for PPTP (Point-to-Point Tunneling Protocol).
+ * PPTP is a protocol for creating virtual private networks.
+ * It is a specification defined by Microsoft and some vendors
+ * working with Microsoft. PPTP is built on top of a modified
+ * version of the Internet Generic Routing Encapsulation Protocol.
+ * GRE is defined in RFC 1701 and RFC 1702. Documentation of
+ * PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ * TODO: - NAT to a unique tuple, not to TCP source port
+ * (needs netfilter tuple reservation)
+ *
+ * Changes:
+ * 2002-02-10 - Version 1.3
+ * - Use ip_nat_mangle_tcp_packet() because of cloned skb's
+ * in local connections (Philip Craig <philipc@snapgear.com>)
+ * - add checks for magicCookie and pptp version
+ * - make argument list of pptp_{out,in}bound_packet() shorter
+ * - move to C99 style initializers
+ * - print version number at module loadtime
+ * 2003-09-22 - Version 1.5
+ * - use SNATed tcp sourceport as callid, since we get called before
+ * TCP header is mangled (Philip Craig <philipc@snapgear.com>)
+ * 2004-10-22 - Version 2.0
+ * - kernel 2.6.x version
+ * 2005-06-10 - Version 3.0
+ * - kernel >= 2.6.11 version,
+ * funded by Oxcoda NetBox Blue (http://www.netboxblue.com/)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_rule.h>
+#include <linux/netfilter_ipv4/ip_nat_helper.h>
+#include <linux/netfilter_ipv4/ip_nat_pptp.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+#include <linux/netfilter_ipv4/ip_conntrack_pptp.h>
+
+#define IP_NAT_PPTP_VERSION "3.0"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT helper module for PPTP");
+
+
+#if 0
+extern const char *pptp_msg_name[];
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
+ __FUNCTION__, ## args)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static void pptp_nat_expected(struct ip_conntrack *ct,
+ struct ip_conntrack_expect *exp)
+{
+ struct ip_conntrack *master = ct->master;
+ struct ip_conntrack_expect *other_exp;
+ struct ip_conntrack_tuple t;
+ struct ip_ct_pptp_master *ct_pptp_info;
+ struct ip_nat_pptp *nat_pptp_info;
+
+ ct_pptp_info = &master->help.ct_pptp_info;
+ nat_pptp_info = &master->nat.help.nat_pptp_info;
+
+ /* And here goes the grand finale of corrosion... */
+
+ if (exp->dir == IP_CT_DIR_ORIGINAL) {
+ DEBUGP("we are PNS->PAC\n");
+ /* therefore, build tuple for PAC->PNS */
+ t.src.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip;
+ t.src.u.gre.key = htons(master->help.ct_pptp_info.pac_call_id);
+ t.dst.ip = master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip;
+ t.dst.u.gre.key = htons(master->help.ct_pptp_info.pns_call_id);
+ t.dst.protonum = IPPROTO_GRE;
+ } else {
+ DEBUGP("we are PAC->PNS\n");
+ /* build tuple for PNS->PAC */
+ t.src.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
+ t.src.u.gre.key =
+ htons(master->nat.help.nat_pptp_info.pns_call_id);
+ t.dst.ip = master->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip;
+ t.dst.u.gre.key =
+ htons(master->nat.help.nat_pptp_info.pac_call_id);
+ t.dst.protonum = IPPROTO_GRE;
+ }
+
+ DEBUGP("trying to unexpect other dir: ");
+ DUMP_TUPLE(&t);
+ other_exp = ip_conntrack_expect_find(&t);
+ if (other_exp) {
+ ip_conntrack_unexpect_related(other_exp);
+ ip_conntrack_expect_put(other_exp);
+ DEBUGP("success\n");
+ } else {
+ DEBUGP("not found!\n");
+ }
+
+ ip_nat_follow_master(ct, exp);
+}
+
+/* outbound packets == from PNS to PAC */
+static int
+pptp_outbound_pkt(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq)
+
+{
+ struct ip_ct_pptp_master *ct_pptp_info = &ct->help.ct_pptp_info;
+ struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
+
+ u_int16_t msg, *cid = NULL, new_callid;
+
+ new_callid = htons(ct_pptp_info->pns_call_id);
+
+ switch (msg = ntohs(ctlh->messageType)) {
+ case PPTP_OUT_CALL_REQUEST:
+ cid = &pptpReq->ocreq.callID;
+		/* FIXME: ideally we would want to reserve a call ID
+		 * here. The current netfilter NAT core is not able to do
+		 * this :( For now we use the TCP source port. This breaks
+		 * multiple calls within one control session */
+
+ /* save original call ID in nat_info */
+ nat_pptp_info->pns_call_id = ct_pptp_info->pns_call_id;
+
+ /* don't use tcph->source since we are at a DSTmanip
+ * hook (e.g. PREROUTING) and pkt is not mangled yet */
+ new_callid = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port;
+
+ /* save new call ID in ct info */
+ ct_pptp_info->pns_call_id = ntohs(new_callid);
+ break;
+ case PPTP_IN_CALL_REPLY:
+ cid = &pptpReq->icreq.callID;
+ break;
+ case PPTP_CALL_CLEAR_REQUEST:
+ cid = &pptpReq->clrreq.callID;
+ break;
+ default:
+ DEBUGP("unknown outbound packet 0x%04x:%s\n", msg,
+ (msg <= PPTP_MSG_MAX)?
+ pptp_msg_name[msg]:pptp_msg_name[0]);
+ /* fall through */
+
+ case PPTP_SET_LINK_INFO:
+ /* only need to NAT in case PAC is behind NAT box */
+ case PPTP_START_SESSION_REQUEST:
+ case PPTP_START_SESSION_REPLY:
+ case PPTP_STOP_SESSION_REQUEST:
+ case PPTP_STOP_SESSION_REPLY:
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* no need to alter packet */
+ return NF_ACCEPT;
+ }
+
+ /* only OUT_CALL_REQUEST, IN_CALL_REPLY, CALL_CLEAR_REQUEST pass
+ * down to here */
+
+ IP_NF_ASSERT(cid);
+
+ DEBUGP("altering call id from 0x%04x to 0x%04x\n",
+ ntohs(*cid), ntohs(new_callid));
+
+ /* mangle packet */
+ if (ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+ sizeof(new_callid),
+ (char *)&new_callid,
+ sizeof(new_callid)) == 0)
+ return NF_DROP;
+
+ return NF_ACCEPT;
+}
+
+static int
+pptp_exp_gre(struct ip_conntrack_expect *expect_orig,
+ struct ip_conntrack_expect *expect_reply)
+{
+ struct ip_ct_pptp_master *ct_pptp_info =
+ &expect_orig->master->help.ct_pptp_info;
+ struct ip_nat_pptp *nat_pptp_info =
+ &expect_orig->master->nat.help.nat_pptp_info;
+
+ struct ip_conntrack *ct = expect_orig->master;
+
+ struct ip_conntrack_tuple inv_t;
+ struct ip_conntrack_tuple *orig_t, *reply_t;
+
+ /* save original PAC call ID in nat_info */
+ nat_pptp_info->pac_call_id = ct_pptp_info->pac_call_id;
+
+ /* alter expectation */
+ orig_t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ reply_t = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ /* alter expectation for PNS->PAC direction */
+ invert_tuplepr(&inv_t, &expect_orig->tuple);
+ expect_orig->saved_proto.gre.key = htons(nat_pptp_info->pac_call_id);
+ expect_orig->tuple.src.u.gre.key = htons(nat_pptp_info->pns_call_id);
+ expect_orig->tuple.dst.u.gre.key = htons(ct_pptp_info->pac_call_id);
+ inv_t.src.ip = reply_t->src.ip;
+ inv_t.dst.ip = reply_t->dst.ip;
+ inv_t.src.u.gre.key = htons(nat_pptp_info->pac_call_id);
+ inv_t.dst.u.gre.key = htons(ct_pptp_info->pns_call_id);
+
+ if (!ip_conntrack_expect_related(expect_orig)) {
+ DEBUGP("successfully registered expect\n");
+ } else {
+ DEBUGP("can't expect_related(expect_orig)\n");
+ return 1;
+ }
+
+ /* alter expectation for PAC->PNS direction */
+ invert_tuplepr(&inv_t, &expect_reply->tuple);
+ expect_reply->saved_proto.gre.key = htons(nat_pptp_info->pns_call_id);
+ expect_reply->tuple.src.u.gre.key = htons(nat_pptp_info->pac_call_id);
+ expect_reply->tuple.dst.u.gre.key = htons(ct_pptp_info->pns_call_id);
+ inv_t.src.ip = orig_t->src.ip;
+ inv_t.dst.ip = orig_t->dst.ip;
+ inv_t.src.u.gre.key = htons(nat_pptp_info->pns_call_id);
+ inv_t.dst.u.gre.key = htons(ct_pptp_info->pac_call_id);
+
+ if (!ip_conntrack_expect_related(expect_reply)) {
+ DEBUGP("successfully registered expect\n");
+ } else {
+ DEBUGP("can't expect_related(expect_reply)\n");
+ ip_conntrack_unexpect_related(expect_orig);
+ return 1;
+ }
+
+ if (ip_ct_gre_keymap_add(ct, &expect_reply->tuple, 0) < 0) {
+ DEBUGP("can't register original keymap\n");
+ ip_conntrack_unexpect_related(expect_orig);
+ ip_conntrack_unexpect_related(expect_reply);
+ return 1;
+ }
+
+ if (ip_ct_gre_keymap_add(ct, &inv_t, 1) < 0) {
+ DEBUGP("can't register reply keymap\n");
+ ip_conntrack_unexpect_related(expect_orig);
+ ip_conntrack_unexpect_related(expect_reply);
+ ip_ct_gre_keymap_destroy(ct);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* inbound packets == from PAC to PNS */
+static int
+pptp_inbound_pkt(struct sk_buff **pskb,
+ struct ip_conntrack *ct,
+ enum ip_conntrack_info ctinfo,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq)
+{
+ struct ip_nat_pptp *nat_pptp_info = &ct->nat.help.nat_pptp_info;
+ u_int16_t msg, new_cid = 0, new_pcid, *pcid = NULL, *cid = NULL;
+
+ int ret = NF_ACCEPT, rv;
+
+ new_pcid = htons(nat_pptp_info->pns_call_id);
+
+ switch (msg = ntohs(ctlh->messageType)) {
+ case PPTP_OUT_CALL_REPLY:
+ pcid = &pptpReq->ocack.peersCallID;
+ cid = &pptpReq->ocack.callID;
+ break;
+ case PPTP_IN_CALL_CONNECT:
+ pcid = &pptpReq->iccon.peersCallID;
+ break;
+ case PPTP_IN_CALL_REQUEST:
+ /* only need to nat in case PAC is behind NAT box */
+ break;
+ case PPTP_WAN_ERROR_NOTIFY:
+ pcid = &pptpReq->wanerr.peersCallID;
+ break;
+ case PPTP_CALL_DISCONNECT_NOTIFY:
+ pcid = &pptpReq->disc.callID;
+ break;
+ case PPTP_SET_LINK_INFO:
+ pcid = &pptpReq->setlink.peersCallID;
+ break;
+
+ default:
+ DEBUGP("unknown inbound packet %s\n", (msg <= PPTP_MSG_MAX)?
+ pptp_msg_name[msg]:pptp_msg_name[0]);
+ /* fall through */
+
+ case PPTP_START_SESSION_REQUEST:
+ case PPTP_START_SESSION_REPLY:
+ case PPTP_STOP_SESSION_REQUEST:
+ case PPTP_STOP_SESSION_REPLY:
+ case PPTP_ECHO_REQUEST:
+ case PPTP_ECHO_REPLY:
+ /* no need to alter packet */
+ return NF_ACCEPT;
+ }
+
+ /* only OUT_CALL_REPLY, IN_CALL_CONNECT, IN_CALL_REQUEST,
+ * WAN_ERROR_NOTIFY, CALL_DISCONNECT_NOTIFY pass down here */
+
+ /* mangle packet */
+ IP_NF_ASSERT(pcid);
+ DEBUGP("altering peer call id from 0x%04x to 0x%04x\n",
+ ntohs(*pcid), ntohs(new_pcid));
+
+ rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ (void *)pcid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+ sizeof(new_pcid), (char *)&new_pcid,
+ sizeof(new_pcid));
+ if (rv != NF_ACCEPT)
+ return rv;
+
+ if (new_cid) {
+ IP_NF_ASSERT(cid);
+ DEBUGP("altering call id from 0x%04x to 0x%04x\n",
+ ntohs(*cid), ntohs(new_cid));
+ rv = ip_nat_mangle_tcp_packet(pskb, ct, ctinfo,
+ (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr)),
+ sizeof(new_cid),
+ (char *)&new_cid,
+ sizeof(new_cid));
+ if (rv != NF_ACCEPT)
+ return rv;
+ }
+
+ /* check for earlier return value of 'switch' above */
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ /* great, at least we don't need to resize packets */
+ return NF_ACCEPT;
+}
+
+
+extern int __init ip_nat_proto_gre_init(void);
+extern void __exit ip_nat_proto_gre_fini(void);
+
+static int __init init(void)
+{
+ int ret;
+
+ DEBUGP("%s: registering NAT helper\n", __FILE__);
+
+ ret = ip_nat_proto_gre_init();
+ if (ret < 0)
+ return ret;
+
+ BUG_ON(ip_nat_pptp_hook_outbound);
+ ip_nat_pptp_hook_outbound = &pptp_outbound_pkt;
+
+ BUG_ON(ip_nat_pptp_hook_inbound);
+ ip_nat_pptp_hook_inbound = &pptp_inbound_pkt;
+
+ BUG_ON(ip_nat_pptp_hook_exp_gre);
+ ip_nat_pptp_hook_exp_gre = &pptp_exp_gre;
+
+ BUG_ON(ip_nat_pptp_hook_expectfn);
+ ip_nat_pptp_hook_expectfn = &pptp_nat_expected;
+
+ printk("ip_nat_pptp version %s loaded\n", IP_NAT_PPTP_VERSION);
+ return 0;
+}
+
+static void __exit fini(void)
+{
+ DEBUGP("cleanup_module\n" );
+
+ ip_nat_pptp_hook_expectfn = NULL;
+ ip_nat_pptp_hook_exp_gre = NULL;
+ ip_nat_pptp_hook_inbound = NULL;
+ ip_nat_pptp_hook_outbound = NULL;
+
+ ip_nat_proto_gre_fini();
+	/* Make sure no one calls it in the meantime */
+ synchronize_net();
+
+ printk("ip_nat_pptp version %s unloaded\n", IP_NAT_PPTP_VERSION);
+}
+
+module_init(init);
+module_exit(fini);
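The match_offset expressions passed to ip_nat_mangle_tcp_packet() in pptp_outbound_pkt() and pptp_inbound_pkt() are fairly dense; the arithmetic works out as in the sketch below, assuming (as the conntrack helper does for linear skbs) that ctlh and the request union lie back to back in the packet data.

/*
 * Worked example of the offset handed to ip_nat_mangle_tcp_packet():
 *
 *   (void *)cid - ((void *)ctlh - sizeof(struct pptp_pkt_hdr))
 *     == ((void *)cid - (void *)ctlh) + sizeof(struct pptp_pkt_hdr)
 *     == sizeof(struct pptp_pkt_hdr)
 *        + sizeof(struct PptpControlHeader)
 *        + offset of the callID/peersCallID field within the request
 *
 * ip_nat_mangle_tcp_packet() interprets its offset relative to the start
 * of the TCP payload, and the PPTP control message starts exactly there,
 * so this is the position of the call-ID field that gets rewritten.
 */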
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c
new file mode 100644
index 00000000000..7c128540167
--- /dev/null
+++ b/net/ipv4/netfilter/ip_nat_proto_gre.c
@@ -0,0 +1,214 @@
+/*
+ * ip_nat_proto_gre.c - Version 2.0
+ *
+ * NAT protocol helper module for GRE.
+ *
+ * GRE is a generic encapsulation protocol, which is generally not very
+ * well suited for NAT, as it has no protocol-specific part such as port numbers.
+ *
+ * It has an optional key field, which may help us distinguish two
+ * connections between the same two hosts.
+ *
+ * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
+ *
+ * PPTP is built on top of a modified version of GRE, and has a mandatory
+ * field called "CallID", which serves the same purpose for us as the key
+ * field in plain GRE.
+ *
+ * Documentation about PPTP can be found in RFC 2637
+ *
+ * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_rule.h>
+#include <linux/netfilter_ipv4/ip_nat_protocol.h>
+#include <linux/netfilter_ipv4/ip_conntrack_proto_gre.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
+MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
+
+#if 0
+#define DEBUGP(format, args...) printk(KERN_DEBUG "%s:%s: " format, __FILE__, \
+ __FUNCTION__, ## args)
+#else
+#define DEBUGP(x, args...)
+#endif
+
+/* is the key in the given range between min and max? */
+static int
+gre_in_range(const struct ip_conntrack_tuple *tuple,
+ enum ip_nat_manip_type maniptype,
+ const union ip_conntrack_manip_proto *min,
+ const union ip_conntrack_manip_proto *max)
+{
+ u_int32_t key;
+
+ if (maniptype == IP_NAT_MANIP_SRC)
+ key = tuple->src.u.gre.key;
+ else
+ key = tuple->dst.u.gre.key;
+
+ return ntohl(key) >= ntohl(min->gre.key)
+ && ntohl(key) <= ntohl(max->gre.key);
+}
+
+/* generate unique tuple ... */
+static int
+gre_unique_tuple(struct ip_conntrack_tuple *tuple,
+ const struct ip_nat_range *range,
+ enum ip_nat_manip_type maniptype,
+ const struct ip_conntrack *conntrack)
+{
+ static u_int16_t key;
+ u_int16_t *keyptr;
+ unsigned int min, i, range_size;
+
+ if (maniptype == IP_NAT_MANIP_SRC)
+ keyptr = &tuple->src.u.gre.key;
+ else
+ keyptr = &tuple->dst.u.gre.key;
+
+ if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
+ DEBUGP("%p: NATing GRE PPTP\n", conntrack);
+ min = 1;
+ range_size = 0xffff;
+ } else {
+ min = ntohl(range->min.gre.key);
+ range_size = ntohl(range->max.gre.key) - min + 1;
+ }
+
+ DEBUGP("min = %u, range_size = %u\n", min, range_size);
+
+ for (i = 0; i < range_size; i++, key++) {
+ *keyptr = htonl(min + key % range_size);
+ if (!ip_nat_used_tuple(tuple, conntrack))
+ return 1;
+ }
+
+ DEBUGP("%p: no NAT mapping\n", conntrack);
+
+ return 0;
+}
+
+/* manipulate a GRE packet according to maniptype */
+static int
+gre_manip_pkt(struct sk_buff **pskb,
+ unsigned int iphdroff,
+ const struct ip_conntrack_tuple *tuple,
+ enum ip_nat_manip_type maniptype)
+{
+ struct gre_hdr *greh;
+ struct gre_hdr_pptp *pgreh;
+ struct iphdr *iph = (struct iphdr *)((*pskb)->data + iphdroff);
+ unsigned int hdroff = iphdroff + iph->ihl*4;
+
+ /* pgreh includes two optional 32bit fields which are not required
+ * to be there. That's where the magic '8' comes from */
+ if (!skb_make_writable(pskb, hdroff + sizeof(*pgreh)-8))
+ return 0;
+
+ greh = (void *)(*pskb)->data + hdroff;
+ pgreh = (struct gre_hdr_pptp *) greh;
+
+	/* we only do destination manip of a packet, since the 'source key'
+	 * is not present in the packet itself */
+ if (maniptype == IP_NAT_MANIP_DST) {
+ /* key manipulation is always dest */
+ switch (greh->version) {
+ case 0:
+ if (!greh->key) {
+ DEBUGP("can't nat GRE w/o key\n");
+ break;
+ }
+ if (greh->csum) {
+ /* FIXME: Never tested this code... */
+ *(gre_csum(greh)) =
+ ip_nat_cheat_check(~*(gre_key(greh)),
+ tuple->dst.u.gre.key,
+ *(gre_csum(greh)));
+ }
+ *(gre_key(greh)) = tuple->dst.u.gre.key;
+ break;
+ case GRE_VERSION_PPTP:
+ DEBUGP("call_id -> 0x%04x\n",
+ ntohl(tuple->dst.u.gre.key));
+ pgreh->call_id = htons(ntohl(tuple->dst.u.gre.key));
+ break;
+ default:
+ DEBUGP("can't nat unknown GRE version\n");
+ return 0;
+ break;
+ }
+ }
+ return 1;
+}
+
+/* print out a nat tuple */
+static unsigned int
+gre_print(char *buffer,
+ const struct ip_conntrack_tuple *match,
+ const struct ip_conntrack_tuple *mask)
+{
+ unsigned int len = 0;
+
+ if (mask->src.u.gre.key)
+ len += sprintf(buffer + len, "srckey=0x%x ",
+ ntohl(match->src.u.gre.key));
+
+ if (mask->dst.u.gre.key)
+ len += sprintf(buffer + len, "dstkey=0x%x ",
+ ntohl(match->src.u.gre.key));
+
+ return len;
+}
+
+/* print a range of keys */
+static unsigned int
+gre_print_range(char *buffer, const struct ip_nat_range *range)
+{
+ if (range->min.gre.key != 0
+ || range->max.gre.key != 0xFFFF) {
+ if (range->min.gre.key == range->max.gre.key)
+ return sprintf(buffer, "key 0x%x ",
+ ntohl(range->min.gre.key));
+ else
+ return sprintf(buffer, "keys 0x%u-0x%u ",
+ ntohl(range->min.gre.key),
+ ntohl(range->max.gre.key));
+ } else
+ return 0;
+}
+
+/* nat helper struct */
+static struct ip_nat_protocol gre = {
+ .name = "GRE",
+ .protonum = IPPROTO_GRE,
+ .manip_pkt = gre_manip_pkt,
+ .in_range = gre_in_range,
+ .unique_tuple = gre_unique_tuple,
+ .print = gre_print,
+ .print_range = gre_print_range,
+#if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
+ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
+ .range_to_nfattr = ip_nat_port_range_to_nfattr,
+ .nfattr_to_range = ip_nat_port_nfattr_to_range,
+#endif
+};
+
+int __init ip_nat_proto_gre_init(void)
+{
+ return ip_nat_protocol_register(&gre);
+}
+
+void __exit ip_nat_proto_gre_fini(void)
+{
+ ip_nat_protocol_unregister(&gre);
+}
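The search loop in gre_unique_tuple() is the usual round-robin pattern shared with the port-based NAT protocol helpers; a minimal stand-alone sketch of the same idea, with a hypothetical used() predicate standing in for ip_nat_used_tuple():

/* Sketch of the call-ID selection in gre_unique_tuple(): start from a
 * rolling counter and return the first value in the range that is not
 * already taken. Purely illustrative, not kernel code. */
static unsigned short pick_gre_key(unsigned short min, unsigned int range_size,
				   int (*used)(unsigned short key))
{
	static unsigned short rover;	/* plays the role of the static 'key' */
	unsigned int i;

	for (i = 0; i < range_size; i++, rover++) {
		unsigned short cand = min + rover % range_size;
		if (!used(cand))
			return cand;	/* caller stores it as htons(cand) */
	}
	return 0;	/* no free key/call ID left in the range */
}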
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 0ff368b131f..30cd4e18c12 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -108,8 +108,8 @@ ip_nat_fn(unsigned int hooknum,
case IP_CT_RELATED:
case IP_CT_RELATED+IP_CT_IS_REPLY:
if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) {
- if (!icmp_reply_translation(pskb, ct, maniptype,
- CTINFO2DIR(ctinfo)))
+ if (!ip_nat_icmp_reply_translation(pskb, ct, maniptype,
+ CTINFO2DIR(ctinfo)))
return NF_DROP;
else
return NF_ACCEPT;
@@ -152,7 +152,7 @@ ip_nat_fn(unsigned int hooknum,
}
IP_NF_ASSERT(info);
- return nat_packet(ct, ctinfo, hooknum, pskb);
+ return ip_nat_packet(ct, ctinfo, hooknum, pskb);
}
static unsigned int
@@ -325,15 +325,10 @@ static int init_or_cleanup(int init)
printk("ip_nat_init: can't setup rules.\n");
goto cleanup_nothing;
}
- ret = ip_nat_init();
- if (ret < 0) {
- printk("ip_nat_init: can't setup rules.\n");
- goto cleanup_rule_init;
- }
ret = nf_register_hook(&ip_nat_in_ops);
if (ret < 0) {
printk("ip_nat_init: can't register in hook.\n");
- goto cleanup_nat;
+ goto cleanup_rule_init;
}
ret = nf_register_hook(&ip_nat_out_ops);
if (ret < 0) {
@@ -374,8 +369,6 @@ static int init_or_cleanup(int init)
nf_unregister_hook(&ip_nat_out_ops);
cleanup_inops:
nf_unregister_hook(&ip_nat_in_ops);
- cleanup_nat:
- ip_nat_cleanup();
cleanup_rule_init:
ip_nat_rule_cleanup();
cleanup_nothing:
@@ -395,14 +388,4 @@ static void __exit fini(void)
module_init(init);
module_exit(fini);
-EXPORT_SYMBOL(ip_nat_setup_info);
-EXPORT_SYMBOL(ip_nat_protocol_register);
-EXPORT_SYMBOL(ip_nat_protocol_unregister);
-EXPORT_SYMBOL_GPL(ip_nat_proto_find_get);
-EXPORT_SYMBOL_GPL(ip_nat_proto_put);
-EXPORT_SYMBOL(ip_nat_cheat_check);
-EXPORT_SYMBOL(ip_nat_mangle_tcp_packet);
-EXPORT_SYMBOL(ip_nat_mangle_udp_packet);
-EXPORT_SYMBOL(ip_nat_used_tuple);
-EXPORT_SYMBOL(ip_nat_follow_master);
MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d54f14d926f..36339eb39e1 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -240,8 +240,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
pmsg->packet_id = (unsigned long )entry;
pmsg->data_len = data_len;
- pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec;
- pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec;
+ pmsg->timestamp_sec = entry->skb->tstamp.off_sec;
+ pmsg->timestamp_usec = entry->skb->tstamp.off_usec;
pmsg->mark = entry->skb->nfmark;
pmsg->hook = entry->info->hook;
pmsg->hw_protocol = entry->skb->protocol;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index eef99a1b5de..75c27e92f6a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -27,6 +27,7 @@
#include <asm/semaphore.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
+#include <linux/cpumask.h>
#include <linux/netfilter_ipv4/ip_tables.h>
@@ -921,8 +922,10 @@ translate_table(const char *name,
}
/* And one copy for every other CPU */
- for (i = 1; i < num_possible_cpus(); i++) {
- memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i,
+ for_each_cpu(i) {
+ if (i == 0)
+ continue;
+ memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i,
newinfo->entries,
SMP_ALIGN(newinfo->size));
}
@@ -943,7 +946,7 @@ replace_table(struct ipt_table *table,
struct ipt_entry *table_base;
unsigned int i;
- for (i = 0; i < num_possible_cpus(); i++) {
+ for_each_cpu(i) {
table_base =
(void *)newinfo->entries
+ TABLE_OFFSET(newinfo, i);
@@ -990,7 +993,7 @@ get_counters(const struct ipt_table_info *t,
unsigned int cpu;
unsigned int i;
- for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ for_each_cpu(cpu) {
i = 0;
IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu),
t->size,
@@ -1128,7 +1131,8 @@ do_replace(void __user *user, unsigned int len)
return -ENOMEM;
newinfo = vmalloc(sizeof(struct ipt_table_info)
- + SMP_ALIGN(tmp.size) * num_possible_cpus());
+ + SMP_ALIGN(tmp.size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
@@ -1458,7 +1462,8 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
= { 0, 0, 0, { 0 }, { 0 }, { } };
newinfo = vmalloc(sizeof(struct ipt_table_info)
- + SMP_ALIGN(repl->size) * num_possible_cpus());
+ + SMP_ALIGN(repl->size) *
+ (highest_possible_processor_id()+1));
if (!newinfo)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 7d38913754b..9bcb398fbc1 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -13,6 +13,7 @@
#include <linux/config.h>
#include <linux/proc_fs.h>
#include <linux/jhash.h>
+#include <linux/bitops.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -30,7 +31,7 @@
#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
-#define CLUSTERIP_VERSION "0.7"
+#define CLUSTERIP_VERSION "0.8"
#define DEBUG_CLUSTERIP
@@ -49,13 +50,14 @@ MODULE_DESCRIPTION("iptables target for CLUSTERIP");
struct clusterip_config {
struct list_head list; /* list of all configs */
atomic_t refcount; /* reference count */
+ atomic_t entries; /* number of entries/rules
+ * referencing us */
u_int32_t clusterip; /* the IP address */
u_int8_t clustermac[ETH_ALEN]; /* the MAC address */
struct net_device *dev; /* device */
u_int16_t num_total_nodes; /* total number of nodes */
- u_int16_t num_local_nodes; /* number of local nodes */
- u_int16_t local_nodes[CLUSTERIP_MAX_NODES]; /* node number array */
+ unsigned long local_nodes; /* node number array */
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *pde; /* proc dir entry */
@@ -66,8 +68,7 @@ struct clusterip_config {
static LIST_HEAD(clusterip_configs);
-/* clusterip_lock protects the clusterip_configs list _AND_ the configurable
- * data within all structurses (num_local_nodes, local_nodes[]) */
+/* clusterip_lock protects the clusterip_configs list */
static DEFINE_RWLOCK(clusterip_lock);
#ifdef CONFIG_PROC_FS
@@ -76,23 +77,48 @@ static struct proc_dir_entry *clusterip_procdir;
#endif
static inline void
-clusterip_config_get(struct clusterip_config *c) {
+clusterip_config_get(struct clusterip_config *c)
+{
atomic_inc(&c->refcount);
}
static inline void
-clusterip_config_put(struct clusterip_config *c) {
- if (atomic_dec_and_test(&c->refcount)) {
+clusterip_config_put(struct clusterip_config *c)
+{
+ if (atomic_dec_and_test(&c->refcount))
+ kfree(c);
+}
+
+/* increase the count of entries(rules) using/referencing this config */
+static inline void
+clusterip_config_entry_get(struct clusterip_config *c)
+{
+ atomic_inc(&c->entries);
+}
+
+/* decrease the count of entries using/referencing this config. If last
+ * entry(rule) is removed, remove the config from lists, but don't free it
+ * yet, since proc-files could still be holding references */
+static inline void
+clusterip_config_entry_put(struct clusterip_config *c)
+{
+ if (atomic_dec_and_test(&c->entries)) {
write_lock_bh(&clusterip_lock);
list_del(&c->list);
write_unlock_bh(&clusterip_lock);
+
dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0);
dev_put(c->dev);
- kfree(c);
+
+ /* In case anyone still accesses the file, the open/close
+ * functions are also incrementing the refcount on their own,
+ * so it's safe to remove the entry even if it's in use. */
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry(c->pde->name, c->pde->parent);
+#endif
}
}
-
static struct clusterip_config *
__clusterip_config_find(u_int32_t clusterip)
{
@@ -111,7 +137,7 @@ __clusterip_config_find(u_int32_t clusterip)
}
static inline struct clusterip_config *
-clusterip_config_find_get(u_int32_t clusterip)
+clusterip_config_find_get(u_int32_t clusterip, int entry)
{
struct clusterip_config *c;
@@ -122,11 +148,24 @@ clusterip_config_find_get(u_int32_t clusterip)
return NULL;
}
atomic_inc(&c->refcount);
+ if (entry)
+ atomic_inc(&c->entries);
read_unlock_bh(&clusterip_lock);
return c;
}
+static void
+clusterip_config_init_nodelist(struct clusterip_config *c,
+ const struct ipt_clusterip_tgt_info *i)
+{
+ int n;
+
+ for (n = 0; n < i->num_local_nodes; n++) {
+ set_bit(i->local_nodes[n] - 1, &c->local_nodes);
+ }
+}
+
static struct clusterip_config *
clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
struct net_device *dev)
@@ -143,11 +182,11 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
c->clusterip = ip;
memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
c->num_total_nodes = i->num_total_nodes;
- c->num_local_nodes = i->num_local_nodes;
- memcpy(&c->local_nodes, &i->local_nodes, sizeof(c->local_nodes));
+ clusterip_config_init_nodelist(c, i);
c->hash_mode = i->hash_mode;
c->hash_initval = i->hash_initval;
atomic_set(&c->refcount, 1);
+ atomic_set(&c->entries, 1);
#ifdef CONFIG_PROC_FS
/* create proc dir entry */
@@ -171,53 +210,28 @@ clusterip_config_init(struct ipt_clusterip_tgt_info *i, u_int32_t ip,
static int
clusterip_add_node(struct clusterip_config *c, u_int16_t nodenum)
{
- int i;
-
- write_lock_bh(&clusterip_lock);
- if (c->num_local_nodes >= CLUSTERIP_MAX_NODES
- || nodenum > CLUSTERIP_MAX_NODES) {
- write_unlock_bh(&clusterip_lock);
+ if (nodenum == 0 ||
+ nodenum > c->num_total_nodes)
return 1;
- }
-
- /* check if we alrady have this number in our array */
- for (i = 0; i < c->num_local_nodes; i++) {
- if (c->local_nodes[i] == nodenum) {
- write_unlock_bh(&clusterip_lock);
- return 1;
- }
- }
- c->local_nodes[c->num_local_nodes++] = nodenum;
+ /* check if we already have this number in our bitfield */
+ if (test_and_set_bit(nodenum - 1, &c->local_nodes))
+ return 1;
- write_unlock_bh(&clusterip_lock);
return 0;
}
static int
clusterip_del_node(struct clusterip_config *c, u_int16_t nodenum)
{
- int i;
-
- write_lock_bh(&clusterip_lock);
-
- if (c->num_local_nodes <= 1 || nodenum > CLUSTERIP_MAX_NODES) {
- write_unlock_bh(&clusterip_lock);
+ if (nodenum == 0 ||
+ nodenum > c->num_total_nodes)
return 1;
- }
- for (i = 0; i < c->num_local_nodes; i++) {
- if (c->local_nodes[i] == nodenum) {
- int size = sizeof(u_int16_t)*(c->num_local_nodes-(i+1));
- memmove(&c->local_nodes[i], &c->local_nodes[i+1], size);
- c->num_local_nodes--;
- write_unlock_bh(&clusterip_lock);
- return 0;
- }
- }
+ if (test_and_clear_bit(nodenum - 1, &c->local_nodes))
+ return 0;
- write_unlock_bh(&clusterip_lock);
return 1;
}
@@ -285,25 +299,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
static inline int
clusterip_responsible(struct clusterip_config *config, u_int32_t hash)
{
- int i;
-
- read_lock_bh(&clusterip_lock);
-
- if (config->num_local_nodes == 0) {
- read_unlock_bh(&clusterip_lock);
- return 0;
- }
-
- for (i = 0; i < config->num_local_nodes; i++) {
- if (config->local_nodes[i] == hash) {
- read_unlock_bh(&clusterip_lock);
- return 1;
- }
- }
-
- read_unlock_bh(&clusterip_lock);
-
- return 0;
+ return test_bit(hash - 1, &config->local_nodes);
}
/***********************************************************************
@@ -415,8 +411,26 @@ checkentry(const char *tablename,
/* FIXME: further sanity checks */
- config = clusterip_config_find_get(e->ip.dst.s_addr);
- if (!config) {
+ config = clusterip_config_find_get(e->ip.dst.s_addr, 1);
+ if (config) {
+ if (cipinfo->config != NULL) {
+ /* Case A: This is an entry that gets reloaded, since
+ * it still has a cipinfo->config pointer. Simply
+ * increase the entry refcount and return */
+ if (cipinfo->config != config) {
+ printk(KERN_ERR "CLUSTERIP: Reloaded entry "
+ "has invalid config pointer!\n");
+ return 0;
+ }
+ clusterip_config_entry_get(cipinfo->config);
+ } else {
+ /* Case B: This is a new rule referring to an existing
+ * clusterip config. */
+ cipinfo->config = config;
+ clusterip_config_entry_get(cipinfo->config);
+ }
+ } else {
+ /* Case C: This is a completely new clusterip config */
if (!(cipinfo->flags & CLUSTERIP_FLAG_NEW)) {
printk(KERN_WARNING "CLUSTERIP: no config found for %u.%u.%u.%u, need 'new'\n", NIPQUAD(e->ip.dst.s_addr));
return 0;
@@ -443,10 +457,9 @@ checkentry(const char *tablename,
}
dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0);
}
+ cipinfo->config = config;
}
- cipinfo->config = config;
-
return 1;
}
@@ -455,13 +468,10 @@ static void destroy(void *matchinfo, unsigned int matchinfosize)
{
struct ipt_clusterip_tgt_info *cipinfo = matchinfo;
- /* we first remove the proc entry and then drop the reference
- * count. In case anyone still accesses the file, the open/close
- * functions are also incrementing the refcount on their own */
-#ifdef CONFIG_PROC_FS
- remove_proc_entry(cipinfo->config->pde->name,
- cipinfo->config->pde->parent);
-#endif
+ /* if no more entries are referencing the config, remove it
+ * from the list and destroy the proc entry */
+ clusterip_config_entry_put(cipinfo->config);
+
clusterip_config_put(cipinfo->config);
}
@@ -533,7 +543,7 @@ arp_mangle(unsigned int hook,
/* if there is no clusterip configuration for the arp reply's
* source ip, we don't want to mangle it */
- c = clusterip_config_find_get(payload->src_ip);
+ c = clusterip_config_find_get(payload->src_ip, 0);
if (!c)
return NF_ACCEPT;
@@ -574,56 +584,69 @@ static struct nf_hook_ops cip_arp_ops = {
#ifdef CONFIG_PROC_FS
+struct clusterip_seq_position {
+ unsigned int pos; /* position */
+ unsigned int weight; /* number of bits set == size */
+ unsigned int bit; /* current bit */
+ unsigned long val; /* current value */
+};
+
static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
{
struct proc_dir_entry *pde = s->private;
struct clusterip_config *c = pde->data;
- unsigned int *nodeidx;
-
- read_lock_bh(&clusterip_lock);
- if (*pos >= c->num_local_nodes)
+ unsigned int weight;
+ u_int32_t local_nodes;
+ struct clusterip_seq_position *idx;
+
+ /* FIXME: possible race */
+ local_nodes = c->local_nodes;
+ weight = hweight32(local_nodes);
+ if (*pos >= weight)
return NULL;
- nodeidx = kmalloc(sizeof(unsigned int), GFP_KERNEL);
- if (!nodeidx)
+ idx = kmalloc(sizeof(struct clusterip_seq_position), GFP_KERNEL);
+ if (!idx)
return ERR_PTR(-ENOMEM);
- *nodeidx = *pos;
- return nodeidx;
+ idx->pos = *pos;
+ idx->weight = weight;
+ idx->bit = ffs(local_nodes);
+ idx->val = local_nodes;
+ clear_bit(idx->bit - 1, &idx->val);
+
+ return idx;
}
static void *clusterip_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct proc_dir_entry *pde = s->private;
- struct clusterip_config *c = pde->data;
- unsigned int *nodeidx = (unsigned int *)v;
+ struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
- *pos = ++(*nodeidx);
- if (*pos >= c->num_local_nodes) {
+ *pos = ++idx->pos;
+ if (*pos >= idx->weight) {
kfree(v);
return NULL;
}
- return nodeidx;
+ idx->bit = ffs(idx->val);
+ clear_bit(idx->bit - 1, &idx->val);
+ return idx;
}
static void clusterip_seq_stop(struct seq_file *s, void *v)
{
kfree(v);
-
- read_unlock_bh(&clusterip_lock);
}
static int clusterip_seq_show(struct seq_file *s, void *v)
{
- struct proc_dir_entry *pde = s->private;
- struct clusterip_config *c = pde->data;
- unsigned int *nodeidx = (unsigned int *)v;
+ struct clusterip_seq_position *idx = (struct clusterip_seq_position *)v;
- if (*nodeidx != 0)
+ if (idx->pos != 0)
seq_putc(s, ',');
- seq_printf(s, "%u", c->local_nodes[*nodeidx]);
- if (*nodeidx == c->num_local_nodes-1)
+ seq_printf(s, "%u", idx->bit);
+
+ if (idx->pos == idx->weight - 1)
seq_putc(s, '\n');
return 0;
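
The CLUSTERIP hunks above replace the array of local node numbers plus clusterip_lock with a single bitmap driven by test_and_set_bit()/test_and_clear_bit(), and the /proc iterator walks the set bits with hweight32()/ffs(). A minimal userspace sketch of the same bookkeeping follows, with GCC __sync builtins and __builtin_ffsl standing in for the kernel helpers; every name in it is illustrative only, not a kernel symbol.

/* Userspace analogue of the bitmap node bookkeeping used above.
 * __sync_fetch_and_or/and stand in for test_and_set_bit()/
 * test_and_clear_bit(); names are illustrative only. */
#include <stdio.h>

static unsigned long local_nodes;		/* bit 0 == node 1 */

static int add_node(unsigned int nodenum)	/* 0 on success, 1 if already set */
{
	unsigned long mask = 1UL << (nodenum - 1);

	return (__sync_fetch_and_or(&local_nodes, mask) & mask) ? 1 : 0;
}

static int del_node(unsigned int nodenum)	/* 0 on success, 1 if not set */
{
	unsigned long mask = 1UL << (nodenum - 1);

	return (__sync_fetch_and_and(&local_nodes, ~mask) & mask) ? 0 : 1;
}

static int responsible(unsigned int hash)	/* hash is 1-based, as above */
{
	return (local_nodes >> (hash - 1)) & 1UL;
}

int main(void)
{
	unsigned long v;

	add_node(1);
	add_node(3);
	del_node(1);

	/* walk the remaining set bits the way clusterip_seq_next() does:
	 * ffs() to find the lowest set bit, clear it, repeat */
	for (v = local_nodes; v; v &= ~(1UL << (__builtin_ffsl(v) - 1)))
		printf("node %d\n", __builtin_ffsl(v));

	return responsible(3) ? 0 : 1;
}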
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 715cb613405..5245bfd33d5 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -93,7 +93,7 @@ redirect_target(struct sk_buff **pskb,
newdst = 0;
rcu_read_lock();
- indev = __in_dev_get((*pskb)->dev);
+ indev = __in_dev_get_rcu((*pskb)->dev);
if (indev && (ifa = indev->ifa_list))
newdst = ifa->ifa_local;
rcu_read_unlock();
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index e2c14f3cb2f..2883ccd8a91 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -225,8 +225,8 @@ static void ipt_ulog_packet(unsigned int hooknum,
/* copy hook, prefix, timestamp, payload, etc. */
pm->data_len = copy_len;
- pm->timestamp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec;
- pm->timestamp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec;
+ pm->timestamp_sec = skb->tstamp.off_sec;
+ pm->timestamp_usec = skb->tstamp.off_usec;
pm->mark = skb->nfmark;
pm->hook = hooknum;
if (prefix != NULL)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 304bb0a1d4f..4b0d7e4d626 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -361,7 +361,7 @@ static void raw_probe_proto_opt(struct flowi *fl, struct msghdr *msg)
if (type && code) {
get_user(fl->fl_icmp_type, type);
- __get_user(fl->fl_icmp_code, code);
+ get_user(fl->fl_icmp_code, code);
probed = 1;
}
break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8549f26e249..381dd6a6aeb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2128,7 +2128,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
struct in_device *in_dev;
rcu_read_lock();
- if ((in_dev = __in_dev_get(dev)) != NULL) {
+ if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
int our = ip_check_mc(in_dev, daddr, saddr,
skb->nh.iph->protocol);
if (our
@@ -2443,7 +2443,9 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
err = -ENODEV;
if (dev_out == NULL)
goto out;
- if (__in_dev_get(dev_out) == NULL) {
+
+ /* RACE: Check return value of inet_select_addr instead. */
+ if (__in_dev_get_rtnl(dev_out) == NULL) {
dev_put(dev_out);
goto out; /* Wrong error code */
}
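
The route.c hunks, like the ipt_REDIRECT one above, split the old __in_dev_get() into __in_dev_get_rcu(), for callers inside rcu_read_lock() as in ip_route_input(), and __in_dev_get_rtnl(), for callers relying on the RTNL as in ip_route_output_slow(); the accessor's name now documents which protection the caller must hold. The userspace sketch below only imitates that naming convention with C11 atomics and a mutex; none of these identifiers exist in the kernel, and the update/reclamation side of real RCU is elided.

/* Userspace analogue of splitting one accessor into a *_rcu() and a
 * *_locked() variant whose names state the required protection.
 * Hypothetical identifiers throughout. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct cfg { int mtu; };

static _Atomic(struct cfg *) shared_cfg;
static pthread_mutex_t cfg_mutex = PTHREAD_MUTEX_INITIALIZER;

/* For readers in a (here: lock-free) read-side section. */
static struct cfg *cfg_get_rcu(void)
{
	return atomic_load_explicit(&shared_cfg, memory_order_acquire);
}

/* For callers holding cfg_mutex, as __in_dev_get_rtnl() callers hold the RTNL. */
static struct cfg *cfg_get_locked(void)
{
	return atomic_load_explicit(&shared_cfg, memory_order_relaxed);
}

int main(void)
{
	static struct cfg boot_cfg = { .mtu = 1500 };
	struct cfg *c;

	atomic_store_explicit(&shared_cfg, &boot_cfg, memory_order_release);

	c = cfg_get_rcu();			/* reader path */
	if (c)
		printf("reader sees mtu %d\n", c->mtu);

	pthread_mutex_lock(&cfg_mutex);		/* locked path */
	c = cfg_get_locked();
	if (c)
		c->mtu = 9000;
	pthread_mutex_unlock(&cfg_mutex);

	return 0;
}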
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index b940346de4e..6d80e063c18 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -136,7 +136,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
/* slow start */
ca->cnt = (cwnd * (BICTCP_B-1))
- / cwnd-ca->last_max_cwnd;
+ / (cwnd - ca->last_max_cwnd);
else
/* linear increase */
ca->cnt = cwnd / max_increment;
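
The tcp_bic.c fix above is an operator-precedence bug: without parentheses the expression divides by cwnd alone and then subtracts last_max_cwnd, instead of dividing by the distance to the previous maximum, and the subtraction can wrap the unsigned result. The following check uses made-up values (cwnd = 100, last_max_cwnd = 80) and the BICTCP_B constant from tcp_bic.c:

/* Demonstrates the precedence bug fixed above, with made-up values. */
#include <stdio.h>

#define BICTCP_B 4	/* same constant name as tcp_bic.c */

int main(void)
{
	unsigned int cwnd = 100, last_max_cwnd = 80, cnt;

	cnt = (cwnd * (BICTCP_B - 1)) / cwnd - last_max_cwnd;	/* old code */
	printf("without parentheses: %u\n", cnt);	/* 3 - 80 wraps to a huge value */

	cnt = (cwnd * (BICTCP_B - 1)) / (cwnd - last_max_cwnd);	/* fixed code */
	printf("with parentheses:    %u\n", cnt);	/* 300 / 20 = 15 */

	return 0;
}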
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 29222b96495..677419d0c9a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -355,8 +355,6 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
app_win -= icsk->icsk_ack.rcv_mss;
app_win = max(app_win, 2U*tp->advmss);
- if (!ofo_win)
- tp->window_clamp = min(tp->window_clamp, app_win);
tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
}
}
@@ -979,14 +977,19 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (!before(TCP_SKB_CB(skb)->seq, end_seq))
break;
+ in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
+ !before(end_seq, TCP_SKB_CB(skb)->end_seq);
+
pcount = tcp_skb_pcount(skb);
- if (pcount > 1 &&
- (after(start_seq, TCP_SKB_CB(skb)->seq) ||
- before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
+ if (pcount > 1 && !in_sack &&
+ after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
unsigned int pkt_len;
- if (after(start_seq, TCP_SKB_CB(skb)->seq))
+ in_sack = !after(start_seq,
+ TCP_SKB_CB(skb)->seq);
+
+ if (!in_sack)
pkt_len = (start_seq -
TCP_SKB_CB(skb)->seq);
else
@@ -999,9 +1002,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
fack_count += pcount;
- in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
- !before(end_seq, TCP_SKB_CB(skb)->end_seq);
-
sacked = TCP_SKB_CB(skb)->sacked;
/* Account D-SACK for retransmitted packet. */
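
The tcp_input.c hunks above move the in_sack computation ahead of the pcount check, so a multi-packet skb is only split when it actually straddles a SACK block edge. The small program below reproduces the kernel's before()/after() modulo-2^32 comparisons and the in_sack predicate so the cases can be tried on sample numbers, including across a sequence-number wrap; it is a standalone illustration, not kernel code.

/* Sequence-space comparisons used by the SACK-tagging change above. */
#include <stdint.h>
#include <stdio.h>

static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static int after(uint32_t seq1, uint32_t seq2)
{
	return before(seq2, seq1);
}

/* skb covers [seq, end_seq), the SACK block covers [start_seq, end_seq_blk) */
static int in_sack(uint32_t seq, uint32_t end_seq,
		   uint32_t start_seq, uint32_t end_seq_blk)
{
	return !after(start_seq, seq) && !before(end_seq_blk, end_seq);
}

int main(void)
{
	/* segment fully covered by the block */
	printf("%d\n", in_sack(1000, 2000, 1000, 3000));	/* 1 */
	/* segment straddling the left edge of the block: not in_sack,
	 * which is the case the patch now splits with tcp_fragment() */
	printf("%d\n", in_sack(500, 1500, 1000, 3000));		/* 0 */
	/* the same test still works across a sequence-number wrap */
	printf("%d\n", in_sack(0xffffff00u, 0x100, 0xffffff00u, 0x200)); /* 1 */
	return 0;
}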
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13dfb391cdf..c85819d8474 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -130,19 +130,20 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
int dif = sk->sk_bound_dev_if;
INET_ADDR_COOKIE(acookie, saddr, daddr)
const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
- const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
- struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
+ unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
+ struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
struct sock *sk2;
const struct hlist_node *node;
struct inet_timewait_sock *tw;
+ prefetch(head->chain.first);
write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */
sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
tw = inet_twsk(sk2);
- if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
+ if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
struct tcp_sock *tp = tcp_sk(sk);
@@ -179,7 +180,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
/* And established part... */
sk_for_each(sk2, node, &head->chain) {
- if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif))
+ if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
goto not_unique;
}
@@ -188,7 +189,7 @@ unique:
* in hash table socket with a funny identity. */
inet->num = lport;
inet->sport = htons(lport);
- sk->sk_hashent = hash;
+ sk->sk_hash = hash;
BUG_TRAP(sk_unhashed(sk));
__sk_add_node(sk, &head->chain);
sock_prot_inc_use(sk->sk_prot);
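
The tcp_ipv4.c hunks above switch to the full 32-bit inet_ehashfn() value, cache it in sk->sk_hash, and pass it to INET_MATCH()/INET_TW_MATCH() so a chain walk can reject non-matching sockets on one integer compare before the full address/port comparison (plus a prefetch of the chain head). The toy lookup below shows the same caching idea in plain userspace C; FNV-1a merely stands in for inet_ehashfn(), and all names are illustrative.

/* Toy version of the lookup pattern above: each node caches its full
 * 32-bit hash, so a chain walk can reject non-matches cheaply. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct node {
	struct node *next;
	uint32_t hash;			/* full hash, not the bucket index */
	char key[16];
	int value;
};

static uint32_t hash_key(const char *key)
{
	uint32_t h = 2166136261u;	/* FNV-1a, stand-in for inet_ehashfn() */

	while (*key)
		h = (h ^ (uint8_t)*key++) * 16777619u;
	return h;
}

static struct node *lookup(struct node **table, unsigned int size,
			   const char *key)
{
	uint32_t hash = hash_key(key);
	struct node *n;

	for (n = table[hash & (size - 1)]; n; n = n->next)
		if (n->hash == hash && !strcmp(n->key, key))	/* cheap test first */
			return n;
	return NULL;
}

int main(void)
{
	struct node a = { .next = NULL, .hash = hash_key("10.0.0.1:80"),
			  .key = "10.0.0.1:80", .value = 42 };
	struct node *table[16] = { NULL };
	struct node *hit;

	table[a.hash & 15] = &a;
	hit = lookup(table, 16, "10.0.0.1:80");
	printf("%d\n", hit ? hit->value : -1);
	return 0;
}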
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index a88db28b0af..b1a63b2c6b4 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -384,7 +384,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->frto_counter = 0;
newtp->frto_highmark = 0;
- newicsk->icsk_ca_ops = &tcp_reno;
+ newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c10e4435e3b..7114031fdc7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -190,7 +190,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
}
/* Set initial window to value enough for senders,
- * following RFC1414. Senders, not following this RFC,
+ * following RFC2414. Senders, not following this RFC,
* will be satisfied with 2.
*/
if (mss > (1<<*rcv_wscale)) {
@@ -435,6 +435,17 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
int nsize, old_factor;
u16 flags;
+ if (unlikely(len >= skb->len)) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
+ "end_seq=%u, skb->len=%u.\n", len, mss_now,
+ TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
+ skb->len);
+ WARN_ON(1);
+ }
+ return 0;
+ }
+
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;
@@ -459,9 +470,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
flags = TCP_SKB_CB(skb)->flags;
TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
TCP_SKB_CB(buff)->flags = flags;
- TCP_SKB_CB(buff)->sacked =
- (TCP_SKB_CB(skb)->sacked &
- (TCPCB_LOST | TCPCB_EVER_RETRANS | TCPCB_AT_TAIL));
+ TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
@@ -499,11 +508,26 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
tcp_skb_pcount(buff);
tp->packets_out -= diff;
+
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+ tp->sacked_out -= diff;
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+ tp->retrans_out -= diff;
+
if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
tp->lost_out -= diff;
tp->left_out -= diff;
}
+
if (diff > 0) {
+ /* Adjust Reno SACK estimate. */
+ if (!tp->rx_opt.sack_ok) {
+ tp->sacked_out -= diff;
+ if ((int)tp->sacked_out < 0)
+ tp->sacked_out = 0;
+ tcp_sync_left_out(tp);
+ }
+
tp->fackets_out -= diff;
if ((int)tp->fackets_out < 0)
tp->fackets_out = 0;
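
The hunk above extends tcp_fragment()'s accounting: when splitting an already-transmitted GSO skb shrinks its total packet count by diff, every per-flow counter that included those packets (sacked_out, retrans_out, lost_out, left_out, fackets_out) must shrink by the same amount, and for Reno flows, where sacked_out is only a duplicate-ACK estimate, it is reduced, clamped at zero, and left_out is resynced as in tcp_sync_left_out(). The toy model below condenses that bookkeeping into one function so it can be stepped through; field and function names are simplified stand-ins, not the kernel's.

/* Toy model of the counter bookkeeping added above. */
#include <stdio.h>

struct flow {
	int packets_out, sacked_out, retrans_out, lost_out, left_out;
	int sack_ok;
};

static void fragment_adjust(struct flow *f, int diff,
			    int was_sacked, int was_retrans, int was_lost)
{
	f->packets_out -= diff;
	if (was_sacked)
		f->sacked_out -= diff;
	if (was_retrans)
		f->retrans_out -= diff;
	if (was_lost) {
		f->lost_out -= diff;
		f->left_out -= diff;
	}
	if (diff > 0 && !f->sack_ok) {	/* Reno: sacked_out is a dupACK estimate */
		f->sacked_out -= diff;
		if (f->sacked_out < 0)
			f->sacked_out = 0;
		f->left_out = f->sacked_out + f->lost_out;	/* tcp_sync_left_out() */
	}
}

int main(void)
{
	struct flow f = { .packets_out = 10, .sacked_out = 1, .lost_out = 2,
			  .left_out = 3, .sack_ok = 0 };

	/* an 8-packet skb was split so the two halves now count 7: diff = 1 */
	fragment_adjust(&f, 1, 0, 0, 0);
	printf("packets_out=%d sacked_out=%d left_out=%d\n",
	       f.packets_out, f.sacked_out, f.left_out);
	return 0;
}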
@@ -1595,7 +1619,7 @@ void tcp_send_fin(struct sock *sk)
* was unread data in the receive queue. This behavior is recommended
* by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM
*/
-void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
+void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;