path: root/net/core
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c          2
-rw-r--r--  net/core/flow.c        42
-rw-r--r--  net/core/netpoll.c     14
-rw-r--r--  net/core/pktgen.c      30
-rw-r--r--  net/core/rtnetlink.c    2
-rw-r--r--  net/core/scm.c          3
-rw-r--r--  net/core/skbuff.c      10
-rw-r--r--  net/core/sock.c         4
-rw-r--r--  net/core/utils.c      116
-rw-r--r--  net/core/wireless.c    33
10 files changed, 97 insertions, 159 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 4d891beab13..81c426adcd1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3502,8 +3502,6 @@ static int __init net_dev_init(void)
BUG_ON(!dev_boot_phase);
- net_random_init();
-
if (dev_proc_init())
goto out;
diff --git a/net/core/flow.c b/net/core/flow.c
index f23e7e38654..b16d31ae5e5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -85,6 +85,14 @@ static void flow_cache_new_hashrnd(unsigned long arg)
add_timer(&flow_hash_rnd_timer);
}
+static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+{
+ if (fle->object)
+ atomic_dec(fle->object_ref);
+ kmem_cache_free(flow_cachep, fle);
+ flow_count(cpu)--;
+}
+
static void __flow_cache_shrink(int cpu, int shrink_to)
{
struct flow_cache_entry *fle, **flp;
@@ -100,10 +108,7 @@ static void __flow_cache_shrink(int cpu, int shrink_to)
}
while ((fle = *flp) != NULL) {
*flp = fle->next;
- if (fle->object)
- atomic_dec(fle->object_ref);
- kmem_cache_free(flow_cachep, fle);
- flow_count(cpu)--;
+ flow_entry_kill(cpu, fle);
}
}
}
@@ -220,24 +225,33 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
nocache:
{
+ int err;
void *obj;
atomic_t *obj_ref;
- resolver(key, family, dir, &obj, &obj_ref);
+ err = resolver(key, family, dir, &obj, &obj_ref);
if (fle) {
- fle->genid = atomic_read(&flow_cache_genid);
-
- if (fle->object)
- atomic_dec(fle->object_ref);
-
- fle->object = obj;
- fle->object_ref = obj_ref;
- if (obj)
- atomic_inc(fle->object_ref);
+ if (err) {
+ /* Force security policy check on next lookup */
+ *head = fle->next;
+ flow_entry_kill(cpu, fle);
+ } else {
+ fle->genid = atomic_read(&flow_cache_genid);
+
+ if (fle->object)
+ atomic_dec(fle->object_ref);
+
+ fle->object = obj;
+ fle->object_ref = obj_ref;
+ if (obj)
+ atomic_inc(fle->object_ref);
+ }
}
local_bh_enable();
+ if (err)
+ obj = ERR_PTR(err);
return obj;
}
}
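With resolver() now returning an error code, flow_cache_lookup() hands that error back to its caller encoded in the pointer itself. A minimal user-space sketch of the ERR_PTR/IS_ERR convention this relies on; the lookup() helper here is hypothetical and stands in for the resolver-backed lookup:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a resolver-backed lookup: returns an object on success,
 * or an errno encoded in the pointer value on failure. */
static void *lookup(int deny)
{
	static int object = 42;

	if (deny)
		return ERR_PTR(-EACCES);
	return &object;
}

int main(void)
{
	void *obj = lookup(1);

	if (IS_ERR(obj))
		printf("lookup failed: %ld\n", PTR_ERR(obj));
	else
		printf("object = %d\n", *(int *)obj);
	return 0;
}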
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ead5920c26d..6589adb14cb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -335,13 +335,19 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
memcpy(skb->data, msg, len);
skb->len += len;
- udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
+ skb->h.uh = udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
udph->source = htons(np->local_port);
udph->dest = htons(np->remote_port);
udph->len = htons(udp_len);
udph->check = 0;
+ udph->check = csum_tcpudp_magic(htonl(np->local_ip),
+ htonl(np->remote_ip),
+ udp_len, IPPROTO_UDP,
+ csum_partial((unsigned char *)udph, udp_len, 0));
+ if (udph->check == 0)
+ udph->check = -1;
- iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
+ skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph));
/* iph->version = 4; iph->ihl = 5; */
put_unaligned(0x45, (unsigned char *)iph);
@@ -357,8 +363,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
-
- eth->h_proto = htons(ETH_P_IP);
+ skb->mac.raw = skb->data;
+ skb->protocol = eth->h_proto = htons(ETH_P_IP);
memcpy(eth->h_source, np->local_mac, 6);
memcpy(eth->h_dest, np->remote_mac, 6);
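The checksum fixup above follows RFC 768: a computed UDP checksum of zero must be transmitted as all ones, because zero on the wire means "no checksum was generated". A user-space sketch of the one's-complement fold and the zero substitution; the pseudo-header that csum_tcpudp_magic() folds in is omitted here for brevity:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Fold a 32-bit running sum to 16 bits and take the one's complement. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

static uint16_t udp_checksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	uint16_t check;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	if (len & 1)				/* pad odd-length data with zero */
		sum += (uint32_t)(buf[len - 1] << 8);

	check = csum_fold(sum);
	return check ? check : 0xffff;		/* zero means "no checksum" */
}

int main(void)
{
	uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };

	printf("checksum = 0x%04x\n", udp_checksum(payload, sizeof(payload)));
	return 0;
}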
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index dd023fd2830..733d86d0a4f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2304,6 +2304,12 @@ static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
*mpls |= MPLS_STACK_BOTTOM;
}
+static inline __be16 build_tci(unsigned int id, unsigned int cfi,
+ unsigned int prio)
+{
+ return htons(id | (cfi << 12) | (prio << 13));
+}
+
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct pktgen_dev *pkt_dev)
{
@@ -2353,16 +2359,16 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
if (pkt_dev->vlan_id != 0xffff) {
if(pkt_dev->svlan_id != 0xffff) {
svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
- *svlan_tci = htons(pkt_dev->svlan_id);
- *svlan_tci |= pkt_dev->svlan_p << 5;
- *svlan_tci |= pkt_dev->svlan_cfi << 4;
+ *svlan_tci = build_tci(pkt_dev->svlan_id,
+ pkt_dev->svlan_cfi,
+ pkt_dev->svlan_p);
svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
*svlan_encapsulated_proto = __constant_htons(ETH_P_8021Q);
}
vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
- *vlan_tci = htons(pkt_dev->vlan_id);
- *vlan_tci |= pkt_dev->vlan_p << 5;
- *vlan_tci |= pkt_dev->vlan_cfi << 4;
+ *vlan_tci = build_tci(pkt_dev->vlan_id,
+ pkt_dev->vlan_cfi,
+ pkt_dev->vlan_p);
vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
*vlan_encapsulated_proto = __constant_htons(ETH_P_IP);
}
@@ -2689,16 +2695,16 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
if (pkt_dev->vlan_id != 0xffff) {
if(pkt_dev->svlan_id != 0xffff) {
svlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
- *svlan_tci = htons(pkt_dev->svlan_id);
- *svlan_tci |= pkt_dev->svlan_p << 5;
- *svlan_tci |= pkt_dev->svlan_cfi << 4;
+ *svlan_tci = build_tci(pkt_dev->svlan_id,
+ pkt_dev->svlan_cfi,
+ pkt_dev->svlan_p);
svlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
*svlan_encapsulated_proto = __constant_htons(ETH_P_8021Q);
}
vlan_tci = (__be16 *)skb_put(skb, sizeof(__be16));
- *vlan_tci = htons(pkt_dev->vlan_id);
- *vlan_tci |= pkt_dev->vlan_p << 5;
- *vlan_tci |= pkt_dev->vlan_cfi << 4;
+ *vlan_tci = build_tci(pkt_dev->vlan_id,
+ pkt_dev->vlan_cfi,
+ pkt_dev->vlan_p);
vlan_encapsulated_proto = (__be16 *)skb_put(skb, sizeof(__be16));
*vlan_encapsulated_proto = __constant_htons(ETH_P_IPV6);
}
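build_tci() packs the fields at the offsets 802.1Q defines (bits 0-11 VLAN ID, bit 12 CFI, bits 13-15 priority) and converts the whole tag to network byte order, whereas the old code OR'd host-order shifted bits into an already byte-swapped value. A user-space sketch of the same helper:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htons/ntohs */

static uint16_t build_tci(unsigned int id, unsigned int cfi, unsigned int prio)
{
	return htons((uint16_t)(id | (cfi << 12) | (prio << 13)));
}

int main(void)
{
	/* VLAN 100, CFI 0, priority 5 -> 0xa064 in host order */
	printf("TCI = 0x%04x\n", ntohs(build_tci(100, 0, 5)));
	return 0;
}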
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 221e4038216..02f3c794789 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -602,7 +602,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
goto errout;
}
- err = rtnl_unicast(skb, NETLINK_CB(skb).pid);
+ err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
errout:
kfree(iw_buf);
dev_put(dev);
diff --git a/net/core/scm.c b/net/core/scm.c
index 649d01ef35b..271cf060ef8 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -245,8 +245,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
if (i > 0)
{
int cmlen = CMSG_LEN(i*sizeof(int));
- if (!err)
- err = put_user(SOL_SOCKET, &cm->cmsg_level);
+ err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c23760c582..b8b10635804 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -639,6 +639,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
n->csum = skb->csum;
n->ip_summed = skb->ip_summed;
+ n->truesize += skb->data_len;
n->data_len = skb->data_len;
n->len = skb->len;
@@ -1946,7 +1947,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
do {
struct sk_buff *nskb;
skb_frag_t *frag;
- int hsize, nsize;
+ int hsize;
int k;
int size;
@@ -1957,11 +1958,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
hsize = skb_headlen(skb) - offset;
if (hsize < 0)
hsize = 0;
- nsize = hsize + doffset;
- if (nsize > len + doffset || !sg)
- nsize = len + doffset;
+ if (hsize > len || !sg)
+ hsize = len;
- nskb = alloc_skb(nsize + headroom, GFP_ATOMIC);
+ nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
if (unlikely(!nskb))
goto err;
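The rewrite drops the separate nsize variable: clamping hsize to len first and adding doffset at the allocation gives the same size the old nsize computation produced. A quick user-space check of that equivalence (the constants are arbitrary):

#include <assert.h>

static int old_size(int hsize, int doffset, int len, int headroom)
{
	int nsize = hsize + doffset;

	if (nsize > len + doffset)
		nsize = len + doffset;
	return nsize + headroom;
}

static int new_size(int hsize, int doffset, int len, int headroom)
{
	if (hsize > len)
		hsize = len;
	return hsize + doffset + headroom;
}

int main(void)
{
	int h, l;

	for (h = 0; h < 64; h++)
		for (l = 0; l < 64; l++)
			assert(old_size(h, 14, l, 32) == new_size(h, 14, l, 32));
	return 0;
}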
diff --git a/net/core/sock.c b/net/core/sock.c
index b77e155cbe6..ee6cd2541d3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -823,7 +823,7 @@ static void inline sock_lock_init(struct sock *sk)
af_family_slock_key_strings[sk->sk_family]);
lockdep_init_map(&sk->sk_lock.dep_map,
af_family_key_strings[sk->sk_family],
- af_family_keys + sk->sk_family);
+ af_family_keys + sk->sk_family, 0);
}
/**
@@ -1160,7 +1160,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
goto failure;
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
- skb = alloc_skb(header_len, sk->sk_allocation);
+ skb = alloc_skb(header_len, gfp_mask);
if (skb) {
int npages;
int i;
diff --git a/net/core/utils.c b/net/core/utils.c
index 94c5d761c83..d93fe64f669 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -30,119 +30,6 @@
#include <asm/system.h>
#include <asm/uaccess.h>
-/*
- This is a maximally equidistributed combined Tausworthe generator
- based on code from GNU Scientific Library 1.5 (30 Jun 2004)
-
- x_n = (s1_n ^ s2_n ^ s3_n)
-
- s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
- s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
- s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
-
- The period of this generator is about 2^88.
-
- From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
- Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
-
- This is available on the net from L'Ecuyer's home page,
-
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
- ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
-
- There is an erratum in the paper "Tables of Maximally
- Equidistributed Combined LFSR Generators", Mathematics of
- Computation, 68, 225 (1999), 261--269:
- http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
-
- ... the k_j most significant bits of z_j must be non-
- zero, for each j. (Note: this restriction also applies to the
- computer code given in [4], but was mistakenly not mentioned in
- that paper.)
-
- This affects the seeding procedure by imposing the requirement
- s1 > 1, s2 > 7, s3 > 15.
-
-*/
-struct nrnd_state {
- u32 s1, s2, s3;
-};
-
-static DEFINE_PER_CPU(struct nrnd_state, net_rand_state);
-
-static u32 __net_random(struct nrnd_state *state)
-{
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
- state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
- state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
- state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
-
- return (state->s1 ^ state->s2 ^ state->s3);
-}
-
-static void __net_srandom(struct nrnd_state *state, unsigned long s)
-{
- if (s == 0)
- s = 1; /* default seed is 1 */
-
-#define LCG(n) (69069 * n)
- state->s1 = LCG(s);
- state->s2 = LCG(state->s1);
- state->s3 = LCG(state->s2);
-
- /* "warm it up" */
- __net_random(state);
- __net_random(state);
- __net_random(state);
- __net_random(state);
- __net_random(state);
- __net_random(state);
-}
-
-
-unsigned long net_random(void)
-{
- unsigned long r;
- struct nrnd_state *state = &get_cpu_var(net_rand_state);
- r = __net_random(state);
- put_cpu_var(state);
- return r;
-}
-
-
-void net_srandom(unsigned long entropy)
-{
- struct nrnd_state *state = &get_cpu_var(net_rand_state);
- __net_srandom(state, state->s1^entropy);
- put_cpu_var(state);
-}
-
-void __init net_random_init(void)
-{
- int i;
-
- for_each_possible_cpu(i) {
- struct nrnd_state *state = &per_cpu(net_rand_state,i);
- __net_srandom(state, i+jiffies);
- }
-}
-
-static int net_random_reseed(void)
-{
- int i;
- unsigned long seed;
-
- for_each_possible_cpu(i) {
- struct nrnd_state *state = &per_cpu(net_rand_state,i);
-
- get_random_bytes(&seed, sizeof(seed));
- __net_srandom(state, seed);
- }
- return 0;
-}
-late_initcall(net_random_reseed);
-
int net_msg_cost = 5*HZ;
int net_msg_burst = 10;
@@ -153,10 +40,7 @@ int net_ratelimit(void)
{
return __printk_ratelimit(net_msg_cost, net_msg_burst);
}
-
-EXPORT_SYMBOL(net_random);
EXPORT_SYMBOL(net_ratelimit);
-EXPORT_SYMBOL(net_srandom);
/*
* Convert an ASCII string to binary IP.
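For reference, the generator deleted above in one runnable user-space piece: the combined Tausworthe recurrence from the removed comment, seeded the way __net_srandom() did. Since dev.c also stops calling net_random_init(), the generator presumably moves out of net/core into a generic library PRNG rather than disappearing.

#include <stdint.h>
#include <stdio.h>

struct taus_state { uint32_t s1, s2, s3; };

#define TAUSWORTHE(s, a, b, c, d) \
	((((s) & (c)) << (d)) ^ ((((s) << (a)) ^ (s)) >> (b)))

static uint32_t taus_next(struct taus_state *st)
{
	st->s1 = TAUSWORTHE(st->s1, 13, 19, 4294967294u, 12);
	st->s2 = TAUSWORTHE(st->s2,  2, 25, 4294967288u,  4);
	st->s3 = TAUSWORTHE(st->s3,  3, 11, 4294967280u, 17);
	return st->s1 ^ st->s2 ^ st->s3;
}

static void taus_seed(struct taus_state *st, uint32_t seed)
{
	int i;

	if (seed == 0)
		seed = 1;			/* default seed is 1 */
	st->s1 = 69069 * seed;			/* LCG(s), as in the deleted code */
	st->s2 = 69069 * st->s1;
	st->s3 = 69069 * st->s2;
	for (i = 0; i < 6; i++)			/* "warm it up" */
		taus_next(st);
}

int main(void)
{
	struct taus_state st;

	taus_seed(&st, 12345);
	printf("%u\n", taus_next(&st));
	return 0;
}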
diff --git a/net/core/wireless.c b/net/core/wireless.c
index ffff0da46c6..cb1b8728d7e 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -748,11 +748,39 @@ static int ioctl_standard_call(struct net_device * dev,
int extra_size;
int user_length = 0;
int err;
+ int essid_compat = 0;
/* Calculate space needed by arguments. Always allocate
* for max space. Easier, and won't last long... */
extra_size = descr->max_tokens * descr->token_size;
+ /* Check need for ESSID compatibility for WE < 21 */
+ switch (cmd) {
+ case SIOCSIWESSID:
+ case SIOCGIWESSID:
+ case SIOCSIWNICKN:
+ case SIOCGIWNICKN:
+ if (iwr->u.data.length == descr->max_tokens + 1)
+ essid_compat = 1;
+ else if (IW_IS_SET(cmd) && (iwr->u.data.length != 0)) {
+ char essid[IW_ESSID_MAX_SIZE + 1];
+
+ err = copy_from_user(essid, iwr->u.data.pointer,
+ iwr->u.data.length *
+ descr->token_size);
+ if (err)
+ return -EFAULT;
+
+ if (essid[iwr->u.data.length - 1] == '\0')
+ essid_compat = 1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ iwr->u.data.length -= essid_compat;
+
/* Check what user space is giving us */
if(IW_IS_SET(cmd)) {
/* Check NULL pointer */
@@ -795,7 +823,8 @@ static int ioctl_standard_call(struct net_device * dev,
#endif /* WE_IOCTL_DEBUG */
/* Create the kernel buffer */
- extra = kmalloc(extra_size, GFP_KERNEL);
+ /* kzalloc ensures NULL-termination for essid_compat */
+ extra = kzalloc(extra_size, GFP_KERNEL);
if (extra == NULL) {
return -ENOMEM;
}
@@ -819,6 +848,8 @@ static int ioctl_standard_call(struct net_device * dev,
/* Call the handler */
ret = handler(dev, &info, &(iwr->u), extra);
+ iwr->u.data.length += essid_compat;
+
/* If we have something to return to the user */
if (!ret && IW_IS_GET(cmd)) {
/* Check if there is enough buffer up there */
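The compat path exists because Wireless Extensions before version 21 counted the trailing '\0' in u.data.length for ESSID and nickname requests, while WE-21 and later do not; the length is reduced before the handler runs and restored afterwards so old binaries keep working. A hypothetical user-space illustration of the same normalization:

#include <stdio.h>
#include <string.h>

/* Old tools report strlen(essid) + 1; detect and drop the counted NUL. */
static unsigned int essid_wire_length(const char *essid, unsigned int length)
{
	if (length && essid[length - 1] == '\0')
		return length - 1;
	return length;
}

int main(void)
{
	char essid[] = "example-ap";

	printf("handler sees length %u\n",
	       essid_wire_length(essid, sizeof(essid)));	/* 10, not 11 */
	return 0;
}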