Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/9p/conv.c1
-rw-r--r--net/atm/lec.c1
-rw-r--r--net/bluetooth/l2cap.c48
-rw-r--r--net/bluetooth/rfcomm/tty.c5
-rw-r--r--net/bridge/br_device.c2
-rw-r--r--net/bridge/br_if.c77
-rw-r--r--net/bridge/br_input.c6
-rw-r--r--net/core/dev.c132
-rw-r--r--net/core/ethtool.c64
-rw-r--r--net/core/pktgen.c106
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/utils.c1
-rw-r--r--net/dccp/ccid.c1
-rw-r--r--net/dccp/feat.c14
-rw-r--r--net/decnet/dn_route.c4
-rw-r--r--net/ieee80211/softmac/ieee80211softmac_wx.c11
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/ip_options.c7
-rw-r--r--net/ipv4/ip_output.c1
-rw-r--r--net/ipv4/ipconfig.c6
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c3
-rw-r--r--net/ipv4/ipvs/ip_vs_xmit.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c1
-rw-r--r--net/ipv4/netfilter/ipt_recent.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c5
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c2
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_bic.c2
-rw-r--r--net/ipv4/tcp_cubic.c48
-rw-r--r--net/ipv4/tcp_htcp.c14
-rw-r--r--net/ipv4/tcp_illinois.c8
-rw-r--r--net/ipv4/tcp_input.c74
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/tcp_lp.c6
-rw-r--r--net/ipv4/tcp_vegas.c6
-rw-r--r--net/ipv4/tcp_vegas.h2
-rw-r--r--net/ipv4/tcp_veno.c6
-rw-r--r--net/ipv4/tcp_westwood.c7
-rw-r--r--net/ipv4/tcp_yeah.c4
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/addrconf_core.c3
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/ip6_tunnel.c17
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c4
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/iucv/iucv.c15
-rw-r--r--net/key/af_key.c7
-rw-r--r--net/mac80211/debugfs_netdev.c8
-rw-r--r--net/mac80211/debugfs_sta.c2
-rw-r--r--net/mac80211/ieee80211.c1
-rw-r--r--net/mac80211/ieee80211_ioctl.c19
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_expect.c13
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c17
-rw-r--r--net/netfilter/xt_connlimit.c6
-rw-r--r--net/netfilter/xt_u32.c11
-rw-r--r--net/netlabel/netlabel_domainhash.c10
-rw-r--r--net/netlabel/netlabel_user.c4
-rw-r--r--net/rxrpc/ar-connection.c2
-rw-r--r--net/sched/act_police.c1
-rw-r--r--net/sched/sch_api.c17
-rw-r--r--net/sched/sch_ingress.c3
-rw-r--r--net/sched/sch_prio.c13
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/sm_make_chunk.c6
-rw-r--r--net/sctp/sm_statefuns.c103
-rw-r--r--net/sctp/socket.c45
-rw-r--r--net/sctp/tsnmap.c14
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c9
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c1
-rw-r--r--net/sunrpc/cache.c3
-rw-r--r--net/sunrpc/rpc_pipe.c3
-rw-r--r--net/sunrpc/sched.c57
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/tipc/link.c30
-rw-r--r--net/tipc/msg.h6
-rw-r--r--net/tipc/name_table.c2
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/port.c1
-rw-r--r--net/unix/af_unix.c30
-rw-r--r--net/wanrouter/wanmain.c1
-rw-r--r--net/xfrm/xfrm_policy.c11
-rw-r--r--net/xfrm/xfrm_state.c7
-rw-r--r--net/xfrm/xfrm_user.c7
94 files changed, 711 insertions, 549 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4d2aa4dd42a..4bab322c9f8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -668,9 +668,6 @@ int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (real_dev->do_ioctl && netif_device_present(real_dev))
err = real_dev->do_ioctl(real_dev, &ifrr, cmd);
break;
-
- case SIOCETHTOOL:
- err = dev_ethtool(&ifrr);
}
if (!err)
diff --git a/net/9p/conv.c b/net/9p/conv.c
index 37451178e76..f2a041cb508 100644
--- a/net/9p/conv.c
+++ b/net/9p/conv.c
@@ -763,6 +763,7 @@ struct p9_fcall *p9_create_twrite(u32 fid, u64 offset, u32 count,
if (err) {
kfree(fc);
fc = ERR_PTR(err);
+ goto error;
}
if (buf_check_overflow(bufp)) {
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 2770fb451ae..59d5aa3366f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -21,7 +21,6 @@
#include <net/dst.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* TokenRing if needed */
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 670ff95ca64..c4e4ce4ebb2 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -353,7 +353,7 @@ static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16
}
/* ---- Socket interface ---- */
-static struct sock *__l2cap_get_sock_by_addr(u16 psm, bdaddr_t *src)
+static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
{
struct sock *sk;
struct hlist_node *node;
@@ -368,7 +368,7 @@ found:
/* Find socket with psm and source bdaddr.
* Returns closest match.
*/
-static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
+static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
@@ -392,7 +392,7 @@ static struct sock *__l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
/* Find socket with given address (psm, src).
* Returns locked socket */
-static inline struct sock *l2cap_get_sock_by_psm(int state, u16 psm, bdaddr_t *src)
+static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
struct sock *s;
read_lock(&l2cap_sk_list.lock);
@@ -586,7 +586,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
goto done;
}
- if (la->l2_psm > 0 && btohs(la->l2_psm) < 0x1001 &&
+ if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
!capable(CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto done;
@@ -748,7 +748,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
write_lock_bh(&l2cap_sk_list.lock);
for (psm = 0x1001; psm < 0x1100; psm += 2)
- if (!__l2cap_get_sock_by_addr(psm, src)) {
+ if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
l2cap_pi(sk)->psm = htobs(psm);
l2cap_pi(sk)->sport = htobs(psm);
err = 0;
@@ -873,7 +873,7 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
if (sk->sk_type == SOCK_DGRAM)
- put_unaligned(l2cap_pi(sk)->psm, (u16 *) skb_put(skb, 2));
+ put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
err = -EFAULT;
@@ -1256,11 +1256,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
break;
case 2:
- *val = __le16_to_cpu(*((u16 *)opt->val));
+ *val = __le16_to_cpu(*((__le16 *)opt->val));
break;
case 4:
- *val = __le32_to_cpu(*((u32 *)opt->val));
+ *val = __le32_to_cpu(*((__le32 *)opt->val));
break;
default:
@@ -1287,11 +1287,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
break;
case 2:
- *((u16 *) opt->val) = cpu_to_le16(val);
+ *((__le16 *) opt->val) = cpu_to_le16(val);
break;
case 4:
- *((u32 *) opt->val) = cpu_to_le32(val);
+ *((__le32 *) opt->val) = cpu_to_le32(val);
break;
default:
@@ -1406,7 +1406,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
int result = 0, status = 0;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
- u16 psm = req->psm;
+ __le16 psm = req->psm;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
@@ -1530,7 +1530,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
return 0;
}
-static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
+static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
u16 dcid, flags;
@@ -1550,7 +1550,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
goto unlock;
/* Reject if config buffer is too small. */
- len = cmd->len - sizeof(*req);
+ len = cmd_len - sizeof(*req);
if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
@@ -1748,15 +1748,16 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
l2cap_raw_recv(conn, skb);
while (len >= L2CAP_CMD_HDR_SIZE) {
+ u16 cmd_len;
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
data += L2CAP_CMD_HDR_SIZE;
len -= L2CAP_CMD_HDR_SIZE;
- cmd.len = __le16_to_cpu(cmd.len);
+ cmd_len = le16_to_cpu(cmd.len);
- BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
+ BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
- if (cmd.len > len || !cmd.ident) {
+ if (cmd_len > len || !cmd.ident) {
BT_DBG("corrupted command");
break;
}
@@ -1775,7 +1776,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
break;
case L2CAP_CONF_REQ:
- err = l2cap_config_req(conn, &cmd, data);
+ err = l2cap_config_req(conn, &cmd, cmd_len, data);
break;
case L2CAP_CONF_RSP:
@@ -1791,7 +1792,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
break;
case L2CAP_ECHO_REQ:
- l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd.len, data);
+ l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
break;
case L2CAP_ECHO_RSP:
@@ -1820,8 +1821,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
- data += cmd.len;
- len -= cmd.len;
+ data += cmd_len;
+ len -= cmd_len;
}
kfree_skb(skb);
@@ -1863,7 +1864,7 @@ done:
return 0;
}
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, u16 psm, struct sk_buff *skb)
+static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
struct sock *sk;
@@ -1893,7 +1894,8 @@ done:
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct l2cap_hdr *lh = (void *) skb->data;
- u16 cid, psm, len;
+ u16 cid, len;
+ __le16 psm;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
@@ -1907,7 +1909,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case 0x0002:
- psm = get_unaligned((u16 *) skb->data);
+ psm = get_unaligned((__le16 *) skb->data);
skb_pull(skb, 2);
l2cap_conless_channel(conn, psm, skb);
break;
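
The l2cap.c hunks above are endianness-annotation fixes: the PSM travels little-endian on the wire, so the code now carries it as __le16 and converts to host order exactly once, at the point a numeric comparison is needed (for example the bind check against 0x1001). A minimal userspace sketch of that convention, assuming a plain byte buffer in place of an sk_buff:

/* Read a little-endian PSM from the wire and convert once for the
 * privileged-range check the bind path performs. */
#include <stdint.h>
#include <stdio.h>

static uint16_t rd_le16(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
        /* First two bytes of a connectionless L2CAP payload: PSM 0x1001. */
        uint8_t wire[2] = { 0x01, 0x10 };
        uint16_t psm = rd_le16(wire);           /* host order */

        printf("psm 0x%4.4x needs CAP_NET_BIND_SERVICE: %s\n",
               psm, psm < 0x1001 ? "yes" : "no");
        return 0;
}
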
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 23ba61a13bd..22a832098d4 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -267,7 +267,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
out:
write_unlock_bh(&rfcomm_dev_lock);
- if (err) {
+ if (err < 0) {
kfree(dev);
return err;
}
@@ -275,9 +275,10 @@ out:
dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL);
if (IS_ERR(dev->tty_dev)) {
+ err = PTR_ERR(dev->tty_dev);
list_del(&dev->list);
kfree(dev);
- return PTR_ERR(dev->tty_dev);
+ return err;
}
return dev->id;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 5e1892d8d87..0eded176ce9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -179,5 +179,5 @@ void br_dev_setup(struct net_device *dev)
dev->priv_flags = IFF_EBRIDGE;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
- NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
+ NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 7b4ce9113be..749f0e8f541 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -29,35 +29,24 @@
* Determine initial path cost based on speed.
* using recommendations from 802.1d standard
*
- * Need to simulate user ioctl because not all device's that support
- * ethtool, use ethtool_ops. Also, since driver might sleep need to
- * not be holding any locks.
+ * Since driver might sleep need to not be holding any locks.
*/
static int port_cost(struct net_device *dev)
{
- struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- struct ifreq ifr;
- mm_segment_t old_fs;
- int err;
-
- strncpy(ifr.ifr_name, dev->name, IFNAMSIZ);
- ifr.ifr_data = (void __user *) &ecmd;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = dev_ethtool(&ifr);
- set_fs(old_fs);
-
- if (!err) {
- switch(ecmd.speed) {
- case SPEED_100:
- return 19;
- case SPEED_1000:
- return 4;
- case SPEED_10000:
- return 2;
- case SPEED_10:
- return 100;
+ if (dev->ethtool_ops->get_settings) {
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+ int err = dev->ethtool_ops->get_settings(dev, &ecmd);
+ if (!err) {
+ switch(ecmd.speed) {
+ case SPEED_100:
+ return 19;
+ case SPEED_1000:
+ return 4;
+ case SPEED_10000:
+ return 2;
+ case SPEED_10:
+ return 100;
+ }
}
}
@@ -360,43 +349,15 @@ int br_min_mtu(const struct net_bridge *br)
void br_features_recompute(struct net_bridge *br)
{
struct net_bridge_port *p;
- unsigned long features, checksum;
+ unsigned long features;
- checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
- features = br->feature_mask & ~NETIF_F_ALL_CSUM;
+ features = br->feature_mask;
list_for_each_entry(p, &br->port_list, list) {
- unsigned long feature = p->dev->features;
-
- /* if device needs checksumming, downgrade to hw checksumming */
- if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
- checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
-
- /* if device can't do all checksum, downgrade to ipv4/ipv6 */
- if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
- checksum ^= NETIF_F_HW_CSUM
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-
- if (checksum & NETIF_F_IPV6_CSUM && !(feature & NETIF_F_IPV6_CSUM))
- checksum &= ~NETIF_F_IPV6_CSUM;
-
- if (!(feature & NETIF_F_IP_CSUM))
- checksum = 0;
-
- if (feature & NETIF_F_GSO)
- feature |= NETIF_F_GSO_SOFTWARE;
- feature |= NETIF_F_GSO;
-
- features &= feature;
+ features = netdev_compute_features(features, p->dev->features);
}
- if (!(checksum & NETIF_F_ALL_CSUM))
- features &= ~NETIF_F_SG;
- if (!(features & NETIF_F_SG))
- features &= ~NETIF_F_GSO_MASK;
-
- br->dev->features = features | checksum | NETIF_F_LLTX |
- NETIF_F_GSO_ROBUST;
+ br->dev->features = features;
}
/* called with RTNL */
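
The new port_cost() asks the driver's ethtool_ops->get_settings() directly instead of faking a user ioctl under set_fs(); the speed-to-cost table itself (the 802.1D recommended path costs) is unchanged. A standalone sketch of just that table, with the fallback simplified to the 10 Mbit cost:

#include <stdio.h>

static int speed_to_path_cost(int speed_mbit)
{
        switch (speed_mbit) {
        case 10:    return 100;
        case 100:   return 19;
        case 1000:  return 4;
        case 10000: return 2;
        default:    return 100; /* simplified: the kernel has extra fallbacks */
        }
}

int main(void)
{
        int speeds[] = { 10, 100, 1000, 10000 };
        unsigned int i;

        for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
                printf("%5d Mbit/s -> STP path cost %d\n",
                       speeds[i], speed_to_path_cost(speeds[i]));
        return 0;
}
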
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 420bbb9955e..5c18595b761 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -112,9 +112,9 @@ static int br_handle_local_finish(struct sk_buff *skb)
*/
static inline int is_link_local(const unsigned char *dest)
{
- const u16 *a = (const u16 *) dest;
- static const u16 *const b = (const u16 *const ) br_group_address;
- static const u16 m = __constant_cpu_to_be16(0xfff0);
+ __be16 *a = (__be16 *)dest;
+ static const __be16 *b = (const __be16 *)br_group_address;
+ static const __be16 m = __constant_cpu_to_be16(0xfff0);
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index ee4035571c2..a76021c7120 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -817,7 +817,9 @@ int dev_alloc_name(struct net_device *dev, const char *name)
*/
int dev_change_name(struct net_device *dev, char *newname)
{
+ char oldname[IFNAMSIZ];
int err = 0;
+ int ret;
ASSERT_RTNL();
@@ -827,6 +829,8 @@ int dev_change_name(struct net_device *dev, char *newname)
if (!dev_valid_name(newname))
return -EINVAL;
+ memcpy(oldname, dev->name, IFNAMSIZ);
+
if (strchr(newname, '%')) {
err = dev_alloc_name(dev, newname);
if (err < 0)
@@ -838,10 +842,28 @@ int dev_change_name(struct net_device *dev, char *newname)
else
strlcpy(dev->name, newname, IFNAMSIZ);
+rollback:
device_rename(&dev->dev, dev->name);
+
+ write_lock_bh(&dev_base_lock);
hlist_del(&dev->name_hlist);
hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
- raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
+ write_unlock_bh(&dev_base_lock);
+
+ ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
+ ret = notifier_to_errno(ret);
+
+ if (ret) {
+ if (err) {
+ printk(KERN_ERR
+ "%s: name change rollback failed: %d.\n",
+ dev->name, ret);
+ } else {
+ err = ret;
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ goto rollback;
+ }
+ }
return err;
}
@@ -1054,20 +1076,43 @@ int dev_close(struct net_device *dev)
int register_netdevice_notifier(struct notifier_block *nb)
{
struct net_device *dev;
+ struct net_device *last;
int err;
rtnl_lock();
err = raw_notifier_chain_register(&netdev_chain, nb);
- if (!err) {
- for_each_netdev(dev) {
- nb->notifier_call(nb, NETDEV_REGISTER, dev);
+ if (err)
+ goto unlock;
- if (dev->flags & IFF_UP)
- nb->notifier_call(nb, NETDEV_UP, dev);
- }
+ for_each_netdev(dev) {
+ err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
+ err = notifier_to_errno(err);
+ if (err)
+ goto rollback;
+
+ if (!(dev->flags & IFF_UP))
+ continue;
+
+ nb->notifier_call(nb, NETDEV_UP, dev);
}
+
+unlock:
rtnl_unlock();
return err;
+
+rollback:
+ last = dev;
+ for_each_netdev(dev) {
+ if (dev == last)
+ break;
+
+ if (dev->flags & IFF_UP) {
+ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+ nb->notifier_call(nb, NETDEV_DOWN, dev);
+ }
+ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+ }
+ goto unlock;
}
/**
@@ -2718,9 +2763,11 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
/**
* dev_unicast_delete - Release secondary unicast address.
* @dev: device
+ * @addr: address to delete
+ * @alen: length of @addr
*
* Release reference to a secondary unicast address and remove it
- * from the device if the reference count drop to zero.
+ * from the device if the reference count drops to zero.
*
* The caller must hold the rtnl_mutex.
*/
@@ -2742,6 +2789,8 @@ EXPORT_SYMBOL(dev_unicast_delete);
/**
* dev_unicast_add - add a secondary unicast address
* @dev: device
+ * @addr: address to delete
+ * @alen: length of @addr
*
* Add a secondary unicast address to the device or increase
* the reference count if it already exists.
@@ -3333,7 +3382,7 @@ int register_netdevice(struct net_device *dev)
if (!dev_valid_name(dev->name)) {
ret = -EINVAL;
- goto out;
+ goto err_uninit;
}
dev->ifindex = dev_new_index();
@@ -3347,7 +3396,7 @@ int register_netdevice(struct net_device *dev)
= hlist_entry(p, struct net_device, name_hlist);
if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
ret = -EEXIST;
- goto out;
+ goto err_uninit;
}
}
@@ -3407,7 +3456,7 @@ int register_netdevice(struct net_device *dev)
ret = netdev_register_sysfs(dev);
if (ret)
- goto out;
+ goto err_uninit;
dev->reg_state = NETREG_REGISTERED;
/*
@@ -3426,12 +3475,18 @@ int register_netdevice(struct net_device *dev)
write_unlock_bh(&dev_base_lock);
/* Notify protocols, that a new device appeared. */
- raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
-
- ret = 0;
+ ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ unregister_netdevice(dev);
out:
return ret;
+
+err_uninit:
+ if (dev->uninit)
+ dev->uninit(dev);
+ goto out;
}
/**
@@ -3830,9 +3885,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
#ifdef CONFIG_NET_DMA
/**
- * net_dma_rebalance -
- * This is called when the number of channels allocated to the net_dma_client
- * changes. The net_dma_client tries to have one DMA channel per CPU.
+ * net_dma_rebalance - try to maintain one DMA channel per CPU
+ * @net_dma: DMA client and associated data (lock, channels, channel_mask)
+ *
+ * This is called when the number of channels allocated to the net_dma client
+ * changes. The net_dma client tries to have one DMA channel per CPU.
*/
static void net_dma_rebalance(struct net_dma *net_dma)
@@ -3869,7 +3926,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
* netdev_dma_event - event callback for the net_dma_client
* @client: should always be net_dma_client
* @chan: DMA channel for the event
- * @event: event type
+ * @state: DMA state to be handled
*/
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
@@ -3936,6 +3993,45 @@ static int __init netdev_dma_register(void)
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */
+/**
+ * netdev_compute_feature - compute conjunction of two feature sets
+ * @all: first feature set
+ * @one: second feature set
+ *
+ * Computes a new feature set after adding a device with feature set
+ * @one to the master device with current feature set @all. Returns
+ * the new feature set.
+ */
+int netdev_compute_features(unsigned long all, unsigned long one)
+{
+ /* if device needs checksumming, downgrade to hw checksumming */
+ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
+ all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
+
+ /* if device can't do all checksum, downgrade to ipv4/ipv6 */
+ if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
+ all ^= NETIF_F_HW_CSUM
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ if (one & NETIF_F_GSO)
+ one |= NETIF_F_GSO_SOFTWARE;
+ one |= NETIF_F_GSO;
+
+ /* If even one device supports robust GSO, enable it for all. */
+ if (one & NETIF_F_GSO_ROBUST)
+ all |= NETIF_F_GSO_ROBUST;
+
+ all &= one | NETIF_F_LLTX;
+
+ if (!(all & NETIF_F_ALL_CSUM))
+ all &= ~NETIF_F_SG;
+ if (!(all & NETIF_F_SG))
+ all &= ~NETIF_F_GSO_MASK;
+
+ return all;
+}
+EXPORT_SYMBOL(netdev_compute_features);
+
/*
* Initialize the DEV module. At boot time this walks the device list and
* unhooks any devices that fail to initialise (normally hardware not
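
The new netdev_compute_features() centralises the feature folding that br_features_recompute() used to open-code. A standalone sketch of the idea, with FEAT_* bits as illustrative stand-ins for the real NETIF_F_* constants (the kernel helper additionally handles the NO_CSUM/HW_CSUM downgrade chain, LLTX and GSO_ROBUST):

#include <stdio.h>

#define FEAT_SG        (1UL << 0)
#define FEAT_HW_CSUM   (1UL << 1)
#define FEAT_GSO       (1UL << 2)

static unsigned long compute_features(unsigned long all, unsigned long one)
{
        all &= one;                     /* lowest common denominator */
        if (!(all & FEAT_HW_CSUM))      /* no csum offload -> no SG ... */
                all &= ~FEAT_SG;
        if (!(all & FEAT_SG))           /* ... and no SG -> no GSO */
                all &= ~FEAT_GSO;
        return all;
}

int main(void)
{
        unsigned long ports[] = {
                FEAT_SG | FEAT_HW_CSUM | FEAT_GSO,      /* fully offloaded NIC */
                FEAT_SG,                                /* no csum offload */
        };
        unsigned long features = FEAT_SG | FEAT_HW_CSUM | FEAT_GSO; /* mask */
        unsigned int i;

        for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
                features = compute_features(features, ports[i]);

        printf("bridge features: %#lx\n", features);    /* 0x0: all disabled */
        return 0;
}
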
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 0b531e98ec3..2ab0a60046a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -3,10 +3,12 @@
* Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
*
* This file is where we call all the ethtool_ops commands to get
- * the information ethtool needs. We fall back to calling do_ioctl()
- * for drivers which haven't been converted to ethtool_ops yet.
+ * the information ethtool needs.
*
- * It's GPL, stupid.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
*/
#include <linux/module.h>
@@ -93,18 +95,6 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
return 0;
}
-int ethtool_op_get_perm_addr(struct net_device *dev, struct ethtool_perm_addr *addr, u8 *data)
-{
- unsigned char len = dev->addr_len;
- if ( addr->size < len )
- return -ETOOSMALL;
-
- addr->size = len;
- memcpy(data, dev->perm_addr, len);
- return 0;
-}
-
-
u32 ethtool_op_get_ufo(struct net_device *dev)
{
return (dev->features & NETIF_F_UFO) != 0;
@@ -777,34 +767,20 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
{
struct ethtool_perm_addr epaddr;
- u8 *data;
- int ret;
-
- if (!dev->ethtool_ops->get_perm_addr)
- return -EOPNOTSUPP;
- if (copy_from_user(&epaddr,useraddr,sizeof(epaddr)))
+ if (copy_from_user(&epaddr, useraddr, sizeof(epaddr)))
return -EFAULT;
- data = kmalloc(epaddr.size, GFP_USER);
- if (!data)
- return -ENOMEM;
-
- ret = dev->ethtool_ops->get_perm_addr(dev,&epaddr,data);
- if (ret)
- return ret;
+ if (epaddr.size < dev->addr_len)
+ return -ETOOSMALL;
+ epaddr.size = dev->addr_len;
- ret = -EFAULT;
if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
- goto out;
+ return -EFAULT;
useraddr += sizeof(epaddr);
- if (copy_to_user(useraddr, data, epaddr.size))
- goto out;
- ret = 0;
-
- out:
- kfree(data);
- return ret;
+ if (copy_to_user(useraddr, dev->perm_addr, epaddr.size))
+ return -EFAULT;
+ return 0;
}
/* The main entry point in this file. Called from net/core/dev.c */
@@ -821,7 +797,7 @@ int dev_ethtool(struct ifreq *ifr)
return -ENODEV;
if (!dev->ethtool_ops)
- goto ioctl;
+ return -EOPNOTSUPP;
if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
return -EFAULT;
@@ -960,7 +936,7 @@ int dev_ethtool(struct ifreq *ifr)
rc = ethtool_set_gso(dev, useraddr);
break;
default:
- rc = -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
}
if (dev->ethtool_ops->complete)
@@ -970,20 +946,10 @@ int dev_ethtool(struct ifreq *ifr)
netdev_features_change(dev);
return rc;
-
- ioctl:
- /* Keep existing behaviour for the moment. */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (dev->do_ioctl)
- return dev->do_ioctl(dev, ifr, SIOCETHTOOL);
- return -EOPNOTSUPP;
}
EXPORT_SYMBOL(dev_ethtool);
EXPORT_SYMBOL(ethtool_op_get_link);
-EXPORT_SYMBOL_GPL(ethtool_op_get_perm_addr);
EXPORT_SYMBOL(ethtool_op_get_sg);
EXPORT_SYMBOL(ethtool_op_get_tso);
EXPORT_SYMBOL(ethtool_op_get_tx_csum);
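
ethtool_get_perm_addr() now answers ETHTOOL_GPERMADDR straight from dev->perm_addr rather than through a per-driver callback, so ethtool_op_get_perm_addr() can go away. The userspace side of the request is unchanged; a small sketch (the interface name "eth0" is a placeholder):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_perm_addr *epaddr;
        struct ifreq ifr;
        unsigned int i;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        epaddr = malloc(sizeof(*epaddr) + 32);
        if (fd < 0 || !epaddr)
                return 1;

        epaddr->cmd = ETHTOOL_GPERMADDR;
        epaddr->size = 32;                      /* must be >= dev->addr_len */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)epaddr;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GPERMADDR");
                return 1;
        }
        for (i = 0; i < epaddr->size; i++)
                printf("%02x%c", epaddr->data[i],
                       i + 1 == epaddr->size ? '\n' : ':');
        close(fd);
        free(epaddr);
        return 0;
}
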
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index bca787fdbc5..7bae576ac11 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -567,7 +567,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user * buf,
pktgen_run_all_threads();
else
- printk("pktgen: Unknown command: %s\n", data);
+ printk(KERN_WARNING "pktgen: Unknown command: %s\n", data);
err = count;
@@ -908,14 +908,14 @@ static ssize_t pktgen_if_write(struct file *file,
pg_result = &(pkt_dev->result[0]);
if (count < 1) {
- printk("pktgen: wrong command format\n");
+ printk(KERN_WARNING "pktgen: wrong command format\n");
return -EINVAL;
}
max = count - i;
tmp = count_trail_chars(&user_buffer[i], max);
if (tmp < 0) {
- printk("pktgen: illegal format\n");
+ printk(KERN_WARNING "pktgen: illegal format\n");
return tmp;
}
i += tmp;
@@ -943,7 +943,7 @@ static ssize_t pktgen_if_write(struct file *file,
if (copy_from_user(tb, user_buffer, count))
return -EFAULT;
tb[count] = 0;
- printk("pktgen: %s,%lu buffer -:%s:-\n", name,
+ printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
(unsigned long)count, tb);
}
@@ -1248,7 +1248,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_daddr = pkt_dev->daddr_min;
}
if (debug)
- printk("pktgen: dst_min set to: %s\n",
+ printk(KERN_DEBUG "pktgen: dst_min set to: %s\n",
pkt_dev->dst_min);
i += len;
sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
@@ -1271,7 +1271,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_daddr = pkt_dev->daddr_max;
}
if (debug)
- printk("pktgen: dst_max set to: %s\n",
+ printk(KERN_DEBUG "pktgen: dst_max set to: %s\n",
pkt_dev->dst_max);
i += len;
sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
@@ -1294,7 +1294,7 @@ static ssize_t pktgen_if_write(struct file *file,
ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
if (debug)
- printk("pktgen: dst6 set to: %s\n", buf);
+ printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6=%s", buf);
@@ -1317,7 +1317,7 @@ static ssize_t pktgen_if_write(struct file *file,
ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
&pkt_dev->min_in6_daddr);
if (debug)
- printk("pktgen: dst6_min set to: %s\n", buf);
+ printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6_min=%s", buf);
@@ -1338,7 +1338,7 @@ static ssize_t pktgen_if_write(struct file *file,
fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
if (debug)
- printk("pktgen: dst6_max set to: %s\n", buf);
+ printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: dst6_max=%s", buf);
@@ -1361,7 +1361,7 @@ static ssize_t pktgen_if_write(struct file *file,
ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
if (debug)
- printk("pktgen: src6 set to: %s\n", buf);
+ printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf);
i += len;
sprintf(pg_result, "OK: src6=%s", buf);
@@ -1382,7 +1382,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_saddr = pkt_dev->saddr_min;
}
if (debug)
- printk("pktgen: src_min set to: %s\n",
+ printk(KERN_DEBUG "pktgen: src_min set to: %s\n",
pkt_dev->src_min);
i += len;
sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
@@ -1403,7 +1403,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->cur_saddr = pkt_dev->saddr_max;
}
if (debug)
- printk("pktgen: src_max set to: %s\n",
+ printk(KERN_DEBUG "pktgen: src_max set to: %s\n",
pkt_dev->src_max);
i += len;
sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
@@ -1533,7 +1533,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk("pktgen: VLAN/SVLAN auto turned off\n");
+ printk(KERN_DEBUG "pktgen: VLAN/SVLAN auto turned off\n");
}
return count;
}
@@ -1548,10 +1548,10 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->vlan_id = value; /* turn on VLAN */
if (debug)
- printk("pktgen: VLAN turned on\n");
+ printk(KERN_DEBUG "pktgen: VLAN turned on\n");
if (debug && pkt_dev->nr_labels)
- printk("pktgen: MPLS auto turned off\n");
+ printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
pkt_dev->nr_labels = 0; /* turn off MPLS */
sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
@@ -1560,7 +1560,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk("pktgen: VLAN/SVLAN turned off\n");
+ printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
}
return count;
}
@@ -1605,10 +1605,10 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = value; /* turn on SVLAN */
if (debug)
- printk("pktgen: SVLAN turned on\n");
+ printk(KERN_DEBUG "pktgen: SVLAN turned on\n");
if (debug && pkt_dev->nr_labels)
- printk("pktgen: MPLS auto turned off\n");
+ printk(KERN_DEBUG "pktgen: MPLS auto turned off\n");
pkt_dev->nr_labels = 0; /* turn off MPLS */
sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
@@ -1617,7 +1617,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->svlan_id = 0xffff;
if (debug)
- printk("pktgen: VLAN/SVLAN turned off\n");
+ printk(KERN_DEBUG "pktgen: VLAN/SVLAN turned off\n");
}
return count;
}
@@ -1777,10 +1777,11 @@ static ssize_t pktgen_thread_write(struct file *file,
i += len;
if (debug)
- printk("pktgen: t=%s, count=%lu\n", name, (unsigned long)count);
+ printk(KERN_DEBUG "pktgen: t=%s, count=%lu\n",
+ name, (unsigned long)count);
if (!t) {
- printk("pktgen: ERROR: No thread\n");
+ printk(KERN_ERR "pktgen: ERROR: No thread\n");
ret = -EINVAL;
goto out;
}
@@ -1891,8 +1892,8 @@ static void pktgen_mark_device(const char *ifname)
mutex_lock(&pktgen_thread_lock);
if (++i >= max_tries) {
- printk("pktgen_mark_device: timed out after waiting "
- "%d msec for device %s to be removed\n",
+ printk(KERN_ERR "pktgen_mark_device: timed out after "
+ "waiting %d msec for device %s to be removed\n",
msec_per_try * i, ifname);
break;
}
@@ -1962,15 +1963,15 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
odev = dev_get_by_name(ifname);
if (!odev) {
- printk("pktgen: no such netdevice: \"%s\"\n", ifname);
+ printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
return -ENODEV;
}
if (odev->type != ARPHRD_ETHER) {
- printk("pktgen: not an ethernet device: \"%s\"\n", ifname);
+ printk(KERN_ERR "pktgen: not an ethernet device: \"%s\"\n", ifname);
err = -EINVAL;
} else if (!netif_running(odev)) {
- printk("pktgen: device is down: \"%s\"\n", ifname);
+ printk(KERN_ERR "pktgen: device is down: \"%s\"\n", ifname);
err = -ENETDOWN;
} else {
pkt_dev->odev = odev;
@@ -1987,7 +1988,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
if (!pkt_dev->odev) {
- printk("pktgen: ERROR: pkt_dev->odev == NULL in setup_inject.\n");
+ printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
+ "setup_inject.\n");
sprintf(pkt_dev->result,
"ERROR: pkt_dev->odev == NULL in setup_inject.\n");
return;
@@ -2049,7 +2051,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
}
rcu_read_unlock();
if (err)
- printk("pktgen: ERROR: IPv6 link address not availble.\n");
+ printk(KERN_ERR "pktgen: ERROR: IPv6 link "
+ "address not availble.\n");
}
#endif
} else {
@@ -2156,8 +2159,7 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
/* If there was already an IPSEC SA, we keep it as is, else
* we go look for it ...
*/
-inline
-void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
+static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
struct xfrm_state *x = pkt_dev->flows[flow].x;
if (!x) {
@@ -2441,7 +2443,8 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
if (nhead >0) {
ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
if (ret < 0) {
- printk("Error expanding ipsec packet %d\n",ret);
+ printk(KERN_ERR "Error expanding "
+ "ipsec packet %d\n",ret);
return 0;
}
}
@@ -2450,7 +2453,8 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
skb_pull(skb, ETH_HLEN);
ret = pktgen_output_ipsec(skb, pkt_dev);
if (ret) {
- printk("Error creating ipsec packet %d\n",ret);
+ printk(KERN_ERR "Error creating ipsec "
+ "packet %d\n",ret);
kfree_skb(skb);
return 0;
}
@@ -3184,8 +3188,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
if (!pkt_dev->running) {
- printk("pktgen: interface: %s is already stopped\n",
- pkt_dev->odev->name);
+ printk(KERN_WARNING "pktgen: interface: %s is already "
+ "stopped\n", pkt_dev->odev->name);
return -EINVAL;
}
@@ -3360,7 +3364,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->skb = fill_packet(odev, pkt_dev);
if (pkt_dev->skb == NULL) {
- printk("pktgen: ERROR: couldn't allocate skb in fill_packet.\n");
+ printk(KERN_ERR "pktgen: ERROR: couldn't "
+ "allocate skb in fill_packet.\n");
schedule();
pkt_dev->clone_count--; /* back out increment, OOM */
goto out;
@@ -3565,7 +3570,8 @@ static int add_dev_to_thread(struct pktgen_thread *t,
if_lock(t);
if (pkt_dev->pg_thread) {
- printk("pktgen: ERROR: already assigned to a thread.\n");
+ printk(KERN_ERR "pktgen: ERROR: already assigned "
+ "to a thread.\n");
rv = -EBUSY;
goto out;
}
@@ -3590,7 +3596,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev = __pktgen_NN_threads(ifname, FIND);
if (pkt_dev) {
- printk("pktgen: ERROR: interface already used.\n");
+ printk(KERN_ERR "pktgen: ERROR: interface already used.\n");
return -EBUSY;
}
@@ -3632,7 +3638,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->entry = create_proc_entry(ifname, 0600, pg_proc_dir);
if (!pkt_dev->entry) {
- printk("pktgen: cannot create %s/%s procfs entry.\n",
+ printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
PG_PROC_DIR, ifname);
err = -EINVAL;
goto out2;
@@ -3665,7 +3671,8 @@ static int __init pktgen_create_thread(int cpu)
t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
if (!t) {
- printk("pktgen: ERROR: out of memory, can't create new thread.\n");
+ printk(KERN_ERR "pktgen: ERROR: out of memory, can't "
+ "create new thread.\n");
return -ENOMEM;
}
@@ -3678,7 +3685,8 @@ static int __init pktgen_create_thread(int cpu)
p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
if (IS_ERR(p)) {
- printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu);
+ printk(KERN_ERR "pktgen: kernel_thread() failed "
+ "for cpu %d\n", t->cpu);
list_del(&t->th_list);
kfree(t);
return PTR_ERR(p);
@@ -3688,7 +3696,7 @@ static int __init pktgen_create_thread(int cpu)
pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir);
if (!pe) {
- printk("pktgen: cannot create %s/%s procfs entry.\n",
+ printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n",
PG_PROC_DIR, t->tsk->comm);
kthread_stop(p);
list_del(&t->th_list);
@@ -3727,7 +3735,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev);
if (pkt_dev->running) {
- printk("pktgen:WARNING: trying to remove a running interface, stopping it now.\n");
+ printk(KERN_WARNING "pktgen: WARNING: trying to remove a "
+ "running interface, stopping it now.\n");
pktgen_stop_device(pkt_dev);
}
@@ -3759,7 +3768,7 @@ static int __init pg_init(void)
int cpu;
struct proc_dir_entry *pe;
- printk(version);
+ printk(KERN_INFO "%s", version);
pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net);
if (!pg_proc_dir)
@@ -3768,8 +3777,8 @@ static int __init pg_init(void)
pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
if (pe == NULL) {
- printk("pktgen: ERROR: cannot create %s procfs entry.\n",
- PGCTRL);
+ printk(KERN_ERR "pktgen: ERROR: cannot create %s "
+ "procfs entry.\n", PGCTRL);
proc_net_remove(PG_PROC_DIR);
return -EINVAL;
}
@@ -3785,12 +3794,13 @@ static int __init pg_init(void)
err = pktgen_create_thread(cpu);
if (err)
- printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n",
- cpu, err);
+ printk(KERN_WARNING "pktgen: WARNING: Cannot create "
+ "thread for cpu %d (%d)\n", cpu, err);
}
if (list_empty(&pktgen_threads)) {
- printk("pktgen: ERROR: Initialization failed for all threads\n");
+ printk(KERN_ERR "pktgen: ERROR: Initialization failed for "
+ "all threads\n");
unregister_netdevice_notifier(&pktgen_notifier_block);
remove_proc_entry(PGCTRL, pg_proc_dir);
proc_net_remove(PG_PROC_DIR);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 06eccca8cb5..4756d5857ab 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -952,7 +952,9 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct nlattr *linkinfo[IFLA_INFO_MAX+1];
int err;
+#ifdef CONFIG_KMOD
replay:
+#endif
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
diff --git a/net/core/utils.c b/net/core/utils.c
index 2030bb8c2d3..0bf17da40d5 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -25,6 +25,7 @@
#include <linux/random.h>
#include <linux/percpu.h>
#include <linux/init.h>
+#include <net/sock.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ccbf72c793b..c45088b5e6f 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -40,6 +40,7 @@ static inline void ccids_write_unlock(void)
static inline void ccids_read_lock(void)
{
atomic_inc(&ccids_lockct);
+ smp_mb__after_atomic_inc();
spin_unlock_wait(&ccids_lock);
}
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index cd845df5320..5ebdd86c1b9 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -327,10 +327,16 @@ static void dccp_feat_empty_confirm(struct dccp_minisock *dmsk,
}
switch (type) {
- case DCCPO_CHANGE_L: opt->dccpop_type = DCCPO_CONFIRM_R; break;
- case DCCPO_CHANGE_R: opt->dccpop_type = DCCPO_CONFIRM_L; break;
- default: DCCP_WARN("invalid type %d\n", type); return;
-
+ case DCCPO_CHANGE_L:
+ opt->dccpop_type = DCCPO_CONFIRM_R;
+ break;
+ case DCCPO_CHANGE_R:
+ opt->dccpop_type = DCCPO_CONFIRM_L;
+ break;
+ default:
+ DCCP_WARN("invalid type %d\n", type);
+ kfree(opt);
+ return;
}
opt->dccpop_feat = feature;
opt->dccpop_val = NULL;
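
The feat.c change is a memory-leak fix: dccp_feat_empty_confirm() allocated opt before the switch, so the invalid-type branch must free it before returning. A trivial userspace sketch of the pattern, with names made up for illustration:

#include <stdio.h>
#include <stdlib.h>

struct opt { int type; };

static struct opt *make_confirm(int type)
{
        struct opt *opt = malloc(sizeof(*opt));

        if (!opt)
                return NULL;

        switch (type) {
        case 1: opt->type = 101; break;
        case 2: opt->type = 102; break;
        default:
                fprintf(stderr, "invalid type %d\n", type);
                free(opt);      /* the fix: don't leak on the error path */
                return NULL;
        }
        return opt;
}

int main(void)
{
        struct opt *o = make_confirm(3);

        if (!o)
                return 1;
        free(o);
        return 0;
}
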
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index f2a61ef2af9..a4a620971ef 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1737,8 +1737,9 @@ static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct dn_rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct dn_rt_cache_iter_state *s;
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
rc = seq_open(file, &dn_rt_cache_seq_ops);
@@ -1746,7 +1747,6 @@ static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
goto out_kfree;
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index f13937bf9e8..d054e9224b3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -74,8 +74,8 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
struct ieee80211softmac_auth_queue_item *authptr;
int length = 0;
+check_assoc_again:
mutex_lock(&sm->associnfo.mutex);
-
/* Check if we're already associating to this or another network
* If it's another network, cancel and start over with our new network
* If it's our network, ignore the change, we're already doing it!
@@ -98,13 +98,18 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
cancel_delayed_work(&authptr->work);
sm->associnfo.bssvalid = 0;
sm->associnfo.bssfixed = 0;
- flush_scheduled_work();
sm->associnfo.associating = 0;
sm->associnfo.associated = 0;
+ /* We must unlock to avoid deadlocks with the assoc workqueue
+ * on the associnfo.mutex */
+ mutex_unlock(&sm->associnfo.mutex);
+ flush_scheduled_work();
+ /* Avoid race! Check assoc status again. Maybe someone started an
+ * association while we flushed. */
+ goto check_assoc_again;
}
}
-
sm->associnfo.static_essid = 0;
sm->associnfo.assoc_wait = 0;
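
The softmac change avoids calling flush_scheduled_work() while holding associnfo.mutex, which the association work items also take; the fix drops the lock, flushes, then jumps back and re-checks the state. A minimal userspace sketch of the same pattern, assuming pthreads stand in for the kernel workqueue:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t assoc_lock = PTHREAD_MUTEX_INITIALIZER;
static int associating = 1;

/* Pending "association work": it also needs assoc_lock, like the
 * workqueue handlers flush_scheduled_work() waits for. */
static void *assoc_work(void *arg)
{
        sleep(1);
        pthread_mutex_lock(&assoc_lock);
        associating = 0;
        pthread_mutex_unlock(&assoc_lock);
        return NULL;
}

static void set_essid(pthread_t worker)
{
again:
        pthread_mutex_lock(&assoc_lock);
        if (associating) {
                /* Drop the lock before waiting, otherwise the worker blocks
                 * on assoc_lock while we block on the worker: deadlock. */
                pthread_mutex_unlock(&assoc_lock);
                pthread_join(worker, NULL);     /* stands in for the flush */
                goto again;                     /* state may have changed */
        }
        printf("starting new association\n");
        pthread_mutex_unlock(&assoc_lock);
}

int main(void)
{
        pthread_t worker;

        pthread_create(&worker, NULL, assoc_work, NULL);
        set_essid(worker);
        return 0;
}
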
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 06c08e5740f..e68103475cc 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -831,7 +831,7 @@ const struct proto_ops inet_stream_ops = {
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
- .sendmsg = inet_sendmsg,
+ .sendmsg = tcp_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = tcp_sendpage,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index abf6352f990..5b77bdaa57d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1056,10 +1056,9 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
if (!in_dev) {
if (event == NETDEV_REGISTER) {
in_dev = inetdev_init(dev);
+ if (!in_dev)
+ return notifier_from_errno(-ENOMEM);
if (dev == &loopback_dev) {
- if (!in_dev)
- panic("devinet: "
- "Failed to create loopback\n");
IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 251346828cb..2f14745a9e1 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -513,11 +513,8 @@ void ip_options_undo(struct ip_options * opt)
static struct ip_options *ip_options_get_alloc(const int optlen)
{
- struct ip_options *opt = kmalloc(sizeof(*opt) + ((optlen + 3) & ~3),
- GFP_KERNEL);
- if (opt)
- memset(opt, 0, sizeof(*opt));
- return opt;
+ return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
+ GFP_KERNEL);
}
static int ip_options_get_finish(struct ip_options **optp,
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c9e2b5e6305..0f1d7beacf7 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -75,7 +75,6 @@
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
-#include <net/checksum.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 342ca8d8945..c5b24707753 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1281,9 +1281,9 @@ static int __init ip_auto_config(void)
*/
if (ic_myaddr == NONE ||
#ifdef CONFIG_ROOT_NFS
- (MAJOR(ROOT_DEV) == UNNAMED_MAJOR
- && root_server_addr == NONE
- && ic_servaddr == NONE) ||
+ (root_server_addr == NONE
+ && ic_servaddr == NONE
+ && ROOT_DEV == Root_NFS) ||
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index e1052bcf4ed..902fd578aa3 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -29,7 +29,6 @@
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/swap.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter.h>
@@ -909,7 +908,7 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
write_lock_bh(&__ip_vs_svc_lock);
/* Wait until all other svc users go away */
- while (atomic_read(&svc->usecnt) > 1) {};
+ IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
/* call the update_service, because server weight may be changed */
svc->scheduler->update_service(svc);
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 900ce29db38..666e080a74a 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -128,7 +128,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
#define IP_VS_XMIT(skb, rt) \
do { \
(skb)->ipvs_property = 1; \
- (skb)->ip_summed = CHECKSUM_NONE; \
+ skb_forward_csum(skb); \
NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, (skb), NULL, \
(rt)->u.dst.dev, dst_output); \
} while (0)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index dcc12b18347..69bd362b5fa 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -19,7 +19,6 @@
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter/x_tables.h>
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 32180431565..6d0c0f7364a 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -387,12 +387,17 @@ static int recent_seq_open(struct inode *inode, struct file *file)
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
+
ret = seq_open(file, &recent_seq_ops);
- if (ret)
+ if (ret) {
kfree(st);
+ goto out;
+ }
+
st->table = pde->data;
seq = file->private_data;
seq->private = st;
+out:
return ret;
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 64552afd01c..d9b5177989c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -509,3 +509,9 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
module_init(nf_conntrack_l3proto_ipv4_init);
module_exit(nf_conntrack_l3proto_ipv4_fini);
+
+void need_ipv4_conntrack(void)
+{
+ return;
+}
+EXPORT_SYMBOL_GPL(need_ipv4_conntrack);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 27c7918e442..b3dd5de9a25 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -294,15 +294,14 @@ static int exp_open(struct inode *inode, struct file *file)
struct ct_expect_iter_state *st;
int ret;
- st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
- if (st == NULL)
+ st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
+ if (!st)
return -ENOMEM;
ret = seq_open(file, &exp_seq_ops);
if (ret)
goto out_free;
seq = file->private_data;
seq->private = st;
- memset(st, 0, sizeof(struct ct_expect_iter_state));
return ret;
out_free:
kfree(st);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index e848d8d6292..deab27facba 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -77,7 +77,8 @@ static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
/* Original src, to ensure we map it consistently if poss. */
- return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
+ return jhash_3words((__force u32)tuple->src.u3.ip,
+ (__force u32)tuple->src.u.all,
tuple->dst.protonum, 0) % nf_nat_htable_size;
}
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 0f45427e5fd..76ec59ae524 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -192,7 +192,7 @@ alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
- u_int16_t all
+ __be16 all
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
? ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
: ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 332814dac50..46cc99def16 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -328,7 +328,7 @@ static int __init nf_nat_standalone_init(void)
{
int ret = 0;
- need_conntrack();
+ need_ipv4_conntrack();
#ifdef CONFIG_XFRM
BUG_ON(ip_nat_decode_session != NULL);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 24d7c9f3191..c6d71526f62 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -900,8 +900,9 @@ static int raw_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct raw_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct raw_iter_state *s;
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
rc = seq_open(file, &raw_seq_ops);
@@ -910,7 +911,6 @@ static int raw_seq_open(struct inode *inode, struct file *file)
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index df42b7fb326..c7ca94bd152 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -374,8 +374,9 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
int rc = -ENOMEM;
- struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ struct rt_cache_iter_state *s;
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto out;
rc = seq_open(file, &rt_cache_seq_ops);
@@ -383,7 +384,6 @@ static int rt_cache_seq_open(struct inode *inode, struct file *file)
goto out_kfree;
seq = file->private_data;
seq->private = s;
- memset(s, 0, sizeof(*s));
out:
return rc;
out_kfree:
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index da4c0b6ab79..7e740112b23 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk)
return tmp;
}
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
{
+ struct sock *sk = sock->sk;
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 519de091a94..4586211e375 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -206,7 +206,7 @@ static void bictcp_state(struct sock *sk, u8 new_state)
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index d17da30d82d..485d7ea35f7 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -246,38 +246,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
ca->cnt = 1;
}
-
-/* Keep track of minimum rtt */
-static inline void measure_delay(struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- struct bictcp *ca = inet_csk_ca(sk);
- u32 delay;
-
- /* No time stamp */
- if (!(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) ||
- /* Discard delay samples right after fast recovery */
- (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
- return;
-
- delay = (tcp_time_stamp - tp->rx_opt.rcv_tsecr)<<3;
- if (delay == 0)
- delay = 1;
-
- /* first time call or link delay decreases */
- if (ca->delay_min == 0 || ca->delay_min > delay)
- ca->delay_min = delay;
-}
-
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
u32 in_flight, int data_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bictcp *ca = inet_csk_ca(sk);
- if (data_acked)
- measure_delay(sk);
-
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
@@ -334,17 +308,33 @@ static void bictcp_state(struct sock *sk, u8 new_state)
/* Track delayed acknowledgment ratio using sliding window
* ratio = (15*ratio + sample) / 16
*/
-static void bictcp_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct bictcp *ca = inet_csk_ca(sk);
+ u32 delay;
if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
- struct bictcp *ca = inet_csk_ca(sk);
cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ca->delayed_ack += cnt;
}
-}
+ /* Some calls are for duplicates without timetamps */
+ if (rtt_us < 0)
+ return;
+
+ /* Discard delay samples right after fast recovery */
+ if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ return;
+
+ delay = usecs_to_jiffies(rtt_us) << 3;
+ if (delay == 0)
+ delay = 1;
+
+ /* first time call or link delay decreases */
+ if (ca->delay_min == 0 || ca->delay_min > delay)
+ ca->delay_min = delay;
+}
static struct tcp_congestion_ops cubictcp = {
.init = bictcp_init,
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 08a02e6045c..5215691f276 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -76,20 +76,17 @@ static u32 htcp_cwnd_undo(struct sock *sk)
return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta);
}
-static inline void measure_rtt(struct sock *sk)
+static inline void measure_rtt(struct sock *sk, u32 srtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- const struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
- u32 srtt = tp->srtt >> 3;
/* keep track of minimum RTT seen so far, minRTT is zero at first */
if (ca->minRTT > srtt || !ca->minRTT)
ca->minRTT = srtt;
/* max RTT */
- if (icsk->icsk_ca_state == TCP_CA_Open
- && tp->snd_ssthresh < 0xFFFF && htcp_ccount(ca) > 3) {
+ if (icsk->icsk_ca_state == TCP_CA_Open) {
if (ca->maxRTT < ca->minRTT)
ca->maxRTT = ca->minRTT;
if (ca->maxRTT < srtt
@@ -98,7 +95,7 @@ static inline void measure_rtt(struct sock *sk)
}
}
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
@@ -108,6 +105,9 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, ktime_t
if (icsk->icsk_ca_state == TCP_CA_Open)
ca->pkts_acked = pkts_acked;
+ if (rtt > 0)
+ measure_rtt(sk, usecs_to_jiffies(rtt));
+
if (!use_bandwidth_switch)
return;
@@ -237,8 +237,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack,
if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp);
else {
- measure_rtt(sk);
-
/* In dangerous area, increase slowly.
* In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
*/
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index cc5de6f69d4..64f1cbaf96e 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -83,18 +83,16 @@ static void tcp_illinois_init(struct sock *sk)
}
/* Measure RTT for each ack. */
-static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_illinois_acked(struct sock *sk, u32 pkts_acked, s32 rtt)
{
struct illinois *ca = inet_csk_ca(sk);
- u32 rtt;
ca->acked = pkts_acked;
- if (ktime_equal(last, net_invalid_timestamp()))
+ /* dup ack, no rtt sample */
+ if (rtt < 0)
return;
- rtt = ktime_to_us(net_timedelta(last));
-
/* ignore bogus values, this prevents wraparound in alpha math */
if (rtt > RTT_MAX)
rtt = RTT_MAX;
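
The tcp_bic/tcp_cubic/tcp_htcp/tcp_illinois hunks all converge on the new pkts_acked convention: the hook receives an RTT sample in microseconds as s32, with a negative value meaning "no usable sample" (duplicate ACK or ambiguous retransmit). A plain-C sketch of a hook keeping a minimum-RTT estimate under that convention, with a struct standing in for the per-socket ca state:

#include <stdint.h>
#include <stdio.h>

struct ca_state {
        uint32_t min_rtt_us;    /* 0 means "no sample yet" */
        uint32_t acked;
};

static void sample_pkts_acked(struct ca_state *ca, uint32_t pkts_acked,
                              int32_t rtt_us)
{
        ca->acked = pkts_acked;

        if (rtt_us < 0)         /* dup ACK or ambiguous retransmit: skip */
                return;

        if (!ca->min_rtt_us || (uint32_t)rtt_us < ca->min_rtt_us)
                ca->min_rtt_us = rtt_us;
}

int main(void)
{
        struct ca_state ca = { 0, 0 };

        sample_pkts_acked(&ca, 2, 48000);       /* 48 ms sample */
        sample_pkts_acked(&ca, 1, -1);          /* no sample: ignored */
        sample_pkts_acked(&ca, 3, 31000);       /* new minimum */
        printf("min rtt: %u us\n", ca.min_rtt_us);
        return 0;
}
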
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index fec8a7a4dba..f030435e0eb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -102,11 +102,14 @@ int sysctl_tcp_abc __read_mostly;
#define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
+#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
+#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
+#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
@@ -964,12 +967,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* Check for D-SACK. */
if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+ flag |= FLAG_DSACKING_ACK;
found_dup_sack = 1;
tp->rx_opt.sack_ok |= 4;
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1 &&
!after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
!before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+ flag |= FLAG_DSACKING_ACK;
found_dup_sack = 1;
tp->rx_opt.sack_ok |= 4;
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
@@ -1851,19 +1856,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
}
/* Decrease cwnd each second ack. */
-static void tcp_cwnd_down(struct sock *sk)
+static void tcp_cwnd_down(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int decr = tp->snd_cwnd_cnt + 1;
- tp->snd_cwnd_cnt = decr&1;
- decr >>= 1;
+ if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
+ (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+ tp->snd_cwnd_cnt = decr&1;
+ decr >>= 1;
- if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
- tp->snd_cwnd -= decr;
+ if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
+ tp->snd_cwnd -= decr;
- tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
- tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ }
}
/* Nothing was retransmitted or returned timestamp is less
@@ -2060,7 +2068,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
}
tcp_moderate_cwnd(tp);
} else {
- tcp_cwnd_down(sk);
+ tcp_cwnd_down(sk, flag);
}
}
@@ -2104,12 +2112,13 @@ static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb)
* tcp_xmit_retransmit_queue().
*/
static void
-tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
- int prior_packets, int flag)
+tcp_fastretrans_alert(struct sock *sk, int prior_packets, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
+ int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
+ int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
+ (tp->fackets_out > tp->reordering));
/* Some technical things:
* 1. Reno does not count dupacks (sacked_out) automatically. */
@@ -2186,14 +2195,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
/* F. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
- if (prior_snd_una == tp->snd_una) {
+ if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (IsReno(tp) && is_dupack)
tcp_add_reno_sack(sk);
} else {
int acked = prior_packets - tp->packets_out;
if (IsReno(tp))
tcp_remove_reno_sacks(sk, acked);
- is_dupack = tcp_try_undo_partial(sk, acked);
+ do_lost = tcp_try_undo_partial(sk, acked);
}
break;
case TCP_CA_Loss:
@@ -2209,7 +2218,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
/* Loss is undone; fall through to processing in Open state. */
default:
if (IsReno(tp)) {
- if (tp->snd_una != prior_snd_una)
+ if (flag & FLAG_SND_UNA_ADVANCED)
tcp_reset_reno_sack(tp);
if (is_dupack)
tcp_add_reno_sack(sk);
@@ -2258,9 +2267,9 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
tcp_set_ca_state(sk, TCP_CA_Recovery);
}
- if (is_dupack || tcp_head_timedout(sk))
+ if (do_lost || tcp_head_timedout(sk))
tcp_update_scoreboard(sk);
- tcp_cwnd_down(sk);
+ tcp_cwnd_down(sk, flag);
tcp_xmit_retransmit_queue(sk);
}
@@ -2490,12 +2499,23 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
tcp_ack_update_rtt(sk, acked, seq_rtt);
tcp_ack_packets_out(sk);
- /* Is the ACK triggering packet unambiguous? */
- if (acked & FLAG_RETRANS_DATA_ACKED)
- last_ackt = net_invalid_timestamp();
+ if (ca_ops->pkts_acked) {
+ s32 rtt_us = -1;
+
+ /* Is the ACK triggering packet unambiguous? */
+ if (!(acked & FLAG_RETRANS_DATA_ACKED)) {
+ /* High resolution needed and available? */
+ if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
+ !ktime_equal(last_ackt,
+ net_invalid_timestamp()))
+ rtt_us = ktime_us_delta(ktime_get_real(),
+ last_ackt);
+ else if (seq_rtt > 0)
+ rtt_us = jiffies_to_usecs(seq_rtt);
+ }
- if (ca_ops->pkts_acked)
- ca_ops->pkts_acked(sk, pkts_acked, last_ackt);
+ ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
+ }
}
#if FASTRETRANS_DEBUG > 0
@@ -2667,7 +2687,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
* to prove that the RTO is indeed spurious. It transfers the control
* from F-RTO to the conventional RTO recovery
*/
-static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
+static int tcp_process_frto(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2687,8 +2707,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
* ACK isn't duplicate nor advances window, e.g., opposite dir
* data, winupdate
*/
- if ((tp->snd_una == prior_snd_una) && (flag&FLAG_NOT_DUP) &&
- !(flag&FLAG_FORWARD_PROGRESS))
+ if (!(flag&FLAG_ANY_PROGRESS) && (flag&FLAG_NOT_DUP))
return 1;
if (!(flag&FLAG_DATA_ACKED)) {
@@ -2768,6 +2787,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una))
goto old_ack;
+ if (after(ack, prior_snd_una))
+ flag |= FLAG_SND_UNA_ADVANCED;
+
if (sysctl_tcp_abc) {
if (icsk->icsk_ca_state < TCP_CA_CWR)
tp->bytes_acked += ack - prior_snd_una;
@@ -2820,14 +2842,14 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
if (tp->frto_counter)
- frto_cwnd = tcp_process_frto(sk, prior_snd_una, flag);
+ frto_cwnd = tcp_process_frto(sk, flag);
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight, 0);
- tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
+ tcp_fastretrans_alert(sk, prior_packets, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight, 1);
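
The tcp_clean_rtx_queue() hunk above is where the per-ACK RTT sample is chosen: no sample is reported when the ACK covered retransmitted data (ambiguous under Karn's rule), a ktime-based microsecond delta is preferred when the congestion module advertises TCP_CONG_RTT_STAMP and the timestamp is valid, and the coarser jiffies-based seq_rtt is the fallback. The sketch below mirrors only that decision structure in plain C; the flag values and tick rate are invented for the example.

```c
#include <stdio.h>

/* Invented flag values; only the decision structure mirrors the patch. */
#define FLAG_RETRANS_DATA_ACKED	0x01
#define TCP_CONG_RTT_STAMP	0x02

static int pick_rtt_us(int ack_flags, int ca_flags,
		       long ktime_delta_us, long seq_rtt_ticks, int hz)
{
	int rtt_us = -1;			/* default: no sample */

	if (ack_flags & FLAG_RETRANS_DATA_ACKED)
		return rtt_us;			/* ambiguous ACK (Karn) */

	if ((ca_flags & TCP_CONG_RTT_STAMP) && ktime_delta_us >= 0)
		rtt_us = (int)ktime_delta_us;	/* high-resolution path */
	else if (seq_rtt_ticks > 0)
		rtt_us = (int)(seq_rtt_ticks * (1000000 / hz));

	return rtt_us;
}

int main(void)
{
	printf("%d\n", pick_rtt_us(0, TCP_CONG_RTT_STAMP, 1234, 5, 1000));
	printf("%d\n", pick_rtt_us(0, 0, -1, 5, 1000));	/* jiffies fallback */
	printf("%d\n", pick_rtt_us(FLAG_RETRANS_DATA_ACKED, 0, 1234, 5, 1000));
	return 0;
}
```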
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3f5f7423b95..9c94627c8c7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2425,7 +2425,6 @@ struct proto tcp_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
- .sendmsg = tcp_sendmsg,
.recvmsg = tcp_recvmsg,
.backlog_rcv = tcp_v4_do_rcv,
.hash = tcp_v4_hash,
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 80e140e3ec2..e7f5ef92cbd 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -260,13 +260,13 @@ static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
* newReno in increase case.
* We work it out by following the idea from TCP-LP's paper directly
*/
-static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, ktime_t last)
+static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
- if (!ktime_equal(last, net_invalid_timestamp()))
- tcp_lp_rtt_sample(sk, ktime_to_us(net_timedelta(last)));
+ if (rtt_us > 0)
+ tcp_lp_rtt_sample(sk, rtt_us);
/* calc inference */
if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 914e0307f7a..b49dedcda52 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -112,16 +112,16 @@ EXPORT_SYMBOL_GPL(tcp_vegas_init);
* o min-filter RTT samples from a much longer window (forever for now)
* to find the propagation delay (baseRTT)
*/
-void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct vegas *vegas = inet_csk_ca(sk);
u32 vrtt;
- if (ktime_equal(last, net_invalid_timestamp()))
+ if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
- vrtt = ktime_to_us(net_timedelta(last)) + 1;
+ vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < vegas->baseRTT)
diff --git a/net/ipv4/tcp_vegas.h b/net/ipv4/tcp_vegas.h
index 502fa818363..6c0eea2f824 100644
--- a/net/ipv4/tcp_vegas.h
+++ b/net/ipv4/tcp_vegas.h
@@ -17,7 +17,7 @@ struct vegas {
extern void tcp_vegas_init(struct sock *sk);
extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, ktime_t last);
+extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 7a55ddf8603..8fb2aee0b1a 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -69,16 +69,16 @@ static void tcp_veno_init(struct sock *sk)
}
/* Do rtt sampling needed for Veno. */
-static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_veno_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us)
{
struct veno *veno = inet_csk_ca(sk);
u32 vrtt;
- if (ktime_equal(last, net_invalid_timestamp()))
+ if (rtt_us < 0)
return;
/* Never allow zero rtt or baseRTT */
- vrtt = ktime_to_us(net_timedelta(last)) + 1;
+ vrtt = rtt_us + 1;
/* Filter to find propagation delay: */
if (vrtt < veno->basertt)
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index e61e09dd513..20151d6a624 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -100,11 +100,12 @@ static void westwood_filter(struct westwood *w, u32 delta)
* Called after processing group of packets.
* but all westwood needs is the last sample of srtt.
*/
-static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, ktime_t last)
+static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
struct westwood *w = inet_csk_ca(sk);
- if (cnt > 0)
- w->rtt = tcp_sk(sk)->srtt >> 3;
+
+ if (rtt > 0)
+ w->rtt = usecs_to_jiffies(rtt);
}
/*
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index c04b7c6ec70..c107fba7430 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -58,7 +58,7 @@ static void tcp_yeah_init(struct sock *sk)
}
-static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
+static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct yeah *yeah = inet_csk_ca(sk);
@@ -66,7 +66,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
if (icsk->icsk_ca_state == TCP_CA_Open)
yeah->pkts_acked = pkts_acked;
- tcp_vegas_pkts_acked(sk, pkts_acked, last);
+ tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
}
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 06012920912..91ef3be5aba 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2256,14 +2256,14 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
struct net_device *dev = (struct net_device *) data;
struct inet6_dev *idev = __in6_dev_get(dev);
int run_pending = 0;
+ int err;
switch(event) {
case NETDEV_REGISTER:
if (!idev && dev->mtu >= IPV6_MIN_MTU) {
idev = ipv6_add_dev(dev);
if (!idev)
- printk(KERN_WARNING "IPv6: add_dev failed for %s\n",
- dev->name);
+ return notifier_from_errno(-ENOMEM);
}
break;
case NETDEV_UP:
@@ -2373,7 +2373,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
NULL);
addrconf_sysctl_register(idev, &idev->cnf);
#endif
- snmp6_register_dev(idev);
+ err = snmp6_register_dev(idev);
+ if (err)
+ return notifier_from_errno(err);
}
break;
}
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index faaefb69229..3f82e9542ed 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -50,6 +50,9 @@ int __ipv6_addr_type(const struct in6_addr *addr)
if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))
return (IPV6_ADDR_SITELOCAL | IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_SITELOCAL)); /* addr-select 3.1 */
+ if ((st & htonl(0xFE000000)) == htonl(0xFC000000))
+ return (IPV6_ADDR_UNICAST |
+ IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* RFC 4193 */
if ((addr->s6_addr32[0] | addr->s6_addr32[1]) == 0) {
if (addr->s6_addr32[2] == 0) {
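
The added check classifies RFC 4193 Unique Local Addresses (fc00::/7) as global-scope unicast instead of letting them fall through to the reserved case. A small userspace version of the same prefix test, with example addresses chosen only for illustration:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* An address whose first 32 bits match fc00::/7 is a Unique Local
 * Address (RFC 4193). */
static int is_rfc4193_ula(const struct in6_addr *a)
{
	uint32_t st;

	memcpy(&st, a->s6_addr, sizeof(st));
	return (st & htonl(0xFE000000)) == htonl(0xFC000000);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "fd12:3456:789a::1", &a);
	printf("fd12:3456:789a::1 -> ULA? %d\n", is_rfc4193_ula(&a));

	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("2001:db8::1       -> ULA? %d\n", is_rfc4193_ula(&a));
	return 0;
}
```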
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index eed09373a45..b5f96372ad7 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -484,7 +484,7 @@ const struct proto_ops inet6_stream_ops = {
.shutdown = inet_shutdown, /* ok */
.setsockopt = sock_common_setsockopt, /* ok */
.getsockopt = sock_common_getsockopt, /* ok */
- .sendmsg = inet_sendmsg, /* ok */
+ .sendmsg = tcp_sendmsg, /* ok */
.recvmsg = sock_common_recvmsg, /* ok */
.mmap = sock_no_mmap,
.sendpage = tcp_sendpage,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index df30976f6df..ca774d8e3be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -385,7 +385,7 @@ parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
- int *type, int *code, int *msg, __be32 *info, int offset)
+ int *type, int *code, int *msg, __u32 *info, int offset)
{
struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
struct ip6_tnl *t;
@@ -435,7 +435,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
if ((*code) == ICMPV6_HDR_FIELD)
teli = parse_tlv_tnl_enc_lim(skb, skb->data);
- if (teli && teli == ntohl(*info) - 2) {
+ if (teli && teli == *info - 2) {
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
if (tel->encap_limit == 0) {
if (net_ratelimit())
@@ -452,7 +452,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
}
break;
case ICMPV6_PKT_TOOBIG:
- mtu = ntohl(*info) - offset;
+ mtu = *info - offset;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
@@ -478,12 +478,12 @@ out:
static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __u32 info)
+ int type, int code, int offset, __be32 info)
{
int rel_msg = 0;
int rel_type = type;
int rel_code = code;
- __u32 rel_info = info;
+ __u32 rel_info = ntohl(info);
int err;
struct sk_buff *skb2;
struct iphdr *eiph;
@@ -564,10 +564,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
- rel_info = htonl(rel_info);
}
- icmp_send(skb2, rel_type, rel_code, rel_info);
+ icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
out:
kfree_skb(skb2);
@@ -576,12 +575,12 @@ out:
static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- int type, int code, int offset, __u32 info)
+ int type, int code, int offset, __be32 info)
{
int rel_msg = 0;
int rel_type = type;
int rel_code = code;
- __u32 rel_info = info;
+ __u32 rel_info = ntohl(info);
int err;
err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
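
The ip6_tunnel hunks settle the byte order of the ICMPv6 'info' field: the callers convert it to host order once with ntohl(), ip6_tnl_err() then does its encap-limit and MTU arithmetic on plain __u32 values, and the value is converted back only when re-emitted via icmp_send(). A standalone sketch of that convert-at-the-boundary pattern; the constants are illustrative.

```c
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IPV6_MIN_MTU 1280

int main(void)
{
	uint32_t info_net = htonl(1400);	/* field as it arrives off the wire */
	uint32_t offset = 40;			/* illustrative header offset */

	uint32_t rel_info = ntohl(info_net);	/* host order, converted once */
	uint32_t mtu = rel_info - offset;	/* plain host-order arithmetic */

	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	/* convert back only at the interface that expects network order */
	printf("mtu=%u, re-emitted as %#010x\n", mtu, htonl(mtu));
	return 0;
}
```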
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 36df2218b66..3153e15e0f7 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -86,8 +86,8 @@ static int ipv6_print_conntrack(struct seq_file *s,
* - Note also special handling of AUTH header. Thanks to IPsec wizards.
*/
-int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
- int len)
+static int nf_ct_ipv6_skip_exthdr(const struct sk_buff *skb, int start,
+ u8 *nexthdrp, int len)
{
u8 nexthdr = *nexthdrp;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 919de682b33..55ea80fac60 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1983,9 +1983,10 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
!dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
(dst_mtu(&rt->u.dst) > arg->mtu ||
(dst_mtu(&rt->u.dst) < arg->mtu &&
- dst_mtu(&rt->u.dst) == idev->cnf.mtu6)))
+ dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
- rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
+ rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
+ }
return 0;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f10f3689d67..0f7defb482e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -56,7 +56,6 @@
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
-#include <net/addrconf.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
@@ -2115,7 +2114,6 @@ struct proto tcpv6_prot = {
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
- .sendmsg = tcp_sendmsg,
.recvmsg = tcp_recvmsg,
.backlog_rcv = tcp_v6_do_rcv,
.hash = tcp_v6_hash,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index ad5150b8dfa..983058d432d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -479,7 +479,8 @@ static void iucv_setmask_mp(void)
/* Enable all cpus with a declared buffer. */
if (cpu_isset(cpu, iucv_buffer_cpumask) &&
!cpu_isset(cpu, iucv_irq_cpumask))
- smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_allow_cpu,
+ NULL, 0, 1);
preempt_enable();
}
@@ -497,7 +498,7 @@ static void iucv_setmask_up(void)
cpumask = iucv_irq_cpumask;
cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
for_each_cpu_mask(cpu, cpumask)
- smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_block_cpu, NULL, 0, 1);
}
/**
@@ -522,7 +523,7 @@ static int iucv_enable(void)
rc = -EIO;
preempt_disable();
for_each_online_cpu(cpu)
- smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
preempt_enable();
if (cpus_empty(iucv_buffer_cpumask))
/* No cpu could declare an iucv buffer. */
@@ -578,7 +579,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
case CPU_ONLINE_FROZEN:
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 0, 1);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
@@ -587,10 +588,10 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
if (cpus_empty(cpumask))
/* Can't offline last IUCV enabled cpu. */
return NOTIFY_BAD;
- smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
+ smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 0, 1);
if (cpus_empty(iucv_irq_cpumask))
- smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
- first_cpu(iucv_buffer_cpumask));
+ smp_call_function_single(first_cpu(iucv_buffer_cpumask),
+ iucv_allow_cpu, NULL, 0, 1);
break;
}
return NOTIFY_OK;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 0f8304b0246..5502df115a6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1206,6 +1206,9 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct sadb_msg *hdr,
x->sel.prefixlen_s = addr->sadb_address_prefixlen;
}
+ if (!x->sel.family)
+ x->sel.family = x->props.family;
+
if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) {
struct sadb_x_nat_t_type* n_type;
struct xfrm_encap_tmpl *natt;
@@ -2540,7 +2543,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
if (sel.sport)
- sel.sport_mask = ~0;
+ sel.sport_mask = htons(0xffff);
/* set destination address info of selector */
sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1],
@@ -2549,7 +2552,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
if (sel.dport)
- sel.dport_mask = ~0;
+ sel.dport_mask = htons(0xffff);
rq = (struct sadb_x_ipsecrequest *)(pol + 1);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 799a9208c4b..095be91829c 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -271,9 +271,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
}
}
-#define DEBUGFS_DEL(name, type)\
- debugfs_remove(sdata->debugfs.type.name);\
- sdata->debugfs.type.name = NULL;
+#define DEBUGFS_DEL(name, type) \
+ do { \
+ debugfs_remove(sdata->debugfs.type.name); \
+ sdata->debugfs.type.name = NULL; \
+ } while (0)
static void del_sta_files(struct ieee80211_sub_if_data *sdata)
{
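
Wrapping DEBUGFS_DEL() in do { ... } while (0) makes the two-statement macro expand to exactly one statement, so it can sit safely in an unbraced if/else. A userspace demonstration of the idiom follows; the macro and variable names are made up, not mac80211 fields.

```c
#include <stdio.h>
#include <stdlib.h>

/* Made-up cleanup macros; only the wrapping idiom matters. */
#define CLEANUP_BAD(p)	free(p); (p) = NULL
#define CLEANUP_GOOD(p)	do { free(p); (p) = NULL; } while (0)

int main(void)
{
	char *a = malloc(4);
	char *b = malloc(4);
	int have_a = 0;

	if (have_a)
		CLEANUP_GOOD(a);	/* expands to exactly one statement */
	else
		puts("kept a");		/* still pairs with the if above */

	/* With CLEANUP_BAD here, "(p) = NULL;" would escape the if body,
	 * and the if/else form above would not even compile. */
	CLEANUP_GOOD(b);

	free(a);
	return 0;
}
```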
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d41e696f398..da34ea70276 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -157,7 +157,7 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
struct sta_info *sta = file->private_data;
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
p += scnprintf(p, sizeof(buf)+buf-p, "%x ",
- sta->last_seq_ctrl[i]);
+ le16_to_cpu(sta->last_seq_ctrl[i]));
p += scnprintf(p, sizeof(buf)+buf-p, "\n");
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index c944b17d0fc..8ec5ed192b5 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -1650,6 +1650,7 @@ static int ieee80211_master_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < headroom) {
if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
dev_kfree_skb(skb);
+ dev_put(odev);
return 0;
}
}
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index d0e1ab5589d..e7904db5532 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -697,17 +697,24 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
if (!netif_running(dev))
return -ENETDOWN;
- if (local->scan_flags & IEEE80211_SCAN_MATCH_SSID) {
- if (sdata->type == IEEE80211_IF_TYPE_STA ||
- sdata->type == IEEE80211_IF_TYPE_IBSS) {
+ switch (sdata->type) {
+ case IEEE80211_IF_TYPE_STA:
+ case IEEE80211_IF_TYPE_IBSS:
+ if (local->scan_flags & IEEE80211_SCAN_MATCH_SSID) {
ssid = sdata->u.sta.ssid;
ssid_len = sdata->u.sta.ssid_len;
- } else if (sdata->type == IEEE80211_IF_TYPE_AP) {
+ }
+ break;
+ case IEEE80211_IF_TYPE_AP:
+ if (local->scan_flags & IEEE80211_SCAN_MATCH_SSID) {
ssid = sdata->u.ap.ssid;
ssid_len = sdata->u.ap.ssid_len;
- } else
- return -EINVAL;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+
return ieee80211_sta_req_scan(dev, ssid, ssid_len);
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index aa086c83af8..0fe11889ce1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -79,7 +79,8 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
a = jhash2(tuple->src.u3.all, ARRAY_SIZE(tuple->src.u3.all),
(tuple->src.l3num << 16) | tuple->dst.protonum);
b = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
- (tuple->src.u.all << 16) | tuple->dst.u.all);
+ ((__force __u16)tuple->src.u.all << 16) |
+ (__force __u16)tuple->dst.u.all);
return jhash_2words(a, b, rnd) % size;
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 1aa6229ca99..3ac64e25f10 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -80,7 +80,7 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
(((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
- tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
+ (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
nf_ct_expect_hsize;
}
@@ -259,8 +259,8 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
}
if (src) {
- exp->tuple.src.u.all = (__force u16)*src;
- exp->mask.src.u.all = 0xFFFF;
+ exp->tuple.src.u.all = *src;
+ exp->mask.src.u.all = htons(0xFFFF);
} else {
exp->tuple.src.u.all = 0;
exp->mask.src.u.all = 0;
@@ -272,7 +272,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
memset((void *)&exp->tuple.dst.u3 + len, 0x00,
sizeof(exp->tuple.dst.u3) - len);
- exp->tuple.dst.u.all = (__force u16)*dst;
+ exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
@@ -477,15 +477,14 @@ static int exp_open(struct inode *inode, struct file *file)
struct ct_expect_iter_state *st;
int ret;
- st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
- if (st == NULL)
+ st = kzalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
+ if (!st)
return -ENOMEM;
ret = seq_open(file, &exp_seq_ops);
if (ret)
goto out_free;
seq = file->private_data;
seq->private = st;
- memset(st, 0, sizeof(struct ct_expect_iter_state));
return ret;
out_free:
kfree(st);
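
The exp_open() change replaces kmalloc()+memset() with kzalloc(), so the iterator state can never be used before it is zeroed. The closest userspace analogue is calloc() versus malloc()+memset(), sketched here with an invented struct:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Invented stand-in for ct_expect_iter_state. */
struct iter_state {
	unsigned int bucket;
	void *head;
};

int main(void)
{
	/* two steps: allocate, then remember to zero */
	struct iter_state *a = malloc(sizeof(*a));
	if (a)
		memset(a, 0, sizeof(*a));

	/* one step: allocation and zeroing cannot be separated */
	struct iter_state *b = calloc(1, sizeof(*b));

	printf("a->bucket=%u b->bucket=%u\n",
	       a ? a->bucket : 0, b ? b->bucket : 0);
	free(a);
	free(b);
	return 0;
}
```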
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index ca10df40784..96aa637c093 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -39,7 +39,7 @@ static int nf_ct_helper_vmalloc;
static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
{
return (((tuple->src.l3num << 8) | tuple->dst.protonum) ^
- tuple->src.u.all) % nf_ct_helper_hsize;
+ (__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
}
struct nf_conntrack_helper *
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6f89b105a20..2863e72b409 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1052,17 +1052,18 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
}
/* implicit 'else' */
- /* we only allow nat config for new conntracks */
- if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) {
- err = -EINVAL;
- goto out_unlock;
- }
-
/* We manipulate the conntrack inside the global conntrack table lock,
* so there's no need to increase the refcount */
err = -EEXIST;
- if (!(nlh->nlmsg_flags & NLM_F_EXCL))
- err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h), cda);
+ if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
+ /* we only allow nat config for new conntracks */
+ if (cda[CTA_NAT_SRC-1] || cda[CTA_NAT_DST-1]) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ err = ctnetlink_change_conntrack(nf_ct_tuplehash_to_ctrack(h),
+ cda);
+ }
out_unlock:
write_unlock_bh(&nf_conntrack_lock);
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 3335dd5be96..06cff1d1369 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -42,13 +42,13 @@ struct xt_connlimit_data {
static u_int32_t connlimit_rnd;
static bool connlimit_rnd_inited;
-static inline unsigned int connlimit_iphash(u_int32_t addr)
+static inline unsigned int connlimit_iphash(__be32 addr)
{
if (unlikely(!connlimit_rnd_inited)) {
get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
connlimit_rnd_inited = true;
}
- return jhash_1word(addr, connlimit_rnd) & 0xFF;
+ return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
}
static inline unsigned int
@@ -66,7 +66,7 @@ connlimit_iphash6(const union nf_conntrack_address *addr,
for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
res.ip6[i] = addr->ip6[i] & mask->ip6[i];
- return jhash2(res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
+ return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
}
static inline bool already_closed(const struct nf_conn *conn)
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 04b677ae8da..74f9b14c012 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -21,6 +21,7 @@ static bool u32_match_it(const struct xt_u32 *data,
unsigned int nnums;
unsigned int nvals;
unsigned int i;
+ __be32 n;
u_int32_t pos;
u_int32_t val;
u_int32_t at;
@@ -38,9 +39,9 @@ static bool u32_match_it(const struct xt_u32 *data,
if (skb->len < 4 || pos > skb->len - 4)
return false;
- ret = skb_copy_bits(skb, pos, &val, sizeof(val));
+ ret = skb_copy_bits(skb, pos, &n, sizeof(n));
BUG_ON(ret < 0);
- val = ntohl(val);
+ val = ntohl(n);
nnums = ct->nnums;
/* Inner loop runs over "&", "<<", ">>" and "@" operands */
@@ -65,10 +66,10 @@ static bool u32_match_it(const struct xt_u32 *data,
pos > skb->len - at - 4)
return false;
- ret = skb_copy_bits(skb, at + pos, &val,
- sizeof(val));
+ ret = skb_copy_bits(skb, at + pos, &n,
+ sizeof(n));
BUG_ON(ret < 0);
- val = ntohl(val);
+ val = ntohl(n);
break;
}
}
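
The xt_u32 change copies the raw 32-bit field into a variable declared to hold network byte order and converts it exactly once with ntohl(), which keeps the endianness annotations meaningful for sparse. A userspace sketch with memcpy() standing in for skb_copy_bits():

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* pretend packet bytes: 0x11223344 stored big-endian on the wire */
	const uint8_t pkt[] = { 0x11, 0x22, 0x33, 0x44 };

	uint32_t n;	/* holds the field in network byte order */
	uint32_t val;	/* host-order value used for the match logic */

	memcpy(&n, pkt, sizeof(n));	/* like skb_copy_bits(skb, pos, &n, 4) */
	val = ntohl(n);			/* single, explicit conversion */

	printf("val = %#010x\n", val);	/* 0x11223344 on any host */
	return 0;
}
```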
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index f46a0aeec44..b6c844b7e1c 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -126,7 +126,9 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain, u32 def)
if (domain != NULL) {
bkt = netlbl_domhsh_hash(domain);
- list_for_each_entry_rcu(iter, &netlbl_domhsh->tbl[bkt], list)
+ list_for_each_entry_rcu(iter,
+ &rcu_dereference(netlbl_domhsh)->tbl[bkt],
+ list)
if (iter->valid && strcmp(iter->domain, domain) == 0)
return iter;
}
@@ -227,7 +229,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
spin_lock(&netlbl_domhsh_lock);
if (netlbl_domhsh_search(entry->domain, 0) == NULL)
list_add_tail_rcu(&entry->list,
- &netlbl_domhsh->tbl[bkt]);
+ &rcu_dereference(netlbl_domhsh)->tbl[bkt]);
else
ret_val = -EEXIST;
spin_unlock(&netlbl_domhsh_lock);
@@ -423,8 +425,8 @@ int netlbl_domhsh_walk(u32 *skip_bkt,
iter_bkt < rcu_dereference(netlbl_domhsh)->size;
iter_bkt++, chain_cnt = 0) {
list_for_each_entry_rcu(iter_entry,
- &netlbl_domhsh->tbl[iter_bkt],
- list)
+ &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt],
+ list)
if (iter_entry->valid) {
if (chain_cnt++ < *skip_chain)
continue;
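
The netlabel hunks make every access to the hash table go through rcu_dereference(netlbl_domhsh) before its buckets are indexed, instead of reading the global pointer raw. The userspace sketch below uses C11 acquire/release atomics as a rough stand-in for the RCU accessors; the structure and names are invented for the example.

```c
#include <stdio.h>
#include <stdatomic.h>

/* Invented miniature of the domain hash: a size and a bucket array. */
struct domhsh {
	unsigned int size;
	const char **tbl;
};

static _Atomic(struct domhsh *) domhsh_ptr;

static const char *lookup(unsigned int bkt)
{
	/* one ordered load of the shared pointer, then use the local copy */
	struct domhsh *h = atomic_load_explicit(&domhsh_ptr,
						memory_order_acquire);
	return (h && bkt < h->size) ? h->tbl[bkt] : NULL;
}

int main(void)
{
	static const char *buckets[] = { "example.com", "test.invalid" };
	static struct domhsh h = { 2, buckets };

	atomic_store_explicit(&domhsh_ptr, &h, memory_order_release);
	printf("%s\n", lookup(1));
	return 0;
}
```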
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 89dcc485653..85a96a3fdda 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -113,8 +113,10 @@ struct audit_buffer *netlbl_audit_start_common(int type,
if (audit_info->secid != 0 &&
security_secid_to_secctx(audit_info->secid,
&secctx,
- &secctx_len) == 0)
+ &secctx_len) == 0) {
audit_log_format(audit_buf, " subj=%s", secctx);
+ security_release_secctx(secctx, secctx_len);
+ }
return audit_buf;
}
diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
index 372b24466dc..d6667f7bc85 100644
--- a/net/rxrpc/ar-connection.c
+++ b/net/rxrpc/ar-connection.c
@@ -71,7 +71,7 @@ struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
struct rb_node *p, *parent, **pp;
_enter("%p{%x},%x,%hx,",
- rx, key_serial(key), trans->debug_id, ntohl(service_id));
+ rx, key_serial(key), trans->debug_id, ntohs(service_id));
if (rx->trans == trans && rx->bundle) {
atomic_inc(&rx->bundle->usage);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index bf90e60f841..6085be57845 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -16,7 +16,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
-#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/act_api.h>
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 13c09bc32aa..dee0d5fb39c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -380,6 +380,10 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
return;
while ((parentid = sch->parent)) {
sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+ if (sch == NULL) {
+ WARN_ON(parentid != TC_H_ROOT);
+ return;
+ }
cops = sch->ops->cl_ops;
if (cops->qlen_notify) {
cl = cops->get(sch, parentid);
@@ -420,8 +424,6 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
unsigned long cl = cops->get(parent, classid);
if (cl) {
err = cops->graft(parent, cl, new, old);
- if (new)
- new->parent = classid;
cops->put(parent, cl);
}
}
@@ -436,7 +438,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
*/
static struct Qdisc *
-qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
+qdisc_create(struct net_device *dev, u32 parent, u32 handle,
+ struct rtattr **tca, int *errp)
{
int err;
struct rtattr *kind = tca[TCA_KIND-1];
@@ -482,6 +485,8 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
goto err_out2;
}
+ sch->parent = parent;
+
if (handle == TC_H_INGRESS) {
sch->flags |= TCQ_F_INGRESS;
sch->stats_lock = &dev->ingress_lock;
@@ -758,9 +763,11 @@ create_n_graft:
if (!(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS)
- q = qdisc_create(dev, tcm->tcm_parent, tca, &err);
+ q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent,
+ tca, &err);
else
- q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
+ q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle,
+ tca, &err);
if (q == NULL) {
if (err == -EAGAIN)
goto replay;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 51f16b0af19..2d32fd27496 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -158,9 +158,8 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
break;
case TC_ACT_RECLASSIFY:
case TC_ACT_OK:
- case TC_ACT_UNSPEC:
- default:
skb->tc_index = TC_H_MIN(res.classid);
+ default:
result = TC_ACT_OK;
break;
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2d8c08493d6..4a49db65772 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,9 +38,11 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
struct prio_sched_data *q = qdisc_priv(sch);
u32 band = skb->priority;
struct tcf_result res;
+ int err;
*qerr = NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
+ err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
switch (tc_classify(skb, q->filter_list, &res)) {
case TC_ACT_STOLEN:
@@ -49,11 +51,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
case TC_ACT_SHOT:
return NULL;
}
-
- if (!q->filter_list ) {
-#else
- if (!q->filter_list || tc_classify(skb, q->filter_list, &res)) {
#endif
+ if (!q->filter_list || err < 0) {
if (TC_H_MAJ(band))
band = 0;
band = q->prio2band[band&TC_PRIO_MAX];
@@ -239,11 +238,13 @@ static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
/* If we're multiqueue, make sure the number of incoming bands
* matches the number of queues on the device we're associating with.
* If the number of bands requested is zero, then set q->bands to
- * dev->egress_subqueue_count.
+ * dev->egress_subqueue_count. Also, the root qdisc must be the
+ * only one that is enabled for multiqueue, since it's the only one
+ * that interacts with the underlying device.
*/
q->mq = RTA_GET_FLAG(tb[TCA_PRIO_MQ - 1]);
if (q->mq) {
- if (sch->handle != TC_H_ROOT)
+ if (sch->parent != TC_H_ROOT)
return -EINVAL;
if (netif_is_multiqueue(sch->dev)) {
if (q->bands == 0)
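
The prio_tune() hunk tests sch->parent rather than sch->handle, which only works because qdisc_create() above now records the parent id on the new qdisc. For reference, a tc handle packs a 16-bit major (the qdisc) and a 16-bit minor (the class) into one u32, with TC_H_ROOT as the special attach point; the illustration below mirrors the pkt_sched.h macros.

```c
#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ(h)		((h) & 0xFFFF0000U)
#define TC_H_MIN(h)		((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)	(((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))
#define TC_H_ROOT		0xFFFFFFFFU

int main(void)
{
	uint32_t handle = TC_H_MAKE(1U << 16, 0);	/* qdisc "1:" */
	uint32_t parent = TC_H_ROOT;			/* where it is attached */

	printf("handle major=%#x minor=%#x\n",
	       TC_H_MAJ(handle) >> 16, TC_H_MIN(handle));
	printf("attached at the root? %s\n",
	       parent == TC_H_ROOT ? "yes" : "no");
	return 0;
}
```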
diff --git a/net/sctp/input.c b/net/sctp/input.c
index d57ff7f3c57..47e56017f4c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -590,7 +590,7 @@ out_unlock:
* Return 0 - If further processing is needed.
* Return 1 - If the packet can be discarded right away.
*/
-int sctp_rcv_ootb(struct sk_buff *skb)
+static int sctp_rcv_ootb(struct sk_buff *skb)
{
sctp_chunkhdr_t *ch;
__u8 *ch_end;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2c29394fd92..f8aa23dda1c 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -641,6 +641,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
newsctp6sk = (struct sctp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newsctp6sk->inet6;
+ sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;
+
newinet = inet_sk(newsk);
newnp = inet6_sk(newsk);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 8d18f570c2e..51c4d7fef1d 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -65,8 +65,6 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-extern struct kmem_cache *sctp_chunk_cachep;
-
SCTP_STATIC
struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
__u8 type, __u8 flags, int paylen);
@@ -115,15 +113,12 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
const void *payload, size_t paylen)
{
sctp_errhdr_t err;
- int padlen;
__u16 len;
/* Cause code constants are now defined in network order. */
err.cause = cause_code;
len = sizeof(sctp_errhdr_t) + paylen;
- padlen = len % 4;
err.length = htons(len);
- len += padlen;
chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
sctp_addto_chunk(chunk, paylen, payload);
}
@@ -1454,7 +1449,6 @@ no_hmac:
do_gettimeofday(&tv);
if (!asoc && tv_lt(bear_cookie->expiration, tv)) {
- __u16 len;
/*
* Section 3.3.10.3 Stale Cookie Error (3)
*
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index fd2dfdd7d7f..71cad56dd73 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -97,6 +97,13 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands,
const struct sctp_association *asoc,
struct sctp_transport *transport);
+static sctp_disposition_t sctp_sf_abort_violation(
+ const struct sctp_association *asoc,
+ void *arg,
+ sctp_cmd_seq_t *commands,
+ const __u8 *payload,
+ const size_t paylen);
+
static sctp_disposition_t sctp_sf_violation_chunklen(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
@@ -104,6 +111,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
void *arg,
sctp_cmd_seq_t *commands);
+static sctp_disposition_t sctp_sf_violation_ctsn(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands);
+
/* Small helper function that checks if the chunk length
* is of the appropriate length. The 'required_length' argument
* is set to be the size of a specific chunk we are testing.
@@ -2880,6 +2894,13 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_DISCARD;
}
+ /* If Cumulative TSN Ack beyond the max tsn currently
+ * send, terminating the association and respond to the
+ * sender with an ABORT.
+ */
+ if (!TSN_lt(ctsn, asoc->next_tsn))
+ return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
+
/* Return this SACK for further processing. */
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh));
@@ -3691,40 +3712,21 @@ sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_VIOLATION;
}
-
/*
- * Handle a protocol violation when the chunk length is invalid.
- * "Invalid" length is identified as smaller then the minimal length a
- * given chunk can be. For example, a SACK chunk has invalid length
- * if it's length is set to be smaller then the size of sctp_sack_chunk_t.
- *
- * We inform the other end by sending an ABORT with a Protocol Violation
- * error code.
- *
- * Section: Not specified
- * Verification Tag: Nothing to do
- * Inputs
- * (endpoint, asoc, chunk)
- *
- * Outputs
- * (reply_msg, msg_up, counters)
- *
- * Generate an ABORT chunk and terminate the association.
+ * Common function to handle a protocol violation.
*/
-static sctp_disposition_t sctp_sf_violation_chunklen(
- const struct sctp_endpoint *ep,
+static sctp_disposition_t sctp_sf_abort_violation(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
void *arg,
- sctp_cmd_seq_t *commands)
+ sctp_cmd_seq_t *commands,
+ const __u8 *payload,
+ const size_t paylen)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
- char err_str[]="The following chunk had invalid length:";
/* Make the abort chunk. */
- abort = sctp_make_abort_violation(asoc, chunk, err_str,
- sizeof(err_str));
+ abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
if (!abort)
goto nomem;
@@ -3756,6 +3758,57 @@ nomem:
return SCTP_DISPOSITION_NOMEM;
}
+/*
+ * Handle a protocol violation when the chunk length is invalid.
+ * "Invalid" length is identified as smaller then the minimal length a
+ * given chunk can be. For example, a SACK chunk has invalid length
+ * if it's length is set to be smaller then the size of sctp_sack_chunk_t.
+ *
+ * We inform the other end by sending an ABORT with a Protocol Violation
+ * error code.
+ *
+ * Section: Not specified
+ * Verification Tag: Nothing to do
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (reply_msg, msg_up, counters)
+ *
+ * Generate an ABORT chunk and terminate the association.
+ */
+static sctp_disposition_t sctp_sf_violation_chunklen(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ char err_str[]="The following chunk had invalid length:";
+
+ return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+
+/* Handle a protocol violation when the peer trying to advance the
+ * cumulative tsn ack to a point beyond the max tsn currently sent.
+ *
+ * We inform the other end by sending an ABORT with a Protocol Violation
+ * error code.
+ */
+static sctp_disposition_t sctp_sf_violation_ctsn(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ char err_str[]="The cumulative tsn ack beyond the max tsn currently sent:";
+
+ return sctp_sf_abort_violation(asoc, arg, commands, err_str,
+ sizeof(err_str));
+}
+
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/
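
The new sanity check aborts the association when a SACK's Cumulative TSN Ack reaches or passes next_tsn, i.e. acknowledges data that was never sent. TSN comparisons use 32-bit serial arithmetic so they stay correct across wraparound; the sketch below defines a TSN_lt() in the same spirit as the kernel macro and is a standalone illustration, not the SCTP code.

```c
#include <stdio.h>
#include <stdint.h>

/* Wraparound-safe "a < b" on 32-bit serial numbers. */
#define TSN_lt(a, b)	((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
	uint32_t next_tsn = 0x00000010;		/* first TSN never sent */
	uint32_t ctsn_ok  = 0x0000000f;		/* behind next_tsn: accept */
	uint32_t ctsn_bad = 0x00000010;		/* at/after next_tsn: ABORT */
	uint32_t wrapped  = 0xfffffff0;		/* behind, across the wrap */

	printf("ok accepted?      %d\n", TSN_lt(ctsn_ok, next_tsn));
	printf("bad rejected?     %d\n", !TSN_lt(ctsn_bad, next_tsn));
	printf("wrapped accepted? %d\n", TSN_lt(wrapped, next_tsn));
	return 0;
}
```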
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ee88f2ea510..01c6364245b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -107,8 +107,6 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
struct sctp_association *, sctp_socket_type_t);
static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
-extern struct kmem_cache *sctp_bucket_cachep;
-
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
{
@@ -433,7 +431,7 @@ out:
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
-int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
+static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
int cnt;
int retval = 0;
@@ -602,7 +600,7 @@ out:
*
* Only sctp_setsockopt_bindx() is supposed to call this function.
*/
-int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
+static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
{
struct sctp_sock *sp = sctp_sk(sk);
struct sctp_endpoint *ep = sp->ep;
@@ -977,7 +975,7 @@ static int __sctp_connect(struct sock* sk,
int err = 0;
int addrcnt = 0;
int walk_size = 0;
- union sctp_addr *sa_addr;
+ union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
unsigned int f_flags = 0;
@@ -1011,7 +1009,10 @@ static int __sctp_connect(struct sock* sk,
goto out_free;
}
- err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len);
+ /* Save current address so we can work with it */
+ memcpy(&to, sa_addr, af->sockaddr_len);
+
+ err = sctp_verify_addr(sk, &to, af->sockaddr_len);
if (err)
goto out_free;
@@ -1021,12 +1022,11 @@ static int __sctp_connect(struct sock* sk,
if (asoc && asoc->peer.port && asoc->peer.port != port)
goto out_free;
- memcpy(&to, sa_addr, af->sockaddr_len);
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
*/
- asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport);
+ asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
if (asoc2 && asoc2 != asoc) {
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
@@ -1039,7 +1039,7 @@ static int __sctp_connect(struct sock* sk,
* make sure that there is no peeled-off association matching
* the peer address even on another socket.
*/
- if (sctp_endpoint_is_peeled_off(ep, sa_addr)) {
+ if (sctp_endpoint_is_peeled_off(ep, &to)) {
err = -EADDRNOTAVAIL;
goto out_free;
}
@@ -1070,7 +1070,7 @@ static int __sctp_connect(struct sock* sk,
}
}
- scope = sctp_scope(sa_addr);
+ scope = sctp_scope(&to);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc) {
err = -ENOMEM;
@@ -1079,7 +1079,7 @@ static int __sctp_connect(struct sock* sk,
}
/* Prime the peer's transport structures. */
- transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL,
+ transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
@@ -1103,8 +1103,8 @@ static int __sctp_connect(struct sock* sk,
/* Initialize sk's dport and daddr for getpeername() */
inet_sk(sk)->dport = htons(asoc->peer.port);
- af = sctp_get_af_specific(to.sa.sa_family);
- af->to_sk_daddr(&to, sk);
+ af = sctp_get_af_specific(sa_addr->sa.sa_family);
+ af->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
/* in-kernel sockets don't generally have a file allocated to them
@@ -1531,7 +1531,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
}
if (sinfo_flags & SCTP_ABORT) {
- struct sctp_chunk *chunk;
chunk = sctp_make_abort_user(asoc, msg, msg_len);
if (!chunk) {
@@ -4353,7 +4352,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
space_left, &bytes_copied);
if (cnt < 0) {
err = cnt;
- goto error;
+ goto error_lock;
}
goto copy_getaddrs;
}
@@ -4367,7 +4366,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if (space_left < addrlen) {
err = -ENOMEM; /*fixme: right error?*/
- goto error;
+ goto error_lock;
}
memcpy(buf, &temp, addrlen);
buf += addrlen;
@@ -4381,15 +4380,21 @@ copy_getaddrs:
if (copy_to_user(to, addrs, bytes_copied)) {
err = -EFAULT;
- goto error;
+ goto out;
}
if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) {
err = -EFAULT;
- goto error;
+ goto out;
}
if (put_user(bytes_copied, optlen))
err = -EFAULT;
-error:
+
+ goto out;
+
+error_lock:
+ sctp_read_unlock(addr_lock);
+
+out:
kfree(addrs);
return err;
}
@@ -5964,7 +5969,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
return err;
}
-void sctp_wait_for_close(struct sock *sk, long timeout)
+static void sctp_wait_for_close(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
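
The sctp_getsockopt_local_addrs() rework splits the error paths: failures detected while the address lock is still held jump to error_lock, which unlocks before the common cleanup, while failures after the copy_to_user() phase go straight to out. A userspace sketch of the same goto layout, with a pthread rwlock standing in for the kernel lock and invented failure switches:

```c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_rwlock_t addr_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Errors found under the lock exit via error_lock (unlock first);
 * errors found after the unlock go straight to the common cleanup. */
static int get_addrs(int fail_locked, int fail_unlocked)
{
	int err = 0;
	char *addrs = malloc(64);

	if (!addrs)
		return -1;

	pthread_rwlock_rdlock(&addr_lock);
	if (fail_locked) {
		err = -1;
		goto error_lock;
	}
	pthread_rwlock_unlock(&addr_lock);

	if (fail_unlocked) {
		err = -1;
		goto out;		/* lock already released */
	}
	goto out;

error_lock:
	pthread_rwlock_unlock(&addr_lock);
out:
	free(addrs);
	return err;
}

int main(void)
{
	printf("%d %d %d\n", get_addrs(0, 0), get_addrs(1, 0), get_addrs(0, 1));
	return 0;
}
```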
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index d3192a1babc..1ff0daade30 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -161,7 +161,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
__u16 *start, __u16 *end)
{
int started, ended;
- __u16 _start, _end, offset;
+ __u16 start_, end_, offset;
/* We haven't found a gap yet. */
started = ended = 0;
@@ -175,7 +175,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
offset = iter->start - map->base_tsn;
sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0,
- &started, &_start, &ended, &_end);
+ &started, &start_, &ended, &end_);
}
/* Do we need to check the overflow map? */
@@ -193,8 +193,8 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
offset,
map->len,
map->len,
- &started, &_start,
- &ended, &_end);
+ &started, &start_,
+ &ended, &end_);
}
/* The Gap Ack Block happens to end at the end of the
@@ -202,7 +202,7 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
*/
if (started && !ended) {
ended++;
- _end = map->len + map->len - 1;
+ end_ = map->len + map->len - 1;
}
/* If we found a Gap Ack Block, return the start and end and
@@ -215,8 +215,8 @@ SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map,
int gap = map->cumulative_tsn_ack_point -
map->base_tsn;
- *start = _start - gap;
- *end = _end - gap;
+ *start = start_ - gap;
+ *end = end_ - gap;
/* Move the iterator forward. */
iter->start = map->cumulative_tsn_ack_point + *end + 1;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 4bbc59cc237..53995af9ca4 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -736,9 +736,6 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
dprintk("RPC: gss_free_ctx\n");
- if (ctx->gc_gss_ctx)
- gss_delete_sec_context(&ctx->gc_gss_ctx);
-
kfree(ctx->gc_wire_ctx.data);
kfree(ctx);
}
@@ -753,7 +750,13 @@ gss_free_ctx_callback(struct rcu_head *head)
static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
+ struct gss_ctx *gc_gss_ctx;
+
+ gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
+ rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
+ if (gc_gss_ctx)
+ gss_delete_sec_context(&gc_gss_ctx);
}
static void
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index dc2f41e9f57..7da7050f06c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -42,7 +42,6 @@
#include <linux/pagemap.h>
#include <linux/sunrpc/auth_gss.h>
-#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 01c3c410520..ebe344f34d1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -371,8 +371,7 @@ int cache_unregister(struct cache_detail *cd)
}
if (list_empty(&cache_list)) {
/* module must be being unloaded so its safe to kill the worker */
- cancel_delayed_work(&cache_cleaner);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&cache_cleaner);
}
return 0;
}
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 650af064ff8..669e12a4ed1 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -132,8 +132,7 @@ rpc_close_pipes(struct inode *inode)
rpci->nwriters = 0;
if (ops->release_pipe)
ops->release_pipe(inode);
- cancel_delayed_work(&rpci->queue_timeout);
- flush_workqueue(rpciod_workqueue);
+ cancel_delayed_work_sync(&rpci->queue_timeout);
}
rpc_inode_setowner(inode, NULL);
mutex_unlock(&inode->i_mutex);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b5723c262a3..954d7ec86c7 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -50,8 +50,6 @@ static RPC_WAITQ(delay_queue, "delayq");
/*
* rpciod-related stuff
*/
-static DEFINE_MUTEX(rpciod_mutex);
-static atomic_t rpciod_users = ATOMIC_INIT(0);
struct workqueue_struct *rpciod_workqueue;
/*
@@ -961,60 +959,49 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
spin_unlock(&clnt->cl_lock);
}
+int rpciod_up(void)
+{
+ return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
+}
+
+void rpciod_down(void)
+{
+ module_put(THIS_MODULE);
+}
+
/*
- * Start up the rpciod process if it's not already running.
+ * Start up the rpciod workqueue.
*/
-int
-rpciod_up(void)
+static int rpciod_start(void)
{
struct workqueue_struct *wq;
- int error = 0;
-
- if (atomic_inc_not_zero(&rpciod_users))
- return 0;
-
- mutex_lock(&rpciod_mutex);
- /* Guard against races with rpciod_down() */
- if (rpciod_workqueue != NULL)
- goto out_ok;
/*
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- error = -ENOMEM;
wq = create_workqueue("rpciod");
- if (wq == NULL)
- goto out;
-
rpciod_workqueue = wq;
- error = 0;
-out_ok:
- atomic_inc(&rpciod_users);
-out:
- mutex_unlock(&rpciod_mutex);
- return error;
+ return rpciod_workqueue != NULL;
}
-void
-rpciod_down(void)
+static void rpciod_stop(void)
{
- if (!atomic_dec_and_test(&rpciod_users))
- return;
+ struct workqueue_struct *wq = NULL;
- mutex_lock(&rpciod_mutex);
+ if (rpciod_workqueue == NULL)
+ return;
dprintk("RPC: destroying workqueue rpciod\n");
- if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
- destroy_workqueue(rpciod_workqueue);
- rpciod_workqueue = NULL;
- }
- mutex_unlock(&rpciod_mutex);
+ wq = rpciod_workqueue;
+ rpciod_workqueue = NULL;
+ destroy_workqueue(wq);
}
void
rpc_destroy_mempool(void)
{
+ rpciod_stop();
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
@@ -1048,6 +1035,8 @@ rpc_init_mempool(void)
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
+ if (!rpciod_start())
+ goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 64b9b8c743c..12ff5da8160 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -131,13 +131,13 @@ static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
case AF_INET:
snprintf(buf, len, "%u.%u.%u.%u, port=%u",
NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
- htons(((struct sockaddr_in *) addr)->sin_port));
+ ntohs(((struct sockaddr_in *) addr)->sin_port));
break;
case AF_INET6:
snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
- htons(((struct sockaddr_in6 *) addr)->sin6_port));
+ ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
break;
default:
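
__svc_print_addr() now converts ports with ntohs(). Numerically htons() and ntohs() perform the same swap, so the old output was not wrong; the point is stating the direction of conversion (from network order) and keeping the __be16 annotations consistent for sparse. A trivial illustration, with the port number chosen arbitrarily:

```c
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port   = htons(2049),	/* stored in network order */
	};

	/* Convert *from* network order when printing for a human. */
	printf("port=%u\n", ntohs(sin.sin_port));
	return 0;
}
```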
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 5adfdfd49d6..1b17fecee74 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -423,6 +423,17 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
return NULL;
}
+ if (LINK_LOG_BUF_SIZE) {
+ char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+ if (!pb) {
+ kfree(l_ptr);
+ warn("Link creation failed, no memory for print buffer\n");
+ return NULL;
+ }
+ tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+ }
+
l_ptr->addr = peer;
if_name = strchr(b_ptr->publ.name, ':') + 1;
sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
@@ -432,8 +443,6 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
/* note: peer i/f is appended to link name by reset/activate */
memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
- k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
- list_add_tail(&l_ptr->link_list, &b_ptr->links);
l_ptr->checkpoint = 1;
l_ptr->b_ptr = b_ptr;
link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
@@ -459,21 +468,14 @@ struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
l_ptr->owner = tipc_node_attach_link(l_ptr);
if (!l_ptr->owner) {
+ if (LINK_LOG_BUF_SIZE)
+ kfree(l_ptr->print_buf.buf);
kfree(l_ptr);
return NULL;
}
- if (LINK_LOG_BUF_SIZE) {
- char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
-
- if (!pb) {
- kfree(l_ptr);
- warn("Link creation failed, no memory for print buffer\n");
- return NULL;
- }
- tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
- }
-
+ k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
+ list_add_tail(&l_ptr->link_list, &b_ptr->links);
tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
@@ -2381,10 +2383,10 @@ void tipc_link_changeover(struct link *l_ptr)
struct tipc_msg *msg = buf_msg(crs);
if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
- u32 msgcount = msg_msgcnt(msg);
struct tipc_msg *m = msg_get_wrapped(msg);
unchar* pos = (unchar*)m;
+ msgcount = msg_msgcnt(msg);
while (msgcount--) {
msg_set_seqno(m,msg_seqno(msg));
tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 35d5ba1d4f4..ce265983637 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -72,10 +72,8 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
u32 pos, u32 mask, u32 val)
{
val = (val & mask) << pos;
- val = htonl(val);
- mask = htonl(mask << pos);
- m->hdr[w] &= ~mask;
- m->hdr[w] |= val;
+ m->hdr[w] &= ~htonl(mask << pos);
+ m->hdr[w] |= htonl(val);
}
/*
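
The reworked msg_set_bits() builds the mask and value in host order and applies a single htonl() to each before clearing and setting the big-endian header word. The same clear-then-set pattern in a standalone sketch; the header contents are invented.

```c
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Clear a bit-field inside a big-endian header word, then set it,
 * converting mask and value to network order exactly once each. */
static void set_bits(uint32_t *hdr_word, uint32_t pos, uint32_t mask,
		     uint32_t val)
{
	val = (val & mask) << pos;
	*hdr_word &= ~htonl(mask << pos);
	*hdr_word |= htonl(val);
}

int main(void)
{
	uint32_t w = htonl(0x12345678);

	set_bits(&w, 16, 0xff, 0xab);		/* replace bits 23..16 */
	printf("%#010x\n", ntohl(w));		/* prints 0x12ab5678 */
	return 0;
}
```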
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index d8473eefcd2..ac7dfdda797 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -501,7 +501,7 @@ end_node:
* sequence overlapping with the requested sequence
*/
-void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
{
struct sub_seq *sseq = nseq->sseqs;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index e2e452a62ba..598f4d3a009 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -241,8 +241,6 @@ struct node *tipc_node_attach_link(struct link *l_ptr)
char addr_string[16];
if (n_ptr->link_cnt >= 2) {
- char addr_string[16];
-
err("Attempt to create third link to %s\n",
addr_string_fill(addr_string, n_ptr->addr));
return NULL;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 5d2b9ce84d0..76088153524 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -41,7 +41,6 @@
#include "addr.h"
#include "link.h"
#include "node.h"
-#include "port.h"
#include "name_table.h"
#include "user_reg.h"
#include "msg.h"
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 65ebccc0a69..a05c34260e7 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,14 +118,40 @@
int sysctl_unix_max_dgram_qlen __read_mostly = 10;
-struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
-DEFINE_SPINLOCK(unix_table_lock);
+static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+static DEFINE_SPINLOCK(unix_table_lock);
static atomic_t unix_nr_socks = ATOMIC_INIT(0);
#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+static struct sock *first_unix_socket(int *i)
+{
+ for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
+ if (!hlist_empty(&unix_socket_table[*i]))
+ return __sk_head(&unix_socket_table[*i]);
+ }
+ return NULL;
+}
+
+static struct sock *next_unix_socket(int *i, struct sock *s)
+{
+ struct sock *next = sk_next(s);
+ /* More in this chain? */
+ if (next)
+ return next;
+ /* Look for next non-empty chain. */
+ for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
+ if (!hlist_empty(&unix_socket_table[*i]))
+ return __sk_head(&unix_socket_table[*i]);
+ }
+ return NULL;
+}
+
+#define forall_unix_sockets(i, s) \
+ for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
+
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 849cc06bd91..9ab31a3ce3a 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -46,7 +46,6 @@
#include <linux/capability.h>
#include <linux/errno.h> /* return codes */
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h> /* support for loadable modules */
#include <linux/slab.h> /* kmalloc(), kfree() */
#include <linux/mm.h>
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c3a4b0a1868..7012891d39f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -23,10 +23,9 @@
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
+#include <linux/audit.h>
#include <net/xfrm.h>
#include <net/ip.h>
-#include <linux/audit.h>
-#include <linux/cache.h>
#include "xfrm_hash.h"
@@ -1299,7 +1298,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
xfrm_address_t *local = saddr;
struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
- if (tmpl->mode == XFRM_MODE_TUNNEL) {
+ if (tmpl->mode == XFRM_MODE_TUNNEL ||
+ tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
family = tmpl->encap_family;
@@ -2194,9 +2194,10 @@ void xfrm_audit_log(uid_t auid, u32 sid, int type, int result,
}
if (sid != 0 &&
- security_secid_to_secctx(sid, &secctx, &secctx_len) == 0)
+ security_secid_to_secctx(sid, &secctx, &secctx_len) == 0) {
audit_log_format(audit_buf, " subj=%s", secctx);
- else
+ security_release_secctx(secctx, secctx_len);
+ } else
audit_log_task_context(audit_buf);
if (xp) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 38f90ca75b1..d4356e6f7f9 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -19,9 +19,8 @@
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
-#include <asm/uaccess.h>
#include <linux/audit.h>
-#include <linux/cache.h>
+#include <asm/uaccess.h>
#include "xfrm_hash.h"
@@ -611,7 +610,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
selector.
*/
if (x->km.state == XFRM_STATE_VALID) {
- if (!xfrm_selector_match(&x->sel, fl, family) ||
+ if (!xfrm_selector_match(&x->sel, fl, x->sel.family) ||
!security_xfrm_state_pol_flow_match(x, pol, fl))
continue;
if (!best ||
@@ -623,7 +622,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
acquire_in_progress = 1;
} else if (x->km.state == XFRM_STATE_ERROR ||
x->km.state == XFRM_STATE_EXPIRED) {
- if (xfrm_selector_match(&x->sel, fl, family) &&
+ if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
security_xfrm_state_pol_flow_match(x, pol, fl))
error = -ESRCH;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c06883bf620..61339e17a0f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -322,6 +322,13 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
x->props.family = p->family;
memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
x->props.flags = p->flags;
+
+ /*
+ * Set inner address family if the KM left it as zero.
+ * See comment in validate_tmpl.
+ */
+ if (!x->sel.family)
+ x->sel.family = p->family;
}
/*