Diffstat (limited to 'net')
-rw-r--r-- net/802/psnap.c | 1
-rw-r--r-- net/8021q/vlan.c | 5
-rw-r--r-- net/8021q/vlan.h | 5
-rw-r--r-- net/8021q/vlan_dev.c | 2
-rw-r--r-- net/9p/Kconfig | 8
-rw-r--r-- net/9p/client.c | 30
-rw-r--r-- net/9p/mod.c | 4
-rw-r--r-- net/9p/trans_fd.c | 7
-rw-r--r-- net/9p/trans_rdma.c | 3
-rw-r--r-- net/9p/util.c | 2
-rw-r--r-- net/atm/atm_sysfs.c | 10
-rw-r--r-- net/atm/lec.c | 2
-rw-r--r-- net/atm/mpc.c | 2
-rw-r--r-- net/atm/proc.c | 4
-rw-r--r-- net/bluetooth/l2cap_core.c | 2
-rw-r--r-- net/bridge/br_netfilter.c | 6
-rw-r--r-- net/bridge/netfilter/ebtables.c | 6
-rw-r--r-- net/caif/chnl_net.c | 9
-rw-r--r-- net/can/bcm.c | 6
-rw-r--r-- net/can/proc.c | 7
-rw-r--r-- net/ceph/messenger.c | 82
-rw-r--r-- net/ceph/osd_client.c | 19
-rw-r--r-- net/ceph/osdmap.c | 13
-rw-r--r-- net/core/dev.c | 19
-rw-r--r-- net/core/dst.c | 2
-rw-r--r-- net/core/ethtool.c | 25
-rw-r--r-- net/core/fib_rules.c | 1
-rw-r--r-- net/core/filter.c | 5
-rw-r--r-- net/core/net_namespace.c | 65
-rw-r--r-- net/core/rtnetlink.c | 14
-rw-r--r-- net/core/sysctl_net_core.c | 1
-rw-r--r-- net/core/utils.c | 1
-rw-r--r-- net/dns_resolver/dns_key.c | 10
-rw-r--r-- net/ipv4/af_inet.c | 3
-rw-r--r-- net/ipv4/igmp.c | 10
-rw-r--r-- net/ipv4/inet_connection_sock.c | 2
-rw-r--r-- net/ipv4/inetpeer.c | 42
-rw-r--r-- net/ipv4/ip_options.c | 15
-rw-r--r-- net/ipv4/ping.c | 3
-rw-r--r-- net/ipv4/raw.c | 2
-rw-r--r-- net/ipv4/tcp_ipv4.c | 6
-rw-r--r-- net/ipv4/udp.c | 2
-rw-r--r-- net/ipv6/raw.c | 2
-rw-r--r-- net/ipv6/tcp_ipv6.c | 6
-rw-r--r-- net/ipv6/udp.c | 2
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r-- net/iucv/iucv.c | 2
-rw-r--r-- net/key/af_key.c | 2
-rw-r--r-- net/mac80211/iface.c | 4
-rw-r--r-- net/mac80211/main.c | 22
-rw-r--r-- net/mac80211/mesh.h | 7
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 204
-rw-r--r-- net/mac80211/mlme.c | 7
-rw-r--r-- net/mac80211/scan.c | 6
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_ftp.c | 27
-rw-r--r-- net/netlink/af_netlink.c | 2
-rw-r--r-- net/packet/af_packet.c | 17
-rw-r--r-- net/phonet/socket.c | 2
-rw-r--r-- net/rds/ib.c | 2
-rw-r--r-- net/rds/ib_cm.c | 2
-rw-r--r-- net/rds/iw.c | 2
-rw-r--r-- net/rds/iw_cm.c | 2
-rw-r--r-- net/rds/rdma_transport.c | 3
-rw-r--r-- net/rfkill/Kconfig | 9
-rw-r--r-- net/rfkill/Makefile | 1
-rw-r--r-- net/rfkill/rfkill-gpio.c | 227
-rw-r--r-- net/sched/sch_sfq.c | 22
-rw-r--r-- net/sctp/associola.c | 33
-rw-r--r-- net/sctp/proc.c | 4
-rw-r--r-- net/sctp/sm_sideeffect.c | 3
-rw-r--r-- net/sctp/sm_statefuns.c | 14
-rw-r--r-- net/sunrpc/auth.c | 4
-rw-r--r-- net/sunrpc/clnt.c | 29
-rw-r--r-- net/sunrpc/rpcb_clnt.c | 97
-rw-r--r-- net/sunrpc/svc.c | 2
-rw-r--r-- net/sunrpc/svcsock.c | 336
-rw-r--r-- net/sunrpc/xdr.c | 19
-rw-r--r-- net/sunrpc/xprtrdma/svc_rdma_transport.c | 3
-rw-r--r-- net/sunrpc/xprtrdma/verbs.c | 2
-rw-r--r-- net/sunrpc/xprtsock.c | 435
-rw-r--r-- net/unix/af_unix.c | 2
-rw-r--r-- net/wireless/core.h | 5
-rw-r--r-- net/wireless/nl80211.c | 16
-rw-r--r-- net/wireless/scan.c | 43
-rw-r--r-- net/wireless/sme.c | 19
-rw-r--r-- net/wireless/util.c | 2
87 files changed, 1615 insertions, 502 deletions
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 21cde8fd579..db6baf7cf6e 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -147,7 +147,6 @@ struct datalink_proto *register_snap_client(const unsigned char *desc,
out:
spin_unlock_bh(&snap_lock);
- synchronize_net();
return proto;
}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b2274d1fd60..c7a581a9689 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -46,8 +46,6 @@ int vlan_net_id __read_mostly;
const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;
-static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
-static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
/* End of global variables definitions. */
@@ -673,8 +671,7 @@ static int __init vlan_proto_init(void)
{
int err;
- pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
- pr_info("All bugs added by %s\n", vlan_buggyright);
+ pr_info("%s v%s\n", vlan_fullname, vlan_version);
err = register_pernet_subsys(&vlan_net_ops);
if (err < 0)
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index c3408def8a1..9da07e30d1a 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -118,11 +118,6 @@ extern void vlan_netlink_fini(void);
extern struct rtnl_link_ops vlan_link_ops;
-static inline int is_vlan_dev(struct net_device *dev)
-{
- return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
extern int vlan_net_id;
struct proc_dir_entry;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f247f5bff88..7ea5cf9ea08 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -165,7 +165,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_end(&stats->syncp);
} else {
this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
}
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 7ed75c7bd5d..d9ea09b11cf 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -3,8 +3,8 @@
#
menuconfig NET_9P
- depends on NET && EXPERIMENTAL
- tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
+ depends on NET
+ tristate "Plan 9 Resource Sharing Support (9P2000)"
help
If you say Y here, you will get experimental support for
Plan 9 resource sharing via the 9P2000 protocol.
@@ -16,8 +16,8 @@ menuconfig NET_9P
if NET_9P
config NET_9P_VIRTIO
- depends on EXPERIMENTAL && VIRTIO
- tristate "9P Virtio Transport (Experimental)"
+ depends on VIRTIO
+ tristate "9P Virtio Transport"
help
This builds support for a transports between
guest partitions and a host partition.
diff --git a/net/9p/client.c b/net/9p/client.c
index ceab943dfc4..9e3b0e640da 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -92,9 +92,6 @@ static int get_protocol_version(const substring_t *name)
return version;
}
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
-
/**
* parse_options - parse mount options into client structure
* @opts: options string passed from mount
@@ -307,12 +304,13 @@ static int p9_tag_init(struct p9_client *c)
c->tagpool = p9_idpool_create();
if (IS_ERR(c->tagpool)) {
err = PTR_ERR(c->tagpool);
- c->tagpool = NULL;
goto error;
}
-
- p9_idpool_get(c->tagpool); /* reserve tag 0 */
-
+ err = p9_idpool_get(c->tagpool); /* reserve tag 0 */
+ if (err < 0) {
+ p9_idpool_destroy(c->tagpool);
+ goto error;
+ }
c->max_tag = 0;
error:
return err;
@@ -518,12 +516,15 @@ out_err:
return err;
}
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
+
/**
* p9_client_flush - flush (cancel) a request
* @c: client state
* @oldreq: request to cancel
*
- * This sents a flush for a particular requests and links
+ * This sents a flush for a particular request and links
* the flush request to the original request. The current
* code only supports a single flush request although the protocol
* allows for multiple flush requests to be sent for a single request.
@@ -789,11 +790,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
spin_lock_init(&clnt->lock);
INIT_LIST_HEAD(&clnt->fidlist);
- p9_tag_init(clnt);
+ err = p9_tag_init(clnt);
+ if (err < 0)
+ goto free_client;
err = parse_opts(options, clnt);
if (err < 0)
- goto free_client;
+ goto destroy_tagpool;
if (!clnt->trans_mod)
clnt->trans_mod = v9fs_get_default_trans();
@@ -802,13 +805,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
err = -EPROTONOSUPPORT;
P9_DPRINTK(P9_DEBUG_ERROR,
"No transport defined or default transport\n");
- goto free_client;
+ goto destroy_tagpool;
}
clnt->fidpool = p9_idpool_create();
if (IS_ERR(clnt->fidpool)) {
err = PTR_ERR(clnt->fidpool);
- clnt->fidpool = NULL;
goto put_trans;
}
@@ -834,6 +836,8 @@ destroy_fidpool:
p9_idpool_destroy(clnt->fidpool);
put_trans:
v9fs_put_trans(clnt->trans_mod);
+destroy_tagpool:
+ p9_idpool_destroy(clnt->tagpool);
free_client:
kfree(clnt);
return ERR_PTR(err);
@@ -1298,7 +1302,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
if (count < rsize)
rsize = count;
- /* Don't bother zerocopy form small IO (< 1024) */
+ /* Don't bother zerocopy for small IO (< 1024) */
if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
diff --git a/net/9p/mod.c b/net/9p/mod.c
index cf8a4128cd5..72c39827505 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -139,7 +139,7 @@ void v9fs_put_trans(struct p9_trans_module *m)
}
/**
- * v9fs_init - Initialize module
+ * init_p9 - Initialize module
*
*/
static int __init init_p9(void)
@@ -154,7 +154,7 @@ static int __init init_p9(void)
}
/**
- * v9fs_init - shutdown module
+ * exit_p9 - shutdown module
*
*/
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 4a9084395d3..fdfdb5747f6 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -916,8 +916,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
sin_server.sin_port = htons(opts.port);
- err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
-
+ err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
+ SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
if (err) {
P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
return err;
@@ -954,7 +954,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
sun_server.sun_family = PF_UNIX;
strcpy(sun_server.sun_path, addr);
- err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+ err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
+ SOCK_STREAM, 0, &csocket, 1);
if (err < 0) {
P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
return err;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 844a7a5607e..159c50f1c6b 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
return -ENOMEM;
/* Create the RDMA CM ID */
- rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
+ rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
+ IB_QPT_RC);
if (IS_ERR(rdma->cm_id))
goto error;
diff --git a/net/9p/util.c b/net/9p/util.c
index da6af81e59d..9c1c9348ac3 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -93,7 +93,7 @@ int p9_idpool_get(struct p9_idpool *p)
retry:
if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
- return 0;
+ return -1;
spin_lock_irqsave(&p->lock, flags);
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f7fa67c7876..f49da5814bc 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
return pos - buf;
}
+static ssize_t show_atmindex(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct atm_dev *adev = to_atm_dev(cdev);
+
+ return sprintf(buf, "%d\n", adev->number);
+}
+
static ssize_t show_carrier(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
+static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
static struct device_attribute *atm_attrs[] = {
&dev_attr_atmaddress,
&dev_attr_address,
+ &dev_attr_atmindex,
&dev_attr_carrier,
&dev_attr_type,
&dev_attr_link_rate,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 25073b6ef47..ba48daa68c1 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1171,7 +1171,7 @@ static int __init lane_module_init(void)
#endif
register_atm_ioctl(&lane_ioctl_ops);
- pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
+ pr_info("lec.c: initialized\n");
return 0;
}
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 644cdf07164..3ccca42e6f9 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1482,7 +1482,7 @@ static __init int atm_mpoa_init(void)
if (mpc_proc_init() != 0)
pr_info("failed to initialize /proc/mpoa\n");
- pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
+ pr_info("mpc.c: initialized\n");
return 0;
}
diff --git a/net/atm/proc.c b/net/atm/proc.c
index f85da0779e5..be3afdefec5 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -191,7 +191,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
struct sock *sk = sk_atm(vcc);
- seq_printf(seq, "%p ", vcc);
+ seq_printf(seq, "%pK ", vcc);
if (!vcc->dev)
seq_printf(seq, "Unassigned ");
else
@@ -218,7 +218,7 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
if (!vcc->dev)
seq_printf(seq, sizeof(void *) == 4 ?
- "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
+ "N/A@%pK%10s" : "N/A@%pK%2s", vcc, "");
else
seq_printf(seq, "%3d %3d %5d ",
vcc->dev->number, vcc->vpi, vcc->vci);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a86f9ba4f05..e64a1c2df23 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -906,7 +906,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
if (c->psm == psm) {
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src)) {
- read_unlock_bh(&chan_list_lock);
+ read_unlock(&chan_list_lock);
return c;
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e1f5ec75e91..3fa123185e8 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -117,6 +117,10 @@ static struct dst_ops fake_dst_ops = {
* ipt_REJECT needs it. Future netfilter modules might
* require us to fill additional fields.
*/
+static const u32 br_dst_default_metrics[RTAX_MAX] = {
+ [RTAX_MTU - 1] = 1500,
+};
+
void br_netfilter_rtable_init(struct net_bridge *br)
{
struct rtable *rt = &br->fake_rtable;
@@ -124,7 +128,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
atomic_set(&rt->dst.__refcnt, 1);
rt->dst.dev = br->dev;
rt->dst.path = &rt->dst;
- dst_metric_set(&rt->dst, RTAX_MTU, 1500);
+ dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
rt->dst.flags = DST_NOXFRM;
rt->dst.ops = &fake_dst_ops;
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 1a92b369c82..2b5ca1a0054 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1883,14 +1883,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
struct xt_target *wt;
void *dst = NULL;
int off, pad = 0;
- unsigned int size_kern, entry_offset, match_size = mwt->match_size;
+ unsigned int size_kern, match_size = mwt->match_size;
strlcpy(name, mwt->u.name, sizeof(name));
if (state->buf_kern_start)
dst = state->buf_kern_start + state->buf_kern_offset;
- entry_offset = (unsigned char *) mwt - base;
switch (compat_mwt) {
case EBT_COMPAT_MATCH:
match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
@@ -1933,6 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
size_kern = wt->targetsize;
module_put(wt->me);
break;
+
+ default:
+ return -EINVAL;
}
state->buf_kern_offset += match_size + off;
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 649ebacaf6b..adbb424403d 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -139,17 +139,14 @@ static void close_work(struct work_struct *work)
struct chnl_net *dev = NULL;
struct list_head *list_node;
struct list_head *_tmp;
- /* May be called with or without RTNL lock held */
- int islocked = rtnl_is_locked();
- if (!islocked)
- rtnl_lock();
+
+ rtnl_lock();
list_for_each_safe(list_node, _tmp, &chnl_net_list) {
dev = list_entry(list_node, struct chnl_net, list_field);
if (dev->state == CAIF_SHUTDOWN)
dev_close(dev->netdev);
}
- if (!islocked)
- rtnl_unlock();
+ rtnl_unlock();
}
static DECLARE_WORK(close_worker, close_work);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index cced806098a..184a6572b67 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -165,9 +165,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
- seq_printf(m, ">>> socket %p", sk->sk_socket);
- seq_printf(m, " / sk %p", sk);
- seq_printf(m, " / bo %p", bo);
+ seq_printf(m, ">>> socket %pK", sk->sk_socket);
+ seq_printf(m, " / sk %pK", sk);
+ seq_printf(m, " / bo %pK", bo);
seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
seq_printf(m, " <<<\n");
diff --git a/net/can/proc.c b/net/can/proc.c
index f4265cc9c3f..0016f733969 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -204,12 +204,11 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
hlist_for_each_entry_rcu(r, n, rx_list, list) {
char *fmt = (r->can_id & CAN_EFF_FLAG)?
- " %-5s %08X %08x %08x %08x %8ld %s\n" :
- " %-5s %03X %08x %08lx %08lx %8ld %s\n";
+ " %-5s %08x %08x %pK %pK %8ld %s\n" :
+ " %-5s %03x %08x %pK %pK %8ld %s\n";
seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
- (unsigned long)r->func, (unsigned long)r->data,
- r->matches, r->ident);
+ r->func, r->data, r->matches, r->ident);
}
}
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index e15a82ccc05..78b55f49de7 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -76,7 +76,8 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
break;
default:
- sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
+ snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %d)",
+ (int)ss->ss_family);
}
return s;
@@ -598,7 +599,7 @@ static void prepare_write_keepalive(struct ceph_connection *con)
* Connection negotiation.
*/
-static void prepare_connect_authorizer(struct ceph_connection *con)
+static int prepare_connect_authorizer(struct ceph_connection *con)
{
void *auth_buf;
int auth_len = 0;
@@ -612,13 +613,20 @@ static void prepare_connect_authorizer(struct ceph_connection *con)
con->auth_retry);
mutex_lock(&con->mutex);
+ if (test_bit(CLOSED, &con->state) ||
+ test_bit(OPENING, &con->state))
+ return -EAGAIN;
+
con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
con->out_connect.authorizer_len = cpu_to_le32(auth_len);
- con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
- con->out_kvec[con->out_kvec_left].iov_len = auth_len;
- con->out_kvec_left++;
- con->out_kvec_bytes += auth_len;
+ if (auth_len) {
+ con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
+ con->out_kvec[con->out_kvec_left].iov_len = auth_len;
+ con->out_kvec_left++;
+ con->out_kvec_bytes += auth_len;
+ }
+ return 0;
}
/*
@@ -640,9 +648,9 @@ static void prepare_write_banner(struct ceph_messenger *msgr,
set_bit(WRITE_PENDING, &con->state);
}
-static void prepare_write_connect(struct ceph_messenger *msgr,
- struct ceph_connection *con,
- int after_banner)
+static int prepare_write_connect(struct ceph_messenger *msgr,
+ struct ceph_connection *con,
+ int after_banner)
{
unsigned global_seq = get_global_seq(con->msgr, 0);
int proto;
@@ -683,7 +691,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
con->out_more = 0;
set_bit(WRITE_PENDING, &con->state);
- prepare_connect_authorizer(con);
+ return prepare_connect_authorizer(con);
}
@@ -1065,8 +1073,10 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
switch (ss->ss_family) {
case AF_INET:
((struct sockaddr_in *)ss)->sin_port = htons(p);
+ break;
case AF_INET6:
((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
+ break;
}
}
@@ -1216,6 +1226,7 @@ static int process_connect(struct ceph_connection *con)
u64 sup_feat = con->msgr->supported_features;
u64 req_feat = con->msgr->required_features;
u64 server_feat = le64_to_cpu(con->in_reply.features);
+ int ret;
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
@@ -1250,7 +1261,9 @@ static int process_connect(struct ceph_connection *con)
return -1;
}
con->auth_retry = 1;
- prepare_write_connect(con->msgr, con, 0);
+ ret = prepare_write_connect(con->msgr, con, 0);
+ if (ret < 0)
+ return ret;
prepare_read_connect(con);
break;
@@ -1277,6 +1290,9 @@ static int process_connect(struct ceph_connection *con)
if (con->ops->peer_reset)
con->ops->peer_reset(con);
mutex_lock(&con->mutex);
+ if (test_bit(CLOSED, &con->state) ||
+ test_bit(OPENING, &con->state))
+ return -EAGAIN;
break;
case CEPH_MSGR_TAG_RETRY_SESSION:
@@ -1341,7 +1357,9 @@ static int process_connect(struct ceph_connection *con)
* to WAIT. This shouldn't happen if we are the
* client.
*/
- pr_err("process_connect peer connecting WAIT\n");
+ pr_err("process_connect got WAIT as client\n");
+ con->error_msg = "protocol error, got WAIT as client";
+ return -1;
default:
pr_err("connect protocol error, will retry\n");
@@ -1810,6 +1828,17 @@ static int try_read(struct ceph_connection *con)
more:
dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
con->in_base_pos);
+
+ /*
+ * process_connect and process_message drop and re-take
+ * con->mutex. make sure we handle a racing close or reopen.
+ */
+ if (test_bit(CLOSED, &con->state) ||
+ test_bit(OPENING, &con->state)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
if (test_bit(CONNECTING, &con->state)) {
if (!test_bit(NEGOTIATING, &con->state)) {
dout("try_read connecting\n");
@@ -1938,8 +1967,10 @@ static void con_work(struct work_struct *work)
{
struct ceph_connection *con = container_of(work, struct ceph_connection,
work.work);
+ int ret;
mutex_lock(&con->mutex);
+restart:
if (test_and_clear_bit(BACKOFF, &con->state)) {
dout("con_work %p backing off\n", con);
if (queue_delayed_work(ceph_msgr_wq, &con->work,
@@ -1969,18 +2000,31 @@ static void con_work(struct work_struct *work)
con_close_socket(con);
}
- if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
- try_read(con) < 0 ||
- try_write(con) < 0) {
- mutex_unlock(&con->mutex);
- ceph_fault(con); /* error/fault path */
- goto done_unlocked;
- }
+ if (test_and_clear_bit(SOCK_CLOSED, &con->state))
+ goto fault;
+
+ ret = try_read(con);
+ if (ret == -EAGAIN)
+ goto restart;
+ if (ret < 0)
+ goto fault;
+
+ ret = try_write(con);
+ if (ret == -EAGAIN)
+ goto restart;
+ if (ret < 0)
+ goto fault;
done:
mutex_unlock(&con->mutex);
done_unlocked:
con->ops->put(con);
+ return;
+
+fault:
+ mutex_unlock(&con->mutex);
+ ceph_fault(con); /* error/fault path */
+ goto done_unlocked;
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6b5dda1cb5d..6ea2b892f44 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -124,7 +124,7 @@ static void calc_layout(struct ceph_osd_client *osdc,
ceph_calc_raw_layout(osdc, layout, vino.snap, off,
plen, &bno, req, op);
- sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
+ snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno);
req->r_oid_len = strlen(req->r_oid);
}
@@ -1421,6 +1421,15 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
done:
downgrade_write(&osdc->map_sem);
ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
+
+ /*
+ * subscribe to subsequent osdmap updates if full to ensure
+ * we find out when we are no longer full and stop returning
+ * ENOSPC.
+ */
+ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+
send_queued(osdc);
up_read(&osdc->map_sem);
wake_up_all(&osdc->client->auth_wq);
@@ -1677,8 +1686,14 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
*/
if (req->r_sent == 0) {
rc = __map_request(osdc, req);
- if (rc < 0)
+ if (rc < 0) {
+ if (nofail) {
+ dout("osdc_start_request failed map, "
+ " will retry %lld\n", req->r_tid);
+ rc = 0;
+ }
goto out_unlock;
+ }
if (req->r_osd == NULL) {
dout("send_request %p no up osds in pg\n", req);
ceph_monc_request_next_osdmap(&osdc->client->monc);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 71603ac3dff..e97c3588c3e 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -765,7 +765,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
}
map->epoch++;
- map->modified = map->modified;
+ map->modified = modified;
if (newcrush) {
if (map->crush)
crush_destroy(map->crush);
@@ -830,15 +830,20 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
map->osd_addr[osd] = addr;
}
- /* new_down */
+ /* new_state */
ceph_decode_32_safe(p, end, len, bad);
while (len--) {
u32 osd;
+ u8 xorstate;
ceph_decode_32_safe(p, end, osd, bad);
+ xorstate = **(u8 **)p;
(*p)++; /* clean flag */
- pr_info("osd%d down\n", osd);
+ if (xorstate == 0)
+ xorstate = CEPH_OSD_UP;
+ if (xorstate & CEPH_OSD_UP)
+ pr_info("osd%d down\n", osd);
if (osd < map->max_osd)
- map->osd_state[osd] &= ~CEPH_OSD_UP;
+ map->osd_state[osd] ^= xorstate;
}
/* new_weight */
diff --git a/net/core/dev.c b/net/core/dev.c
index bcb05cb799c..939307891e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1308,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev)
{
u32 flags;
+ /*
+ * If we're trying to disable lro on a vlan device
+ * use the underlying physical device instead
+ */
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+
if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
flags = dev->ethtool_ops->get_flags(dev);
else
@@ -2089,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
+ unsigned int skb_len;
if (likely(!skb->next)) {
u32 features;
@@ -2139,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
+ skb_len = skb->len;
rc = ops->ndo_start_xmit(skb, dev);
- trace_net_dev_xmit(skb, rc);
+ trace_net_dev_xmit(skb, rc, dev, skb_len);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
@@ -2160,8 +2169,9 @@ gso:
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
+ skb_len = nskb->len;
rc = ops->ndo_start_xmit(nskb, dev);
- trace_net_dev_xmit(nskb, rc);
+ trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
@@ -5954,7 +5964,10 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
- synchronize_rcu();
+ if (rtnl_is_locked())
+ synchronize_rcu_expedited();
+ else
+ synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
diff --git a/net/core/dst.c b/net/core/dst.c
index 81a4fa1c95e..9ccca038444 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -315,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
unsigned long prev, new;
- new = (unsigned long) dst_default_metrics;
+ new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev == old)
kfree(__DST_METRICS_PTR(old));
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 84e7304532e..fd14116ad7f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -233,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev,
return 1;
}
+static int ethtool_set_flags_compat(struct net_device *dev,
+ int (*legacy_set)(struct net_device *, u32),
+ struct ethtool_set_features_block *features, u32 mask)
+{
+ u32 value;
+
+ if (!legacy_set)
+ return 0;
+
+ if (!(features[0].valid & mask))
+ return 0;
+
+ value = dev->features & ~features[0].valid;
+ value |= features[0].requested;
+
+ features[0].valid &= ~mask;
+
+ if (legacy_set(dev, value & mask) < 0)
+ netdev_info(dev, "Legacy flags change failed\n");
+
+ return 1;
+}
+
static int ethtool_set_features_compat(struct net_device *dev,
struct ethtool_set_features_block *features)
{
@@ -249,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev,
features, NETIF_F_ALL_TSO);
compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
features, NETIF_F_RXCSUM);
- compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
+ compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
features, flags_dup_features);
return compat;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3911586e12e..008dc70b064 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -602,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
skip:
idx++;
}
+ rcu_read_unlock();
cb->args[1] = idx;
rules_ops_put(ops);
diff --git a/net/core/filter.c b/net/core/filter.c
index 0eb8c4466ea..36f975fa87c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,6 +38,7 @@
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
+#include <linux/ratelimit.h>
/* No hurry in this branch */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
@@ -350,7 +351,9 @@ load_b:
continue;
}
default:
- WARN_ON(1);
+ WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
+ fentry->code, fentry->jt,
+ fentry->jf, fentry->k);
return 0;
}
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2e2dce6583e..6c6b86d0da1 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -8,6 +8,8 @@
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -302,6 +304,28 @@ void __put_net(struct net *net)
}
EXPORT_SYMBOL_GPL(__put_net);
+struct net *get_net_ns_by_fd(int fd)
+{
+ struct proc_inode *ei;
+ struct file *file;
+ struct net *net;
+
+ net = ERR_PTR(-EINVAL);
+ file = proc_ns_fget(fd);
+ if (!file)
+ goto out;
+
+ ei = PROC_I(file->f_dentry->d_inode);
+ if (ei->ns_ops != &netns_operations)
+ goto out;
+
+ net = get_net(ei->ns);
+out:
+ if (file)
+ fput(file);
+ return net;
+}
+
#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
@@ -309,6 +333,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
return ERR_PTR(-EINVAL);
return old_net;
}
+
+struct net *get_net_ns_by_fd(int fd)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif
struct net *get_net_ns_by_pid(pid_t pid)
@@ -561,3 +590,39 @@ void unregister_pernet_device(struct pernet_operations *ops)
mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
+
+#ifdef CONFIG_NET_NS
+static void *netns_get(struct task_struct *task)
+{
+ struct net *net = NULL;
+ struct nsproxy *nsproxy;
+
+ rcu_read_lock();
+ nsproxy = task_nsproxy(task);
+ if (nsproxy)
+ net = get_net(nsproxy->net_ns);
+ rcu_read_unlock();
+
+ return net;
+}
+
+static void netns_put(void *ns)
+{
+ put_net(ns);
+}
+
+static int netns_install(struct nsproxy *nsproxy, void *ns)
+{
+ put_net(nsproxy->net_ns);
+ nsproxy->net_ns = get_net(ns);
+ return 0;
+}
+
+const struct proc_ns_operations netns_operations = {
+ .name = "net",
+ .type = CLONE_NEWNET,
+ .get = netns_get,
+ .put = netns_put,
+ .install = netns_install,
+};
+#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d1644e317e7..abd936d8a71 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct nlattr *attr, *af_spec;
struct rtnl_af_ops *af_ops;
+ ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -1045,6 +1046,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_LINKMODE] = { .type = NLA_U8 },
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
+ [IFLA_NET_NS_FD] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
[IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
[IFLA_VF_PORTS] = { .type = NLA_NESTED },
@@ -1093,6 +1095,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
*/
if (tb[IFLA_NET_NS_PID])
net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
+ else if (tb[IFLA_NET_NS_FD])
+ net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
else
net = get_net(src_net);
return net;
@@ -1223,7 +1227,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
int send_addr_notify = 0;
int err;
- if (tb[IFLA_NET_NS_PID]) {
+ if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
struct net *net = rtnl_link_get_net(dev_net(dev), tb);
if (IS_ERR(net)) {
err = PTR_ERR(net);
@@ -1876,6 +1880,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
int min_len;
int family;
int type;
+ int err;
type = nlh->nlmsg_type;
if (type > RTM_MAX)
@@ -1902,8 +1907,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (dumpit == NULL)
return -EOPNOTSUPP;
+ __rtnl_unlock();
rtnl = net->rtnl;
- return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+ err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+ rtnl_lock();
+ return err;
}
memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
@@ -1975,7 +1983,7 @@ static int __net_init rtnetlink_net_init(struct net *net)
{
struct sock *sk;
sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
- rtnetlink_rcv, NULL, THIS_MODULE);
+ rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
if (!sk)
return -ENOMEM;
net->rtnl = sk;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a829e3f60ae..77a65f03148 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -17,6 +17,7 @@
#include <net/ip.h>
#include <net/sock.h>
+#include <net/net_ratelimit.h>
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(ctl_table *table, int write,
diff --git a/net/core/utils.c b/net/core/utils.c
index 2012bc797f9..386e263f606 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -27,6 +27,7 @@
#include <linux/ratelimit.h>
#include <net/sock.h>
+#include <net/net_ratelimit.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index cfa7a5e1c5c..fa000d26dc6 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -212,10 +212,12 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
int err = key->type_data.x[0];
seq_puts(m, key->description);
- if (err)
- seq_printf(m, ": %d", err);
- else
- seq_printf(m, ": %u", key->datalen);
+ if (key_is_instantiated(key)) {
+ if (err)
+ seq_printf(m, ": %d", err);
+ else
+ seq_printf(m, ": %u", key->datalen);
+ }
}
/*
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index cc1463156cd..9c1926027a2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,6 +465,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < sizeof(struct sockaddr_in))
goto out;
+ if (addr->sin_family != AF_INET)
+ goto out;
+
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
/* Not specified by any standard per-se, however it breaks too
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 672e476c8c8..f1d27f6c935 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1155,20 +1155,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
if (!in_dev->dead) {
if (IGMP_V1_SEEN(in_dev))
- goto done;
+ return;
if (IGMP_V2_SEEN(in_dev)) {
if (reporter)
igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
- goto done;
+ return;
}
/* IGMPv3 */
igmpv3_add_delrec(in_dev, im);
igmp_ifc_event(in_dev);
}
-done:
#endif
- ip_mc_clear_src(im);
}
static void igmp_group_added(struct ip_mc_list *im)
@@ -1305,6 +1303,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
*ip = i->next_rcu;
in_dev->mc_count--;
igmp_group_dropped(i);
+ ip_mc_clear_src(i);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
@@ -1414,7 +1413,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
- igmp_group_dropped(i);
+ /* We've dropped the groups in ip_mc_down already */
+ ip_mc_clear_src(i);
ip_ma_put(i);
}
}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 61fac4cabc7..c14d88ad348 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
* This struct holds the first and last local port number.
*/
struct local_ports sysctl_local_ports __read_mostly = {
- .lock = SEQLOCK_UNLOCKED,
+ .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
.range = { 32768, 61000 },
};
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9df4e635fb5..ce616d92cc5 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
- if (!list_empty(&p->unused)) {
- spin_lock_bh(&unused_peers.lock);
- list_del_init(&p->unused);
- spin_unlock_bh(&unused_peers.lock);
- }
+ spin_lock_bh(&unused_peers.lock);
+ list_del_init(&p->unused);
+ spin_unlock_bh(&unused_peers.lock);
}
static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
u; \
})
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+ int cur, old = atomic_read(ptr);
+
+ while (old != u) {
+ *newv = old + a;
+ cur = atomic_cmpxchg(ptr, old, *newv);
+ if (cur == old)
+ return true;
+ old = cur;
+ }
+ return false;
+}
+
/*
* Called with rcu_read_lock()
* Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
* We exit from this function if number of links exceeds PEER_MAXDEPTH
*/
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
- struct inet_peer_base *base)
+ struct inet_peer_base *base,
+ int *newrefcnt)
{
struct inet_peer *u = rcu_dereference(base->root);
int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
* distinction between an unused entry (refcnt=0) and
* a freed one.
*/
- if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+ if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
u = NULL;
return u;
}
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
unsigned int sequence;
- int invalidated;
+ int invalidated, newrefcnt = 0;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
*/
rcu_read_lock();
sequence = read_seqbegin(&base->lock);
- p = lookup_rcu(daddr, base);
+ p = lookup_rcu(daddr, base, &newrefcnt);
invalidated = read_seqretry(&base->lock, sequence);
rcu_read_unlock();
if (p) {
- /* The existing node has been found.
+found: /* The existing node has been found.
* Remove the entry from unused list if it was there.
*/
- unlink_from_unused(p);
+ if (newrefcnt == 1)
+ unlink_from_unused(p);
return p;
}
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
write_seqlock_bh(&base->lock);
p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
- atomic_inc(&p->refcnt);
+ newrefcnt = atomic_inc_return(&p->refcnt);
write_sequnlock_bh(&base->lock);
- /* Remove the entry from unused list if it was there. */
- unlink_from_unused(p);
- return p;
+ goto found;
}
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index c3118e1cd3b..ec93335901d 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
@@ -350,7 +351,7 @@ int ip_options_compile(struct net *net,
goto error;
}
if (optptr[2] <= optlen) {
- __be32 *timeptr = NULL;
+ unsigned char *timeptr = NULL;
if (optptr[2]+3 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
@@ -359,7 +360,7 @@ int ip_options_compile(struct net *net,
case IPOPT_TS_TSONLY:
opt->ts = optptr - iph;
if (skb)
- timeptr = (__be32*)&optptr[optptr[2]-1];
+ timeptr = &optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
break;
@@ -371,7 +372,7 @@ int ip_options_compile(struct net *net,
opt->ts = optptr - iph;
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
- timeptr = (__be32*)&optptr[optptr[2]+3];
+ timeptr = &optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
opt->ts_needtime = 1;
@@ -389,7 +390,7 @@ int ip_options_compile(struct net *net,
if (inet_addr_type(net, addr) == RTN_UNICAST)
break;
if (skb)
- timeptr = (__be32*)&optptr[optptr[2]+3];
+ timeptr = &optptr[optptr[2]+3];
}
opt->ts_needtime = 1;
optptr[2] += 8;
@@ -403,10 +404,10 @@ int ip_options_compile(struct net *net,
}
if (timeptr) {
struct timespec tv;
- __be32 midtime;
+ u32 midtime;
getnstimeofday(&tv);
- midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
- memcpy(timeptr, &midtime, sizeof(__be32));
+ midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
+ put_unaligned_be32(midtime, timeptr);
opt->is_changed = 1;
}
} else {
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1f3bb11490c..9aaa67165f4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -137,9 +137,6 @@ static void ping_v4_unhash(struct sock *sk)
struct inet_sock *isk = inet_sk(sk);
pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
if (sk_hashed(sk)) {
- struct hlist_nulls_head *hslot;
-
- hslot = ping_hashslot(&ping_table, sock_net(sk), isk->inet_num);
write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sock_put(sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11e1780455f..c9893d43242 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -979,7 +979,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3c8d9b6f1ea..a7d6671e33b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2371,7 +2371,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
int ttd = req->expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
i,
ireq->loc_addr,
ntohs(inet_sk(sk)->inet_sport),
@@ -2426,7 +2426,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
- "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
+ "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
i, src, srcp, dest, destp, sk->sk_state,
tp->write_seq - tp->snd_una,
rx_queue,
@@ -2461,7 +2461,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
srcp = ntohs(tw->tw_sport);
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
atomic_read(&tw->tw_refcnt), tw, len);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 599374f65c7..abca870d8ff 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2090,7 +2090,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ae64984f81a..cc7313b8f7e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1240,7 +1240,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
srcp = inet_sk(sp)->inet_num;
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 868366470b4..d1fd28711ba 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2036,7 +2036,7 @@ static void get_openreq6(struct seq_file *seq,
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
@@ -2087,7 +2087,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -2129,7 +2129,7 @@ static void get_timewait6_sock(struct seq_file *seq,
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index fc0c42a88e5..41f8c9c08db 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1391,7 +1391,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
srcp = ntohs(inet->inet_sport);
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index a6770a04e3b..4fe1db12d2a 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -241,7 +241,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
+ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index a15c0152495..7f9124914b1 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -54,7 +54,7 @@
#include <asm/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
#include <asm/smp.h>
/*
diff --git a/net/key/af_key.c b/net/key/af_key.c
index d62401c2568..8f92cf8116e 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3656,7 +3656,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
if (v == SEQ_START_TOKEN)
seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
else
- seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
+ seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
s,
atomic_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7dfbe71dc63..49d4f869e0b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -384,11 +384,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
int i;
enum nl80211_channel_type orig_ct;
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
if (local->scan_sdata == sdata)
ieee80211_scan_cancel(local);
- clear_bit(SDATA_STATE_RUNNING, &sdata->state);
-
/*
* Stop TX on this interface first.
*/
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d7b08db8e5..866f269183c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -752,11 +752,25 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
- /* mac80211 doesn't support more than 1 channel */
- for (i = 0; i < hw->wiphy->n_iface_combinations; i++)
- if (hw->wiphy->iface_combinations[i].num_different_channels > 1)
+ /*
+ * mac80211 doesn't support more than 1 channel, and also not more
+ * than one IBSS interface
+ */
+ for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
+ const struct ieee80211_iface_combination *c;
+ int j;
+
+ c = &hw->wiphy->iface_combinations[i];
+
+ if (c->num_different_channels > 1)
return -EINVAL;
+ for (j = 0; j < c->n_limits; j++)
+ if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
+ c->limits[j].max > 1)
+ return -EINVAL;
+ }
+
#ifndef CONFIG_MAC80211_MESH
/* mesh depends on Kconfig, but drivers should set it if they want */
local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
@@ -1076,6 +1090,8 @@ static void __exit ieee80211_exit(void)
ieee80211s_stop();
ieee80211_iface_exit();
+
+ rcu_barrier();
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e7c5fddb480..249e733362e 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -120,6 +120,7 @@ struct mesh_path {
* buckets
* @mean_chain_len: maximum average length for the hash buckets' list, if it is
* reached, the table will grow
+ * rcu_head: RCU head to free the table
*/
struct mesh_table {
/* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
int size_order;
int mean_chain_len;
+
+ struct rcu_head rcu_head;
};
/* Recent multicast cache */
@@ -286,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
}
-#define for_each_mesh_entry(x, p, node, i) \
- for (i = 0; i <= x->hash_mask; i++) \
- hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
-
void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 83ce48e3191..0d2faacc3e8 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,8 +36,8 @@ struct mpath_node {
struct mesh_path *mpath;
};
-static struct mesh_table *mesh_paths;
-static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+static struct mesh_table __rcu *mesh_paths;
+static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
@@ -48,17 +48,40 @@ int mesh_paths_generation;
static DEFINE_RWLOCK(pathtbl_resize_lock);
+static inline struct mesh_table *resize_dereference_mesh_paths(void)
+{
+ return rcu_dereference_protected(mesh_paths,
+ lockdep_is_held(&pathtbl_resize_lock));
+}
+
+static inline struct mesh_table *resize_dereference_mpp_paths(void)
+{
+ return rcu_dereference_protected(mpp_paths,
+ lockdep_is_held(&pathtbl_resize_lock));
+}
+
+/*
+ * CAREFUL -- "tbl" must not be an expression,
+ * in particular not an rcu_dereference(), since
+ * it's used twice. So it is illegal to do
+ * for_each_mesh_entry(rcu_dereference(...), ...)
+ */
+#define for_each_mesh_entry(tbl, p, node, i) \
+ for (i = 0; i <= tbl->hash_mask; i++) \
+ hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+
+
static struct mesh_table *mesh_table_alloc(int size_order)
{
int i;
struct mesh_table *newtbl;
- newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
+ newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
if (!newtbl)
return NULL;
newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
- (1 << size_order), GFP_KERNEL);
+ (1 << size_order), GFP_ATOMIC);
if (!newtbl->hash_buckets) {
kfree(newtbl);
@@ -66,7 +89,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
}
newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
- (1 << size_order), GFP_KERNEL);
+ (1 << size_order), GFP_ATOMIC);
if (!newtbl->hashwlock) {
kfree(newtbl->hash_buckets);
kfree(newtbl);
@@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
*/
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
+ struct mesh_table *tbl = rcu_dereference(mesh_paths);
struct mpath_node *node;
struct hlist_node *p;
int i;
int j = 0;
- for_each_mesh_entry(mesh_paths, p, node, i) {
+ for_each_mesh_entry(tbl, p, node, i) {
if (sdata && node->mpath->sdata != sdata)
continue;
if (j++ == idx) {
@@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
+ struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
@@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
spin_lock_init(&new_mpath->state_lock);
init_timer(&new_mpath->timer);
- hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
- bucket = &mesh_paths->hash_buckets[hash_idx];
+ tbl = resize_dereference_mesh_paths();
+
+ hash_idx = mesh_table_hash(dst, sdata, tbl);
+ bucket = &tbl->hash_buckets[hash_idx];
- spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
+ spin_lock_bh(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
@@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
}
hlist_add_head_rcu(&new_node->list, bucket);
- if (atomic_inc_return(&mesh_paths->entries) >=
- mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
+ if (atomic_inc_return(&tbl->entries) >=
+ tbl->mean_chain_len * (tbl->hash_mask + 1))
grow = 1;
mesh_paths_generation++;
- spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+ spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
return 0;
err_exists:
- spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+ spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
@@ -370,58 +397,59 @@ err_path_alloc:
return err;
}
+static void mesh_table_free_rcu(struct rcu_head *rcu)
+{
+ struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
+
+ mesh_table_free(tbl, false);
+}
+
void mesh_mpath_table_grow(void)
{
struct mesh_table *oldtbl, *newtbl;
- rcu_read_lock();
- newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
- if (!newtbl)
- return;
write_lock_bh(&pathtbl_resize_lock);
- oldtbl = mesh_paths;
- if (mesh_table_grow(mesh_paths, newtbl) < 0) {
- rcu_read_unlock();
+ oldtbl = resize_dereference_mesh_paths();
+ newtbl = mesh_table_alloc(oldtbl->size_order + 1);
+ if (!newtbl)
+ goto out;
+ if (mesh_table_grow(oldtbl, newtbl) < 0) {
__mesh_table_free(newtbl);
- write_unlock_bh(&pathtbl_resize_lock);
- return;
+ goto out;
}
- rcu_read_unlock();
rcu_assign_pointer(mesh_paths, newtbl);
- write_unlock_bh(&pathtbl_resize_lock);
- synchronize_rcu();
- mesh_table_free(oldtbl, false);
+ call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+
+ out:
+ write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
struct mesh_table *oldtbl, *newtbl;
- rcu_read_lock();
- newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
- if (!newtbl)
- return;
write_lock_bh(&pathtbl_resize_lock);
- oldtbl = mpp_paths;
- if (mesh_table_grow(mpp_paths, newtbl) < 0) {
- rcu_read_unlock();
+ oldtbl = resize_dereference_mpp_paths();
+ newtbl = mesh_table_alloc(oldtbl->size_order + 1);
+ if (!newtbl)
+ goto out;
+ if (mesh_table_grow(oldtbl, newtbl) < 0) {
__mesh_table_free(newtbl);
- write_unlock_bh(&pathtbl_resize_lock);
- return;
+ goto out;
}
- rcu_read_unlock();
rcu_assign_pointer(mpp_paths, newtbl);
- write_unlock_bh(&pathtbl_resize_lock);
+ call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
- synchronize_rcu();
- mesh_table_free(oldtbl, false);
+ out:
+ write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
+ struct mesh_table *tbl;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
@@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
- hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
- bucket = &mpp_paths->hash_buckets[hash_idx];
+ tbl = resize_dereference_mpp_paths();
- spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);
+ hash_idx = mesh_table_hash(dst, sdata, tbl);
+ bucket = &tbl->hash_buckets[hash_idx];
+
+ spin_lock_bh(&tbl->hashwlock[hash_idx]);
err = -EEXIST;
hlist_for_each_entry(node, n, bucket, list) {
@@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
}
hlist_add_head_rcu(&new_node->list, bucket);
- if (atomic_inc_return(&mpp_paths->entries) >=
- mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+ if (atomic_inc_return(&tbl->entries) >=
+ tbl->mean_chain_len * (tbl->hash_mask + 1))
grow = 1;
- spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+ spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
if (grow) {
set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
return 0;
err_exists:
- spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+ spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
kfree(new_node);
err_node_alloc:
@@ -502,6 +532,7 @@ err_path_alloc:
*/
void mesh_plink_broken(struct sta_info *sta)
{
+ struct mesh_table *tbl;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
struct mpath_node *node;
@@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
int i;
rcu_read_lock();
- for_each_mesh_entry(mesh_paths, p, node, i) {
+ tbl = rcu_dereference(mesh_paths);
+ for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
spin_lock_bh(&mpath->state_lock);
- if (mpath->next_hop == sta &&
+ if (rcu_dereference(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
*/
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
+ struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
- for_each_mesh_entry(mesh_paths, p, node, i) {
+ rcu_read_lock();
+ tbl = rcu_dereference(mesh_paths);
+ for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (mpath->next_hop == sta)
+ if (rcu_dereference(mpath->next_hop) == sta)
mesh_path_del(mpath->dst, mpath->sdata);
}
+ rcu_read_unlock();
}
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
+ struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
- for_each_mesh_entry(mesh_paths, p, node, i) {
+ rcu_read_lock();
+ tbl = rcu_dereference(mesh_paths);
+ for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
if (mpath->sdata == sdata)
mesh_path_del(mpath->dst, mpath->sdata);
}
+ rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
*/
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
+ struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_head *bucket;
@@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
int err = 0;
read_lock_bh(&pathtbl_resize_lock);
- hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
- bucket = &mesh_paths->hash_buckets[hash_idx];
+ tbl = resize_dereference_mesh_paths();
+ hash_idx = mesh_table_hash(addr, sdata, tbl);
+ bucket = &tbl->hash_buckets[hash_idx];
- spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
+ spin_lock_bh(&tbl->hashwlock[hash_idx]);
hlist_for_each_entry(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
+ memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
spin_lock_bh(&mpath->state_lock);
mpath->flags |= MESH_PATH_RESOLVING;
hlist_del_rcu(&node->list);
call_rcu(&node->rcu, mesh_path_node_reclaim);
- atomic_dec(&mesh_paths->entries);
+ atomic_dec(&tbl->entries);
spin_unlock_bh(&mpath->state_lock);
goto enddel;
}
@@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
err = -ENXIO;
enddel:
mesh_paths_generation++;
- spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+ spin_unlock_bh(&tbl->hashwlock[hash_idx]);
read_unlock_bh(&pathtbl_resize_lock);
return err;
}
@@ -719,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
mpath = node->mpath;
hlist_del_rcu(p);
- if (free_leafs)
+ if (free_leafs) {
+ del_timer_sync(&mpath->timer);
kfree(mpath);
+ }
kfree(node);
}
@@ -745,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
int mesh_pathtbl_init(void)
{
- mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
- if (!mesh_paths)
+ struct mesh_table *tbl_path, *tbl_mpp;
+
+ tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+ if (!tbl_path)
return -ENOMEM;
- mesh_paths->free_node = &mesh_path_node_free;
- mesh_paths->copy_node = &mesh_path_node_copy;
- mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_path->free_node = &mesh_path_node_free;
+ tbl_path->copy_node = &mesh_path_node_copy;
+ tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
- mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
- if (!mpp_paths) {
- mesh_table_free(mesh_paths, true);
+ tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+ if (!tbl_mpp) {
+ mesh_table_free(tbl_path, true);
return -ENOMEM;
}
- mpp_paths->free_node = &mesh_path_node_free;
- mpp_paths->copy_node = &mesh_path_node_copy;
- mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_mpp->free_node = &mesh_path_node_free;
+ tbl_mpp->copy_node = &mesh_path_node_copy;
+ tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+
+ /* Need no locking since this is during init */
+ RCU_INIT_POINTER(mesh_paths, tbl_path);
+ RCU_INIT_POINTER(mpp_paths, tbl_mpp);
return 0;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
+ struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
- read_lock_bh(&pathtbl_resize_lock);
- for_each_mesh_entry(mesh_paths, p, node, i) {
+ rcu_read_lock();
+ tbl = rcu_dereference(mesh_paths);
+ for_each_mesh_entry(tbl, p, node, i) {
if (node->mpath->sdata != sdata)
continue;
mpath = node->mpath;
spin_lock_bh(&mpath->state_lock);
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
- time_after(jiffies,
- mpath->exp_time + MESH_PATH_EXPIRE)) {
+ time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
spin_unlock_bh(&mpath->state_lock);
mesh_path_del(mpath->dst, mpath->sdata);
} else
spin_unlock_bh(&mpath->state_lock);
}
- read_unlock_bh(&pathtbl_resize_lock);
+ rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
- mesh_table_free(mesh_paths, true);
- mesh_table_free(mpp_paths, true);
+ /* no need for locking during exit path */
+ mesh_table_free(rcu_dereference_raw(mesh_paths), true);
+ mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}
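
The mesh path table hunks above all follow the same RCU idiom: writers take the resize lock, look the table up with a lock-protected dereference, publish the replacement with rcu_assign_pointer(), and defer the free to call_rcu() instead of blocking in synchronize_rcu(). A minimal sketch of that idiom under stated assumptions; the demo_* type, lock and helpers are placeholders, not the mac80211 symbols:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_table {
	int size_order;
	struct rcu_head rcu_head;
};

static struct demo_table __rcu *demo_tbl;	/* populated at init time */
static DEFINE_RWLOCK(demo_resize_lock);

static void demo_table_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_table, rcu_head));
}

static void demo_table_grow(void)
{
	struct demo_table *oldtbl, *newtbl;

	write_lock_bh(&demo_resize_lock);
	/* The resize lock excludes other writers, so a lock-protected
	 * dereference (not rcu_read_lock) is the right primitive here. */
	oldtbl = rcu_dereference_protected(demo_tbl,
					   lockdep_is_held(&demo_resize_lock));
	newtbl = kzalloc(sizeof(*newtbl), GFP_ATOMIC);
	if (!newtbl)
		goto out;
	newtbl->size_order = oldtbl->size_order + 1;
	/* ... copy the entries across ... */
	rcu_assign_pointer(demo_tbl, newtbl);
	/* Readers may still be walking oldtbl; free it only after a
	 * grace period, without sleeping under the write lock. */
	call_rcu(&oldtbl->rcu_head, demo_table_free_rcu);
out:
	write_unlock_bh(&demo_resize_lock);
}
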
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4f6b2675e41..456cccf26b5 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -232,6 +232,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
}
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
/* channel_type change automatically detected */
ieee80211_hw_config(local, 0);
@@ -245,6 +248,9 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
}
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
ht_opmode = le16_to_cpu(hti->operation_mode);
/* if bss configuration changed store the new one */
@@ -1089,6 +1095,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
config_changed |= IEEE80211_CONF_CHANGE_PS;
}
+ local->ps_sdata = NULL;
ieee80211_hw_config(local, config_changed);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index d20046b5d8f..58ffa7d069c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -15,7 +15,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/pm_qos_params.h>
-#include <linux/slab.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
#include <net/mac80211.h>
@@ -719,6 +718,11 @@ void ieee80211_scan_work(struct work_struct *work)
* without scheduling a new work
*/
do {
+ if (!ieee80211_sdata_running(sdata)) {
+ aborted = true;
+ goto out_complete;
+ }
+
switch (local->next_scan_state) {
case SCAN_DECISION:
/* if no more bands/channels left, complete scan */
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 72d1ac611fd..8041befc655 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -815,7 +815,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
ip_set_id_t i;
if (unlikely(protocol_failed(attr)))
- return -EPROTO;
+ return -IPSET_ERR_PROTOCOL;
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < ip_set_max; i++)
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 6b5dd6ddaae..af63553fa33 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -411,25 +411,35 @@ static struct ip_vs_app ip_vs_ftp = {
static int __net_init __ip_vs_ftp_init(struct net *net)
{
int i, ret;
- struct ip_vs_app *app = &ip_vs_ftp;
+ struct ip_vs_app *app;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
+ if (!app)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&app->a_list);
+ INIT_LIST_HEAD(&app->incs_list);
+ ipvs->ftp_app = app;
ret = register_ip_vs_app(net, app);
if (ret)
- return ret;
+ goto err_exit;
for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
if (!ports[i])
continue;
ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
if (ret)
- break;
+ goto err_unreg;
pr_info("%s: loaded support on port[%d] = %d\n",
app->name, i, ports[i]);
}
+ return 0;
- if (ret)
- unregister_ip_vs_app(net, app);
-
+err_unreg:
+ unregister_ip_vs_app(net, app);
+err_exit:
+ kfree(ipvs->ftp_app);
return ret;
}
/*
@@ -437,9 +447,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
*/
static void __ip_vs_ftp_exit(struct net *net)
{
- struct ip_vs_app *app = &ip_vs_ftp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- unregister_ip_vs_app(net, app);
+ unregister_ip_vs_app(net, ipvs->ftp_app);
+ kfree(ipvs->ftp_app);
}
static struct pernet_operations ip_vs_ftp_ops = {
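
The ftp helper above stops sharing one static ip_vs_app and instead gives every network namespace its own kmemdup()'d copy, freed again on namespace exit. A rough sketch of that per-netns pattern in isolation, using a hypothetical pernet registration (all demo_* names are invented, and IPVS itself reaches its state through net_ipvs() rather than net_generic()):

#include <linux/in.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct demo_app {
	struct list_head a_list;
	unsigned int protocol;
};

struct demo_net {
	struct demo_app *app;
};

static int demo_net_id __read_mostly;

static const struct demo_app demo_template = {
	.protocol = IPPROTO_TCP,
};

static int __net_init demo_net_init(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	/* Every namespace mutates its own copy of the template. */
	dn->app = kmemdup(&demo_template, sizeof(*dn->app), GFP_KERNEL);
	if (!dn->app)
		return -ENOMEM;
	INIT_LIST_HEAD(&dn->app->a_list);
	return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	kfree(dn->app);
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};

/* registered once from module init: register_pernet_subsys(&demo_net_ops); */
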
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5fe4f3b04ed..6ef64adf736 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1985,7 +1985,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
- seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
+ seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->pid,
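
This and the later %p to %pK conversions (af_packet, phonet, sctp proc) keep socket addresses out of /proc for readers who are not allowed to see kernel pointers. A sketch of how such a line behaves, assuming the kptr_restrict sysctl semantics current at the time; the show function itself is made up:

#include <linux/seq_file.h>
#include <net/sock.h>

/* Illustrative only; a real ->show() gets its sock from the seq iterator. */
static int demo_seq_show(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	/* With %pK the printed value depends on kernel.kptr_restrict:
	 *   0 - behaves like %p and shows the real address
	 *   1 - readers without CAP_SYSLOG see all zeroes
	 *   2 - every reader sees all zeroes
	 */
	seq_printf(seq, "%pK\n", sk);
	return 0;
}
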
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 549527bca87..ba248d93399 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -798,7 +798,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
getnstimeofday(&ts);
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
- h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+ if (vlan_tx_tag_present(skb)) {
+ h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
+ status |= TP_STATUS_VLAN_VALID;
+ } else {
+ h.h2->tp_vlan_tci = 0;
+ }
hdrlen = sizeof(*h.h2);
break;
default:
@@ -1725,8 +1730,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
- aux.tp_vlan_tci = vlan_tx_tag_get(skb);
-
+ if (vlan_tx_tag_present(skb)) {
+ aux.tp_vlan_tci = vlan_tx_tag_get(skb);
+ aux.tp_status |= TP_STATUS_VLAN_VALID;
+ } else {
+ aux.tp_vlan_tci = 0;
+ }
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
@@ -2706,7 +2715,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
const struct packet_sock *po = pkt_sk(s);
seq_printf(seq,
- "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
+ "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
s,
atomic_read(&s->sk_refcnt),
s->sk_type,
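
With the two hunks above, tp_vlan_tci == 0 no longer has to mean "no VLAN tag": user space can test the new TP_STATUS_VLAN_VALID bit instead. A hypothetical consumer of the PACKET_AUXDATA control message illustrating the check (it assumes the socket already has the PACKET_AUXDATA option enabled; demo_print_vlan is not a real API):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void demo_print_vlan(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		struct tpacket_auxdata aux;

		if (cmsg->cmsg_level != SOL_PACKET ||
		    cmsg->cmsg_type != PACKET_AUXDATA ||
		    cmsg->cmsg_len < CMSG_LEN(sizeof(aux)))
			continue;
		memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
		if (aux.tp_status & TP_STATUS_VLAN_VALID)
			printf("VLAN tag present, TCI 0x%04x\n", aux.tp_vlan_tci);
		else
			printf("untagged frame\n");
	}
}
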
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 8c5bfcef92c..ab07711cf2f 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -607,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
struct pn_sock *pn = pn_sk(sk);
seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
- "%d %p %d%n",
+ "%d %pK %d%n",
sk->sk_protocol, pn->sobject, pn->dobject,
pn->resource, sk->sk_state,
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
diff --git a/net/rds/ib.c b/net/rds/ib.c
index cce19f95c62..3b83086bcc3 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
- cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+ cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ee369d201a6..fd453dd5124 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
/* XXX I wonder what affect the port space has */
/* delegate cm event handler to rdma_transport */
ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
- RDMA_PS_TCP);
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id);
ic->i_cm_id = NULL;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5a9676fe594..f7474844f09 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
- cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+ cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3a60a15d1b4..c12db66f24c 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
/* XXX I wonder what affect the port space has */
/* delegate cm event handler to rdma_transport */
ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
- RDMA_PS_TCP);
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ic->i_cm_id)) {
ret = PTR_ERR(ic->i_cm_id);
ic->i_cm_id = NULL;
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 4195a053982..f8760e1b668 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
struct rdma_cm_id *cm_id;
int ret;
- cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP);
+ cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
+ IB_QPT_RC);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 48464ca13b2..78efe895b66 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -33,3 +33,12 @@ config RFKILL_REGULATOR
To compile this driver as a module, choose M here: the module will
be called rfkill-regulator.
+
+config RFKILL_GPIO
+ tristate "GPIO RFKILL driver"
+ depends on RFKILL && GPIOLIB && HAVE_CLK
+ default n
+ help
+	  If you say yes here you get support for a generic GPIO RFKILL
+ driver. The platform should fill in the appropriate fields in the
+ rfkill_gpio_platform_data structure and pass that to the driver.
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index d9a5a58ffd8..311768783f4 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -6,3 +6,4 @@ rfkill-y += core.o
rfkill-$(CONFIG_RFKILL_INPUT) += input.o
obj-$(CONFIG_RFKILL) += rfkill.o
obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
+obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
new file mode 100644
index 00000000000..256c5ddd2d7
--- /dev/null
+++ b/net/rfkill/rfkill-gpio.c
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rfkill.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <linux/rfkill-gpio.h>
+
+enum rfkill_gpio_clk_state {
+ UNSPECIFIED = 0,
+ PWR_ENABLED,
+ PWR_DISABLED
+};
+
+#define PWR_CLK_SET(_RF, _EN) \
+ ((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED))
+#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED)
+#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED)
+
+struct rfkill_gpio_data {
+ struct rfkill_gpio_platform_data *pdata;
+ struct rfkill *rfkill_dev;
+ char *reset_name;
+ char *shutdown_name;
+ enum rfkill_gpio_clk_state pwr_clk_enabled;
+ struct clk *pwr_clk;
+};
+
+static int rfkill_gpio_set_power(void *data, bool blocked)
+{
+ struct rfkill_gpio_data *rfkill = data;
+
+ if (blocked) {
+ if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
+ gpio_direction_output(rfkill->pdata->shutdown_gpio, 0);
+ if (gpio_is_valid(rfkill->pdata->reset_gpio))
+ gpio_direction_output(rfkill->pdata->reset_gpio, 0);
+ if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
+ clk_disable(rfkill->pwr_clk);
+ } else {
+ if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill))
+ clk_enable(rfkill->pwr_clk);
+ if (gpio_is_valid(rfkill->pdata->reset_gpio))
+ gpio_direction_output(rfkill->pdata->reset_gpio, 1);
+ if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
+ gpio_direction_output(rfkill->pdata->shutdown_gpio, 1);
+ }
+
+ if (rfkill->pwr_clk)
+ PWR_CLK_SET(rfkill, blocked);
+
+ return 0;
+}
+
+static const struct rfkill_ops rfkill_gpio_ops = {
+ .set_block = rfkill_gpio_set_power,
+};
+
+static int rfkill_gpio_probe(struct platform_device *pdev)
+{
+ struct rfkill_gpio_data *rfkill;
+ struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
+ int ret = 0;
+ int len = 0;
+
+ if (!pdata) {
+ pr_warn("%s: No platform data specified\n", __func__);
+ return -EINVAL;
+ }
+
+	/* make sure at least one of the GPIOs is defined and that
+ * a name is specified for this instance */
+ if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
+ !gpio_is_valid(pdata->shutdown_gpio))) {
+ pr_warn("%s: invalid platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
+ if (!rfkill)
+ return -ENOMEM;
+
+ rfkill->pdata = pdata;
+
+ len = strlen(pdata->name);
+ rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
+ if (!rfkill->reset_name) {
+ ret = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
+ if (!rfkill->shutdown_name) {
+ ret = -ENOMEM;
+ goto fail_reset_name;
+ }
+
+	snprintf(rfkill->reset_name, len + 7, "%s_reset", pdata->name);
+	snprintf(rfkill->shutdown_name, len + 10, "%s_shutdown", pdata->name);
+
+ if (pdata->power_clk_name) {
+ rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
+ if (IS_ERR(rfkill->pwr_clk)) {
+ pr_warn("%s: can't find pwr_clk.\n", __func__);
+			ret = PTR_ERR(rfkill->pwr_clk);
+			goto fail_shutdown_name;
+ }
+ }
+
+ if (gpio_is_valid(pdata->reset_gpio)) {
+ ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
+ if (ret) {
+ pr_warn("%s: failed to get reset gpio.\n", __func__);
+ goto fail_clock;
+ }
+ }
+
+ if (gpio_is_valid(pdata->shutdown_gpio)) {
+ ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
+ if (ret) {
+ pr_warn("%s: failed to get shutdown gpio.\n", __func__);
+ goto fail_reset;
+ }
+ }
+
+ rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
+ &rfkill_gpio_ops, rfkill);
+	if (!rfkill->rfkill_dev) {
+		ret = -ENOMEM;
+		goto fail_shutdown;
+	}
+
+ ret = rfkill_register(rfkill->rfkill_dev);
+ if (ret < 0)
+ goto fail_rfkill;
+
+ platform_set_drvdata(pdev, rfkill);
+
+ dev_info(&pdev->dev, "%s device registered.\n", pdata->name);
+
+ return 0;
+
+fail_rfkill:
+ rfkill_destroy(rfkill->rfkill_dev);
+fail_shutdown:
+ if (gpio_is_valid(pdata->shutdown_gpio))
+ gpio_free(pdata->shutdown_gpio);
+fail_reset:
+ if (gpio_is_valid(pdata->reset_gpio))
+ gpio_free(pdata->reset_gpio);
+fail_clock:
+ if (rfkill->pwr_clk)
+ clk_put(rfkill->pwr_clk);
+fail_shutdown_name:
+ kfree(rfkill->shutdown_name);
+fail_reset_name:
+ kfree(rfkill->reset_name);
+fail_alloc:
+ kfree(rfkill);
+
+ return ret;
+}
+
+static int rfkill_gpio_remove(struct platform_device *pdev)
+{
+ struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
+
+ rfkill_unregister(rfkill->rfkill_dev);
+ rfkill_destroy(rfkill->rfkill_dev);
+ if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
+ gpio_free(rfkill->pdata->shutdown_gpio);
+ if (gpio_is_valid(rfkill->pdata->reset_gpio))
+ gpio_free(rfkill->pdata->reset_gpio);
+ if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
+ clk_disable(rfkill->pwr_clk);
+ if (rfkill->pwr_clk)
+ clk_put(rfkill->pwr_clk);
+ kfree(rfkill->shutdown_name);
+ kfree(rfkill->reset_name);
+ kfree(rfkill);
+
+ return 0;
+}
+
+static struct platform_driver rfkill_gpio_driver = {
+ .probe = rfkill_gpio_probe,
+ .remove = __devexit_p(rfkill_gpio_remove),
+ .driver = {
+ .name = "rfkill_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rfkill_gpio_init(void)
+{
+ return platform_driver_register(&rfkill_gpio_driver);
+}
+
+static void __exit rfkill_gpio_exit(void)
+{
+ platform_driver_unregister(&rfkill_gpio_driver);
+}
+
+module_init(rfkill_gpio_init);
+module_exit(rfkill_gpio_exit);
+
+MODULE_DESCRIPTION("gpio rfkill");
+MODULE_AUTHOR("NVIDIA");
+MODULE_LICENSE("GPL");
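
For context, a hypothetical board-file hookup for the new driver. The device name "rfkill_gpio" and the platform-data field names come from the code above; the GPIO numbers, clock name and rfkill instance name are invented for illustration:

#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/rfkill-gpio.h>

static struct rfkill_gpio_platform_data demo_bt_rfkill_pdata = {
	.name		= "bt_rfkill",		/* instance name (required) */
	.reset_gpio	= 40,			/* example GPIO number */
	.shutdown_gpio	= -1,			/* not wired on this board */
	.power_clk_name	= "blink",		/* optional clock, may be NULL */
	.type		= RFKILL_TYPE_BLUETOOTH,
};

static struct platform_device demo_bt_rfkill_device = {
	.name	= "rfkill_gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &demo_bt_rfkill_pdata,
	},
};

/* typically called from the board's init_machine hook: */
/* platform_device_register(&demo_bt_rfkill_device); */
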
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7ef87f9eb67..b6ea6afa55b 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
unsigned int hash;
- sfq_index x;
+ sfq_index x, qlen;
struct sfq_slot *slot;
int uninitialized_var(ret);
@@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
+ qlen = slot->qlen;
sfq_drop(sch);
- return NET_XMIT_CN;
-}
-
-static struct sk_buff *
-sfq_peek(struct Qdisc *sch)
-{
- struct sfq_sched_data *q = qdisc_priv(sch);
-
- /* No active slots */
- if (q->tail == NULL)
- return NULL;
-
- return q->slots[q->tail->next].skblist_next;
+ /* Return Congestion Notification only if we dropped a packet
+ * from this flow.
+ */
+ return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
}
static struct sk_buff *
@@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct sfq_sched_data),
.enqueue = sfq_enqueue,
.dequeue = sfq_dequeue,
- .peek = sfq_peek,
+ .peek = qdisc_peek_dequeued,
.drop = sfq_drop,
.init = sfq_init,
.reset = sfq_reset,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1a21c571aa0..4a62888f2e4 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -64,6 +64,7 @@
/* Forward declarations for internal functions. */
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
+static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
/* Keep track of the new idr low so that we don't re-use association id
* numbers too fast. It is protected by they idr spin lock is in the
@@ -443,12 +444,7 @@ void sctp_association_free(struct sctp_association *asoc)
asoc->peer.transport_count = 0;
- /* Free any cached ASCONF_ACK chunk. */
- sctp_assoc_free_asconf_acks(asoc);
-
- /* Free any cached ASCONF chunk. */
- if (asoc->addip_last_asconf)
- sctp_chunk_free(asoc->addip_last_asconf);
+ sctp_asconf_queue_teardown(asoc);
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1578,6 +1574,18 @@ retry:
return error;
}
+/* Free the ASCONF queue */
+static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
+{
+ struct sctp_chunk *asconf;
+ struct sctp_chunk *tmp;
+
+ list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
+ list_del_init(&asconf->list);
+ sctp_chunk_free(asconf);
+ }
+}
+
/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
@@ -1630,3 +1638,16 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
return NULL;
}
+
+void sctp_asconf_queue_teardown(struct sctp_association *asoc)
+{
+ /* Free any cached ASCONF_ACK chunk. */
+ sctp_assoc_free_asconf_acks(asoc);
+
+ /* Free the ASCONF queue. */
+ sctp_assoc_free_asconf_queue(asoc);
+
+ /* Free any cached ASCONF chunk. */
+ if (asoc->addip_last_asconf)
+ sctp_chunk_free(asoc->addip_last_asconf);
+}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 61aacfbbaa9..05a6ce21471 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -212,7 +212,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
sctp_for_each_hentry(epb, node, &head->chain) {
ep = sctp_ep(epb);
sk = epb->sk;
- seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
+ seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
epb->bind_addr.port,
sock_i_uid(sk), sock_i_ino(sk));
@@ -316,7 +316,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc = sctp_assoc(epb);
sk = epb->sk;
seq_printf(seq,
- "%8p %8p %-3d %-3d %-2d %-4d "
+ "%8pK %8pK %-3d %-3d %-2d %-4d "
"%4d %8d %8d %7d %5lu %-5d %5d ",
assoc, sk, sctp_sk(sk)->type, sk->sk_state,
assoc->state, hash,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index d612ca1ca6c..534c2e5feb0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1670,6 +1670,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
break;
+ case SCTP_CMD_PURGE_ASCONF_QUEUE:
+ sctp_asconf_queue_teardown(asoc);
+ break;
default:
pr_warn("Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7f4a4f8368e..a297283154d 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1718,11 +1718,21 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
return SCTP_DISPOSITION_CONSUME;
}
- /* For now, fail any unsent/unacked data. Consider the optional
- * choice of resending of this data.
+ /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
+ * data. Consider the optional choice of resending of this data.
*/
+ sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
+ /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
+ * and ASCONF-ACK cache.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
+
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 67e31276682..cd6e4aa19db 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
* Run memory cache shrinker.
*/
static int
-rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
LIST_HEAD(free);
int res;
+ int nr_to_scan = sc->nr_to_scan;
+ gfp_t gfp_mask = sc->gfp_mask;
if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
return (nr_to_scan == 0) ? 0 : -1;
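
The hunk above adapts the RPC credential cache to the shrinker callback that now receives a struct shrink_control instead of separate nr_to_scan/gfp_mask arguments. A minimal sketch of a shrinker written directly against that signature; the demo_* names and the cached-object counter are placeholders:

#include <linux/mm.h>
#include <linux/atomic.h>

static atomic_t demo_nr_cached = ATOMIC_INIT(0);

static int demo_prune(int nr_to_scan)
{
	/* ... drop up to nr_to_scan cached objects here ... */
	return atomic_read(&demo_nr_cached);
}

static int demo_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	/* Refuse to do work from contexts that cannot sleep or recurse. */
	if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
		return (sc->nr_to_scan == 0) ? 0 : -1;
	if (sc->nr_to_scan)
		demo_prune(sc->nr_to_scan);
	/* Report how many objects remain cached. */
	return atomic_read(&demo_nr_cached);
}

static struct shrinker demo_shrinker = {
	.shrink	= demo_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* registered once at init: register_shrinker(&demo_shrinker); */
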
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8d83f9d4871..b84d7395535 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -13,10 +13,6 @@
* and need to be refreshed, or when a packet was damaged in transit.
* This may be have to be moved to the VFS layer.
*
- * NB: BSD uses a more intelligent approach to guessing when a request
- * or reply has been lost by keeping the RTO estimate for each procedure.
- * We currently make do with a constant timeout value.
- *
* Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
* Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
*/
@@ -32,7 +28,9 @@
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
+#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/un.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -298,22 +296,27 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
* up a string representation of the passed-in address.
*/
if (args->servername == NULL) {
+ struct sockaddr_un *sun =
+ (struct sockaddr_un *)args->address;
+ struct sockaddr_in *sin =
+ (struct sockaddr_in *)args->address;
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)args->address;
+
servername[0] = '\0';
switch (args->address->sa_family) {
- case AF_INET: {
- struct sockaddr_in *sin =
- (struct sockaddr_in *)args->address;
+ case AF_LOCAL:
+ snprintf(servername, sizeof(servername), "%s",
+ sun->sun_path);
+ break;
+ case AF_INET:
snprintf(servername, sizeof(servername), "%pI4",
&sin->sin_addr.s_addr);
break;
- }
- case AF_INET6: {
- struct sockaddr_in6 *sin =
- (struct sockaddr_in6 *)args->address;
+ case AF_INET6:
snprintf(servername, sizeof(servername), "%pI6",
- &sin->sin6_addr);
+ &sin6->sin6_addr);
break;
- }
default:
/* caller wants default server name, but
* address family isn't recognized. */
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c652e4cc9fe..9a80a922c52 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/socket.h>
+#include <linux/un.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/kernel.h>
@@ -32,6 +33,8 @@
# define RPCDBG_FACILITY RPCDBG_BIND
#endif
+#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
+
#define RPCBIND_PROGRAM (100000u)
#define RPCBIND_PORT (111u)
@@ -158,20 +161,69 @@ static void rpcb_map_release(void *data)
kfree(map);
}
-static const struct sockaddr_in rpcb_inaddr_loopback = {
- .sin_family = AF_INET,
- .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
- .sin_port = htons(RPCBIND_PORT),
-};
+/*
+ * Returns zero on success, otherwise a negative errno value
+ * is returned.
+ */
+static int rpcb_create_local_unix(void)
+{
+ static const struct sockaddr_un rpcb_localaddr_rpcbind = {
+ .sun_family = AF_LOCAL,
+ .sun_path = RPCBIND_SOCK_PATHNAME,
+ };
+ struct rpc_create_args args = {
+ .net = &init_net,
+ .protocol = XPRT_TRANSPORT_LOCAL,
+ .address = (struct sockaddr *)&rpcb_localaddr_rpcbind,
+ .addrsize = sizeof(rpcb_localaddr_rpcbind),
+ .servername = "localhost",
+ .program = &rpcb_program,
+ .version = RPCBVERS_2,
+ .authflavor = RPC_AUTH_NULL,
+ };
+ struct rpc_clnt *clnt, *clnt4;
+ int result = 0;
+
+ /*
+ * Because we requested an RPC PING at transport creation time,
+ * this works only if the user space portmapper is rpcbind, and
+ * it's listening on AF_LOCAL on the named socket.
+ */
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt)) {
+ dprintk("RPC: failed to create AF_LOCAL rpcbind "
+ "client (errno %ld).\n", PTR_ERR(clnt));
+ result = -PTR_ERR(clnt);
+ goto out;
+ }
+
+ clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
+ if (IS_ERR(clnt4)) {
+ dprintk("RPC: failed to bind second program to "
+ "rpcbind v4 client (errno %ld).\n",
+ PTR_ERR(clnt4));
+ clnt4 = NULL;
+ }
+
+ /* Protected by rpcb_create_local_mutex */
+ rpcb_local_clnt = clnt;
+ rpcb_local_clnt4 = clnt4;
-static DEFINE_MUTEX(rpcb_create_local_mutex);
+out:
+ return result;
+}
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
-static int rpcb_create_local(void)
+static int rpcb_create_local_net(void)
{
+ static const struct sockaddr_in rpcb_inaddr_loopback = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
+ .sin_port = htons(RPCBIND_PORT),
+ };
struct rpc_create_args args = {
.net = &init_net,
.protocol = XPRT_TRANSPORT_TCP,
@@ -186,13 +238,6 @@ static int rpcb_create_local(void)
struct rpc_clnt *clnt, *clnt4;
int result = 0;
- if (rpcb_local_clnt)
- return result;
-
- mutex_lock(&rpcb_create_local_mutex);
- if (rpcb_local_clnt)
- goto out;
-
clnt = rpc_create(&args);
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create local rpcbind "
@@ -214,10 +259,34 @@ static int rpcb_create_local(void)
clnt4 = NULL;
}
+ /* Protected by rpcb_create_local_mutex */
rpcb_local_clnt = clnt;
rpcb_local_clnt4 = clnt4;
out:
+ return result;
+}
+
+/*
+ * Returns zero on success, otherwise a negative errno value
+ * is returned.
+ */
+static int rpcb_create_local(void)
+{
+ static DEFINE_MUTEX(rpcb_create_local_mutex);
+ int result = 0;
+
+ if (rpcb_local_clnt)
+ return result;
+
+ mutex_lock(&rpcb_create_local_mutex);
+ if (rpcb_local_clnt)
+ goto out;
+
+ if (rpcb_create_local_unix() != 0)
+ result = rpcb_create_local_net();
+
+out:
mutex_unlock(&rpcb_create_local_mutex);
return result;
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 08e05a8ce02..2b90292e950 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -942,6 +942,8 @@ static void svc_unregister(const struct svc_serv *serv)
if (progp->pg_vers[i]->vs_hidden)
continue;
+ dprintk("svc: attempting to unregister %sv%u\n",
+ progp->pg_name, i);
__svc_unregister(progp->pg_prog, i, progp->pg_name);
}
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b7d435c3f19..af04f779ce9 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -387,6 +387,33 @@ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
return len;
}
+static int svc_partial_recvfrom(struct svc_rqst *rqstp,
+ struct kvec *iov, int nr,
+ int buflen, unsigned int base)
+{
+ size_t save_iovlen;
+ void __user *save_iovbase;
+ unsigned int i;
+ int ret;
+
+ if (base == 0)
+ return svc_recvfrom(rqstp, iov, nr, buflen);
+
+ for (i = 0; i < nr; i++) {
+ if (iov[i].iov_len > base)
+ break;
+ base -= iov[i].iov_len;
+ }
+ save_iovlen = iov[i].iov_len;
+ save_iovbase = iov[i].iov_base;
+ iov[i].iov_len -= base;
+ iov[i].iov_base += base;
+ ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen);
+ iov[i].iov_len = save_iovlen;
+ iov[i].iov_base = save_iovbase;
+ return ret;
+}
+
/*
* Set socket snd and rcv buffer lengths
*/
@@ -409,7 +436,6 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
lock_sock(sock->sk);
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
- sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
#endif
@@ -884,6 +910,56 @@ failed:
return NULL;
}
+static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
+{
+ unsigned int i, len, npages;
+
+ if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ return 0;
+ len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ if (rqstp->rq_pages[i] != NULL)
+ put_page(rqstp->rq_pages[i]);
+ BUG_ON(svsk->sk_pages[i] == NULL);
+ rqstp->rq_pages[i] = svsk->sk_pages[i];
+ svsk->sk_pages[i] = NULL;
+ }
+ rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
+ return len;
+}
+
+static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
+{
+ unsigned int i, len, npages;
+
+ if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ return;
+ len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ svsk->sk_pages[i] = rqstp->rq_pages[i];
+ rqstp->rq_pages[i] = NULL;
+ }
+}
+
+static void svc_tcp_clear_pages(struct svc_sock *svsk)
+{
+ unsigned int i, len, npages;
+
+ if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
+ goto out;
+ len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
+ npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ BUG_ON(svsk->sk_pages[i] == NULL);
+ put_page(svsk->sk_pages[i]);
+ svsk->sk_pages[i] = NULL;
+ }
+out:
+ svsk->sk_tcplen = 0;
+}
+
/*
* Receive data.
* If we haven't gotten the record length yet, get the next four bytes.
@@ -893,31 +969,15 @@ failed:
static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ unsigned int want;
int len;
- if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
- /* sndbuf needs to have room for one request
- * per thread, otherwise we can stall even when the
- * network isn't a bottleneck.
- *
- * We count all threads rather than threads in a
- * particular pool, which provides an upper bound
- * on the number of threads which will access the socket.
- *
- * rcvbuf just needs to be able to hold a few requests.
- * Normally they will be removed from the queue
- * as soon a a complete request arrives.
- */
- svc_sock_setbufsize(svsk->sk_sock,
- (serv->sv_nrthreads+3) * serv->sv_max_mesg,
- 3 * serv->sv_max_mesg);
-
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
- int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
struct kvec iov;
+ want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
iov.iov_len = want;
if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
@@ -927,7 +987,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (len < want) {
dprintk("svc: short recvfrom while reading record "
"length (%d of %d)\n", len, want);
- goto err_again; /* record header not complete */
+ return -EAGAIN;
}
svsk->sk_reclen = ntohl(svsk->sk_reclen);
@@ -954,83 +1014,75 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
}
}
- /* Check whether enough data is available */
- len = svc_recv_available(svsk);
- if (len < 0)
- goto error;
+ if (svsk->sk_reclen < 8)
+ goto err_delete; /* client is nuts. */
- if (len < svsk->sk_reclen) {
- dprintk("svc: incomplete TCP record (%d of %d)\n",
- len, svsk->sk_reclen);
- goto err_again; /* record not complete */
- }
len = svsk->sk_reclen;
- set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
return len;
- error:
- if (len == -EAGAIN)
- dprintk("RPC: TCP recv_record got EAGAIN\n");
+error:
+ dprintk("RPC: TCP recv_record got %d\n", len);
return len;
- err_delete:
+err_delete:
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- err_again:
return -EAGAIN;
}
-static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
- struct rpc_rqst **reqpp, struct kvec *vec)
+static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
+ struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
struct rpc_rqst *req = NULL;
- u32 *p;
- u32 xid;
- u32 calldir;
- int len;
-
- len = svc_recvfrom(rqstp, vec, 1, 8);
- if (len < 0)
- goto error;
+ struct kvec *src, *dst;
+ __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
+ __be32 xid;
+ __be32 calldir;
- p = (u32 *)rqstp->rq_arg.head[0].iov_base;
xid = *p++;
calldir = *p;
- if (calldir == 0) {
- /* REQUEST is the most common case */
- vec[0] = rqstp->rq_arg.head[0];
- } else {
- /* REPLY */
- struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
-
- if (bc_xprt)
- req = xprt_lookup_rqst(bc_xprt, xid);
-
- if (!req) {
- printk(KERN_NOTICE
- "%s: Got unrecognized reply: "
- "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
- __func__, ntohl(calldir),
- bc_xprt, xid);
- vec[0] = rqstp->rq_arg.head[0];
- goto out;
- }
+ if (bc_xprt)
+ req = xprt_lookup_rqst(bc_xprt, xid);
- memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
- sizeof(struct xdr_buf));
- /* copy the xid and call direction */
- memcpy(req->rq_private_buf.head[0].iov_base,
- rqstp->rq_arg.head[0].iov_base, 8);
- vec[0] = req->rq_private_buf.head[0];
+ if (!req) {
+ printk(KERN_NOTICE
+ "%s: Got unrecognized reply: "
+ "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
+ __func__, ntohl(calldir),
+ bc_xprt, xid);
+ return -EAGAIN;
}
- out:
- vec[0].iov_base += 8;
- vec[0].iov_len -= 8;
- len = svsk->sk_reclen - 8;
- error:
- *reqpp = req;
- return len;
+
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
+ /*
+ * XXX!: cheating for now! Only copying HEAD.
+ * But we know this is good enough for now (in fact, for any
+	 * callback reply in the foreseeable future).
+ */
+ dst = &req->rq_private_buf.head[0];
+ src = &rqstp->rq_arg.head[0];
+ if (dst->iov_len < src->iov_len)
+ return -EAGAIN; /* whatever; just giving up. */
+ memcpy(dst->iov_base, src->iov_base, src->iov_len);
+ xprt_complete_rqst(req->rq_task, svsk->sk_reclen);
+ rqstp->rq_arg.len = 0;
+ return 0;
}
+static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
+{
+ int i = 0;
+ int t = 0;
+
+ while (t < len) {
+ vec[i].iov_base = page_address(pages[i]);
+ vec[i].iov_len = PAGE_SIZE;
+ i++;
+ t += PAGE_SIZE;
+ }
+ return i;
+}
+
+
/*
* Receive data from a TCP socket.
*/
@@ -1041,8 +1093,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
int len;
struct kvec *vec;
- int pnum, vlen;
- struct rpc_rqst *req = NULL;
+ unsigned int want, base;
+ __be32 *p;
+ __be32 calldir;
+ int pnum;
dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
@@ -1053,87 +1107,73 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
if (len < 0)
goto error;
+ base = svc_tcp_restore_pages(svsk, rqstp);
+ want = svsk->sk_reclen - base;
+
vec = rqstp->rq_vec;
- vec[0] = rqstp->rq_arg.head[0];
- vlen = PAGE_SIZE;
- /*
- * We have enough data for the whole tcp record. Let's try and read the
- * first 8 bytes to get the xid and the call direction. We can use this
- * to figure out if this is a call or a reply to a callback. If
- * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
- * In that case, don't bother with the calldir and just read the data.
- * It will be rejected in svc_process.
- */
- if (len >= 8) {
- len = svc_process_calldir(svsk, rqstp, &req, vec);
- if (len < 0)
- goto err_again;
- vlen -= 8;
- }
+ pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
+ svsk->sk_reclen);
- pnum = 1;
- while (vlen < len) {
- vec[pnum].iov_base = (req) ?
- page_address(req->rq_private_buf.pages[pnum - 1]) :
- page_address(rqstp->rq_pages[pnum]);
- vec[pnum].iov_len = PAGE_SIZE;
- pnum++;
- vlen += PAGE_SIZE;
- }
rqstp->rq_respages = &rqstp->rq_pages[pnum];
/* Now receive data */
- len = svc_recvfrom(rqstp, vec, pnum, len);
- if (len < 0)
- goto err_again;
-
- /*
- * Account for the 8 bytes we read earlier
- */
- len += 8;
-
- if (req) {
- xprt_complete_rqst(req->rq_task, len);
- len = 0;
- goto out;
+ len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
+ if (len >= 0)
+ svsk->sk_tcplen += len;
+ if (len != want) {
+ if (len < 0 && len != -EAGAIN)
+ goto err_other;
+ svc_tcp_save_pages(svsk, rqstp);
+ dprintk("svc: incomplete TCP record (%d of %d)\n",
+ svsk->sk_tcplen, svsk->sk_reclen);
+ goto err_noclose;
}
- dprintk("svc: TCP complete record (%d bytes)\n", len);
- rqstp->rq_arg.len = len;
+
+ rqstp->rq_arg.len = svsk->sk_reclen;
rqstp->rq_arg.page_base = 0;
- if (len <= rqstp->rq_arg.head[0].iov_len) {
- rqstp->rq_arg.head[0].iov_len = len;
+ if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
+ rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
rqstp->rq_arg.page_len = 0;
- } else {
- rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
- }
+ } else
+ rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
-out:
+ p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
+ calldir = p[1];
+ if (calldir)
+ len = receive_cb_reply(svsk, rqstp);
+
/* Reset TCP read info */
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
+ /* If we have more data, signal svc_xprt_enqueue() to try again */
+ if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
+ set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+
+ if (len < 0)
+ goto error;
svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
if (serv->sv_stats)
serv->sv_stats->nettcpcnt++;
- return len;
+ dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
+ return rqstp->rq_arg.len;
-err_again:
- if (len == -EAGAIN) {
- dprintk("RPC: TCP recvfrom got EAGAIN\n");
- return len;
- }
error:
- if (len != -EAGAIN) {
- printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
- svsk->sk_xprt.xpt_server->sv_name, -len);
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- }
+ if (len != -EAGAIN)
+ goto err_other;
+ dprintk("RPC: TCP recvfrom got EAGAIN\n");
return -EAGAIN;
+err_other:
+ printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
+ svsk->sk_xprt.xpt_server->sv_name, -len);
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+err_noclose:
+ return -EAGAIN; /* record not complete */
}
/*
@@ -1304,18 +1344,10 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
+ memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
- /* initialise setting must have enough space to
- * receive and respond to one request.
- * svc_tcp_recvfrom will re-adjust if necessary
- */
- svc_sock_setbufsize(svsk->sk_sock,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
- 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
-
- set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
if (sk->sk_state != TCP_ESTABLISHED)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1379,8 +1411,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
/* Initialize the socket */
if (sock->type == SOCK_DGRAM)
svc_udp_init(svsk, serv);
- else
+ else {
+		/* initial buffer sizes must leave enough space to
+ * receive and respond to one request.
+ */
+ svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
+ 4 * serv->sv_max_mesg);
svc_tcp_init(svsk, serv);
+ }
dprintk("svc: svc_setup_socket created %p (inet %p)\n",
svsk, svsk->sk_sk);
@@ -1562,8 +1600,10 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
svc_sock_detach(xprt);
- if (!test_bit(XPT_LISTENER, &xprt->xpt_flags))
+ if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ svc_tcp_clear_pages(svsk);
kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
+ }
}
/*
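
The server-side TCP receive path above no longer waits for a whole RPC record to be queued: partially filled pages are parked in svsk->sk_pages and the read resumes from the saved offset on the next wakeup, returning -EAGAIN until the record is complete. A much simplified, user-space model of that resume-from-offset idea (not the sunrpc code; demo_* names are invented):

#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>

struct demo_record {
	char	*buf;	/* full record buffer, "want" bytes long */
	size_t	 want;	/* total record length from the fragment header */
	size_t	 got;	/* bytes accumulated by earlier calls */
};

/* Returns 0 once the record is complete, -EAGAIN when more data is
 * needed later, or another negative errno on a real error. */
static int demo_recv_resume(int fd, struct demo_record *r)
{
	ssize_t n = recv(fd, r->buf + r->got, r->want - r->got, MSG_DONTWAIT);

	if (n < 0)
		return (errno == EAGAIN || errno == EWOULDBLOCK) ? -EAGAIN : -errno;
	if (n == 0)
		return -ECONNRESET;	/* peer closed mid-record */
	r->got += n;
	return (r->got == r->want) ? 0 : -EAGAIN;
}
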
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 679cd674b81..f008c14ad34 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -638,6 +638,25 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
+/**
+ * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to XDR buffer from which to decode data
+ * @pages: list of pages to decode into
+ * @len: length in bytes of buffer in pages
+ */
+void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, unsigned int len)
+{
+ memset(buf, 0, sizeof(*buf));
+ buf->pages = pages;
+ buf->page_len = len;
+ buf->buflen = len;
+ buf->len = len;
+ xdr_init_decode(xdr, buf, NULL);
+}
+EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
+
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
__be32 *p = xdr->p;
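
A hypothetical caller of the new helper, showing how a page-based reply can be handed straight to the XDR decoder. The scratch page for words that straddle a page boundary follows the existing xdr_set_scratch_buffer() convention; demo_decode_first_word is illustrative only:

#include <linux/mm.h>
#include <linux/sunrpc/xdr.h>

static int demo_decode_first_word(struct page **pages, unsigned int len,
				  struct page *scratch)
{
	struct xdr_stream xdr;
	struct xdr_buf buf;
	__be32 *p;

	xdr_init_decode_pages(&xdr, &buf, pages, len);
	/* scratch space for reads that cross a page boundary */
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	p = xdr_inline_decode(&xdr, 4);		/* pull one 32-bit word */
	if (unlikely(!p))
		return -EIO;
	return be32_to_cpup(p);
}
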
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 6c014dd3a20..c3c232a88d9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
return ERR_PTR(-ENOMEM);
xprt = &cma_xprt->sc_xprt;
- listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
+ listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
+ IB_QPT_RC);
if (IS_ERR(listen_id)) {
ret = PTR_ERR(listen_id);
dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index d4297dc43dc..80f8da344df 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
init_completion(&ia->ri_done);
- id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP);
+ id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id)) {
rc = PTR_ERR(id);
dprintk("RPC: %s: rdma_create_id() failed %i\n",
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bf005d3c65e..72abb735893 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -19,6 +19,7 @@
*/
#include <linux/types.h>
+#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
@@ -28,6 +29,7 @@
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
+#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
@@ -45,6 +47,9 @@
#include <net/tcp.h>
#include "sunrpc.h"
+
+static void xs_close(struct rpc_xprt *xprt);
+
/*
* xprtsock tunables
*/
@@ -261,6 +266,11 @@ static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
return (struct sockaddr *) &xprt->addr;
}
+static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
+{
+ return (struct sockaddr_un *) &xprt->addr;
+}
+
static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
return (struct sockaddr_in *) &xprt->addr;
@@ -276,23 +286,34 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
struct sockaddr *sap = xs_addr(xprt);
struct sockaddr_in6 *sin6;
struct sockaddr_in *sin;
+ struct sockaddr_un *sun;
char buf[128];
- (void)rpc_ntop(sap, buf, sizeof(buf));
- xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
-
switch (sap->sa_family) {
+ case AF_LOCAL:
+ sun = xs_addr_un(xprt);
+ strlcpy(buf, sun->sun_path, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] =
+ kstrdup(buf, GFP_KERNEL);
+ break;
case AF_INET:
+ (void)rpc_ntop(sap, buf, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] =
+ kstrdup(buf, GFP_KERNEL);
sin = xs_addr_in(xprt);
snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
break;
case AF_INET6:
+ (void)rpc_ntop(sap, buf, sizeof(buf));
+ xprt->address_strings[RPC_DISPLAY_ADDR] =
+ kstrdup(buf, GFP_KERNEL);
sin6 = xs_addr_in6(xprt);
snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
break;
default:
BUG();
}
+
xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}
@@ -495,6 +516,70 @@ static int xs_nospace(struct rpc_task *task)
return ret;
}
+/*
+ * Construct a stream transport record marker in @buf.
+ */
+static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
+{
+ u32 reclen = buf->len - sizeof(rpc_fraghdr);
+ rpc_fraghdr *base = buf->head[0].iov_base;
+ *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
+}
+
+/**
+ * xs_local_send_request - write an RPC request to an AF_LOCAL socket
+ * @task: RPC task that manages the state of an RPC request
+ *
+ * Return values:
+ * 0: The request has been sent
+ * EAGAIN: The socket was blocked, please call again later to
+ * complete the request
+ * ENOTCONN: Caller needs to invoke connect logic then call again
+ *		other:	Some other error occurred, the request was not sent
+ */
+static int xs_local_send_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct xdr_buf *xdr = &req->rq_snd_buf;
+ int status;
+
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
+
+ xs_pktdump("packet data:",
+ req->rq_svec->iov_base, req->rq_svec->iov_len);
+
+ status = xs_sendpages(transport->sock, NULL, 0,
+ xdr, req->rq_bytes_sent);
+ dprintk("RPC: %s(%u) = %d\n",
+ __func__, xdr->len - req->rq_bytes_sent, status);
+ if (likely(status >= 0)) {
+ req->rq_bytes_sent += status;
+ req->rq_xmit_bytes_sent += status;
+ if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+ req->rq_bytes_sent = 0;
+ return 0;
+ }
+ status = -EAGAIN;
+ }
+
+ switch (status) {
+ case -EAGAIN:
+ status = xs_nospace(task);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
+ case -EPIPE:
+ xs_close(xprt);
+ status = -ENOTCONN;
+ }
+
+ return status;
+}
+
/**
* xs_udp_send_request - write an RPC request to a UDP socket
* @task: address of RPC task that manages the state of an RPC request
@@ -574,13 +659,6 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
kernel_sock_shutdown(sock, SHUT_WR);
}
-static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
-{
- u32 reclen = buf->len - sizeof(rpc_fraghdr);
- rpc_fraghdr *base = buf->head[0].iov_base;
- *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
-}
-
/**
* xs_tcp_send_request - write an RPC request to a TCP socket
* @task: address of RPC task that manages the state of an RPC request
@@ -603,7 +681,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
struct xdr_buf *xdr = &req->rq_snd_buf;
int status;
- xs_encode_tcp_record_marker(&req->rq_snd_buf);
+ xs_encode_stream_record_marker(&req->rq_snd_buf);
xs_pktdump("packet data:",
req->rq_svec->iov_base,
@@ -785,6 +863,88 @@ static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
return (struct rpc_xprt *) sk->sk_user_data;
}
+static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
+{
+ struct xdr_skb_reader desc = {
+ .skb = skb,
+ .offset = sizeof(rpc_fraghdr),
+ .count = skb->len - sizeof(rpc_fraghdr),
+ };
+
+ if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
+ return -1;
+ if (desc.count)
+ return -1;
+ return 0;
+}
+
+/**
+ * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
+ * @sk: socket with data to read
+ * @len: how much data to read
+ *
+ * Currently this assumes we can read the whole reply in a single gulp.
+ */
+static void xs_local_data_ready(struct sock *sk, int len)
+{
+ struct rpc_task *task;
+ struct rpc_xprt *xprt;
+ struct rpc_rqst *rovr;
+ struct sk_buff *skb;
+ int err, repsize, copied;
+ u32 _xid;
+ __be32 *xp;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ dprintk("RPC: %s...\n", __func__);
+ xprt = xprt_from_sock(sk);
+ if (xprt == NULL)
+ goto out;
+
+ skb = skb_recv_datagram(sk, 0, 1, &err);
+ if (skb == NULL)
+ goto out;
+
+ if (xprt->shutdown)
+ goto dropit;
+
+ repsize = skb->len - sizeof(rpc_fraghdr);
+ if (repsize < 4) {
+ dprintk("RPC: impossible RPC reply size %d\n", repsize);
+ goto dropit;
+ }
+
+ /* Copy the XID from the skb... */
+ xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
+ if (xp == NULL)
+ goto dropit;
+
+ /* Look up and lock the request corresponding to the given XID */
+ spin_lock(&xprt->transport_lock);
+ rovr = xprt_lookup_rqst(xprt, *xp);
+ if (!rovr)
+ goto out_unlock;
+ task = rovr->rq_task;
+
+ copied = rovr->rq_private_buf.buflen;
+ if (copied > repsize)
+ copied = repsize;
+
+ if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
+ dprintk("RPC: sk_buff copy failed\n");
+ goto out_unlock;
+ }
+
+ xprt_complete_rqst(task, copied);
+
+ out_unlock:
+ spin_unlock(&xprt->transport_lock);
+ dropit:
+ skb_free_datagram(sk, skb);
+ out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
/**
* xs_udp_data_ready - "data ready" callback for UDP sockets
* @sk: socket with data to read
@@ -1344,7 +1504,6 @@ static void xs_tcp_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
/* The server initiated a shutdown of the socket */
xprt_force_disconnect(xprt);
- case TCP_SYN_SENT:
xprt->connect_cookie++;
case TCP_CLOSING:
/*
@@ -1571,11 +1730,31 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
return err;
}
+/*
+ * We don't support autobind on AF_LOCAL sockets
+ */
+static void xs_local_rpcbind(struct rpc_task *task)
+{
+ xprt_set_bound(task->tk_xprt);
+}
+
+static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
+{
+}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];
+static inline void xs_reclassify_socketu(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ BUG_ON(sock_owned_by_user(sk));
+ sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
+ &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
+}
+
static inline void xs_reclassify_socket4(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -1597,6 +1776,9 @@ static inline void xs_reclassify_socket6(struct socket *sock)
static inline void xs_reclassify_socket(int family, struct socket *sock)
{
switch (family) {
+ case AF_LOCAL:
+ xs_reclassify_socketu(sock);
+ break;
case AF_INET:
xs_reclassify_socket4(sock);
break;
@@ -1606,6 +1788,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
}
}
#else
+static inline void xs_reclassify_socketu(struct socket *sock)
+{
+}
+
static inline void xs_reclassify_socket4(struct socket *sock)
{
}
@@ -1644,6 +1830,94 @@ out:
return ERR_PTR(err);
}
+static int xs_local_finish_connecting(struct rpc_xprt *xprt,
+ struct socket *sock)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+
+ if (!transport->inet) {
+ struct sock *sk = sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ xs_save_old_callbacks(transport, sk);
+
+ sk->sk_user_data = xprt;
+ sk->sk_data_ready = xs_local_data_ready;
+ sk->sk_write_space = xs_udp_write_space;
+ sk->sk_error_report = xs_error_report;
+ sk->sk_allocation = GFP_ATOMIC;
+
+ xprt_clear_connected(xprt);
+
+ /* Reset to new socket */
+ transport->sock = sock;
+ transport->inet = sk;
+
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ /* Tell the socket layer to start connecting... */
+ xprt->stat.connect_count++;
+ xprt->stat.connect_start = jiffies;
+ return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
+}
+
+/**
+ * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
+ * @work: queued work item embedded in the socket transport to connect
+ *
+ * Invoked from a work queue.
+ */
+static void xs_local_setup_socket(struct work_struct *work)
+{
+ struct sock_xprt *transport =
+ container_of(work, struct sock_xprt, connect_worker.work);
+ struct rpc_xprt *xprt = &transport->xprt;
+ struct socket *sock;
+ int status = -EIO;
+
+ if (xprt->shutdown)
+ goto out;
+
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ status = __sock_create(xprt->xprt_net, AF_LOCAL,
+ SOCK_STREAM, 0, &sock, 1);
+ if (status < 0) {
+ dprintk("RPC: can't create AF_LOCAL "
+ "transport socket (%d).\n", -status);
+ goto out;
+ }
+ xs_reclassify_socketu(sock);
+
+ dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+ status = xs_local_finish_connecting(xprt, sock);
+ switch (status) {
+ case 0:
+ dprintk("RPC: xprt %p connected to %s\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ xprt_set_connected(xprt);
+ break;
+ case -ENOENT:
+ dprintk("RPC: xprt %p: socket %s does not exist\n",
+ xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ break;
+ default:
+ printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
+ __func__, -status,
+ xprt->address_strings[RPC_DISPLAY_ADDR]);
+ }
+
+out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
+}
+
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1758,6 +2032,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ int ret = -ENOTCONN;
if (!transport->inet) {
struct sock *sk = sock->sk;
@@ -1789,12 +2064,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
}
if (!xprt_bound(xprt))
- return -ENOTCONN;
+ goto out;
/* Tell the socket layer to start connecting... */
xprt->stat.connect_count++;
xprt->stat.connect_start = jiffies;
- return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ switch (ret) {
+ case 0:
+ case -EINPROGRESS:
+ /* SYN_SENT! */
+ xprt->connect_cookie++;
+ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ }
+out:
+ return ret;
}
/**
@@ -1917,6 +2202,32 @@ static void xs_connect(struct rpc_task *task)
}
/**
+ * xs_local_print_stats - display AF_LOCAL socket-specific stats
+ * @xprt: rpc_xprt struct containing statistics
+ * @seq: output file
+ *
+ */
+static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
+{
+ long idle_time = 0;
+
+ if (xprt_connected(xprt))
+ idle_time = (long)(jiffies - xprt->last_used) / HZ;
+
+ seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
+ "%llu %llu\n",
+ xprt->stat.bind_count,
+ xprt->stat.connect_count,
+ xprt->stat.connect_time,
+ idle_time,
+ xprt->stat.sends,
+ xprt->stat.recvs,
+ xprt->stat.bad_xids,
+ xprt->stat.req_u,
+ xprt->stat.bklog_u);
+}
+
+/**
* xs_udp_print_stats - display UDP socket-specific stats
* @xprt: rpc_xprt struct containing statistics
* @seq: output file
@@ -2014,10 +2325,7 @@ static int bc_sendto(struct rpc_rqst *req)
unsigned long headoff;
unsigned long tailoff;
- /*
- * Set up the rpc header and record marker stuff
- */
- xs_encode_tcp_record_marker(xbufp);
+ xs_encode_stream_record_marker(xbufp);
tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
@@ -2089,6 +2397,21 @@ static void bc_destroy(struct rpc_xprt *xprt)
{
}
+static struct rpc_xprt_ops xs_local_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xs_tcp_release_xprt,
+ .rpcbind = xs_local_rpcbind,
+ .set_port = xs_local_set_port,
+ .connect = xs_connect,
+ .buf_alloc = rpc_malloc,
+ .buf_free = rpc_free,
+ .send_request = xs_local_send_request,
+ .set_retrans_timeout = xprt_set_retrans_timeout_def,
+ .close = xs_close,
+ .destroy = xs_destroy,
+ .print_stats = xs_local_print_stats,
+};
+
static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
@@ -2150,6 +2473,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap)
};
switch (family) {
+ case AF_LOCAL:
+ break;
case AF_INET:
memcpy(sap, &sin, sizeof(sin));
break;
@@ -2197,6 +2522,70 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
return xprt;
}
+static const struct rpc_timeout xs_local_default_timeout = {
+ .to_initval = 10 * HZ,
+ .to_maxval = 10 * HZ,
+ .to_retries = 2,
+};
+
+/**
+ * xs_setup_local - Set up transport to use an AF_LOCAL socket
+ * @args: rpc transport creation arguments
+ *
+ * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
+ */
+static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
+{
+ struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
+ struct sock_xprt *transport;
+ struct rpc_xprt *xprt;
+ struct rpc_xprt *ret;
+
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+ if (IS_ERR(xprt))
+ return xprt;
+ transport = container_of(xprt, struct sock_xprt, xprt);
+
+ xprt->prot = 0;
+ xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
+ xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
+
+ xprt->bind_timeout = XS_BIND_TO;
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+ xprt->ops = &xs_local_ops;
+ xprt->timeout = &xs_local_default_timeout;
+
+ switch (sun->sun_family) {
+ case AF_LOCAL:
+ if (sun->sun_path[0] != '/') {
+ dprintk("RPC: bad AF_LOCAL address: %s\n",
+ sun->sun_path);
+ ret = ERR_PTR(-EINVAL);
+ goto out_err;
+ }
+ xprt_set_bound(xprt);
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ xs_local_setup_socket);
+ xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
+ break;
+ default:
+ ret = ERR_PTR(-EAFNOSUPPORT);
+ goto out_err;
+ }
+
+ dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR]);
+
+ if (try_module_get(THIS_MODULE))
+ return xprt;
+ ret = ERR_PTR(-EINVAL);
+out_err:
+ xprt_free(xprt);
+ return ret;
+}
+
static const struct rpc_timeout xs_udp_default_timeout = {
.to_initval = 5 * HZ,
.to_maxval = 30 * HZ,
@@ -2438,6 +2827,14 @@ out_err:
return ret;
}
+static struct xprt_class xs_local_transport = {
+ .list = LIST_HEAD_INIT(xs_local_transport.list),
+ .name = "named UNIX socket",
+ .owner = THIS_MODULE,
+ .ident = XPRT_TRANSPORT_LOCAL,
+ .setup = xs_setup_local,
+};
+
static struct xprt_class xs_udp_transport = {
.list = LIST_HEAD_INIT(xs_udp_transport.list),
.name = "udp",
@@ -2473,6 +2870,7 @@ int init_socket_xprt(void)
sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
+ xprt_register_transport(&xs_local_transport);
xprt_register_transport(&xs_udp_transport);
xprt_register_transport(&xs_tcp_transport);
xprt_register_transport(&xs_bc_tcp_transport);
@@ -2493,6 +2891,7 @@ void cleanup_socket_xprt(void)
}
#endif
+ xprt_unregister_transport(&xs_local_transport);
xprt_unregister_transport(&xs_udp_transport);
xprt_unregister_transport(&xs_tcp_transport);
xprt_unregister_transport(&xs_bc_tcp_transport);
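
For context on the transport registered above: the new XPRT_TRANSPORT_LOCAL ident is selected through the ordinary rpc_create() path, just like the UDP and TCP transports. Below is a minimal illustrative sketch (not part of the patch) of how an in-kernel caller might create a client over an AF_LOCAL socket; the helper name, the rpcbind socket path and the program/version values are assumptions for the example.

#include <linux/un.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

static struct rpc_clnt *example_local_rpc_client(struct net *net,
						 struct rpc_program *prog,
						 u32 version)
{
	/* Assumed endpoint: the conventional rpcbind AF_LOCAL socket. */
	struct sockaddr_un sun = {
		.sun_family	= AF_LOCAL,
		.sun_path	= "/var/run/rpcbind.sock",
	};
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_LOCAL,
		.address	= (struct sockaddr *)&sun,
		.addrsize	= sizeof(sun),
		.servername	= "localhost",
		.program	= prog,
		.version	= version,
		.authflavor	= RPC_AUTH_NULL,
	};

	/* xs_setup_local() rejects sun_path values that are not absolute. */
	return rpc_create(&args);
}
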
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b1d75beb7e2..0722a25a3a3 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2254,7 +2254,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
struct unix_sock *u = unix_sk(s);
unix_state_lock(s);
- seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
+ seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
atomic_read(&s->sk_refcnt),
0,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index bf0fb40e3c8..3dce1f167eb 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -245,6 +245,7 @@ struct cfg80211_event {
u16 status;
} cr;
struct {
+ struct ieee80211_channel *channel;
u8 bssid[ETH_ALEN];
const u8 *req_ie;
const u8 *resp_ie;
@@ -392,7 +393,9 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
struct net_device *dev, u16 reason,
bool wextev);
-void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
+void __cfg80211_roamed(struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len);
int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 2222ce08ee9..88a565f130a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3294,8 +3294,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct cfg80211_scan_request *request;
- struct cfg80211_ssid *ssid;
- struct ieee80211_channel *channel;
struct nlattr *attr;
struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_channels, i;
@@ -3342,8 +3340,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
request = kzalloc(sizeof(*request)
- + sizeof(*ssid) * n_ssids
- + sizeof(channel) * n_channels
+ + sizeof(*request->ssids) * n_ssids
+ + sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
return -ENOMEM;
@@ -3408,12 +3406,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
+ request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
}
memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
- request->ssids[i].ssid_len = nla_len(attr);
i++;
}
}
@@ -3449,8 +3447,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
struct cfg80211_sched_scan_request *request;
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct cfg80211_ssid *ssid;
- struct ieee80211_channel *channel;
struct nlattr *attr;
struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_channels, i;
@@ -3507,8 +3503,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
return -EINVAL;
request = kzalloc(sizeof(*request)
- + sizeof(*ssid) * n_ssids
- + sizeof(channel) * n_channels
+ + sizeof(*request->ssids) * n_ssids
+ + sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
return -ENOMEM;
@@ -3576,6 +3572,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
tmp) {
+ request->ssids[i].ssid_len = nla_len(attr);
if (request->ssids[i].ssid_len >
IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
@@ -3583,7 +3580,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
}
memcpy(request->ssids[i].ssid, nla_data(attr),
nla_len(attr));
- request->ssids[i].ssid_len = nla_len(attr);
i++;
}
}
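
The two kzalloc() fixes above size the trailing storage from the request's own member types instead of stale local variables, and the SSID length is now stored before it is range-checked, so an oversized attribute is rejected before the memcpy(). The sketch below (hypothetical struct and helper names, not the cfg80211 code itself) illustrates the single-allocation layout those sizeof terms describe: one block carrying the struct plus its channel, SSID and IE tails.

#include <linux/slab.h>
#include <net/cfg80211.h>

struct scan_req_example {			/* hypothetical */
	int n_ssids;
	int n_channels;
	size_t ie_len;
	struct ieee80211_channel **channels;
	struct cfg80211_ssid *ssids;
	u8 *ie;
	/* channel pointers, SSIDs and IEs follow in the same block */
};

static struct scan_req_example *scan_req_alloc(int n_ssids, int n_channels,
					       size_t ie_len)
{
	struct scan_req_example *req;

	req = kzalloc(sizeof(*req)
		      + sizeof(*req->channels) * n_channels
		      + sizeof(*req->ssids) * n_ssids
		      + ie_len, GFP_KERNEL);
	if (!req)
		return NULL;

	/* Carve the trailing arrays out of the single allocation. */
	req->channels = (void *)(req + 1);
	req->ssids = (void *)(req->channels + n_channels);
	req->ie = (u8 *)(req->ssids + n_ssids);
	req->n_channels = n_channels;
	req->n_ssids = n_ssids;
	req->ie_len = ie_len;
	return req;
}
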
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 73a441d237b..7a6c67667d7 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -267,13 +267,35 @@ static bool is_bss(struct cfg80211_bss *a,
return memcmp(ssidie + 2, ssid, ssid_len) == 0;
}
+static bool is_mesh_bss(struct cfg80211_bss *a)
+{
+ const u8 *ie;
+
+ if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
+ return false;
+
+ ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
+ a->information_elements,
+ a->len_information_elements);
+ if (!ie)
+ return false;
+
+ ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+ a->information_elements,
+ a->len_information_elements);
+ if (!ie)
+ return false;
+
+ return true;
+}
+
static bool is_mesh(struct cfg80211_bss *a,
const u8 *meshid, size_t meshidlen,
const u8 *meshcfg)
{
const u8 *ie;
- if (!WLAN_CAPABILITY_IS_MBSS(a->capability))
+ if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
return false;
ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@@ -311,7 +333,7 @@ static int cmp_bss(struct cfg80211_bss *a,
if (a->channel != b->channel)
return b->channel->center_freq - a->channel->center_freq;
- if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) {
+ if (is_mesh_bss(a) && is_mesh_bss(b)) {
r = cmp_ies(WLAN_EID_MESH_ID,
a->information_elements,
a->len_information_elements,
@@ -457,7 +479,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
struct cfg80211_internal_bss *res)
{
struct cfg80211_internal_bss *found = NULL;
- const u8 *meshid, *meshcfg;
/*
* The reference to "res" is donated to this function.
@@ -470,22 +491,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
res->ts = jiffies;
- if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) {
- /* must be mesh, verify */
- meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
- res->pub.information_elements,
- res->pub.len_information_elements);
- meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
- res->pub.information_elements,
- res->pub.len_information_elements);
- if (!meshid || !meshcfg ||
- meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
- /* bogus mesh */
- kref_put(&res->ref, bss_release);
- return NULL;
- }
- }
-
spin_lock_bh(&dev->bss_lock);
found = rb_find_bss(dev, res);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e17b0bee6bd..b7b6ff8be55 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -250,7 +250,8 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
if (wdev->conn->params.privacy)
capa |= WLAN_CAPABILITY_PRIVACY;
- bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid,
+ bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
+ wdev->conn->params.bssid,
wdev->conn->params.ssid,
wdev->conn->params.ssid_len,
WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
@@ -470,7 +471,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
}
if (!bss)
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+ bss = cfg80211_get_bss(wdev->wiphy,
+ wdev->conn ? wdev->conn->params.channel :
+ NULL,
+ bssid,
wdev->ssid, wdev->ssid_len,
WLAN_CAPABILITY_ESS,
WLAN_CAPABILITY_ESS);
@@ -538,7 +542,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
}
EXPORT_SYMBOL(cfg80211_connect_result);
-void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
+void __cfg80211_roamed(struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len)
{
@@ -565,7 +571,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+ bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
wdev->ssid, wdev->ssid_len,
WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -603,7 +609,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
#endif
}
-void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
+void cfg80211_roamed(struct net_device *dev,
+ struct ieee80211_channel *channel,
+ const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
{
@@ -619,6 +627,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
return;
ev->type = EVENT_ROAMED;
+ ev->rm.channel = channel;
memcpy(ev->rm.bssid, bssid, ETH_ALEN);
ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
ev->rm.req_ie_len = req_ie_len;
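
With the channel argument added above, a driver that detects a roam in firmware now reports the new channel along with the BSSID, so cfg80211 can find the correct BSS entry even when the same BSSID is visible on more than one channel. A minimal illustrative call site follows; the driver helper name and its parameters are assumptions for the example.

#include <net/cfg80211.h>

/* Hypothetical driver helper reporting a firmware-detected roam. */
static void example_report_roam(struct net_device *dev,
				struct ieee80211_channel *new_chan,
				const u8 *new_bssid,
				const u8 *req_ie, size_t req_ie_len,
				const u8 *resp_ie, size_t resp_ie_len)
{
	cfg80211_roamed(dev, new_chan, new_bssid,
			req_ie, req_ie_len,
			resp_ie, resp_ie_len, GFP_KERNEL);
}
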
diff --git a/net/wireless/util.c b/net/wireless/util.c
index f0536d44d43..4d7b83fbc32 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -746,7 +746,7 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
NULL);
break;
case EVENT_ROAMED:
- __cfg80211_roamed(wdev, ev->rm.bssid,
+ __cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
ev->rm.req_ie, ev->rm.req_ie_len,
ev->rm.resp_ie, ev->rm.resp_ie_len);
break;