author    Jiri Kosina <jkosina@suse.cz>  2011-09-15 15:08:05 +0200
committer Jiri Kosina <jkosina@suse.cz>  2011-09-15 15:08:18 +0200
commit    e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree      407361230bf6733f63d8e788e4b5e6566ee04818 /net
parent    10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent    cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches based on more recent version of the tree.
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 2
-rw-r--r--  net/9p/trans_virtio.c | 17
-rw-r--r--  net/atm/atm_misc.c | 2
-rw-r--r--  net/atm/br2684.c | 9
-rw-r--r--  net/atm/clip.c | 2
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/atm/lec.c | 2
-rw-r--r--  net/atm/proc.c | 2
-rw-r--r--  net/bluetooth/af_bluetooth.c | 6
-rw-r--r--  net/bluetooth/bnep/bnep.h | 1
-rw-r--r--  net/bluetooth/bnep/core.c | 13
-rw-r--r--  net/bluetooth/bnep/netdev.c | 1
-rw-r--r--  net/bluetooth/cmtp/capi.c | 3
-rw-r--r--  net/bluetooth/cmtp/cmtp.h | 1
-rw-r--r--  net/bluetooth/cmtp/core.c | 20
-rw-r--r--  net/bluetooth/hci_core.c | 8
-rw-r--r--  net/bluetooth/hidp/core.c | 19
-rw-r--r--  net/bluetooth/l2cap_core.c | 6
-rw-r--r--  net/bluetooth/l2cap_sock.c | 30
-rw-r--r--  net/bluetooth/rfcomm/core.c | 17
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 28
-rw-r--r--  net/bluetooth/sco.c | 28
-rw-r--r--  net/bridge/br_fdb.c | 2
-rw-r--r--  net/bridge/br_if.c | 12
-rw-r--r--  net/bridge/br_multicast.c | 21
-rw-r--r--  net/bridge/br_notify.c | 7
-rw-r--r--  net/bridge/netfilter/ebtables.c | 3
-rw-r--r--  net/ceph/messenger.c | 12
-rw-r--r--  net/ceph/msgpool.c | 40
-rw-r--r--  net/ceph/osd_client.c | 28
-rw-r--r--  net/core/Makefile | 2
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/flow.c | 2
-rw-r--r--  net/core/neighbour.c | 8
-rw-r--r--  net/core/netpoll.c | 4
-rw-r--r--  net/core/pktgen.c | 8
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/core/secure_seq.c | 184
-rw-r--r--  net/core/skbuff.c | 17
-rw-r--r--  net/dccp/ipv4.c | 1
-rw-r--r--  net/dccp/ipv6.c | 9
-rw-r--r--  net/decnet/dn_fib.c | 2
-rw-r--r--  net/decnet/dn_neigh.c | 2
-rw-r--r--  net/decnet/dn_table.c | 2
-rw-r--r--  net/decnet/dn_timer.c | 2
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ipv4/cipso_ipv4.c | 2
-rw-r--r--  net/ipv4/devinet.c | 16
-rw-r--r--  net/ipv4/igmp.c | 4
-rw-r--r--  net/ipv4/inet_hashtables.c | 1
-rw-r--r--  net/ipv4/inetpeer.c | 1
-rw-r--r--  net/ipv4/ip_output.c | 11
-rw-r--r--  net/ipv4/ip_sockglue.c | 9
-rw-r--r--  net/ipv4/netfilter.c | 18
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_common.c | 1
-rw-r--r--  net/ipv4/raw.c | 5
-rw-r--r--  net/ipv4/route.c | 24
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/datagram.c | 11
-rw-r--r--  net/ipv6/inet6_hashtables.c | 1
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 13
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 9
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/route.c | 35
-rw-r--r--  net/ipv6/sit.c | 3
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 1
-rw-r--r--  net/iucv/iucv.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 2
-rw-r--r--  net/l2tp/l2tp_eth.c | 2
-rw-r--r--  net/l2tp/l2tp_ppp.c | 2
-rw-r--r--  net/mac80211/iface.c | 1
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 1
-rw-r--r--  net/netfilter/nf_queue.c | 1
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 2
-rw-r--r--  net/netlabel/Makefile | 2
-rw-r--r--  net/netlabel/netlabel_addrlist.c | 2
-rw-r--r--  net/netlabel/netlabel_addrlist.h | 2
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 4
-rw-r--r--  net/netlabel/netlabel_cipso_v4.h | 2
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 2
-rw-r--r--  net/netlabel/netlabel_domainhash.h | 2
-rw-r--r--  net/netlabel/netlabel_kapi.c | 24
-rw-r--r--  net/netlabel/netlabel_mgmt.c | 4
-rw-r--r--  net/netlabel/netlabel_mgmt.h | 4
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 4
-rw-r--r--  net/netlabel/netlabel_unlabeled.h | 2
-rw-r--r--  net/netlabel/netlabel_user.c | 2
-rw-r--r--  net/netlabel/netlabel_user.h | 2
-rw-r--r--  net/sched/act_mirred.c | 3
-rw-r--r--  net/sched/sch_prio.c | 2
-rw-r--r--  net/sched/sch_sfq.c | 7
-rw-r--r--  net/socket.c | 79
-rw-r--r--  net/sunrpc/Kconfig | 4
-rw-r--r--  net/sunrpc/Makefile | 2
-rw-r--r--  net/sunrpc/backchannel_rqst.c | 7
-rw-r--r--  net/sunrpc/bc_svc.c | 3
-rw-r--r--  net/sunrpc/clnt.c | 15
-rw-r--r--  net/sunrpc/sched.c | 38
-rw-r--r--  net/sunrpc/svc.c | 6
-rw-r--r--  net/sunrpc/svcsock.c | 14
-rw-r--r--  net/sunrpc/xdr.c | 2
-rw-r--r--  net/sunrpc/xprt.c | 258
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 6
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 4
-rw-r--r--  net/sunrpc/xprtsock.c | 57
-rw-r--r--  net/tipc/core.h | 2
-rw-r--r--  net/wireless/core.c | 7
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  net/wireless/reg.c | 7
-rw-r--r--  net/wireless/sysfs.c | 6
-rw-r--r--  net/xfrm/xfrm_algo.c | 4
119 files changed, 940 insertions, 447 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 5f27f8e3025..f1f2f7bb666 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -167,6 +167,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
if (unlikely(!skb))
goto err_free;
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
return skb;
err_free:
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 934e221c1d0..9d40a071d03 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev)
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 175b5135bdc..e317583fcc7 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
int in, out, inp, outp;
struct virtio_chan *chan = client->trans;
- char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
unsigned long flags;
size_t pdata_off = 0;
struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
* Arrange in such a way that server places header in the
* alloced memory and payload onto the user buffer.
*/
- inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+ inp = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, 11);
/*
* Running executables in the filesystem may result in
* a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
}
in += inp;
} else {
- in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
- req->rc->capacity);
+ in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
+ req->rc->sdata, req->rc->capacity);
}
err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
.close = p9_virtio_close,
.request = p9_virtio_request,
.cancel = p9_virtio_cancel,
- .maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
+
+ /*
+ * We leave one entry for input and one entry for response
+ * headers. We also skip one more entry to accomodate, address
+ * that are not at page boundary, that can result in an extra
+ * page in zero copy.
+ */
+ .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
.pref = P9_TRANS_PREF_PAYLOAD_SEP,
.def = 0,
.owner = THIS_MODULE,
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index fc63526d869..f41f02656ff 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -9,7 +9,7 @@
#include <linux/sonet.h>
#include <linux/bitops.h>
#include <linux/errno.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
int atm_charge(struct atm_vcc *vcc, int truesize)
{
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 2252c2085da..d07223c834a 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -242,8 +242,6 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
if (brdev->payload == p_bridged) {
skb_push(skb, 2);
memset(skb->data, 0, 2);
- } else { /* p_routed */
- skb_pull(skb, ETH_HLEN);
}
}
skb_debug(skb);
@@ -560,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
spin_unlock_irqrestore(&rq->lock, flags);
skb_queue_walk_safe(&queue, skb, tmp) {
- struct net_device *dev = skb->dev;
+ struct net_device *dev;
+
+ br2684_push(atmvcc, skb);
+ dev = skb->dev;
dev->stats.rx_bytes -= skb->len;
dev->stats.rx_packets--;
-
- br2684_push(atmvcc, skb);
}
/* initialize netdev carrier state */
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 4bc8c67ecb1..852394072fa 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -37,7 +37,7 @@
#include <linux/uaccess.h>
#include <asm/byteorder.h> /* for htons etc. */
#include <asm/system.h> /* save/restore_flags */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "common.h"
#include "resources.h"
diff --git a/net/atm/common.c b/net/atm/common.c
index 22b963d06a1..14ff9fe3998 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/poll.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ba48daa68c1..215c9fad7cd 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1335,7 +1335,7 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/param.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/inetdevice.h>
#include <net/route.h>
diff --git a/net/atm/proc.c b/net/atm/proc.c
index be3afdefec5..0d020de8d23 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -27,7 +27,7 @@
#include <net/atmclip.h>
#include <linux/uaccess.h>
#include <linux/param.h> /* for HZ */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "resources.h"
#include "common.h" /* atm_proc_init prototype */
#include "signaling.h" /* to get sigd - ugly too */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 8add9b49991..117e0d16178 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -494,9 +494,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
BT_DBG("sk %p", sk);
add_wait_queue(sk_sleep(sk), &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
while (sk->sk_state != state) {
- set_current_state(TASK_INTERRUPTIBLE);
-
if (!timeo) {
err = -EINPROGRESS;
break;
@@ -510,12 +509,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
break;
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
return err;
}
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 8e6c06158f8..e7ee5314f39 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -155,6 +155,7 @@ struct bnep_session {
unsigned int role;
unsigned long state;
unsigned long flags;
+ atomic_t terminate;
struct task_struct *task;
struct ethhdr eh;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index ca39fcf010c..d9edfe8bf9d 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,9 +484,11 @@ static int bnep_session(void *arg)
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (!kthread_should_stop()) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&s->terminate))
+ break;
/* RX */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
@@ -504,7 +506,7 @@ static int bnep_session(void *arg)
schedule();
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req)
down_read(&bnep_session_sem);
s = __bnep_get_session(req->dst);
- if (s)
- kthread_stop(s->task);
- else
+ if (s) {
+ atomic_inc(&s->terminate);
+ wake_up_process(s->task);
+ } else
err = -ENOENT;
up_read(&bnep_session_sem);
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 8c100c9dae2..d4f5dff7c95 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &bnep_netdev_ops;
dev->watchdog_timeo = HZ * 2;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 040f67b1297..50f0d135eb8 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -386,7 +386,8 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
capi_ctr_down(ctrl);
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
}
static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index db43b54ac9a..c32638dddbf 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -81,6 +81,7 @@ struct cmtp_session {
char name[BTNAMSIZ];
+ atomic_t terminate;
struct task_struct *task;
wait_queue_head_t wait;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index c5b11af908b..521baa4fe83 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -292,9 +292,11 @@ static int cmtp_session(void *arg)
init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
- while (!kthread_should_stop()) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (atomic_read(&session->terminate))
+ break;
if (sk->sk_state != BT_CONNECTED)
break;
@@ -307,7 +309,7 @@ static int cmtp_session(void *arg)
schedule();
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
@@ -380,16 +382,17 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
if (!(session->flags & (1 << CMTP_LOOPBACK))) {
err = cmtp_attach_device(session);
- if (err < 0)
- goto detach;
+ if (err < 0) {
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
+ up_write(&cmtp_session_sem);
+ return err;
+ }
}
up_write(&cmtp_session_sem);
return 0;
-detach:
- cmtp_detach_device(session);
-
unlink:
__cmtp_unlink_session(session);
@@ -414,7 +417,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
skb_queue_purge(&session->transmit);
/* Stop session thread */
- kthread_stop(session->task);
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
} else
err = -ENOENT;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ec0bc3f60f2..56943add45c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1209,7 +1209,6 @@ static void hci_cmd_timer(unsigned long arg)
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
- clear_bit(HCI_RESET, &hdev->flags);
tasklet_schedule(&hdev->cmd_task);
}
@@ -1327,7 +1326,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
if (!entry) {
- return -ENOMEM;
+ err = -ENOMEM;
goto err;
}
@@ -2408,7 +2407,10 @@ static void hci_cmd_task(unsigned long arg)
if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
- mod_timer(&hdev->cmd_timer,
+ if (test_bit(HCI_RESET, &hdev->flags))
+ del_timer(&hdev->cmd_timer);
+ else
+ mod_timer(&hdev->cmd_timer,
jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
} else {
skb_queue_head(&hdev->cmd_q, skb);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 43b4c2deb7c..fb68f344c34 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -764,6 +764,7 @@ static int hidp_session(void *arg)
up_write(&hidp_session_sem);
+ kfree(session->rd_data);
kfree(session);
return 0;
}
@@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session,
err = input_register_device(input);
if (err < 0) {
- hci_conn_put_device(session->conn);
+ input_free_device(input);
+ session->input = NULL;
return err;
}
@@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
}
err = hid_add_device(session->hid);
- if (err < 0)
- goto err_add_device;
+ if (err < 0) {
+ atomic_inc(&session->terminate);
+ wake_up_process(session->task);
+ up_write(&hidp_session_sem);
+ return err;
+ }
if (session->input) {
hidp_send_ctrl_message(session,
@@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
up_write(&hidp_session_sem);
return 0;
-err_add_device:
- hid_destroy_device(session->hid);
- session->hid = NULL;
- atomic_inc(&session->terminate);
- wake_up_process(session->task);
-
unlink:
hidp_del_timer(session);
@@ -1090,7 +1090,6 @@ purge:
failed:
up_write(&hidp_session_sem);
- input_free_device(session->input);
kfree(session);
return err;
}
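[Editor's sketch] The bnep, cmtp, and hidp changes above all retire kthread_should_stop() in favour of a per-session atomic_t terminate flag plus wake_up_process(), so the session thread itself decides when to exit and can free its own resources without racing kthread_stop(). A minimal sketch of that shutdown handshake, using hypothetical names (my_session, my_session_thread) and the same primitives the patches use:

#include <linux/atomic.h>
#include <linux/sched.h>

struct my_session {
	atomic_t		terminate;	/* set to request shutdown */
	struct task_struct	*task;		/* the session kthread */
};

/* Session thread: sleeps until woken, exits once terminate is set. */
static int my_session_thread(void *arg)
{
	struct my_session *s = arg;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&s->terminate))
			break;
		/* ... drain receive/transmit queues here ... */
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	/* the thread performs its own cleanup before returning */
	return 0;
}

/* Requesting shutdown from another context: */
static void my_session_shutdown(struct my_session *s)
{
	atomic_inc(&s->terminate);
	wake_up_process(s->task);
}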
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 3204ba8a701..b3bdb482bbe 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1159,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk)
int timeo = HZ/5;
add_wait_queue(sk_sleep(sk), &wait);
- while ((chan->unacked_frames > 0 && chan->conn)) {
- set_current_state(TASK_INTERRUPTIBLE);
-
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (chan->unacked_frames > 0 && chan->conn) {
if (!timeo)
timeo = HZ/5;
@@ -1173,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk)
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
+ set_current_state(TASK_INTERRUPTIBLE);
err = sock_error(sk);
if (err)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5c36b3e8739..61f1f623091 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -235,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ nsk = bt_accept_dequeue(sk, newsock);
+ if (nsk)
+ break;
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
@@ -266,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
@@ -993,7 +993,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk->sk_destruct = l2cap_sock_destruct;
- sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
+ sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
sock_reset_flag(sk, SOCK_ZAPPED);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5759bb7054f..5ba3f6df665 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex);
#define rfcomm_lock() mutex_lock(&rfcomm_mutex)
#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex)
-static unsigned long rfcomm_event;
static LIST_HEAD(session_list);
@@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
- set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
wake_up_process(rfcomm_thread);
}
@@ -2038,19 +2036,18 @@ static int rfcomm_run(void *unused)
rfcomm_add_listener(BDADDR_ANY);
- while (!kthread_should_stop()) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) {
- /* No pending events. Let's sleep.
- * Incoming connections and data will wake us up. */
- schedule();
- }
- set_current_state(TASK_RUNNING);
+
+ if (kthread_should_stop())
+ break;
/* Process stuff */
- clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event);
rfcomm_process_sessions();
+
+ schedule();
}
+ __set_current_state(TASK_RUNNING);
rfcomm_kill_listener();
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8f01e6b11a7..482722bbc7a 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
lock_sock(sk);
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
@@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(nsk = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
+ nsk = bt_accept_dequeue(sk, newsock);
+ if (nsk)
+ break;
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
@@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 4c3621b5e0a..8270f05e3f1 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -564,30 +564,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
lock_sock(sk);
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
- goto done;
- }
-
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
- while (!(ch = bt_accept_dequeue(sk, newsock))) {
+ while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!timeo) {
- err = -EAGAIN;
+
+ if (sk->sk_state != BT_LISTEN) {
+ err = -EBADFD;
break;
}
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
+ ch = bt_accept_dequeue(sk, newsock);
+ if (ch)
+ break;
- if (sk->sk_state != BT_LISTEN) {
- err = -EBADFD;
+ if (!timeo) {
+ err = -EAGAIN;
break;
}
@@ -595,8 +591,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
err = sock_intr_errno(timeo);
break;
}
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
}
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
if (err)
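[Editor's sketch] The l2cap_sock, rfcomm and sco accept() rewrites above share one structure: the BT_LISTEN check moves inside the wait loop and the task state is set before every condition test, so a parent socket torn down while accept() sleeps yields -EBADFD instead of a missed wakeup. Condensed, the resulting loop looks like this (locking depth and error labels elided):

	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (sk->sk_state != BT_LISTEN) {	/* parent closed under us */
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);	/* pending connection? */
		if (nsk)
			break;

		if (!timeo) {				/* non-blocking or timed out */
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);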
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index e0dfbc151dd..68def3b7fb4 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -21,7 +21,7 @@
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/unaligned.h>
#include "br_private.h"
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 3176e2e13d9..e73815456ad 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -231,6 +231,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
int br_add_bridge(struct net *net, const char *name)
{
struct net_device *dev;
+ int res;
dev = alloc_netdev(sizeof(struct net_bridge), name,
br_dev_setup);
@@ -240,7 +241,10 @@ int br_add_bridge(struct net *net, const char *name)
dev_net_set(dev, net);
- return register_netdev(dev);
+ res = register_netdev(dev);
+ if (res)
+ free_netdev(dev);
+ return res;
}
int br_del_bridge(struct net *net, const char *name)
@@ -417,6 +421,7 @@ put_back:
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
+ bool changed_addr;
p = br_port_get_rtnl(dev);
if (!p || p->br != br)
@@ -425,9 +430,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
del_nbp(p);
spin_lock_bh(&br->lock);
- br_stp_recalculate_bridge_id(br);
+ changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
+ if (changed_addr)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
netdev_update_features(br->dev);
return 0;
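[Editor's sketch] The br_add_bridge() hunk above is an instance of the usual alloc_netdev()/register_netdev() error rule: if registration fails, the caller still owns the device and must free_netdev() it. A generic sketch, where my_priv and my_setup are placeholders rather than names from this tree:

	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct my_priv), name, my_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* registration failed: drop our allocation */
		return err;
	}
	return 0;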
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2d85ca7111d..995cbe0ac0b 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1456,7 +1456,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
{
struct sk_buff *skb2;
const struct ipv6hdr *ip6h;
- struct icmp6hdr *icmp6h;
+ u8 icmp6_type;
u8 nexthdr;
unsigned len;
int offset;
@@ -1502,9 +1502,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
__skb_pull(skb2, offset);
skb_reset_transport_header(skb2);
- icmp6h = icmp6_hdr(skb2);
+ icmp6_type = icmp6_hdr(skb2)->icmp6_type;
- switch (icmp6h->icmp6_type) {
+ switch (icmp6_type) {
case ICMPV6_MGM_QUERY:
case ICMPV6_MGM_REPORT:
case ICMPV6_MGM_REDUCTION:
@@ -1520,16 +1520,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
err = pskb_trim_rcsum(skb2, len);
if (err)
goto out;
+ err = -EINVAL;
}
+ ip6h = ipv6_hdr(skb2);
+
switch (skb2->ip_summed) {
case CHECKSUM_COMPLETE:
- if (!csum_fold(skb2->csum))
+ if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
+ IPPROTO_ICMPV6, skb2->csum))
break;
/*FALLTHROUGH*/
case CHECKSUM_NONE:
- skb2->csum = 0;
- if (skb_checksum_complete(skb2))
+ skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
+ &ip6h->daddr,
+ skb2->len,
+ IPPROTO_ICMPV6, 0));
+ if (__skb_checksum_complete(skb2))
goto out;
}
@@ -1537,7 +1544,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
BR_INPUT_SKB_CB(skb)->igmp = 1;
- switch (icmp6h->icmp6_type) {
+ switch (icmp6_type) {
case ICMPV6_MGM_REPORT:
{
struct mld_msg *mld;
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 6545ee9591d..a76b6213555 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
struct net_device *dev = ptr;
struct net_bridge_port *p;
struct net_bridge *br;
+ bool changed_addr;
int err;
/* register of bridge completed, add sysfs entries */
@@ -57,8 +58,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
- br_stp_recalculate_bridge_id(br);
+ changed_addr = br_stp_recalculate_bridge_id(br);
spin_unlock_bh(&br->lock);
+
+ if (changed_addr)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
+
break;
case NETDEV_CHANGE:
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 2b5ca1a0054..5864cc49136 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1198,7 +1198,8 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
if (table->check && table->check(newinfo, table->valid_hooks)) {
BUGPRINT("The table doesn't like its own initial data, lol\n");
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto free_chainstack;
}
table->private = newinfo;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 78b55f49de7..c340e2e0765 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -486,13 +486,10 @@ static void prepare_write_message(struct ceph_connection *con)
m = list_first_entry(&con->out_queue,
struct ceph_msg, list_head);
con->out_msg = m;
- if (test_bit(LOSSYTX, &con->state)) {
- list_del_init(&m->list_head);
- } else {
- /* put message on sent list */
- ceph_msg_get(m);
- list_move_tail(&m->list_head, &con->out_sent);
- }
+
+ /* put message on sent list */
+ ceph_msg_get(m);
+ list_move_tail(&m->list_head, &con->out_sent);
/*
* only assign outgoing seq # if we haven't sent this message
@@ -1399,6 +1396,7 @@ static void process_ack(struct ceph_connection *con)
break;
dout("got ack for seq %llu type %d at %p\n", seq,
le16_to_cpu(m->hdr.type), m);
+ m->ack_stamp = jiffies;
ceph_msg_remove(m);
}
prepare_read_tag(con);
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index d5f2d97ac05..1f4cb30a42c 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -7,27 +7,37 @@
#include <linux/ceph/msgpool.h>
-static void *alloc_fn(gfp_t gfp_mask, void *arg)
+static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
{
struct ceph_msgpool *pool = arg;
- void *p;
+ struct ceph_msg *msg;
- p = ceph_msg_new(0, pool->front_len, gfp_mask);
- if (!p)
- pr_err("msgpool %s alloc failed\n", pool->name);
- return p;
+ msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+ if (!msg) {
+ dout("msgpool_alloc %s failed\n", pool->name);
+ } else {
+ dout("msgpool_alloc %s %p\n", pool->name, msg);
+ msg->pool = pool;
+ }
+ return msg;
}
-static void free_fn(void *element, void *arg)
+static void msgpool_free(void *element, void *arg)
{
- ceph_msg_put(element);
+ struct ceph_msgpool *pool = arg;
+ struct ceph_msg *msg = element;
+
+ dout("msgpool_release %s %p\n", pool->name, msg);
+ msg->pool = NULL;
+ ceph_msg_put(msg);
}
int ceph_msgpool_init(struct ceph_msgpool *pool,
int front_len, int size, bool blocking, const char *name)
{
+ dout("msgpool %s init\n", name);
pool->front_len = front_len;
- pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+ pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
if (!pool->pool)
return -ENOMEM;
pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
void ceph_msgpool_destroy(struct ceph_msgpool *pool)
{
+ dout("msgpool %s destroy\n", pool->name);
mempool_destroy(pool->pool);
}
struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
int front_len)
{
+ struct ceph_msg *msg;
+
if (front_len > pool->front_len) {
- pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+ dout("msgpool_get %s need front %d, pool size is %d\n",
pool->name, front_len, pool->front_len);
WARN_ON(1);
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
return ceph_msg_new(0, front_len, GFP_NOFS);
}
- return mempool_alloc(pool->pool, GFP_NOFS);
+ msg = mempool_alloc(pool->pool, GFP_NOFS);
+ dout("msgpool_get %s %p\n", pool->name, msg);
+ return msg;
}
void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
{
+ dout("msgpool_put %s %p\n", pool->name, msg);
+
/* reset msg front_len; user may have changed it */
msg->front.iov_len = pool->front_len;
msg->hdr.front_len = cpu_to_le32(pool->front_len);
kref_init(&msg->kref); /* retake single ref */
+ mempool_free(msg, pool->pool);
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 7330c2757c0..16836a7df7a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
put_osd(osd);
}
+static void remove_all_osds(struct ceph_osd_client *osdc)
+{
+ dout("__remove_old_osds %p\n", osdc);
+ mutex_lock(&osdc->request_mutex);
+ while (!RB_EMPTY_ROOT(&osdc->osds)) {
+ struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+ struct ceph_osd, o_node);
+ __remove_osd(osdc, osd);
+ }
+ mutex_unlock(&osdc->request_mutex);
+}
+
static void __move_osd_to_lru(struct ceph_osd_client *osdc,
struct ceph_osd *osd)
{
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
list_del_init(&osd->o_osd_lru);
}
-static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
+static void remove_old_osds(struct ceph_osd_client *osdc)
{
struct ceph_osd *osd, *nosd;
dout("__remove_old_osds %p\n", osdc);
mutex_lock(&osdc->request_mutex);
list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
- if (!remove_all && time_before(jiffies, osd->lru_ttl))
+ if (time_before(jiffies, osd->lru_ttl))
break;
__remove_osd(osdc, osd);
}
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
struct rb_node *parent = NULL;
struct ceph_osd *osd = NULL;
+ dout("__insert_osd %p osd%d\n", new, new->o_osd);
while (*p) {
parent = *p;
osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1085,9 +1098,15 @@ static void handle_timeout(struct work_struct *work)
req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
r_req_lru_item);
+ /* hasn't been long enough since we sent it? */
if (time_before(jiffies, req->r_stamp + timeout))
break;
+ /* hasn't been long enough since it was acked? */
+ if (req->r_request->ack_stamp == 0 ||
+ time_before(jiffies, req->r_request->ack_stamp + timeout))
+ break;
+
BUG_ON(req == last_req && req->r_stamp == last_stamp);
last_req = req;
last_stamp = req->r_stamp;
@@ -1138,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)
dout("osds timeout\n");
down_read(&osdc->map_sem);
- remove_old_osds(osdc, 0);
+ remove_old_osds(osdc);
up_read(&osdc->map_sem);
schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1856,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = NULL;
}
- remove_old_osds(osdc, 1);
- WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
+ remove_all_osds(osdc);
mempool_destroy(osdc->req_mempool);
ceph_msgpool_destroy(&osdc->msgpool_op);
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
diff --git a/net/core/Makefile b/net/core/Makefile
index 8a04dd22cf7..0d357b1c4e5 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -3,7 +3,7 @@
#
obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
- gen_stats.o gen_estimator.o net_namespace.o
+ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 9444c5cb413..17d67b579be 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4497,10 +4497,10 @@ void __dev_set_rx_mode(struct net_device *dev)
*/
if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
__dev_set_promiscuity(dev, 1);
- dev->uc_promisc = 1;
+ dev->uc_promisc = true;
} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
__dev_set_promiscuity(dev, -1);
- dev->uc_promisc = 0;
+ dev->uc_promisc = false;
}
if (ops->ndo_set_multicast_list)
diff --git a/net/core/flow.c b/net/core/flow.c
index 990703b8863..bf32c33cad3 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -22,7 +22,7 @@
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/security.h>
struct flow_cache_entry {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8fab9b0bb20..1334d7e56f0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1319,11 +1319,15 @@ static void neigh_proxy_process(unsigned long arg)
if (tdif <= 0) {
struct net_device *dev = skb->dev;
+
__skb_unlink(skb, &tbl->proxy_queue);
- if (tbl->proxy_redo && netif_running(dev))
+ if (tbl->proxy_redo && netif_running(dev)) {
+ rcu_read_lock();
tbl->proxy_redo(skb);
- else
+ rcu_read_unlock();
+ } else {
kfree_skb(skb);
+ }
dev_put(dev);
} else if (!sched_next || tdif < sched_next)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index adf84dd8c7b..52622517e0d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -558,13 +558,14 @@ int __netpoll_rx(struct sk_buff *skb)
if (skb_shared(skb))
goto out;
- iph = (struct iphdr *)skb->data;
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
+ iph = (struct iphdr *)skb->data;
if (iph->ihl < 5 || iph->version != 4)
goto out;
if (!pskb_may_pull(skb, iph->ihl*4))
goto out;
+ iph = (struct iphdr *)skb->data;
if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
goto out;
@@ -579,6 +580,7 @@ int __netpoll_rx(struct sk_buff *skb)
if (pskb_trim_rcsum(skb, len))
goto out;
+ iph = (struct iphdr *)skb->data;
if (iph->protocol != IPPROTO_UDP)
goto out;
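[Editor's sketch] The __netpoll_rx() change above reloads the iph pointer after every pskb_may_pull()/pskb_trim_rcsum() call, because those helpers may reallocate skb->data and leave a previously cached header pointer dangling. The safe ordering is:

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	iph = (struct iphdr *)skb->data;	/* load only after the pull */

	if (!pskb_may_pull(skb, iph->ihl * 4))
		goto out;
	iph = (struct iphdr *)skb->data;	/* data may have been reallocated */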
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f76079cd750..e35a6fbb811 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1070,7 +1070,9 @@ static ssize_t pktgen_if_write(struct file *file,
len = num_arg(&user_buffer[i], 10, &value);
if (len < 0)
return len;
-
+ if ((value > 0) &&
+ (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+ return -ENOTSUPP;
i += len;
pkt_dev->clone_skb = value;
@@ -3555,7 +3557,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->min_pkt_size = ETH_ZLEN;
pkt_dev->max_pkt_size = ETH_ZLEN;
pkt_dev->nfrags = 0;
- pkt_dev->clone_skb = pg_clone_skb_d;
pkt_dev->delay = pg_delay_d;
pkt_dev->count = pg_count_d;
pkt_dev->sofar = 0;
@@ -3563,7 +3564,6 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->udp_src_max = 9;
pkt_dev->udp_dst_min = 9;
pkt_dev->udp_dst_max = 9;
-
pkt_dev->vlan_p = 0;
pkt_dev->vlan_cfi = 0;
pkt_dev->vlan_id = 0xffff;
@@ -3575,6 +3575,8 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
err = pktgen_setup_dev(pkt_dev, ifname);
if (err)
goto out1;
+ if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
+ pkt_dev->clone_skb = pg_clone_skb_d;
pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
&pktgen_if_fops, pkt_dev);
diff --git a/net/core/scm.c b/net/core/scm.c
index 4c1ef026d69..811b53fb330 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
goto error;
cred->uid = cred->euid = p->creds.uid;
- cred->gid = cred->egid = p->creds.uid;
+ cred->gid = cred->egid = p->creds.gid;
put_cred(p->cred);
p->cred = cred;
}
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
new file mode 100644
index 00000000000..45329d7c9dd
--- /dev/null
+++ b/net/core/secure_seq.c
@@ -0,0 +1,184 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cryptohash.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/random.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/string.h>
+
+#include <net/secure_seq.h>
+
+static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
+
+static int __init net_secret_init(void)
+{
+ get_random_bytes(net_secret, sizeof(net_secret));
+ return 0;
+}
+late_initcall(net_secret_init);
+
+static u32 seq_scale(u32 seq)
+{
+ /*
+ * As close as possible to RFC 793, which
+ * suggests using a 250 kHz clock.
+ * Further reading shows this assumes 2 Mb/s networks.
+ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+ * we also need to limit the resolution so that the u32 seq
+ * overlaps less than one time per MSL (2 minutes).
+ * Choosing a clock of 64 ns period is OK. (period of 274 s)
+ */
+ return seq + (ktime_to_ns(ktime_get_real()) >> 6);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 secret[MD5_MESSAGE_BYTES / 4];
+ u32 hash[MD5_DIGEST_WORDS];
+ u32 i;
+
+ memcpy(hash, saddr, 16);
+ for (i = 0; i < 4; i++)
+ secret[i] = net_secret[i] + daddr[i];
+ secret[4] = net_secret[4] +
+ (((__force u16)sport << 16) + (__force u16)dport);
+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+ secret[i] = net_secret[i];
+
+ md5_transform(hash, secret);
+
+ return seq_scale(hash[0]);
+}
+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+{
+ u32 secret[MD5_MESSAGE_BYTES / 4];
+ u32 hash[MD5_DIGEST_WORDS];
+ u32 i;
+
+ memcpy(hash, saddr, 16);
+ for (i = 0; i < 4; i++)
+ secret[i] = net_secret[i] + (__force u32) daddr[i];
+ secret[4] = net_secret[4] + (__force u32)dport;
+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+ secret[i] = net_secret[i];
+
+ md5_transform(hash, secret);
+
+ return hash[0];
+}
+#endif
+
+#ifdef CONFIG_INET
+__u32 secure_ip_id(__be32 daddr)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+
+ hash[0] = (__force __u32) daddr;
+ hash[1] = net_secret[13];
+ hash[2] = net_secret[14];
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ return hash[0];
+}
+
+__u32 secure_ipv6_id(const __be32 daddr[4])
+{
+ __u32 hash[4];
+
+ memcpy(hash, daddr, 16);
+ md5_transform(hash, net_secret);
+
+ return hash[0];
+}
+
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ return seq_scale(hash[0]);
+}
+
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = (__force u32)dport ^ net_secret[14];
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ return hash[0];
+}
+EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
+#endif
+
+#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 hash[MD5_DIGEST_WORDS];
+ u64 seq;
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = net_secret[15];
+
+ md5_transform(hash, net_secret);
+
+ seq = hash[0] | (((u64)hash[1]) << 32);
+ seq += ktime_to_ns(ktime_get_real());
+ seq &= (1ull << 48) - 1;
+
+ return seq;
+}
+EXPORT_SYMBOL(secure_dccp_sequence_number);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport)
+{
+ u32 secret[MD5_MESSAGE_BYTES / 4];
+ u32 hash[MD5_DIGEST_WORDS];
+ u64 seq;
+ u32 i;
+
+ memcpy(hash, saddr, 16);
+ for (i = 0; i < 4; i++)
+ secret[i] = net_secret[i] + daddr[i];
+ secret[4] = net_secret[4] +
+ (((__force u16)sport << 16) + (__force u16)dport);
+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
+ secret[i] = net_secret[i];
+
+ md5_transform(hash, secret);
+
+ seq = hash[0] | (((u64)hash[1]) << 32);
+ seq += ktime_to_ns(ktime_get_real());
+ seq &= (1ull << 48) - 1;
+
+ return seq;
+}
+EXPORT_SYMBOL(secure_dccpv6_sequence_number);
+#endif
+#endif
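[Editor's sketch] The new net/core/secure_seq.c gathers the MD5-keyed initial-sequence-number and ephemeral-port helpers behind <net/secure_seq.h>, which the protocol files below now include. A rough, hypothetical usage sketch (the wrapper name pick_isn is not from this tree; the secure_tcp_sequence_number() signature is taken from the new file):

	#include <net/secure_seq.h>

	/* Hypothetical helper: derive an initial TCP sequence number for a
	 * connection 4-tuple; addresses and ports are in network byte order. */
	static u32 pick_isn(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport)
	{
		return secure_tcp_sequence_number(saddr, daddr, sport, dport);
	}

The per-boot net_secret key keeps the hash unpredictable, while seq_scale() adds a 64 ns-period clock so results keep advancing; 2^32 x 64 ns is roughly 275 s, consistent with the "period of 274 s" noted in the file's comment.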
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2beda824636..27002dffe7e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1369,8 +1369,21 @@ pull_pages:
}
EXPORT_SYMBOL(__pskb_pull_tail);
-/* Copy some data bits from skb to kernel buffer. */
-
+/**
+ * skb_copy_bits - copy bits from skb to kernel buffer
+ * @skb: source skb
+ * @offset: offset in source
+ * @to: destination buffer
+ * @len: number of bytes to copy
+ *
+ * Copy the specified number of bytes from the source skb to the
+ * destination buffer.
+ *
+ * CAUTION ! :
+ * If its prototype is ever changed,
+ * check arch/{*}/net/{*}.S files,
+ * since it is called from BPF assembly code.
+ */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
int start = skb_headlen(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8c36adfd191..332639b56f4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -26,6 +26,7 @@
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
+#include <net/secure_seq.h>
#include "ackvec.h"
#include "ccid.h"
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8dc4348774a..b74f76117dc 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -29,6 +29,7 @@
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
+#include <net/secure_seq.h>
#include "dccp.h"
#include "ipv6.h"
@@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}
-static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
- __be16 sport, __be16 dport )
-{
- return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
-}
-
-static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
+static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 2bd8e53d777..9e885f180b6 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -30,7 +30,7 @@
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 0dc3fe61085..7f0eb087dc1 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -38,7 +38,7 @@
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <linux/jhash.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index cd0354e9bdb..a9a62f225a6 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -25,7 +25,7 @@
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index 09825711d58..67f691bd4ac 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -22,7 +22,7 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <net/sock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/flow.h>
#include <net/dn.h>
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 5cffb63f481..27997d35ebd 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -231,6 +231,7 @@ EXPORT_SYMBOL(eth_header_parse);
* eth_header_cache - fill cache entry from neighbour
* @neigh: source neighbour
* @hh: destination cache entry
+ * @type: Ethernet type field
* Create an Ethernet header template from the neighbour.
*/
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
@@ -339,6 +340,7 @@ void ether_setup(struct net_device *dev)
dev->addr_len = ETH_ALEN;
dev->tx_queue_len = 1000; /* Ethernet wants good queues */
dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+ dev->priv_flags = IFF_TX_SKB_SHARING;
memset(dev->broadcast, 0xFF, ETH_ALEN);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2b3c23c287c..2c2a98e402e 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -50,7 +50,7 @@
#include <net/tcp.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/unaligned.h>
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 37b3c188d8b..bc19bd06dd0 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1134,15 +1134,15 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev,
struct in_device *in_dev)
{
- struct in_ifaddr *ifa = in_dev->ifa_list;
-
- if (!ifa)
- return;
+ struct in_ifaddr *ifa;
- arp_send(ARPOP_REQUEST, ETH_P_ARP,
- ifa->ifa_local, dev,
- ifa->ifa_local, NULL,
- dev->dev_addr, NULL);
+ for (ifa = in_dev->ifa_list; ifa;
+ ifa = ifa->ifa_next) {
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ ifa->ifa_local, dev,
+ ifa->ifa_local, NULL,
+ dev->dev_addr, NULL);
+ }
}
/* Called only under RTNL semaphore */
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f1d27f6c935..d577199eabd 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
break;
for (i=0; i<nsrcs; i++) {
/* skip inactive filters */
- if (pmc->sfcount[MCAST_INCLUDE] ||
+ if (psf->sf_count[MCAST_INCLUDE] ||
pmc->sfcount[MCAST_EXCLUDE] !=
psf->sf_count[MCAST_EXCLUDE])
continue;
@@ -1718,7 +1718,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
pmc->sfcount[sfmode]--;
for (j=0; j<i; j++)
- (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
+ (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
struct ip_sf_list *psf;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 3c0369a3a66..984ec656b03 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -21,6 +21,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
+#include <net/secure_seq.h>
#include <net/ip.h>
/*
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index e38213817d0..86f13c67ea8 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -19,6 +19,7 @@
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>
+#include <net/secure_seq.h>
/*
* Theory of operations.
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ccaaa851ab4..8c6563361ab 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -122,6 +122,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
newskb->pkt_type = PACKET_LOOPBACK;
newskb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(newskb));
+ skb_dst_force(newskb);
netif_rx_ni(newskb);
return 0;
}
@@ -204,9 +205,15 @@ static inline int ip_finish_output2(struct sk_buff *skb)
skb = skb2;
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
- if (neigh)
- return neigh_output(neigh, skb);
+ if (neigh) {
+ int res = neigh_output(neigh, skb);
+
+ rcu_read_unlock();
+ return res;
+ }
+ rcu_read_unlock();
if (net_ratelimit())
printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ab0c9efd1ef..8905e92f896 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1067,7 +1067,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
*/
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+ char __user *optval, int __user *optlen, unsigned flags)
{
struct inet_sock *inet = inet_sk(sk);
int val;
@@ -1240,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
msg.msg_control = optval;
msg.msg_controllen = len;
- msg.msg_flags = 0;
+ msg.msg_flags = flags;
if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
struct in_pktinfo info;
@@ -1294,7 +1294,7 @@ int ip_getsockopt(struct sock *sk, int level,
{
int err;
- err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+ err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
@@ -1327,7 +1327,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
return compat_mc_getsockopt(sk, level, optname, optval, optlen,
ip_getsockopt);
- err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+ err = do_ip_getsockopt(sk, level, optname, optval, optlen,
+ MSG_CMSG_COMPAT);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 2e97e3ec1eb..929b27bdeb7 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
struct rtable *rt;
struct flowi4 fl4 = {};
__be32 saddr = iph->saddr;
- __u8 flags = 0;
+ __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
unsigned int hh_len;
- if (!skb->sk && addr_type != RTN_LOCAL) {
- if (addr_type == RTN_UNSPEC)
- addr_type = inet_addr_type(net, saddr);
- if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
- flags |= FLOWI_FLAG_ANYSRC;
- else
- saddr = 0;
- }
+ if (addr_type == RTN_UNSPEC)
+ addr_type = inet_addr_type(net, saddr);
+ if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+ flags |= FLOWI_FLAG_ANYSRC;
+ else
+ saddr = 0;
/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
* packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
@@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
fl4.flowi4_tos = RT_TOS(iph->tos);
fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
fl4.flowi4_mark = skb->mark;
- fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+ fl4.flowi4_flags = flags;
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return -1;
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 3e61faf23a9..f52d41ea069 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -12,6 +12,7 @@
#include <linux/ip.h>
#include <linux/netfilter.h>
+#include <net/secure_seq.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_rule.h>
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 08526786dc3..61714bd5292 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -38,7 +38,7 @@
*/
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -563,7 +563,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
- FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
+ inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+ daddr, saddr, 0, 0);
if (!inet->hdrincl) {
err = raw_probe_proto_opt(&fl4, msg);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1730689f560..075212e41b8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
#include <linux/sysctl.h>
#endif
#include <net/atmclip.h>
+#include <net/secure_seq.h>
#define RT_FL_TOS(oldflp4) \
((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
@@ -721,7 +722,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1,
{
return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
- (rt1->rt_iif ^ rt2->rt_iif)) == 0);
+ (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
}
static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
@@ -730,8 +731,8 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
(rt1->rt_mark ^ rt2->rt_mark) |
(rt1->rt_key_tos ^ rt2->rt_key_tos) |
- (rt1->rt_oif ^ rt2->rt_oif) |
- (rt1->rt_iif ^ rt2->rt_iif)) == 0;
+ (rt1->rt_route_iif ^ rt2->rt_route_iif) |
+ (rt1->rt_oif ^ rt2->rt_oif)) == 0;
}
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
@@ -1628,16 +1629,18 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
struct rtable *rt = (struct rtable *) dst;
__be32 orig_gw = rt->rt_gateway;
- struct neighbour *n;
+ struct neighbour *n, *old_n;
dst_confirm(&rt->dst);
- neigh_release(dst_get_neighbour(&rt->dst));
- dst_set_neighbour(&rt->dst, NULL);
-
rt->rt_gateway = peer->redirect_learned.a4;
- rt_bind_neighbour(rt);
- n = dst_get_neighbour(&rt->dst);
+
+ n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ old_n = xchg(&rt->dst._neighbour, n);
+ if (old_n)
+ neigh_release(old_n);
if (!n || !(n->nud_state & NUD_VALID)) {
if (n)
neigh_event_send(n, NULL);
@@ -2317,8 +2320,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth = rcu_dereference(rth->dst.rt_next)) {
if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
- (rth->rt_iif ^ iif) |
- rth->rt_oif |
+ (rth->rt_route_iif ^ iif) |
(rth->rt_key_tos ^ tos)) == 0 &&
rth->rt_mark == skb->mark &&
net_eq(dev_net(rth->dst.dev), net) &&
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 92bb9434b33..3bc5c8f7c71 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -276,7 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
int mss;
struct rtable *rt;
__u8 rcv_wscale;
- bool ecn_ok;
+ bool ecn_ok = false;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 955b8e65b69..1c12b8ec849 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -72,6 +72,7 @@
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
+#include <net/secure_seq.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a06c53c14d8..f012ebd87b4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -656,7 +656,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
* layer address of our nexhop router
*/
- if (dst_get_neighbour(&rt->dst) == NULL)
+ if (dst_get_neighbour_raw(&rt->dst) == NULL)
ifa->flags &= ~IFA_F_OPTIMISTIC;
ifa->idev = idev;
@@ -1481,6 +1481,8 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
+ if (ifp->prefix_len == 127) /* RFC 6164 */
+ return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
return;
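The addrconf_leave_anycast() hunk above skips the subnet-router anycast logic entirely on /127 links, as RFC 6164 requires. The user-space sketch below shows how such an anycast address is derived by masking the interface address down to its prefix, and why the /127 check has to come first; addr_prefix() and the other names are invented for the sketch, not the kernel helpers.

/* User-space sketch: keep the first prefix_len bits of the address and zero
 * the rest.  On /127 point-to-point links the result is a real host address,
 * so the caller bails out before computing it (RFC 6164). */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void addr_prefix(struct in6_addr *dst, const struct in6_addr *src,
                        int prefix_len)
{
    int full = prefix_len / 8, rem = prefix_len % 8;

    memset(dst, 0, sizeof(*dst));
    memcpy(dst->s6_addr, src->s6_addr, full);
    if (rem)
        dst->s6_addr[full] = src->s6_addr[full] & (0xff << (8 - rem));
}

int main(void)
{
    struct in6_addr addr, anycast;
    char buf[INET6_ADDRSTRLEN];
    int prefix_len = 64;

    inet_pton(AF_INET6, "2001:db8::1234", &addr);
    if (prefix_len == 127)          /* RFC 6164: no subnet anycast here */
        return 0;
    addr_prefix(&anycast, &addr, prefix_len);
    printf("subnet-router anycast: %s\n",
           inet_ntop(AF_INET6, &anycast, buf, sizeof(buf)));
    return 0;
}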
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 16560336eb7..9ef1831746e 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -33,6 +33,11 @@
#include <linux/errqueue.h>
#include <asm/uaccess.h>
+static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
+{
+ return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
+}
+
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
@@ -102,10 +107,12 @@ ipv4_connected:
ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
- if (ipv6_addr_any(&np->saddr))
+ if (ipv6_addr_any(&np->saddr) ||
+ ipv6_mapped_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- if (ipv6_addr_any(&np->rcv_saddr)) {
+ if (ipv6_addr_any(&np->rcv_saddr) ||
+ ipv6_mapped_addr_any(&np->rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
&np->rcv_saddr);
if (sk->sk_prot->rehash)
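The new ipv6_mapped_addr_any() helper above treats ::ffff:0.0.0.0 (an IPv4-mapped "any" address) the same as :: when deciding whether to re-derive the source address after an IPv4-mapped connect. A hedged user-space sketch of the same test follows; it relies on the standard IN6_IS_ADDR_V4MAPPED macro rather than the kernel helpers, and mapped_addr_any() is an invented name.

/* User-space sketch of the ipv6_mapped_addr_any() test: true only for
 * ::ffff:0.0.0.0, i.e. a v4-mapped address whose embedded IPv4 part is 0. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static int mapped_addr_any(const struct in6_addr *a)
{
    static const unsigned char zero4[4];

    return IN6_IS_ADDR_V4MAPPED(a) &&
           memcmp(&a->s6_addr[12], zero4, 4) == 0;
}

int main(void)
{
    struct in6_addr a;

    inet_pton(AF_INET6, "::ffff:0.0.0.0", &a);
    printf("::ffff:0.0.0.0   -> %d\n", mapped_addr_any(&a)); /* 1 */

    inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
    printf("::ffff:192.0.2.1 -> %d\n", mapped_addr_any(&a)); /* 0 */
    return 0;
}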
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b5319723370..73f1a00a96a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -20,6 +20,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
+#include <net/secure_seq.h>
#include <net/ip.h>
int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 54a4678955b..320d91d20ad 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1455,7 +1455,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
RT6_TRACE("aging clone %p\n", rt);
return -1;
} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
- (!(dst_get_neighbour(&rt->dst)->flags & NTF_ROUTER))) {
+ (!(dst_get_neighbour_raw(&rt->dst)->flags & NTF_ROUTER))) {
RT6_TRACE("purging route %p via non-router but gateway\n",
rt);
return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 32e5339db0c..4c882cf4e8a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -135,10 +135,15 @@ static int ip6_finish_output2(struct sk_buff *skb)
skb->len);
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
- if (neigh)
- return neigh_output(neigh, skb);
+ if (neigh) {
+ int res = neigh_output(neigh, skb);
+ rcu_read_unlock();
+ return res;
+ }
+ rcu_read_unlock();
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
@@ -975,12 +980,14 @@ static int ip6_dst_lookup_tail(struct sock *sk,
* dst entry and replace it instead with the
* dst entry of the nexthop router
*/
+ rcu_read_lock();
n = dst_get_neighbour(*dst);
if (n && !(n->nud_state & NUD_VALID)) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
int redirect;
+ rcu_read_unlock();
ifp = ipv6_get_ifaddr(net, &fl6->saddr,
(*dst)->dev, 1);
@@ -1000,6 +1007,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
if ((err = (*dst)->error))
goto out_err_release;
}
+ } else {
+ rcu_read_unlock();
}
#endif
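Both ip6_output.c hunks above wrap the dst_get_neighbour() access in rcu_read_lock()/rcu_read_unlock() so the neighbour cannot be freed while it is still being used. The fragment below is a minimal kernel-style sketch of that read-side pattern, not a standalone program and not the actual ip6_output.c code; struct peer, struct cache and peer_transmit() are hypothetical stand-ins for the neighbour and neigh_output().

/* Kernel-style sketch of the RCU read-side pattern: dereference the shared
 * pointer under rcu_read_lock() and finish using it before unlocking. */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

struct peer;

struct cache {
    struct peer __rcu *peer;
};

static int cache_transmit(struct cache *c, struct sk_buff *skb,
                          int (*peer_transmit)(struct peer *, struct sk_buff *))
{
    struct peer *p;
    int res;

    rcu_read_lock();
    p = rcu_dereference(c->peer);
    if (p) {
        res = peer_transmit(p, skb);    /* must finish under the lock */
        rcu_read_unlock();
        return res;
    }
    rcu_read_unlock();
    kfree_skb(skb);
    return -EINVAL;
}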
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 36c2842a86b..0bc98886c38 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <net/icmp.h>
#include <net/ip.h>
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 9cb191ecaba..147ede38ab4 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
}
static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
- char __user *optval, int __user *optlen)
+ char __user *optval, int __user *optlen, unsigned flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
int len;
@@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
msg.msg_control = optval;
msg.msg_controllen = len;
- msg.msg_flags = 0;
+ msg.msg_flags = flags;
lock_sock(sk);
skb = np->pktoptions;
@@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
if(level != SOL_IPV6)
return -ENOPROTOOPT;
- err = do_ipv6_getsockopt(sk, level, optname, optval, optlen);
+ err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
@@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
return compat_mc_getsockopt(sk, level, optname, optval, optlen,
ipv6_getsockopt);
- err = do_ipv6_getsockopt(sk, level, optname, optval, optlen);
+ err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
+ MSG_CMSG_COMPAT);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 3e6ebcdb477..ee7839f4d6e 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1059,7 +1059,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
break;
for (i=0; i<nsrcs; i++) {
/* skip inactive filters */
- if (pmc->mca_sfcount[MCAST_INCLUDE] ||
+ if (psf->sf_count[MCAST_INCLUDE] ||
pmc->mca_sfcount[MCAST_EXCLUDE] !=
psf->sf_count[MCAST_EXCLUDE])
continue;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e8987da0666..9e69eb0ec6d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,7 +364,7 @@ out:
#ifdef CONFIG_IPV6_ROUTER_PREF
static void rt6_probe(struct rt6_info *rt)
{
- struct neighbour *neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
+ struct neighbour *neigh;
/*
* Okay, this does not seem to be appropriate
* for now, however, we need to check if it
@@ -373,8 +373,10 @@ static void rt6_probe(struct rt6_info *rt)
* Router Reachability Probe MUST be rate-limited
* to no more than one per minute.
*/
+ rcu_read_lock();
+ neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
if (!neigh || (neigh->nud_state & NUD_VALID))
- return;
+ goto out;
read_lock_bh(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
@@ -387,8 +389,11 @@ static void rt6_probe(struct rt6_info *rt)
target = (struct in6_addr *)&neigh->primary_key;
addrconf_addr_solict_mult(target, &mcaddr);
ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
- } else
+ } else {
read_unlock_bh(&neigh->lock);
+ }
+out:
+ rcu_read_unlock();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
@@ -412,8 +417,11 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
static inline int rt6_check_neigh(struct rt6_info *rt)
{
- struct neighbour *neigh = dst_get_neighbour(&rt->dst);
+ struct neighbour *neigh;
int m;
+
+ rcu_read_lock();
+ neigh = dst_get_neighbour(&rt->dst);
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
m = 1;
@@ -430,6 +438,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
read_unlock_bh(&neigh->lock);
} else
m = 0;
+ rcu_read_unlock();
return m;
}
@@ -769,7 +778,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
rt->rt6i_dst.plen = 128;
rt->rt6i_flags |= RTF_CACHE;
rt->dst.flags |= DST_HOST;
- dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour(&ort->dst)));
+ dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst)));
}
return rt;
}
@@ -803,7 +812,7 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -1587,7 +1596,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
dst_confirm(&rt->dst);
/* Duplicate redirect: silently ignore. */
- if (neigh == dst_get_neighbour(&rt->dst))
+ if (neigh == dst_get_neighbour_raw(&rt->dst))
goto out;
nrt = ip6_rt_copy(rt, dest);
@@ -1682,7 +1691,7 @@ again:
1. It is connected route. Action: COW
2. It is gatewayed route or NONEXTHOP route. Action: clone it.
*/
- if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!dst_get_neighbour_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, daddr, saddr);
else
nrt = rt6_alloc_clone(rt, daddr);
@@ -2326,6 +2335,7 @@ static int rt6_fill_node(struct net *net,
struct nlmsghdr *nlh;
long expires;
u32 table;
+ struct neighbour *n;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2414,8 +2424,11 @@ static int rt6_fill_node(struct net *net,
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
goto nla_put_failure;
- if (dst_get_neighbour(&rt->dst))
- NLA_PUT(skb, RTA_GATEWAY, 16, &dst_get_neighbour(&rt->dst)->primary_key);
+ rcu_read_lock();
+ n = dst_get_neighbour(&rt->dst);
+ if (n)
+ NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
+ rcu_read_unlock();
if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
@@ -2608,12 +2621,14 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
#else
seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
+ rcu_read_lock();
n = dst_get_neighbour(&rt->dst);
if (n) {
seq_printf(m, "%pi6", n->primary_key);
} else {
seq_puts(m, "00000000000000000000000000000000");
}
+ rcu_read_unlock();
seq_printf(m, " %08x %08x %08x %08x %8s\n",
rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
rt->dst.__use, rt->rt6i_flags,
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 07bf1085458..00b15ac7a70 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -672,6 +672,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (skb->protocol != htons(ETH_P_IPV6))
goto tx_error;
+ if (tos == 1)
+ tos = ipv6_get_dsfield(iph6);
+
/* ISATAP (RFC4214) - must come before 6to4 */
if (dev->priv_flags & IFF_ISATAP) {
struct neighbour *neigh = NULL;
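In the ipip6_tunnel_xmit() hunk above, a configured ToS value of 1 means "inherit", so the patch copies the DS field from the inner IPv6 header before the outer IPv4 header is built. The user-space sketch below extracts that DS field from a struct ip6_hdr; it is an illustrative reimplementation, not the kernel's ipv6_get_dsfield(). The traffic class occupies bits 20-27 of the version/tclass/flow-label word.

/* User-space sketch: pull the 8-bit traffic class (DS field) out of the
 * first 32-bit word of an IPv6 header. */
#include <arpa/inet.h>
#include <netinet/ip6.h>
#include <stdio.h>
#include <string.h>

static unsigned int ipv6_dsfield(const struct ip6_hdr *hdr)
{
    return (ntohl(hdr->ip6_flow) >> 20) & 0xff;
}

int main(void)
{
    struct ip6_hdr hdr;

    memset(&hdr, 0, sizeof(hdr));
    /* version 6, traffic class 0xb8 (EF), flow label 0 */
    hdr.ip6_flow = htonl((6u << 28) | (0xb8u << 20));
    printf("inner DS field: 0x%02x\n", ipv6_dsfield(&hdr)); /* 0xb8 */
    return 0;
}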
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 89d5bf80622..ac838965ff3 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
int mss;
struct dst_entry *dst;
__u8 rcv_wscale;
- bool ecn_ok;
+ bool ecn_ok = false;
if (!sysctl_tcp_syncookies || !th->ack || th->rst)
goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 78aa53492b3..d1fb63f4aeb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -61,6 +61,7 @@
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
+#include <net/secure_seq.h>
#include <asm/uaccess.h>
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f2b713847b4..075a3808aa4 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -51,7 +51,7 @@
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ed8a2335442..ad4ac2601a5 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -55,7 +55,7 @@
#include <net/protocol.h>
#include <asm/byteorder.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "l2tp_core.h"
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index a8193f52c13..d2726a74597 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = {
static void l2tp_eth_dev_setup(struct net_device *dev)
{
ether_setup(dev);
-
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &l2tp_eth_netdev_ops;
dev->destructor = free_netdev;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 39a21d0c61c..f42cd091596 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -97,7 +97,7 @@
#include <net/xfrm.h>
#include <asm/byteorder.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "l2tp_core.h"
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index cd5fb40d3fd..556e7e6ddf0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
static void ieee80211_if_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->netdev_ops = &ieee80211_dataif_ops;
dev->destructor = free_netdev;
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 866f269183c..acb44230b25 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1012,7 +1012,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
cancel_work_sync(&local->reconfig_filter);
ieee80211_clear_tx_pending(local);
- sta_info_stop(local);
rate_control_deinitialize(local);
if (skb_queue_len(&local->skb_queue) ||
@@ -1024,6 +1023,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
+ sta_info_stop(local);
ieee80211_wep_free(local);
ieee80211_led_exit(local);
kfree(local->int_scan_req);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index be43fd805bd..2b771dc708a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3771,6 +3771,7 @@ err_sock:
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
+ unregister_netdevice_notifier(&ip_vs_dst_notifier);
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5b466cd1272..84d0fd47636 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -312,6 +312,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
}
break;
case NF_STOLEN:
+ break;
default:
kfree_skb(skb);
}
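The nf_reinject() hunk above adds a missing break so NF_STOLEN verdicts no longer fall through into the default kfree_skb(): the packet has already been taken over by the stealer and must not be freed again. A small user-space illustration of that fallthrough hazard and its fix follows; the verdict names and reinject() are invented for the sketch.

/* Without the break, VERDICT_STOLEN falls through to the default branch and
 * the buffer is freed even though its new owner still holds it (a double
 * free in the kernel case). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum verdict { VERDICT_ACCEPT, VERDICT_STOLEN, VERDICT_DROP };

static void reinject(char *buf, enum verdict v)
{
    switch (v) {
    case VERDICT_ACCEPT:
        printf("delivered: %s\n", buf);
        break;
    case VERDICT_STOLEN:
        /* the new owner is responsible for buf from here on */
        break;          /* without this break, buf would be freed below */
    default:
        free(buf);
    }
}

int main(void)
{
    char *buf = strdup("packet");

    reinject(buf, VERDICT_STOLEN);
    free(buf);          /* the real owner releases it exactly once */
    return 0;
}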
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 2e7ccbb43dd..2d8158acf6f 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -33,7 +33,7 @@
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nfnetlink_log.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 49132bddd73..00bd475eab4 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -31,7 +31,7 @@
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
diff --git a/net/netlabel/Makefile b/net/netlabel/Makefile
index ea750e9df65..d2732fc952e 100644
--- a/net/netlabel/Makefile
+++ b/net/netlabel/Makefile
@@ -1,8 +1,6 @@
#
# Makefile for the NetLabel subsystem.
#
-# Feb 9, 2006, Paul Moore <paul.moore@hp.com>
-#
# base objects
obj-y := netlabel_user.o netlabel_kapi.o
diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c
index c0519139679..96b749dacc3 100644
--- a/net/netlabel/netlabel_addrlist.c
+++ b/net/netlabel/netlabel_addrlist.c
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 2b9644e19de..fdbc1d2c735 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index bae5756b162..6bf878335d9 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -39,7 +39,7 @@
#include <net/genetlink.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_user.h"
#include "netlabel_cipso_v4.h"
diff --git a/net/netlabel/netlabel_cipso_v4.h b/net/netlabel/netlabel_cipso_v4.h
index af7f3355103..d24d774bfd6 100644
--- a/net/netlabel/netlabel_cipso_v4.h
+++ b/net/netlabel/netlabel_cipso_v4.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 2aa975e5452..7d8083cde34 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 0261dda3f2d..bfcc0f7024c 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -6,7 +6,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 1b83e0009d8..9c24de10a65 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -5,7 +5,7 @@
* system manages static and dynamic label mappings for network protocols such
* as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -39,7 +39,7 @@
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <asm/bug.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_domainhash.h"
#include "netlabel_unlabeled.h"
@@ -341,11 +341,11 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
- return -ENOMEM;
+ goto out_entry;
if (domain != NULL) {
entry->domain = kstrdup(domain, GFP_ATOMIC);
if (entry->domain == NULL)
- goto cfg_cipsov4_map_add_failure;
+ goto out_domain;
}
if (addr == NULL && mask == NULL) {
@@ -354,13 +354,13 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
- goto cfg_cipsov4_map_add_failure;
+ goto out_addrmap;
INIT_LIST_HEAD(&addrmap->list4);
INIT_LIST_HEAD(&addrmap->list6);
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
- goto cfg_cipsov4_map_add_failure;
+ goto out_addrinfo;
addrinfo->type_def.cipsov4 = doi_def;
addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->list.addr = addr->s_addr & mask->s_addr;
@@ -374,7 +374,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
entry->type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
- goto cfg_cipsov4_map_add_failure;
+ goto out_addrmap;
}
ret_val = netlbl_domhsh_add(entry, audit_info);
@@ -384,11 +384,15 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
return 0;
cfg_cipsov4_map_add_failure:
- cipso_v4_doi_putdef(doi_def);
+ kfree(addrinfo);
+out_addrinfo:
+ kfree(addrmap);
+out_addrmap:
kfree(entry->domain);
+out_domain:
kfree(entry);
- kfree(addrmap);
- kfree(addrinfo);
+out_entry:
+ cipso_v4_doi_putdef(doi_def);
return ret_val;
}
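The netlbl_cfg_cipsov4_map_add() rework above replaces a single catch-all failure label with a chain of labels, so each exit path frees exactly what has been allocated so far, in reverse order of allocation. A compact user-space sketch of that unwind idiom follows; make_thing() and the label names are invented for the illustration.

/* Sketch of reverse-order goto unwinding: each allocation gets its own
 * label, and a failure jumps to the label that frees everything allocated
 * before it. */
#include <stdlib.h>
#include <string.h>

struct thing {
    char *name;
    int *data;
};

static struct thing *make_thing(const char *name, size_t n)
{
    struct thing *t;

    t = calloc(1, sizeof(*t));
    if (!t)
        goto out;
    t->name = strdup(name);
    if (!t->name)
        goto out_thing;
    t->data = calloc(n, sizeof(*t->data));
    if (!t->data)
        goto out_name;
    return t;

out_name:
    free(t->name);
out_thing:
    free(t);
out:
    return NULL;
}

int main(void)
{
    struct thing *t = make_thing("example", 16);

    if (t) {
        free(t->data);
        free(t->name);
        free(t);
    }
    return 0;
}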
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 4f251b19fbc..bfa55586977 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -42,7 +42,7 @@
#include <net/ipv6.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_domainhash.h"
#include "netlabel_user.h"
diff --git a/net/netlabel/netlabel_mgmt.h b/net/netlabel/netlabel_mgmt.h
index 0a25838bcf4..5a9f31ce579 100644
--- a/net/netlabel/netlabel_mgmt.h
+++ b/net/netlabel/netlabel_mgmt.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -32,7 +32,7 @@
#define _NETLABEL_MGMT_H
#include <net/netlabel.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/*
* The following NetLabel payloads are supported by the management interface.
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 9a290ef5c17..e6e823656f9 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -5,7 +5,7 @@
* NetLabel system. The NetLabel system manages static and dynamic label
* mappings for network protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -52,7 +52,7 @@
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <asm/bug.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "netlabel_user.h"
#include "netlabel_addrlist.h"
diff --git a/net/netlabel/netlabel_unlabeled.h b/net/netlabel/netlabel_unlabeled.h
index 0bc8dc3f9e3..700af49022a 100644
--- a/net/netlabel/netlabel_unlabeled.h
+++ b/net/netlabel/netlabel_unlabeled.h
@@ -5,7 +5,7 @@
* NetLabel system. The NetLabel system manages static and dynamic label
* mappings for network protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index a3fd75ac3fa..9fae63f1029 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index f4fc4c9ad56..81969785e27 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -5,7 +5,7 @@
* NetLabel system manages static and dynamic label mappings for network
* protocols such as CIPSO and RIPSO.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 102fc212cd6..e051398fdf6 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -196,8 +196,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
skb2->skb_iif = skb->dev->ifindex;
skb2->dev = dev;
- dev_queue_xmit(skb2);
- err = 0;
+ err = dev_queue_xmit(skb2);
out:
if (err) {
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2a318f2dc3e..b5d56a22b1d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
for (prio = 0; prio < q->bands; prio++) {
struct Qdisc *qdisc = q->queues[prio];
- struct sk_buff *skb = qdisc->dequeue(qdisc);
+ struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 4536ee64383..4f5510e2bd6 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -410,7 +410,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
- return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+ if (qlen != slot->qlen)
+ return NET_XMIT_CN;
+
+ /* As we dropped a packet, better let upper stack know this */
+ qdisc_tree_decrease_qlen(sch, 1);
+ return NET_XMIT_SUCCESS;
}
static struct sk_buff *
diff --git a/net/socket.c b/net/socket.c
index 02dc82db3d2..ffe92ca32f2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -467,7 +467,7 @@ static struct socket *sock_alloc(void)
struct inode *inode;
struct socket *sock;
- inode = new_inode(sock_mnt->mnt_sb);
+ inode = new_inode_pseudo(sock_mnt->mnt_sb);
if (!inode)
return NULL;
@@ -580,7 +580,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
}
EXPORT_SYMBOL(sock_sendmsg);
-int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
+static int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
{
struct kiocb iocb;
struct sock_iocb siocb;
@@ -1871,8 +1871,14 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
+struct used_address {
+ struct sockaddr_storage name;
+ unsigned int name_len;
+};
+
static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
- struct msghdr *msg_sys, unsigned flags, int nosec)
+ struct msghdr *msg_sys, unsigned flags,
+ struct used_address *used_address)
{
struct compat_msghdr __user *msg_compat =
(struct compat_msghdr __user *)msg;
@@ -1953,8 +1959,30 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
if (sock->file->f_flags & O_NONBLOCK)
msg_sys->msg_flags |= MSG_DONTWAIT;
- err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys,
- total_len);
+ /*
+ * If this is sendmmsg() and current destination address is same as
+ * previously succeeded address, omit asking LSM's decision.
+ * used_address->name_len is initialized to UINT_MAX so that the first
+ * destination address never matches.
+ */
+ if (used_address && msg_sys->msg_name &&
+ used_address->name_len == msg_sys->msg_namelen &&
+ !memcmp(&used_address->name, msg_sys->msg_name,
+ used_address->name_len)) {
+ err = sock_sendmsg_nosec(sock, msg_sys, total_len);
+ goto out_freectl;
+ }
+ err = sock_sendmsg(sock, msg_sys, total_len);
+ /*
+ * If this is sendmmsg() and sending to current destination address was
+ * successful, remember it.
+ */
+ if (used_address && err >= 0) {
+ used_address->name_len = msg_sys->msg_namelen;
+ if (msg_sys->msg_name)
+ memcpy(&used_address->name, msg_sys->msg_name,
+ used_address->name_len);
+ }
out_freectl:
if (ctl_buf != ctl)
@@ -1979,7 +2007,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
if (!sock)
goto out;
- err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0);
+ err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
fput_light(sock->file, fput_needed);
out:
@@ -1998,6 +2026,10 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
struct mmsghdr __user *entry;
struct compat_mmsghdr __user *compat_entry;
struct msghdr msg_sys;
+ struct used_address used_address;
+
+ if (vlen > UIO_MAXIOV)
+ vlen = UIO_MAXIOV;
datagrams = 0;
@@ -2005,27 +2037,22 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
if (!sock)
return err;
- err = sock_error(sock->sk);
- if (err)
- goto out_put;
-
+ used_address.name_len = UINT_MAX;
entry = mmsg;
compat_entry = (struct compat_mmsghdr __user *)mmsg;
+ err = 0;
while (datagrams < vlen) {
- /*
- * No need to ask LSM for more than the first datagram.
- */
if (MSG_CMSG_COMPAT & flags) {
err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
- &msg_sys, flags, datagrams);
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = __put_user(err, &compat_entry->msg_len);
++compat_entry;
} else {
err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
- &msg_sys, flags, datagrams);
+ &msg_sys, flags, &used_address);
if (err < 0)
break;
err = put_user(err, &entry->msg_len);
@@ -2037,29 +2064,11 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
++datagrams;
}
-out_put:
fput_light(sock->file, fput_needed);
- if (err == 0)
- return datagrams;
-
- if (datagrams != 0) {
- /*
- * We may send less entries than requested (vlen) if the
- * sock is non blocking...
- */
- if (err != -EAGAIN) {
- /*
- * ... or if sendmsg returns an error after we
- * send some datagrams, where we record the
- * error to return on the next call or if the
- * app asks about it using getsockopt(SO_ERROR).
- */
- sock->sk->sk_err = -err;
- }
-
+ /* We only return an error if no datagrams were able to be sent */
+ if (datagrams != 0)
return datagrams;
- }
return err;
}
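The __sys_sendmsg()/__sys_sendmmsg() changes above cache the destination address of the last datagram that passed the security check, so later datagrams in the same sendmmsg() batch to an identical address can skip the relatively expensive LSM call; the cached length starts at UINT_MAX so the first datagram never matches. The batch size is also clamped to UIO_MAXIOV, and an error is now reported only when no datagram at all could be sent. Below is a user-space sketch of the compare-and-cache logic; expensive_check(), send_one() and the other names are invented, and the sketch records the address after the check rather than after a successful send.

/* Remember the last destination that passed the expensive check and skip
 * the check while the destination stays the same. */
#include <limits.h>
#include <stdio.h>
#include <string.h>

struct used_address {
    char name[64];
    unsigned int name_len;
};

static int expensive_check(const char *dst)
{
    printf("checking %s\n", dst);
    return 0;                       /* 0 == allowed */
}

static int send_one(const char *dst, struct used_address *used)
{
    size_t len = strlen(dst) + 1;

    if (used->name_len != len || memcmp(used->name, dst, len) != 0) {
        if (expensive_check(dst))
            return -1;
        memcpy(used->name, dst, len);           /* remember on success */
        used->name_len = (unsigned int)len;
    }
    printf("sent to %s\n", dst);
    return 0;
}

int main(void)
{
    /* UINT_MAX guarantees the first destination never matches the cache */
    struct used_address used = { .name_len = UINT_MAX };
    const char *batch[] = { "10.0.0.1", "10.0.0.1", "10.0.0.2" };

    for (size_t i = 0; i < sizeof(batch) / sizeof(batch[0]); i++)
        send_one(batch[i], &used);
    return 0;
}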
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index b2198e65d8b..ffd243d0918 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -4,6 +4,10 @@ config SUNRPC
config SUNRPC_GSS
tristate
+config SUNRPC_BACKCHANNEL
+ bool
+ depends on SUNRPC
+
config SUNRPC_XPRT_RDMA
tristate
depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 9d2fca5ad14..8209a0411bc 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,6 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
addr.o rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
svc_xprt.o
-sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
+sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o
sunrpc-$(CONFIG_PROC_FS) += stats.o
sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index cf06af3b63c..91eaa26e4c4 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -29,8 +29,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define RPCDBG_FACILITY RPCDBG_TRANS
#endif
-#if defined(CONFIG_NFS_V4_1)
-
/*
* Helper routines that track the number of preallocation elements
* on the transport.
@@ -174,7 +172,7 @@ out_free:
dprintk("RPC: setup backchannel transport failed\n");
return -1;
}
-EXPORT_SYMBOL(xprt_setup_backchannel);
+EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
/*
* Destroys the backchannel preallocated structures.
@@ -204,7 +202,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
dprintk("RPC: backchannel list empty= %s\n",
list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
-EXPORT_SYMBOL(xprt_destroy_backchannel);
+EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
/*
* One or more rpc_rqst structure have been preallocated during the
@@ -279,4 +277,3 @@ void xprt_free_bc_request(struct rpc_rqst *req)
spin_unlock_bh(&xprt->bc_pa_lock);
}
-#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 1dd1a689000..0b2eb388cbd 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -27,8 +27,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* reply over an existing open connection previously established by the client.
*/
-#if defined(CONFIG_NFS_V4_1)
-
#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
@@ -63,4 +61,3 @@ int bc_send(struct rpc_rqst *req)
return ret;
}
-#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c50818f0473..c5347d29cfb 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -64,9 +64,9 @@ static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_transmit(struct rpc_task *task);
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
@@ -715,7 +715,7 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
}
EXPORT_SYMBOL_GPL(rpc_call_async);
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
* rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
* rpc_execute against it
@@ -758,7 +758,7 @@ out:
dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
return task;
}
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
void
rpc_call_start(struct rpc_task *task)
@@ -1361,7 +1361,7 @@ call_transmit_status(struct rpc_task *task)
}
}
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
* 5b. Send the backchannel RPC reply. On error, drop the reply. In
* addition, disconnect on connectivity errors.
@@ -1425,7 +1425,7 @@ call_bc_transmit(struct rpc_task *task)
}
rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
* 6. Sort out the RPC call status
@@ -1550,8 +1550,7 @@ call_decode(struct rpc_task *task)
kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
- dprintk("RPC: %5u call_decode (status %d)\n",
- task->tk_pid, task->tk_status);
+ dprint_status(task);
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
if (clnt->cl_chatty)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4814e246a87..d12ffa54581 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -97,14 +97,16 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
/*
* Add new request to a priority queue.
*/
-static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ unsigned char queue_priority)
{
struct list_head *q;
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
- q = &queue->tasks[task->tk_priority];
- if (unlikely(task->tk_priority > queue->maxpriority))
+ q = &queue->tasks[queue_priority];
+ if (unlikely(queue_priority > queue->maxpriority))
q = &queue->tasks[queue->maxpriority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
@@ -123,12 +125,14 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r
* improve overall performance.
* Everyone else gets appended to the queue to ensure proper FIFO behavior.
*/
-static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
+static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ unsigned char queue_priority)
{
BUG_ON (RPC_IS_QUEUED(task));
if (RPC_IS_PRIORITY(queue))
- __rpc_add_wait_queue_priority(queue, task);
+ __rpc_add_wait_queue_priority(queue, task, queue_priority);
else if (RPC_IS_SWAPPER(task))
list_add(&task->u.tk_wait.list, &queue->tasks[0]);
else
@@ -311,13 +315,15 @@ static void rpc_make_runnable(struct rpc_task *task)
* NB: An RPC task will only receive interrupt-driven events as long
* as it's on a wait queue.
*/
-static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
- rpc_action action)
+static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
+ struct rpc_task *task,
+ rpc_action action,
+ unsigned char queue_priority)
{
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
- __rpc_add_wait_queue(q, task);
+ __rpc_add_wait_queue(q, task, queue_priority);
BUG_ON(task->tk_callback != NULL);
task->tk_callback = action;
@@ -334,11 +340,25 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
- __rpc_sleep_on(q, task, action);
+ __rpc_sleep_on_priority(q, task, action, task->tk_priority);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
+void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
+ rpc_action action, int priority)
+{
+ /* We shouldn't ever put an inactive task to sleep */
+ BUG_ON(!RPC_IS_ACTIVATED(task));
+
+ /*
+ * Protect the queue operations.
+ */
+ spin_lock_bh(&q->lock);
+ __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
+ spin_unlock_bh(&q->lock);
+}
+
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
* @queue: wait queue
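The sched.c changes above let rpc_sleep_on_priority() queue a task at an explicit priority rather than the task's own tk_priority, with the requested level clamped to the queue's maximum before it indexes the per-priority list array. The user-space sketch below shows that clamped, array-of-lists insertion; struct prio_queue, enqueue() and the constants are invented for the illustration.

/* Sketch of a priority wait queue: one list per priority level, with the
 * requested level clamped to the queue's maximum before use. */
#include <stdio.h>

#define NPRIO 4

struct node {
    int id;
    struct node *next;
};

struct prio_queue {
    unsigned int maxpriority;       /* highest valid index, < NPRIO */
    struct node *heads[NPRIO];
};

static void enqueue(struct prio_queue *q, struct node *n, unsigned int prio)
{
    if (prio > q->maxpriority)      /* clamp out-of-range requests */
        prio = q->maxpriority;
    n->next = q->heads[prio];
    q->heads[prio] = n;
}

int main(void)
{
    struct prio_queue q = { .maxpriority = 2 };
    struct node a = { .id = 1 }, b = { .id = 2 };

    enqueue(&q, &a, 0);
    enqueue(&q, &b, 7);             /* clamped to priority 2 */
    printf("priority 2 head: %d\n", q.heads[2]->id);
    return 0;
}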
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 2b90292e950..6a69a1131fb 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1252,7 +1252,7 @@ svc_process(struct svc_rqst *rqstp)
}
}
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
* Process a backchannel RPC request that arrived over an existing
* outbound connection
@@ -1300,8 +1300,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
return 0;
}
}
-EXPORT_SYMBOL(bc_svc_process);
-#endif /* CONFIG_NFS_V4_1 */
+EXPORT_SYMBOL_GPL(bc_svc_process);
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
* Return (transport-specific) limit on the rpc payload.
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f2cb5b881de..767d494de7a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -68,12 +68,12 @@ static void svc_sock_free(struct svc_xprt *);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
struct net *, struct sockaddr *,
int, int);
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
struct net *, struct sockaddr *,
int, int);
static void svc_bc_sock_free(struct svc_xprt *xprt);
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
@@ -1243,7 +1243,7 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
struct net *, struct sockaddr *,
int, int);
@@ -1284,7 +1284,7 @@ static void svc_cleanup_bc_xprt_sock(void)
{
svc_unreg_xprt_class(&svc_tcp_bc_class);
}
-#else /* CONFIG_NFS_V4_1 */
+#else /* CONFIG_SUNRPC_BACKCHANNEL */
static void svc_init_bc_xprt_sock(void)
{
}
@@ -1292,7 +1292,7 @@ static void svc_init_bc_xprt_sock(void)
static void svc_cleanup_bc_xprt_sock(void)
{
}
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
static struct svc_xprt_ops svc_tcp_ops = {
.xpo_create = svc_tcp_create,
@@ -1623,7 +1623,7 @@ static void svc_sock_free(struct svc_xprt *xprt)
kfree(svsk);
}
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
* Create a back channel svc_xprt which shares the fore channel socket.
*/
@@ -1662,4 +1662,4 @@ static void svc_bc_sock_free(struct svc_xprt *xprt)
if (xprt)
kfree(container_of(xprt, struct svc_sock, sk_xprt));
}
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index f008c14ad34..277ebd4bf09 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -126,7 +126,7 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
kaddr[buf->page_base + len] = '\0';
kunmap_atomic(kaddr, KM_USER0);
}
-EXPORT_SYMBOL(xdr_terminate_string);
+EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index ce5eb68a966..f4385e45a5f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,6 +62,7 @@
/*
* Local functions
*/
+static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
@@ -186,15 +187,16 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
/**
* xprt_reserve_xprt - serialize write access to transports
* @task: task that is requesting access to the transport
+ * @xprt: pointer to the target transport
*
* This prevents mixing the payload of separate requests, and prevents
* transport connects from colliding with writes. No congestion control
* is provided.
*/
-int xprt_reserve_xprt(struct rpc_task *task)
+int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_xprt *xprt = req->rq_xprt;
+ int priority;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
@@ -202,8 +204,10 @@ int xprt_reserve_xprt(struct rpc_task *task)
goto out_sleep;
}
xprt->snd_task = task;
- req->rq_bytes_sent = 0;
- req->rq_ntrans++;
+ if (req != NULL) {
+ req->rq_bytes_sent = 0;
+ req->rq_ntrans++;
+ }
return 1;
@@ -212,10 +216,13 @@ out_sleep:
task->tk_pid, xprt);
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
- if (req->rq_ntrans)
- rpc_sleep_on(&xprt->resend, task, NULL);
+ if (req == NULL)
+ priority = RPC_PRIORITY_LOW;
+ else if (!req->rq_ntrans)
+ priority = RPC_PRIORITY_NORMAL;
else
- rpc_sleep_on(&xprt->sending, task, NULL);
+ priority = RPC_PRIORITY_HIGH;
+ rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -239,22 +246,24 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
* integrated into the decision of whether a request is allowed to be
* woken up and given access to the transport.
*/
-int xprt_reserve_xprt_cong(struct rpc_task *task)
+int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req = task->tk_rqstp;
+ int priority;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
return 1;
goto out_sleep;
}
+ if (req == NULL) {
+ xprt->snd_task = task;
+ return 1;
+ }
if (__xprt_get_cong(xprt, task)) {
xprt->snd_task = task;
- if (req) {
- req->rq_bytes_sent = 0;
- req->rq_ntrans++;
- }
+ req->rq_bytes_sent = 0;
+ req->rq_ntrans++;
return 1;
}
xprt_clear_locked(xprt);
@@ -262,10 +271,13 @@ out_sleep:
dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
- if (req && req->rq_ntrans)
- rpc_sleep_on(&xprt->resend, task, NULL);
+ if (req == NULL)
+ priority = RPC_PRIORITY_LOW;
+ else if (!req->rq_ntrans)
+ priority = RPC_PRIORITY_NORMAL;
else
- rpc_sleep_on(&xprt->sending, task, NULL);
+ priority = RPC_PRIORITY_HIGH;
+ rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -275,7 +287,7 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
int retval;
spin_lock_bh(&xprt->transport_lock);
- retval = xprt->ops->reserve_xprt(task);
+ retval = xprt->ops->reserve_xprt(xprt, task);
spin_unlock_bh(&xprt->transport_lock);
return retval;
}
@@ -288,12 +300,9 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
return;
- task = rpc_wake_up_next(&xprt->resend);
- if (!task) {
- task = rpc_wake_up_next(&xprt->sending);
- if (!task)
- goto out_unlock;
- }
+ task = rpc_wake_up_next(&xprt->sending);
+ if (task == NULL)
+ goto out_unlock;
req = task->tk_rqstp;
xprt->snd_task = task;
@@ -310,24 +319,25 @@ out_unlock:
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
struct rpc_task *task;
+ struct rpc_rqst *req;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
return;
if (RPCXPRT_CONGESTED(xprt))
goto out_unlock;
- task = rpc_wake_up_next(&xprt->resend);
- if (!task) {
- task = rpc_wake_up_next(&xprt->sending);
- if (!task)
- goto out_unlock;
+ task = rpc_wake_up_next(&xprt->sending);
+ if (task == NULL)
+ goto out_unlock;
+
+ req = task->tk_rqstp;
+ if (req == NULL) {
+ xprt->snd_task = task;
+ return;
}
if (__xprt_get_cong(xprt, task)) {
- struct rpc_rqst *req = task->tk_rqstp;
xprt->snd_task = task;
- if (req) {
- req->rq_bytes_sent = 0;
- req->rq_ntrans++;
- }
+ req->rq_bytes_sent = 0;
+ req->rq_ntrans++;
return;
}
out_unlock:
@@ -852,7 +862,7 @@ int xprt_prepare_transmit(struct rpc_task *task)
err = req->rq_reply_bytes_recvd;
goto out_unlock;
}
- if (!xprt->ops->reserve_xprt(task))
+ if (!xprt->ops->reserve_xprt(xprt, task))
err = -EAGAIN;
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
@@ -928,28 +938,66 @@ void xprt_transmit(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
}
+static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
+{
+ struct rpc_rqst *req = ERR_PTR(-EAGAIN);
+
+ if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
+ goto out;
+ req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
+ if (req != NULL)
+ goto out;
+ atomic_dec(&xprt->num_reqs);
+ req = ERR_PTR(-ENOMEM);
+out:
+ return req;
+}
+
+static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+ if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
+ kfree(req);
+ return true;
+ }
+ return false;
+}
+
static void xprt_alloc_slot(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_rqst *req;
- task->tk_status = 0;
- if (task->tk_rqstp)
- return;
if (!list_empty(&xprt->free)) {
- struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
- list_del_init(&req->rq_list);
- task->tk_rqstp = req;
- xprt_request_init(task, xprt);
- return;
+ req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
+ list_del(&req->rq_list);
+ goto out_init_req;
+ }
+ req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+ if (!IS_ERR(req))
+ goto out_init_req;
+ switch (PTR_ERR(req)) {
+ case -ENOMEM:
+ rpc_delay(task, HZ >> 2);
+ dprintk("RPC: dynamic allocation of request slot "
+ "failed! Retrying\n");
+ break;
+ case -EAGAIN:
+ rpc_sleep_on(&xprt->backlog, task, NULL);
+ dprintk("RPC: waiting for request slot\n");
}
- dprintk("RPC: waiting for request slot\n");
task->tk_status = -EAGAIN;
- task->tk_timeout = 0;
- rpc_sleep_on(&xprt->backlog, task, NULL);
+ return;
+out_init_req:
+ task->tk_status = 0;
+ task->tk_rqstp = req;
+ xprt_request_init(task, xprt);
}
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
+ if (xprt_dynamic_free_slot(xprt, req))
+ return;
+
memset(req, 0, sizeof(*req)); /* mark unused */
spin_lock(&xprt->reserve_lock);
@@ -958,25 +1006,49 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
spin_unlock(&xprt->reserve_lock);
}
-struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
+static void xprt_free_all_slots(struct rpc_xprt *xprt)
+{
+ struct rpc_rqst *req;
+ while (!list_empty(&xprt->free)) {
+ req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
+ list_del(&req->rq_list);
+ kfree(req);
+ }
+}
+
+struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
+ unsigned int num_prealloc,
+ unsigned int max_alloc)
{
struct rpc_xprt *xprt;
+ struct rpc_rqst *req;
+ int i;
xprt = kzalloc(size, GFP_KERNEL);
if (xprt == NULL)
goto out;
- atomic_set(&xprt->count, 1);
- xprt->max_reqs = max_req;
- xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
- if (xprt->slot == NULL)
+ xprt_init(xprt, net);
+
+ for (i = 0; i < num_prealloc; i++) {
+ req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
+ if (!req)
+ break;
+ list_add(&req->rq_list, &xprt->free);
+ }
+ if (i < num_prealloc)
goto out_free;
+ if (max_alloc > num_prealloc)
+ xprt->max_reqs = max_alloc;
+ else
+ xprt->max_reqs = num_prealloc;
+ xprt->min_reqs = num_prealloc;
+ atomic_set(&xprt->num_reqs, num_prealloc);
- xprt->xprt_net = get_net(net);
return xprt;
out_free:
- kfree(xprt);
+ xprt_free(xprt);
out:
return NULL;
}
@@ -985,7 +1057,7 @@ EXPORT_SYMBOL_GPL(xprt_alloc);
void xprt_free(struct rpc_xprt *xprt)
{
put_net(xprt->xprt_net);
- kfree(xprt->slot);
+ xprt_free_all_slots(xprt);
kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);
@@ -1001,10 +1073,24 @@ void xprt_reserve(struct rpc_task *task)
{
struct rpc_xprt *xprt = task->tk_xprt;
- task->tk_status = -EIO;
+ task->tk_status = 0;
+ if (task->tk_rqstp != NULL)
+ return;
+
+ /* Note: grabbing the xprt_lock_write() here is not strictly needed,
+ * but ensures that we throttle new slot allocation if the transport
+ * is congested (e.g. if reconnecting or if we're out of socket
+ * write buffer space).
+ */
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+ if (!xprt_lock_write(xprt, task))
+ return;
+
spin_lock(&xprt->reserve_lock);
xprt_alloc_slot(task);
spin_unlock(&xprt->reserve_lock);
+ xprt_release_write(xprt, task);
}
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
@@ -1021,6 +1107,7 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
struct rpc_rqst *req = task->tk_rqstp;
+ INIT_LIST_HEAD(&req->rq_list);
req->rq_timeout = task->tk_client->cl_timeout->to_initval;
req->rq_task = task;
req->rq_xprt = xprt;
@@ -1073,6 +1160,34 @@ void xprt_release(struct rpc_task *task)
xprt_free_bc_request(req);
}
+static void xprt_init(struct rpc_xprt *xprt, struct net *net)
+{
+ atomic_set(&xprt->count, 1);
+
+ spin_lock_init(&xprt->transport_lock);
+ spin_lock_init(&xprt->reserve_lock);
+
+ INIT_LIST_HEAD(&xprt->free);
+ INIT_LIST_HEAD(&xprt->recv);
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ spin_lock_init(&xprt->bc_pa_lock);
+ INIT_LIST_HEAD(&xprt->bc_pa_list);
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
+ xprt->last_used = jiffies;
+ xprt->cwnd = RPC_INITCWND;
+ xprt->bind_index = 0;
+
+ rpc_init_wait_queue(&xprt->binding, "xprt_binding");
+ rpc_init_wait_queue(&xprt->pending, "xprt_pending");
+ rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
+ rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
+
+ xprt_init_xid(xprt);
+
+ xprt->xprt_net = get_net(net);
+}
+
/**
* xprt_create_transport - create an RPC transport
* @args: rpc transport creation arguments
@@ -1081,7 +1196,6 @@ void xprt_release(struct rpc_task *task)
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
struct rpc_xprt *xprt;
- struct rpc_rqst *req;
struct xprt_class *t;
spin_lock(&xprt_list_lock);
@@ -1100,46 +1214,17 @@ found:
if (IS_ERR(xprt)) {
dprintk("RPC: xprt_create_transport: failed, %ld\n",
-PTR_ERR(xprt));
- return xprt;
+ goto out;
}
- if (test_and_set_bit(XPRT_INITIALIZED, &xprt->state))
- /* ->setup returned a pre-initialized xprt: */
- return xprt;
-
- spin_lock_init(&xprt->transport_lock);
- spin_lock_init(&xprt->reserve_lock);
-
- INIT_LIST_HEAD(&xprt->free);
- INIT_LIST_HEAD(&xprt->recv);
-#if defined(CONFIG_NFS_V4_1)
- spin_lock_init(&xprt->bc_pa_lock);
- INIT_LIST_HEAD(&xprt->bc_pa_list);
-#endif /* CONFIG_NFS_V4_1 */
-
INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
if (xprt_has_timer(xprt))
setup_timer(&xprt->timer, xprt_init_autodisconnect,
(unsigned long)xprt);
else
init_timer(&xprt->timer);
- xprt->last_used = jiffies;
- xprt->cwnd = RPC_INITCWND;
- xprt->bind_index = 0;
-
- rpc_init_wait_queue(&xprt->binding, "xprt_binding");
- rpc_init_wait_queue(&xprt->pending, "xprt_pending");
- rpc_init_wait_queue(&xprt->sending, "xprt_sending");
- rpc_init_wait_queue(&xprt->resend, "xprt_resend");
- rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
-
- /* initialize free list */
- for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
- list_add(&req->rq_list, &xprt->free);
-
- xprt_init_xid(xprt);
-
dprintk("RPC: created transport %p with %u slots\n", xprt,
xprt->max_reqs);
+out:
return xprt;
}
@@ -1157,7 +1242,6 @@ static void xprt_destroy(struct rpc_xprt *xprt)
rpc_destroy_wait_queue(&xprt->binding);
rpc_destroy_wait_queue(&xprt->pending);
rpc_destroy_wait_queue(&xprt->sending);
- rpc_destroy_wait_queue(&xprt->resend);
rpc_destroy_wait_queue(&xprt->backlog);
cancel_work_sync(&xprt->task_cleanup);
/*
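The xprt.c slot-table rework above preallocates num_prealloc rpc_rqst entries on a free list and then grows the count on demand up to max_reqs, using atomic_add_unless() so the counter can never exceed the ceiling even under concurrency; freeing shrinks back toward min_reqs the same way. Below is a user-space C11 analogue of that bounded grow/shrink counter; struct pool, pool_try_grow() and pool_try_shrink() are invented names, not kernel APIs.

/* C11 analogue of the atomic_add_unless() bounded counter: grow only while
 * below the ceiling, shrink only while above the floor. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pool {
    atomic_int count;
    int min, max;
};

/* add 1 unless the counter is already at the limit; true if it grew */
static bool pool_try_grow(struct pool *p)
{
    int c = atomic_load(&p->count);

    while (c < p->max) {
        if (atomic_compare_exchange_weak(&p->count, &c, c + 1))
            return true;            /* c is reloaded on failure */
    }
    return false;
}

static bool pool_try_shrink(struct pool *p)
{
    int c = atomic_load(&p->count);

    while (c > p->min) {
        if (atomic_compare_exchange_weak(&p->count, &c, c - 1))
            return true;
    }
    return false;
}

int main(void)
{
    struct pool p = { .min = 2, .max = 3 };

    atomic_init(&p.count, 2);
    printf("grow:   %d\n", pool_try_grow(&p));   /* 1: 2 -> 3 */
    printf("grow:   %d\n", pool_try_grow(&p));   /* 0: at ceiling */
    printf("shrink: %d\n", pool_try_shrink(&p)); /* 1: 3 -> 2 */
    return 0;
}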
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 0867070bb5c..b446e100286 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -283,6 +283,7 @@ xprt_setup_rdma(struct xprt_create *args)
}
xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
+ xprt_rdma_slot_table_entries,
xprt_rdma_slot_table_entries);
if (xprt == NULL) {
dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
@@ -452,9 +453,8 @@ xprt_rdma_connect(struct rpc_task *task)
}
static int
-xprt_rdma_reserve_xprt(struct rpc_task *task)
+xprt_rdma_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
int credits = atomic_read(&r_xprt->rx_buf.rb_credits);
@@ -466,7 +466,7 @@ xprt_rdma_reserve_xprt(struct rpc_task *task)
BUG_ON(r_xprt->rx_buf.rb_cwndscale <= 0);
}
xprt->cwnd = credits * r_xprt->rx_buf.rb_cwndscale;
- return xprt_reserve_xprt_cong(task);
+ return xprt_reserve_xprt_cong(xprt, task);
}
/*
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index cae761a8536..08c5d5a128f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -42,7 +42,7 @@
#include <linux/wait.h> /* wait_queue_head_t, etc */
#include <linux/spinlock.h> /* spinlock_t, etc */
-#include <asm/atomic.h> /* atomic_t, etc */
+#include <linux/atomic.h> /* atomic_t, etc */
#include <rdma/rdma_cm.h> /* RDMA connection api */
#include <rdma/ib_verbs.h> /* RDMA verbs api */
@@ -109,7 +109,7 @@ struct rpcrdma_ep {
*/
/* temporary static scatter/gather max */
-#define RPCRDMA_MAX_DATA_SEGS (8) /* max scatter/gather */
+#define RPCRDMA_MAX_DATA_SEGS (64) /* max scatter/gather */
#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
#define MAX_RPCRDMAHDR (\
/* max supported RPC/RDMA header */ \
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 72abb735893..d7f97ef2659 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -37,7 +37,7 @@
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
-#ifdef CONFIG_NFS_V4_1
+#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif
@@ -54,7 +54,8 @@ static void xs_close(struct rpc_xprt *xprt);
* xprtsock tunables
*/
unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
-unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE;
+unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
+unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
@@ -75,6 +76,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
+static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
@@ -104,6 +106,15 @@ static ctl_table xs_tunables_table[] = {
.extra2 = &max_slot_table_size
},
{
+ .procname = "tcp_max_slot_table_entries",
+ .data = &xprt_max_tcp_slot_table_entries,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_slot_table_size,
+ .extra2 = &max_tcp_slot_table_limit
+ },
+ {
.procname = "min_resvport",
.data = &xprt_min_resvport,
.maxlen = sizeof(unsigned int),
@@ -755,6 +766,8 @@ static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
if (task == NULL)
goto out_release;
req = task->tk_rqstp;
+ if (req == NULL)
+ goto out_release;
if (req->rq_bytes_sent == 0)
goto out_release;
if (req->rq_bytes_sent == req->rq_snd_buf.len)
@@ -1236,7 +1249,7 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
return 0;
}
-#if defined(CONFIG_NFS_V4_1)
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
* Obtains an rpc_rqst previously allocated and invokes the common
* tcp read code to read the data. The result is placed in the callback
@@ -1299,7 +1312,7 @@ static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
{
return xs_tcp_read_reply(xprt, desc);
}
-#endif /* CONFIG_NFS_V4_1 */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/*
* Read data off the transport. This can be either an RPC_CALL or an
@@ -2489,7 +2502,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap)
}
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
- unsigned int slot_table_size)
+ unsigned int slot_table_size,
+ unsigned int max_slot_table_size)
{
struct rpc_xprt *xprt;
struct sock_xprt *new;
@@ -2499,7 +2513,8 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
return ERR_PTR(-EBADF);
}
- xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size);
+ xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
+ max_slot_table_size);
if (xprt == NULL) {
dprintk("RPC: xs_setup_xprt: couldn't allocate "
"rpc_xprt\n");
@@ -2541,7 +2556,8 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
struct rpc_xprt *xprt;
struct rpc_xprt *ret;
- xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
+ xprt_max_tcp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2605,7 +2621,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
struct sock_xprt *transport;
struct rpc_xprt *ret;
- xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
+ xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
+ xprt_udp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2681,7 +2698,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
struct sock_xprt *transport;
struct rpc_xprt *ret;
- xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
+ xprt_max_tcp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2760,7 +2778,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
*/
return args->bc_xprt->xpt_bc_xprt;
}
- xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
+ xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
+ xprt_tcp_slot_table_entries);
if (IS_ERR(xprt))
return xprt;
transport = container_of(xprt, struct sock_xprt, xprt);
@@ -2947,8 +2966,26 @@ static struct kernel_param_ops param_ops_slot_table_size = {
#define param_check_slot_table_size(name, p) \
__param_check(name, p, unsigned int);
+static int param_set_max_slot_table_size(const char *val,
+ const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp,
+ RPC_MIN_SLOT_TABLE,
+ RPC_MAX_SLOT_TABLE_LIMIT);
+}
+
+static struct kernel_param_ops param_ops_max_slot_table_size = {
+ .set = param_set_max_slot_table_size,
+ .get = param_get_uint,
+};
+
+#define param_check_max_slot_table_size(name, p) \
+ __param_check(name, p, unsigned int);
+
module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
slot_table_size, 0644);
+module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
+ max_slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
slot_table_size, 0644);
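The xprtsock.c changes above split the TCP slot accounting into a default and a ceiling: xprt_tcp_slot_table_entries now starts at RPC_MIN_SLOT_TABLE, while the new xprt_max_tcp_slot_table_entries value is exposed as the tcp_max_slot_table_entries sysctl and module parameter, clamped to RPC_MAX_SLOT_TABLE_LIMIT, and handed to xprt_alloc() so the slot table can grow on demand up to that cap. A user-space sketch of the clamped-setter behaviour is below; the numeric limits are illustrative assumptions, not the kernel's definitions.

/* Sketch of a min/max-clamped integer setter, mirroring how the new
 * tcp_max_slot_table_entries parameter rejects out-of-range values.
 * The limit values are assumptions for illustration. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define RPC_MIN_SLOT_TABLE        2
#define RPC_MAX_SLOT_TABLE_LIMIT  65536

static unsigned int tcp_max_slots;

static int set_uint_minmax(const char *val, unsigned int *dst,
                           unsigned int min, unsigned int max)
{
        char *end;
        unsigned long num = strtoul(val, &end, 0);

        if (*end != '\0' || num < min || num > max)
                return -EINVAL;         /* reject out-of-range input */
        *dst = num;
        return 0;
}

int main(void)
{
        /* "65537" would be rejected; "1024" is accepted and stored. */
        printf("%d\n", set_uint_minmax("1024", &tcp_max_slots,
                                       RPC_MIN_SLOT_TABLE,
                                       RPC_MAX_SLOT_TABLE_LIMIT));
        printf("tcp_max_slots = %u\n", tcp_max_slots);
        return 0;
}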
diff --git a/net/tipc/core.h b/net/tipc/core.h
index d234a98a460..2761af36d14 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -47,7 +47,7 @@
#include <linux/string.h>
#include <asm/uaccess.h>
#include <linux/interrupt.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <linux/netdevice.h>
#include <linux/in.h>
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 645437cfc46..c14865172da 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -616,6 +616,9 @@ int wiphy_register(struct wiphy *wiphy)
if (res)
goto out_rm_dev;
+ rtnl_lock();
+ rdev->wiphy.registered = true;
+ rtnl_unlock();
return 0;
out_rm_dev:
@@ -647,6 +650,10 @@ void wiphy_unregister(struct wiphy *wiphy)
{
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ rtnl_lock();
+ rdev->wiphy.registered = false;
+ rtnl_unlock();
+
rfkill_unregister(rdev->rfkill);
/* protect the device list */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 28d2aa109be..e83e7fee3bc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3464,7 +3464,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
tmp) {
enum ieee80211_band band = nla_type(attr);
- if (band < 0 || band > IEEE80211_NUM_BANDS) {
+ if (band < 0 || band >= IEEE80211_NUM_BANDS) {
err = -EINVAL;
goto out_free;
}
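The nl80211 hunk above fixes an off-by-one: IEEE80211_NUM_BANDS counts the valid bands, so an index equal to it already overruns any array sized by that constant. A small standalone illustration with stand-in names:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };  /* stand-in for ieee80211_band */

static const char *band_name[NUM_BANDS] = { "2.4 GHz", "5 GHz" };

/* Valid indices are 0 .. NUM_BANDS - 1, so reject band >= NUM_BANDS,
 * not merely band > NUM_BANDS. */
static int band_valid(int band)
{
        return band >= 0 && band < NUM_BANDS;
}

int main(void)
{
        int band = NUM_BANDS;           /* exactly the off-by-one case */

        if (band_valid(band))
                printf("%s\n", band_name[band]);
        else
                printf("rejected out-of-range band %d\n", band);
        return 0;
}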
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1ad0f39fe09..02751dbc5a9 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -903,7 +903,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
!is_world_regdom(last_request->alpha2)) {
REG_DBG_PRINT("Ignoring regulatory request %s "
- "since the driver requires its own regulaotry "
+ "since the driver requires its own regulatory "
"domain to be set first",
reg_initiator_name(initiator));
return true;
@@ -1125,12 +1125,13 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
enum ieee80211_band band;
if (ignore_reg_update(wiphy, initiator))
- goto out;
+ return;
+
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
handle_band(wiphy, band, initiator);
}
-out:
+
reg_process_beacons(wiphy);
reg_process_ht_flags(wiphy);
if (wiphy->reg_notifier)
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index c6e4ca6a7d2..ff574597a85 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -93,7 +93,8 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
if (rdev->ops->suspend) {
rtnl_lock();
- ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
+ if (rdev->wiphy.registered)
+ ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
rtnl_unlock();
}
@@ -112,7 +113,8 @@ static int wiphy_resume(struct device *dev)
if (rdev->ops->resume) {
rtnl_lock();
- ret = rdev->ops->resume(&rdev->wiphy);
+ if (rdev->wiphy.registered)
+ ret = rdev->ops->resume(&rdev->wiphy);
rtnl_unlock();
}
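Taken together with the wiphy_register()/wiphy_unregister() hunks earlier, the sysfs changes above gate the driver suspend/resume callbacks on a registered flag that is flipped under the RTNL, so the callbacks can no longer run against a wiphy that has already been (or is being) torn down. A rough user-space sketch of that pattern follows, with a pthread mutex standing in for the RTNL and all names assumed.

/* Sketch of the registered-flag guard pattern; the mutex stands in
 * for the RTNL and the structure is an assumption, not cfg80211's. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
        pthread_mutex_t lock;           /* plays the role of the RTNL */
        bool registered;
        int (*suspend)(struct dev *);
};

static int dev_suspend(struct dev *d)
{
        int ret = 0;

        pthread_mutex_lock(&d->lock);
        if (d->registered && d->suspend)  /* skip callbacks once unregistered */
                ret = d->suspend(d);
        pthread_mutex_unlock(&d->lock);
        return ret;
}

static void dev_unregister(struct dev *d)
{
        pthread_mutex_lock(&d->lock);
        d->registered = false;            /* later suspend/resume become no-ops */
        pthread_mutex_unlock(&d->lock);
        /* ... remaining teardown happens after the flag is cleared ... */
}

static int fake_suspend(struct dev *d) { (void)d; return 1; }

int main(void)
{
        struct dev d = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .registered = true,
                .suspend = fake_suspend,
        };

        printf("%d\n", dev_suspend(&d));  /* 1: callback ran */
        dev_unregister(&d);
        printf("%d\n", dev_suspend(&d));  /* 0: skipped after unregister */
        return 0;
}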
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 58064d9e565..791ab2e77f3 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -462,8 +462,8 @@ static struct xfrm_algo_desc ealg_list[] = {
.desc = {
.sadb_alg_id = SADB_X_EALG_AESCTR,
.sadb_alg_ivlen = 8,
- .sadb_alg_minbits = 128,
- .sadb_alg_maxbits = 256
+ .sadb_alg_minbits = 160,
+ .sadb_alg_maxbits = 288
}
},
};
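The xfrm change above raises the advertised AES-CTR key sizes by 32 bits because the ESP keying material for this algorithm carries a per-SA nonce after the AES key (as in RFC 3686): 128 + 32 = 160 and 256 + 32 = 288 bits. A trivial standalone check of that arithmetic:

#include <stdio.h>

#define AESCTR_NONCE_BITS 32    /* per-SA nonce carried after the AES key */

int main(void)
{
        const int key_bits[] = { 128, 192, 256 };

        for (int i = 0; i < 3; i++)
                printf("AES-%d-CTR keymat: %d bits\n",
                       key_bits[i], key_bits[i] + AESCTR_NONCE_BITS);
        return 0;               /* prints 160, 224 and 288 */
}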