Diffstat (limited to 'net')
67 files changed, 423 insertions, 292 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 5f27f8e3025..f1f2f7bb666 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -167,6 +167,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb) if (unlikely(!skb)) goto err_free; + skb_reset_network_header(skb); + skb_reset_transport_header(skb); return skb; err_free: diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 175b5135bdc..e317583fcc7 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) { int in, out, inp, outp; struct virtio_chan *chan = client->trans; - char *rdata = (char *)req->rc+sizeof(struct p9_fcall); unsigned long flags; size_t pdata_off = 0; struct trans_rpage_info *rpinfo = NULL; @@ -346,7 +345,8 @@ req_retry_pinned: * Arrange in such a way that server places header in the * alloced memory and payload onto the user buffer. */ - inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11); + inp = pack_sg_list(chan->sg, out, + VIRTQUEUE_NUM, req->rc->sdata, 11); /* * Running executables in the filesystem may result in * a read request with kernel buffer as opposed to user buffer. @@ -366,8 +366,8 @@ req_retry_pinned: } in += inp; } else { - in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, - req->rc->capacity); + in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, + req->rc->sdata, req->rc->capacity); } err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc); @@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = { .close = p9_virtio_close, .request = p9_virtio_request, .cancel = p9_virtio_cancel, - .maxsize = PAGE_SIZE*VIRTQUEUE_NUM, + + /* + * We leave one entry for input and one entry for response + * headers. We also skip one more entry to accomodate, address + * that are not at page boundary, that can result in an extra + * page in zero copy. 
+ */ + .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), .pref = P9_TRANS_PREF_PAYLOAD_SEP, .def = 0, .owner = THIS_MODULE, diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 52cfd0c3ea7..d07223c834a 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -558,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) spin_unlock_irqrestore(&rq->lock, flags); skb_queue_walk_safe(&queue, skb, tmp) { - struct net_device *dev = skb->dev; + struct net_device *dev; + + br2684_push(atmvcc, skb); + dev = skb->dev; dev->stats.rx_bytes -= skb->len; dev->stats.rx_packets--; - - br2684_push(atmvcc, skb); } /* initialize netdev carrier state */ diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 8add9b49991..117e0d16178 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -494,9 +494,8 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) BT_DBG("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); + set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { - set_current_state(TASK_INTERRUPTIBLE); - if (!timeo) { err = -EINPROGRESS; break; @@ -510,12 +509,13 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); + set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index 8e6c06158f8..e7ee5314f39 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h @@ -155,6 +155,7 @@ struct bnep_session { unsigned int role; unsigned long state; unsigned long flags; + atomic_t terminate; struct task_struct *task; struct ethhdr eh; diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index ca39fcf010c..d9edfe8bf9d 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -484,9 +484,11 @@ static int bnep_session(void *arg) init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sk), &wait); - while (!kthread_should_stop()) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); + if (atomic_read(&s->terminate)) + break; /* RX */ while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); @@ -504,7 +506,7 @@ static int bnep_session(void *arg) schedule(); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); /* Cleanup session */ @@ -640,9 +642,10 @@ int bnep_del_connection(struct bnep_conndel_req *req) down_read(&bnep_session_sem); s = __bnep_get_session(req->dst); - if (s) - kthread_stop(s->task); - else + if (s) { + atomic_inc(&s->terminate); + wake_up_process(s->task); + } else err = -ENOENT; up_read(&bnep_session_sem); diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index 040f67b1297..50f0d135eb8 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c @@ -386,7 +386,8 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl) capi_ctr_down(ctrl); - kthread_stop(session->task); + atomic_inc(&session->terminate); + wake_up_process(session->task); } static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h index db43b54ac9a..c32638dddbf 100644 --- a/net/bluetooth/cmtp/cmtp.h +++ b/net/bluetooth/cmtp/cmtp.h @@ -81,6 +81,7 @@ struct cmtp_session { char name[BTNAMSIZ]; + 
atomic_t terminate; struct task_struct *task; wait_queue_head_t wait; diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index c5b11af908b..521baa4fe83 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -292,9 +292,11 @@ static int cmtp_session(void *arg) init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sk), &wait); - while (!kthread_should_stop()) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); + if (atomic_read(&session->terminate)) + break; if (sk->sk_state != BT_CONNECTED) break; @@ -307,7 +309,7 @@ static int cmtp_session(void *arg) schedule(); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); down_write(&cmtp_session_sem); @@ -380,16 +382,17 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) if (!(session->flags & (1 << CMTP_LOOPBACK))) { err = cmtp_attach_device(session); - if (err < 0) - goto detach; + if (err < 0) { + atomic_inc(&session->terminate); + wake_up_process(session->task); + up_write(&cmtp_session_sem); + return err; + } } up_write(&cmtp_session_sem); return 0; -detach: - cmtp_detach_device(session); - unlink: __cmtp_unlink_session(session); @@ -414,7 +417,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) skb_queue_purge(&session->transmit); /* Stop session thread */ - kthread_stop(session->task); + atomic_inc(&session->terminate); + wake_up_process(session->task); } else err = -ENOENT; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ec0bc3f60f2..56943add45c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1209,7 +1209,6 @@ static void hci_cmd_timer(unsigned long arg) BT_ERR("%s command tx timeout", hdev->name); atomic_set(&hdev->cmd_cnt, 1); - clear_bit(HCI_RESET, &hdev->flags); tasklet_schedule(&hdev->cmd_task); } @@ -1327,7 +1326,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); if (!entry) { - return -ENOMEM; + err = -ENOMEM; goto err; } @@ -2408,7 +2407,10 @@ static void hci_cmd_task(unsigned long arg) if (hdev->sent_cmd) { atomic_dec(&hdev->cmd_cnt); hci_send_frame(skb); - mod_timer(&hdev->cmd_timer, + if (test_bit(HCI_RESET, &hdev->flags)) + del_timer(&hdev->cmd_timer); + else + mod_timer(&hdev->cmd_timer, jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT)); } else { skb_queue_head(&hdev->cmd_q, skb); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a40170e022e..7ef4eb4435f 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -58,8 +58,8 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; - if (test_bit(HCI_MGMT, &hdev->flags) && - test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) mgmt_discovering(hdev->id, 0); hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); @@ -76,8 +76,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; - if (test_bit(HCI_MGMT, &hdev->flags) && - test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) mgmt_discovering(hdev->id, 0); hci_conn_check_pending(hdev); @@ -959,9 +959,8 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) return; } - if (test_bit(HCI_MGMT, &hdev->flags) && - !test_and_set_bit(HCI_INQUIRY, - &hdev->flags)) + if 
(!test_and_set_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) mgmt_discovering(hdev->id, 1); } @@ -1340,8 +1339,8 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff BT_DBG("%s status %d", hdev->name, status); - if (test_bit(HCI_MGMT, &hdev->flags) && - test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) + if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && + test_bit(HCI_MGMT, &hdev->flags)) mgmt_discovering(hdev->id, 0); hci_req_complete(hdev, HCI_OP_INQUIRY, status); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 43b4c2deb7c..fb68f344c34 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -764,6 +764,7 @@ static int hidp_session(void *arg) up_write(&hidp_session_sem); + kfree(session->rd_data); kfree(session); return 0; } @@ -841,7 +842,8 @@ static int hidp_setup_input(struct hidp_session *session, err = input_register_device(input); if (err < 0) { - hci_conn_put_device(session->conn); + input_free_device(input); + session->input = NULL; return err; } @@ -1044,8 +1046,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, } err = hid_add_device(session->hid); - if (err < 0) - goto err_add_device; + if (err < 0) { + atomic_inc(&session->terminate); + wake_up_process(session->task); + up_write(&hidp_session_sem); + return err; + } if (session->input) { hidp_send_ctrl_message(session, @@ -1059,12 +1065,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, up_write(&hidp_session_sem); return 0; -err_add_device: - hid_destroy_device(session->hid); - session->hid = NULL; - atomic_inc(&session->terminate); - wake_up_process(session->task); - unlink: hidp_del_timer(session); @@ -1090,7 +1090,6 @@ purge: failed: up_write(&hidp_session_sem); - input_free_device(session->input); kfree(session); return err; } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 3204ba8a701..b3bdb482bbe 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1159,9 +1159,8 @@ int __l2cap_wait_ack(struct sock *sk) int timeo = HZ/5; add_wait_queue(sk_sleep(sk), &wait); - while ((chan->unacked_frames > 0 && chan->conn)) { - set_current_state(TASK_INTERRUPTIBLE); - + set_current_state(TASK_INTERRUPTIBLE); + while (chan->unacked_frames > 0 && chan->conn) { if (!timeo) timeo = HZ/5; @@ -1173,6 +1172,7 @@ int __l2cap_wait_ack(struct sock *sk) release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); + set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 5c36b3e8739..61f1f623091 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -235,30 +235,26 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl lock_sock_nested(sk, SINGLE_DEPTH_NESTING); - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; - goto done; - } - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); - while (!(nsk = bt_accept_dequeue(sk, newsock))) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!timeo) { - err = -EAGAIN; + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; break; } - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + nsk = bt_accept_dequeue(sk, newsock); + if (nsk) + break; - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; + if (!timeo) { + err = -EAGAIN; break; } @@ -266,8 +262,12 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl err = sock_intr_errno(timeo); break; } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) @@ -993,7 +993,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p INIT_LIST_HEAD(&bt_sk(sk)->accept_q); sk->sk_destruct = l2cap_sock_destruct; - sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT); + sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; sock_reset_flag(sk, SOCK_ZAPPED); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 5759bb7054f..5ba3f6df665 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -62,7 +62,6 @@ static DEFINE_MUTEX(rfcomm_mutex); #define rfcomm_lock() mutex_lock(&rfcomm_mutex) #define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) -static unsigned long rfcomm_event; static LIST_HEAD(session_list); @@ -120,7 +119,6 @@ static inline void rfcomm_schedule(void) { if (!rfcomm_thread) return; - set_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); wake_up_process(rfcomm_thread); } @@ -2038,19 +2036,18 @@ static int rfcomm_run(void *unused) rfcomm_add_listener(BDADDR_ANY); - while (!kthread_should_stop()) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!test_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event)) { - /* No pending events. Let's sleep. - * Incoming connections and data will wake us up. */ - schedule(); - } - set_current_state(TASK_RUNNING); + + if (kthread_should_stop()) + break; /* Process stuff */ - clear_bit(RFCOMM_SCHED_WAKEUP, &rfcomm_event); rfcomm_process_sessions(); + + schedule(); } + __set_current_state(TASK_RUNNING); rfcomm_kill_listener(); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 8f01e6b11a7..482722bbc7a 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -485,11 +485,6 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f lock_sock(sk); - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; - goto done; - } - if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; @@ -501,19 +496,20 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); - while (!(nsk = bt_accept_dequeue(sk, newsock))) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!timeo) { - err = -EAGAIN; + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; break; } - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock(sk); + nsk = bt_accept_dequeue(sk, newsock); + if (nsk) + break; - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; + if (!timeo) { + err = -EAGAIN; break; } @@ -521,8 +517,12 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f err = sock_intr_errno(timeo); break; } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 4c3621b5e0a..8270f05e3f1 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -564,30 +564,26 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag lock_sock(sk); - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; - goto done; - } - timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). */ add_wait_queue_exclusive(sk_sleep(sk), &wait); - while (!(ch = bt_accept_dequeue(sk, newsock))) { + while (1) { set_current_state(TASK_INTERRUPTIBLE); - if (!timeo) { - err = -EAGAIN; + + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; break; } - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock(sk); + ch = bt_accept_dequeue(sk, newsock); + if (ch) + break; - if (sk->sk_state != BT_LISTEN) { - err = -EBADFD; + if (!timeo) { + err = -EAGAIN; break; } @@ -595,8 +591,12 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag err = sock_intr_errno(timeo); break; } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); } - set_current_state(TASK_RUNNING); + __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (err) diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 2cdf0070419..e73815456ad 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -231,6 +231,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, int br_add_bridge(struct net *net, const char *name) { struct net_device *dev; + int res; dev = alloc_netdev(sizeof(struct net_bridge), name, br_dev_setup); @@ -240,7 +241,10 @@ int br_add_bridge(struct net *net, const char *name) dev_net_set(dev, net); - return register_netdev(dev); + res = register_netdev(dev); + if (res) + free_netdev(dev); + return res; } int br_del_bridge(struct net *net, const char *name) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 2d85ca7111d..995cbe0ac0b 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1456,7 +1456,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, { struct sk_buff *skb2; const struct ipv6hdr *ip6h; - struct icmp6hdr *icmp6h; + u8 icmp6_type; u8 nexthdr; unsigned len; int offset; @@ -1502,9 +1502,9 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, __skb_pull(skb2, offset); skb_reset_transport_header(skb2); - icmp6h = icmp6_hdr(skb2); + icmp6_type = icmp6_hdr(skb2)->icmp6_type; - switch (icmp6h->icmp6_type) { + switch (icmp6_type) { case ICMPV6_MGM_QUERY: case ICMPV6_MGM_REPORT: case ICMPV6_MGM_REDUCTION: @@ -1520,16 +1520,23 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, err = 
pskb_trim_rcsum(skb2, len); if (err) goto out; + err = -EINVAL; } + ip6h = ipv6_hdr(skb2); + switch (skb2->ip_summed) { case CHECKSUM_COMPLETE: - if (!csum_fold(skb2->csum)) + if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len, + IPPROTO_ICMPV6, skb2->csum)) break; /*FALLTHROUGH*/ case CHECKSUM_NONE: - skb2->csum = 0; - if (skb_checksum_complete(skb2)) + skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, + skb2->len, + IPPROTO_ICMPV6, 0)); + if (__skb_checksum_complete(skb2)) goto out; } @@ -1537,7 +1544,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, BR_INPUT_SKB_CB(skb)->igmp = 1; - switch (icmp6h->icmp6_type) { + switch (icmp6_type) { case ICMPV6_MGM_REPORT: { struct mld_msg *mld; diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index ba6f73eb06c..a9aff9c7d02 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -4,7 +4,7 @@ menuconfig BRIDGE_NF_EBTABLES tristate "Ethernet Bridge tables (ebtables) support" - depends on BRIDGE && BRIDGE_NETFILTER + depends on BRIDGE && NETFILTER select NETFILTER_XTABLES help ebtables is a general, extensible frame/packet identification diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index 7c2fa0a0814..7f9ac0742d1 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -93,10 +93,14 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev) caifdevs = caif_device_list(dev_net(dev)); BUG_ON(!caifdevs); - caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); + caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); if (!caifd) return NULL; caifd->pcpu_refcnt = alloc_percpu(int); + if (!caifd->pcpu_refcnt) { + kfree(caifd); + return NULL; + } caifd->netdev = dev; dev_hold(dev); return caifd; diff --git a/net/can/af_can.c b/net/can/af_can.c index 8ce926d3b2c..9b0c32a2690 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -857,7 +857,7 @@ static __exit void can_exit(void) struct net_device *dev; if (stats_timer) - del_timer(&can_stattimer); + del_timer_sync(&can_stattimer); can_remove_proc(); diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c index d5f2d97ac05..1f4cb30a42c 100644 --- a/net/ceph/msgpool.c +++ b/net/ceph/msgpool.c @@ -7,27 +7,37 @@ #include <linux/ceph/msgpool.h> -static void *alloc_fn(gfp_t gfp_mask, void *arg) +static void *msgpool_alloc(gfp_t gfp_mask, void *arg) { struct ceph_msgpool *pool = arg; - void *p; + struct ceph_msg *msg; - p = ceph_msg_new(0, pool->front_len, gfp_mask); - if (!p) - pr_err("msgpool %s alloc failed\n", pool->name); - return p; + msg = ceph_msg_new(0, pool->front_len, gfp_mask); + if (!msg) { + dout("msgpool_alloc %s failed\n", pool->name); + } else { + dout("msgpool_alloc %s %p\n", pool->name, msg); + msg->pool = pool; + } + return msg; } -static void free_fn(void *element, void *arg) +static void msgpool_free(void *element, void *arg) { - ceph_msg_put(element); + struct ceph_msgpool *pool = arg; + struct ceph_msg *msg = element; + + dout("msgpool_release %s %p\n", pool->name, msg); + msg->pool = NULL; + ceph_msg_put(msg); } int ceph_msgpool_init(struct ceph_msgpool *pool, int front_len, int size, bool blocking, const char *name) { + dout("msgpool %s init\n", name); pool->front_len = front_len; - pool->pool = mempool_create(size, alloc_fn, free_fn, pool); + pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); if (!pool->pool) return -ENOMEM; pool->name = name; @@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool, void ceph_msgpool_destroy(struct ceph_msgpool *pool) { + 
dout("msgpool %s destroy\n", pool->name); mempool_destroy(pool->pool); } struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len) { + struct ceph_msg *msg; + if (front_len > pool->front_len) { - pr_err("msgpool_get pool %s need front %d, pool size is %d\n", + dout("msgpool_get %s need front %d, pool size is %d\n", pool->name, front_len, pool->front_len); WARN_ON(1); @@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, return ceph_msg_new(0, front_len, GFP_NOFS); } - return mempool_alloc(pool->pool, GFP_NOFS); + msg = mempool_alloc(pool->pool, GFP_NOFS); + dout("msgpool_get %s %p\n", pool->name, msg); + return msg; } void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) { + dout("msgpool_put %s %p\n", pool->name, msg); + /* reset msg front_len; user may have changed it */ msg->front.iov_len = pool->front_len; msg->hdr.front_len = cpu_to_le32(pool->front_len); kref_init(&msg->kref); /* retake single ref */ + mempool_free(msg, pool->pool); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ce310eee708..16836a7df7a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) put_osd(osd); } +static void remove_all_osds(struct ceph_osd_client *osdc) +{ + dout("__remove_old_osds %p\n", osdc); + mutex_lock(&osdc->request_mutex); + while (!RB_EMPTY_ROOT(&osdc->osds)) { + struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), + struct ceph_osd, o_node); + __remove_osd(osdc, osd); + } + mutex_unlock(&osdc->request_mutex); +} + static void __move_osd_to_lru(struct ceph_osd_client *osdc, struct ceph_osd *osd) { @@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd) list_del_init(&osd->o_osd_lru); } -static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all) +static void remove_old_osds(struct ceph_osd_client *osdc) { struct ceph_osd *osd, *nosd; dout("__remove_old_osds %p\n", osdc); mutex_lock(&osdc->request_mutex); list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { - if (!remove_all && time_before(jiffies, osd->lru_ttl)) + if (time_before(jiffies, osd->lru_ttl)) break; __remove_osd(osdc, osd); } @@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new) struct rb_node *parent = NULL; struct ceph_osd *osd = NULL; + dout("__insert_osd %p osd%d\n", new, new->o_osd); while (*p) { parent = *p; osd = rb_entry(parent, struct ceph_osd, o_node); @@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work) dout("osds timeout\n"); down_read(&osdc->map_sem); - remove_old_osds(osdc, 0); + remove_old_osds(osdc); up_read(&osdc->map_sem); schedule_delayed_work(&osdc->osds_timeout_work, @@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc) ceph_osdmap_destroy(osdc->osdmap); osdc->osdmap = NULL; } - remove_old_osds(osdc, 1); - WARN_ON(!RB_EMPTY_ROOT(&osdc->osds)); + remove_all_osds(osdc); mempool_destroy(osdc->req_mempool); ceph_msgpool_destroy(&osdc->msgpool_op); ceph_msgpool_destroy(&osdc->msgpool_op_reply); diff --git a/net/core/dev.c b/net/core/dev.c index 17d67b579be..b10ff0a7185 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1515,6 +1515,14 @@ static inline bool is_skb_forwardable(struct net_device *dev, */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { + atomic_long_inc(&dev->rx_dropped); 
+ kfree_skb(skb); + return NET_RX_DROP; + } + } + skb_orphan(skb); nf_reset(skb); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index e7ab0c0285b..3231b468bb7 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -384,8 +384,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) */ list_for_each_entry(r, &ops->rules_list, list) { if (r->action == FR_ACT_GOTO && - r->target == rule->pref) { - BUG_ON(rtnl_dereference(r->ctarget) != NULL); + r->target == rule->pref && + rtnl_dereference(r->ctarget) == NULL) { rcu_assign_pointer(r->ctarget, rule); if (--ops->unresolved_rules == 0) break; diff --git a/net/core/flow.c b/net/core/flow.c index bf32c33cad3..555a456efb0 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -30,6 +30,7 @@ struct flow_cache_entry { struct hlist_node hlist; struct list_head gc_list; } u; + struct net *net; u16 family; u8 dir; u32 genid; @@ -172,29 +173,26 @@ static void flow_new_hash_rnd(struct flow_cache *fc, static u32 flow_hash_code(struct flow_cache *fc, struct flow_cache_percpu *fcp, - const struct flowi *key) + const struct flowi *key, + size_t keysize) { const u32 *k = (const u32 *) key; + const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); - return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd) + return jhash2(k, length, fcp->hash_rnd) & (flow_cache_hash_size(fc) - 1); } -typedef unsigned long flow_compare_t; - /* I hear what you're saying, use memcmp. But memcmp cannot make - * important assumptions that we can here, such as alignment and - * constant size. + * important assumptions that we can here, such as alignment. */ -static int flow_key_compare(const struct flowi *key1, const struct flowi *key2) +static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, + size_t keysize) { const flow_compare_t *k1, *k1_lim, *k2; - const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t); - - BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t)); k1 = (const flow_compare_t *) key1; - k1_lim = k1 + n_elem; + k1_lim = k1 + keysize; k2 = (const flow_compare_t *) key2; @@ -215,6 +213,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, struct flow_cache_entry *fle, *tfle; struct hlist_node *entry; struct flow_cache_object *flo; + size_t keysize; unsigned int hash; local_bh_disable(); @@ -222,6 +221,11 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, fle = NULL; flo = NULL; + + keysize = flow_key_size(family); + if (!keysize) + goto nocache; + /* Packet really early in init? Making flow_cache_init a * pre-smp initcall would solve this. 
--RR */ if (!fcp->hash_table) @@ -230,11 +234,12 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, if (fcp->hash_rnd_recalc) flow_new_hash_rnd(fc, fcp); - hash = flow_hash_code(fc, fcp, key); + hash = flow_hash_code(fc, fcp, key, keysize); hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) { - if (tfle->family == family && + if (tfle->net == net && + tfle->family == family && tfle->dir == dir && - flow_key_compare(key, &tfle->key) == 0) { + flow_key_compare(key, &tfle->key, keysize) == 0) { fle = tfle; break; } @@ -246,9 +251,10 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); if (fle) { + fle->net = net; fle->family = family; fle->dir = dir; - memcpy(&fle->key, key, sizeof(*key)); + memcpy(&fle->key, key, keysize * sizeof(flow_compare_t)); fle->object = NULL; hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); fcp->hash_count++; diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8fab9b0bb20..1334d7e56f0 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1319,11 +1319,15 @@ static void neigh_proxy_process(unsigned long arg) if (tdif <= 0) { struct net_device *dev = skb->dev; + __skb_unlink(skb, &tbl->proxy_queue); - if (tbl->proxy_redo && netif_running(dev)) + if (tbl->proxy_redo && netif_running(dev)) { + rcu_read_lock(); tbl->proxy_redo(skb); - else + rcu_read_unlock(); + } else { kfree_skb(skb); + } dev_put(dev); } else if (!sched_next || tdif < sched_next) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index adf84dd8c7b..52622517e0d 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -558,13 +558,14 @@ int __netpoll_rx(struct sk_buff *skb) if (skb_shared(skb)) goto out; - iph = (struct iphdr *)skb->data; if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out; + iph = (struct iphdr *)skb->data; if (iph->ihl < 5 || iph->version != 4) goto out; if (!pskb_may_pull(skb, iph->ihl*4)) goto out; + iph = (struct iphdr *)skb->data; if (ip_fast_csum((u8 *)iph, iph->ihl) != 0) goto out; @@ -579,6 +580,7 @@ int __netpoll_rx(struct sk_buff *skb) if (pskb_trim_rcsum(skb, len)) goto out; + iph = (struct iphdr *)skb->data; if (iph->protocol != IPPROTO_UDP) goto out; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 27002dffe7e..387703f56fc 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -611,8 +611,21 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) } EXPORT_SYMBOL_GPL(skb_morph); -/* skb frags copy userspace buffers to kernel */ -static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) +/* skb_copy_ubufs - copy userspace skb frags buffers to kernel + * @skb: the skb to modify + * @gfp_mask: allocation priority + * + * This must be called on SKBTX_DEV_ZEROCOPY skb. + * It will copy all frags into kernel and drop the reference + * to userspace pages. + * + * If this function is called from an interrupt gfp_mask() must be + * %GFP_ATOMIC. + * + * Returns 0 on success or a negative error code on failure + * to allocate kernel memory to copy to. 
+ */ +int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) { int i; int num_frags = skb_shinfo(skb)->nr_frags; @@ -652,6 +665,8 @@ static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) skb_shinfo(skb)->frags[i - 1].page = head; head = (struct page *)head->private; } + + skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; return 0; } @@ -677,7 +692,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) return NULL; - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } n = skb + 1; @@ -803,7 +817,6 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) n = NULL; goto out; } - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; @@ -896,7 +909,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { if (skb_copy_ubufs(skb, gfp_mask)) goto nofrags; - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) get_page(skb_shinfo(skb)->frags[i].page); diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 27997d35ebd..a2468363978 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -340,7 +340,7 @@ void ether_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; dev->tx_queue_len = 1000; /* Ethernet wants good queues */ dev->flags = IFF_BROADCAST|IFF_MULTICAST; - dev->priv_flags = IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_TX_SKB_SHARING; memset(dev->broadcast, 0xFF, ETH_ALEN); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1b745d412cf..dd2b9478ddd 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -466,8 +466,13 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) goto out; if (addr->sin_family != AF_INET) { + /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) + * only if s_addr is INADDR_ANY. 
+ */ err = -EAFNOSUPPORT; - goto out; + if (addr->sin_family != AF_UNSPEC || + addr->sin_addr.s_addr != htonl(INADDR_ANY)) + goto out; } chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 33e2c35b74b..80106d89d54 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -142,6 +142,14 @@ const struct fib_prop fib_props[RTN_MAX + 1] = { }; /* Release a nexthop info record */ +static void free_fib_info_rcu(struct rcu_head *head) +{ + struct fib_info *fi = container_of(head, struct fib_info, rcu); + + if (fi->fib_metrics != (u32 *) dst_default_metrics) + kfree(fi->fib_metrics); + kfree(fi); +} void free_fib_info(struct fib_info *fi) { @@ -156,7 +164,7 @@ void free_fib_info(struct fib_info *fi) } endfor_nexthops(fi); fib_info_cnt--; release_net(fi->fib_net); - kfree_rcu(fi, rcu); + call_rcu(&fi->rcu, free_fib_info_rcu); } void fib_release_info(struct fib_info *fi) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 283c0a26e03..d577199eabd 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -767,7 +767,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs) break; for (i=0; i<nsrcs; i++) { /* skip inactive filters */ - if (pmc->sfcount[MCAST_INCLUDE] || + if (psf->sf_count[MCAST_INCLUDE] || pmc->sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) continue; diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 5c9b9d96391..e59aabd0eae 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) return skb; nlmsg_failure: + kfree_skb(skb); *errp = -EINVAL; printk(KERN_ERR "ip_queue: error creating packet message\n"); return NULL; @@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) { struct nf_queue_entry *entry; - if (vmsg->value > NF_MAX_VERDICT) + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) return -EINVAL; entry = ipq_find_dequeue_entry(vmsg->id); @@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg, break; case IPQM_VERDICT: - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) - status = -EINVAL; - else - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; + status = ipq_set_verdict(&pmsg->msg.verdict, + len - sizeof(*pmsg)); + break; default: status = -EINVAL; } diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index b14ec7d03b6..4bfad5da94f 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -254,6 +254,8 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), + SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), + SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ea0d2183df4..21fab3edb92 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1124,7 +1124,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack, return 0; /* ...Then it's D-SACK, and must reside below snd_una completely */ - if (!after(end_seq, tp->snd_una)) + if (after(end_seq, tp->snd_una)) return 0; if (!before(start_seq, tp->undo_marker)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1c12b8ec849..c34f0151394 100644 --- 
a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -808,20 +808,38 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) kfree(inet_rsk(req)->opt); } -static void syn_flood_warning(const struct sk_buff *skb) +/* + * Return 1 if a syncookie should be sent + */ +int tcp_syn_flood_action(struct sock *sk, + const struct sk_buff *skb, + const char *proto) { - const char *msg; + const char *msg = "Dropping request"; + int want_cookie = 0; + struct listen_sock *lopt; + + #ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) + if (sysctl_tcp_syncookies) { msg = "Sending cookies"; - else + want_cookie = 1; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); + } else #endif - msg = "Dropping request"; + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); - pr_info("TCP: Possible SYN flooding on port %d. %s.\n", - ntohs(tcp_hdr(skb)->dest), msg); + lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; + if (!lopt->synflood_warned) { + lopt->synflood_warned = 1; + pr_info("%s: Possible SYN flooding on port %d. %s. " + " Check SNMP counters.\n", + proto, ntohs(tcp_hdr(skb)->dest), msg); + } + return want_cookie; } +EXPORT_SYMBOL(tcp_syn_flood_action); /* * Save and compile IPv4 options into the request_sock if needed. @@ -1235,11 +1253,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) __be32 saddr = ip_hdr(skb)->saddr; __be32 daddr = ip_hdr(skb)->daddr; __u32 isn = TCP_SKB_CB(skb)->when; -#ifdef CONFIG_SYN_COOKIES int want_cookie = 0; -#else -#define want_cookie 0 /* Argh, why doesn't gcc optimize this :( */ -#endif /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) @@ -1250,14 +1264,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * evidently real one. */ if (inet_csk_reqsk_queue_is_full(sk) && !isn) { - if (net_ratelimit()) - syn_flood_warning(skb); -#ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) { - want_cookie = 1; - } else -#endif - goto drop; + want_cookie = tcp_syn_flood_action(sk, skb, "TCP"); + if (!want_cookie) + goto drop; } /* Accept backlog is full. 
If we have already queued enough @@ -1303,9 +1312,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; -#ifdef CONFIG_SYN_COOKIES want_cookie = 0; /* not our kind of cookie */ -#endif tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f012ebd87b4..12368c58606 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -374,8 +374,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) "%s(): cannot allocate memory for statistics; dev=%s.\n", __func__, dev->name)); neigh_parms_release(&nd_tbl, ndev->nd_parms); - ndev->dead = 1; - in6_dev_finish_destroy(ndev); + dev_put(dev); + kfree(ndev); return NULL; } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 9ef1831746e..b46e9f88ce3 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -599,7 +599,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) return 0; } -int datagram_send_ctl(struct net *net, +int datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, int *hlimit, int *tclass, int *dontfrag) @@ -658,7 +658,8 @@ int datagram_send_ctl(struct net *net, if (addr_type != IPV6_ADDR_ANY) { int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; - if (!ipv6_chk_addr(net, &src_info->ipi6_addr, + if (!inet_sk(sk)->transparent && + !ipv6_chk_addr(net, &src_info->ipi6_addr, strict ? dev : NULL, 0)) err = -EINVAL; else diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index f3caf1b8d57..54303945019 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -322,8 +322,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo } static struct ip6_flowlabel * -fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, - int optlen, int *err_p) +fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, + char __user *optval, int optlen, int *err_p) { struct ip6_flowlabel *fl = NULL; int olen; @@ -360,7 +360,7 @@ fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval, msg.msg_control = (void*)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); - err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk, + err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk, &junk, &junk); if (err) goto done; @@ -528,7 +528,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) return -EINVAL; - fl = fl_create(net, &freq, optval, optlen, &err); + fl = fl_create(net, sk, &freq, optval, optlen, &err); if (fl == NULL) return err; sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 9cb191ecaba..2fbda5fc4cc 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -475,7 +475,7 @@ sticky_done: msg.msg_controllen = optlen; msg.msg_control = (void*)(opt+1); - retv = datagram_send_ctl(net, &msg, &fl6, opt, &junk, &junk, + retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); if (retv) goto done; @@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, } static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) + char __user *optval, int __user *optlen, unsigned 
flags) { struct ipv6_pinfo *np = inet6_sk(sk); int len; @@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, msg.msg_control = optval; msg.msg_controllen = len; - msg.msg_flags = 0; + msg.msg_flags = flags; lock_sock(sk); skb = np->pktoptions; @@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, if(level != SOL_IPV6) return -ENOPROTOOPT; - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); + err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { @@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, return compat_mc_getsockopt(sk, level, optname, optval, optlen, ipv6_getsockopt); - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); + err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, + MSG_CMSG_COMPAT); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 3e6ebcdb477..ee7839f4d6e 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1059,7 +1059,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, break; for (i=0; i<nsrcs; i++) { /* skip inactive filters */ - if (pmc->mca_sfcount[MCAST_INCLUDE] || + if (psf->sf_count[MCAST_INCLUDE] || pmc->mca_sfcount[MCAST_EXCLUDE] != psf->sf_count[MCAST_EXCLUDE]) continue; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 24939486328..e63c3972a73 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -218,6 +218,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp) return skb; nlmsg_failure: + kfree_skb(skb); *errp = -EINVAL; printk(KERN_ERR "ip6_queue: error creating packet message\n"); return NULL; @@ -313,7 +314,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) { struct nf_queue_entry *entry; - if (vmsg->value > NF_MAX_VERDICT) + if (vmsg->value > NF_MAX_VERDICT || vmsg->value == NF_STOLEN) return -EINVAL; entry = ipq_find_dequeue_entry(vmsg->id); @@ -358,12 +359,9 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg, break; case IPQM_VERDICT: - if (pmsg->msg.verdict.value > NF_MAX_VERDICT) - status = -EINVAL; - else - status = ipq_set_verdict(&pmsg->msg.verdict, - len - sizeof(*pmsg)); - break; + status = ipq_set_verdict(&pmsg->msg.verdict, + len - sizeof(*pmsg)); + break; default: status = -EINVAL; } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 6a79f3081bd..343852e5c70 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -817,8 +817,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, - &tclass, &dontfrag); + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9e69eb0ec6d..1250f902067 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -104,6 +104,9 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) struct inet_peer *peer; u32 *p = NULL; + if (!(rt->dst.flags & DST_HOST)) + return NULL; + if (!rt->rt6i_peer) rt6_bind_peer(rt, 1); @@ -252,6 +255,9 
@@ static void ip6_dst_destroy(struct dst_entry *dst) struct inet6_dev *idev = rt->rt6i_idev; struct inet_peer *peer = rt->rt6i_peer; + if (!(rt->dst.flags & DST_HOST)) + dst_destroy_metrics_generic(dst); + if (idev != NULL) { rt->rt6i_idev = NULL; in6_dev_put(idev); @@ -723,9 +729,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, ipv6_addr_copy(&rt->rt6i_gateway, daddr); } - rt->rt6i_dst.plen = 128; rt->rt6i_flags |= RTF_CACHE; - rt->dst.flags |= DST_HOST; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { @@ -775,9 +779,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct rt6_info *rt = ip6_rt_copy(ort, daddr); if (rt) { - rt->rt6i_dst.plen = 128; rt->rt6i_flags |= RTF_CACHE; - rt->dst.flags |= DST_HOST; dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_raw(&ort->dst))); } return rt; @@ -1078,12 +1080,15 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, neigh = NULL; } - rt->rt6i_idev = idev; + rt->dst.flags |= DST_HOST; + rt->dst.output = ip6_output; dst_set_neighbour(&rt->dst, neigh); atomic_set(&rt->dst.__refcnt, 1); - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); - rt->dst.output = ip6_output; + + ipv6_addr_copy(&rt->rt6i_dst.addr, addr); + rt->rt6i_dst.plen = 128; + rt->rt6i_idev = idev; spin_lock_bh(&icmp6_dst_lock); rt->dst.next = icmp6_dst_gc_list; @@ -1261,6 +1266,14 @@ int ip6_route_add(struct fib6_config *cfg) if (rt->rt6i_dst.plen == 128) rt->dst.flags |= DST_HOST; + if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) { + u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); + if (!metrics) { + err = -ENOMEM; + goto out; + } + dst_init_metrics(&rt->dst, metrics, 0); + } #ifdef CONFIG_IPV6_SUBTREES ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); rt->rt6i_src.plen = cfg->fc_src_len; @@ -1607,9 +1620,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, if (on_link) nrt->rt6i_flags &= ~RTF_GATEWAY; - nrt->rt6i_dst.plen = 128; - nrt->dst.flags |= DST_HOST; - ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); @@ -1754,9 +1764,10 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, if (rt) { rt->dst.input = ort->dst.input; rt->dst.output = ort->dst.output; + rt->dst.flags |= DST_HOST; ipv6_addr_copy(&rt->rt6i_dst.addr, dest); - rt->rt6i_dst.plen = ort->rt6i_dst.plen; + rt->rt6i_dst.plen = 128; dst_copy_metrics(&rt->dst, &ort->dst); rt->dst.error = ort->dst.error; rt->rt6i_idev = ort->rt6i_idev; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 07bf1085458..00b15ac7a70 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -672,6 +672,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; + if (tos == 1) + tos = ipv6_get_dsfield(iph6); + /* ISATAP (RFC4214) - must come before 6to4 */ if (dev->priv_flags & IFF_ISATAP) { struct neighbour *neigh = NULL; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d1fb63f4aeb..3c9fa618b69 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -531,20 +531,6 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, return tcp_v6_send_synack(sk, req, rvp); } -static inline void syn_flood_warning(struct sk_buff *skb) -{ -#ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) - printk(KERN_INFO - "TCPv6: Possible SYN flooding on port %d. 
" - "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest)); - else -#endif - printk(KERN_INFO - "TCPv6: Possible SYN flooding on port %d. " - "Dropping request.\n", ntohs(tcp_hdr(skb)->dest)); -} - static void tcp_v6_reqsk_destructor(struct request_sock *req) { kfree_skb(inet6_rsk(req)->pktopts); @@ -1179,11 +1165,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); __u32 isn = TCP_SKB_CB(skb)->when; struct dst_entry *dst = NULL; -#ifdef CONFIG_SYN_COOKIES int want_cookie = 0; -#else -#define want_cookie 0 -#endif if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); @@ -1192,14 +1174,9 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop; if (inet_csk_reqsk_queue_is_full(sk) && !isn) { - if (net_ratelimit()) - syn_flood_warning(skb); -#ifdef CONFIG_SYN_COOKIES - if (sysctl_tcp_syncookies) - want_cookie = 1; - else -#endif - goto drop; + want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6"); + if (!want_cookie) + goto drop; } if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) @@ -1249,9 +1226,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) while (l-- > 0) *c++ ^= *hash_location++; -#ifdef CONFIG_SYN_COOKIES want_cookie = 0; /* not our kind of cookie */ -#endif tmp_ext.cookie_out_never = 0; /* false */ tmp_ext.cookie_plus = tmp_opt.cookie_plus; } else if (!tp->rx_opt.cookie_in_always) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 29213b51c49..bb95e8e1c6f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1090,8 +1090,8 @@ do_udp_sendmsg: memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(*opt); - err = datagram_send_ctl(sock_net(sk), msg, &fl6, opt, &hlimit, - &tclass, &dontfrag); + err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag); if (err < 0) { fl6_sock_release(flowlabel); return err; diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index d0b70dadf73..2615ffc8e78 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -40,9 +40,9 @@ extern int sysctl_slot_timeout; extern int sysctl_fast_poll_increase; extern char sysctl_devname[]; extern int sysctl_max_baud_rate; -extern int sysctl_min_tx_turn_time; -extern int sysctl_max_tx_data_size; -extern int sysctl_max_tx_window; +extern unsigned int sysctl_min_tx_turn_time; +extern unsigned int sysctl_max_tx_data_size; +extern unsigned int sysctl_max_tx_window; extern int sysctl_max_noreply_time; extern int sysctl_warn_noreply_time; extern int sysctl_lap_keepalive_time; diff --git a/net/irda/qos.c b/net/irda/qos.c index 1b51bcf4239..4369f7f41bc 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -60,7 +60,7 @@ int sysctl_max_noreply_time = 12; * Default is 10us which means using the unmodified value given by the * peer except if it's 0 (0 is likely a bug in the other stack). */ -unsigned sysctl_min_tx_turn_time = 10; +unsigned int sysctl_min_tx_turn_time = 10; /* * Maximum data size to be used in transmission in payload of LAP frame. * There is a bit of confusion in the IrDA spec : @@ -75,13 +75,13 @@ unsigned sysctl_min_tx_turn_time = 10; * bytes frames or all negotiated frame sizes, but you can use the sysctl * to play with this value anyway. * Jean II */ -unsigned sysctl_max_tx_data_size = 2042; +unsigned int sysctl_max_tx_data_size = 2042; /* * Maximum transmit window, i.e. number of LAP frames between turn-around. * This allow to override what the peer told us. Some peers are buggy and * don't always support what they tell us. 
* Jean II */ -unsigned sysctl_max_tx_window = 7; +unsigned int sysctl_max_tx_window = 7; static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get); static int irlap_param_link_disconnect(void *instance, irda_param_t *parm, diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 866f269183c..acb44230b25 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1012,7 +1012,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) cancel_work_sync(&local->reconfig_filter); ieee80211_clear_tx_pending(local); - sta_info_stop(local); rate_control_deinitialize(local); if (skb_queue_len(&local->skb_queue) || @@ -1024,6 +1023,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) destroy_workqueue(local->workqueue); wiphy_unregister(local->hw.wiphy); + sta_info_stop(local); ieee80211_wep_free(local); ieee80211_led_exit(local); kfree(local->int_scan_req); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 3db78b696c5..21070e9bc8d 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -665,7 +665,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) BUG_ON(!sdata->bss); atomic_dec(&sdata->bss->num_sta_ps); - __sta_info_clear_tim_bit(sdata->bss, sta); + sta_info_clear_tim_bit(sta); } local->num_sta--; diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 2fd4565144d..31d56b23b9e 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -364,6 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb, break; case PPTP_WAN_ERROR_NOTIFY: + case PPTP_SET_LINK_INFO: case PPTP_ECHO_REQUEST: case PPTP_ECHO_REPLY: /* I don't have to explain these ;) */ diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 37bf94394be..8235b86b4e8 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -409,7 +409,7 @@ static void tcp_options(const struct sk_buff *skb, if (opsize < 2) /* "silly options" */ return; if (opsize > length) - break; /* don't parse partial options */ + return; /* don't parse partial options */ if (opcode == TCPOPT_SACK_PERM && opsize == TCPOLEN_SACK_PERM) @@ -447,7 +447,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, BUG_ON(ptr == NULL); /* Fast path for timestamp-only option */ - if (length == TCPOLEN_TSTAMP_ALIGNED*4 + if (length == TCPOLEN_TSTAMP_ALIGNED && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) @@ -469,7 +469,7 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff, if (opsize < 2) /* "silly options" */ return; if (opsize > length) - break; /* don't parse partial options */ + return; /* don't parse partial options */ if (opcode == TCPOPT_SACK && opsize >= (TCPOLEN_SACK_BASE diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 00bd475eab4..a80b0cb03f1 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -646,8 +646,8 @@ verdicthdr_get(const struct nlattr * const nfqa[]) return NULL; vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]); - verdict = ntohl(vhdr->verdict); - if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) + verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK; + if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN) return NULL; return vhdr; } diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c index 76a083184d8..ed0db15ab00 100644 --- a/net/netfilter/xt_rateest.c +++ b/net/netfilter/xt_rateest.c @@ 
@@ -78,7 +78,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 {
 	struct xt_rateest_match_info *info = par->matchinfo;
 	struct xt_rateest *est1, *est2;
-	int ret = false;
+	int ret = -EINVAL;
 
 	if (hweight32(info->flags & (XT_RATEEST_MATCH_ABS |
 				     XT_RATEEST_MATCH_REL)) != 1)
@@ -101,13 +101,12 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 	if (!est1)
 		goto err1;
 
+	est2 = NULL;
 	if (info->flags & XT_RATEEST_MATCH_REL) {
 		est2 = xt_rateest_lookup(info->name2);
 		if (!est2)
 			goto err2;
-	} else
-		est2 = NULL;
-
+	}
 	info->est1 = est1;
 	info->est2 = est2;
@@ -116,7 +115,7 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 err2:
 	xt_rateest_put(est1);
 err1:
-	return -EINVAL;
+	return ret;
 }
 
 static void xt_rateest_mt_destroy(const struct xt_mtdtor_param *par)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 102fc212cd6..e051398fdf6 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -196,8 +196,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 	skb2->skb_iif = skb->dev->ifindex;
 	skb2->dev = dev;
-	dev_queue_xmit(skb2);
-	err = 0;
+	err = dev_queue_xmit(skb2);
 
 out:
 	if (err) {
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index be4505ee67a..b01427924f8 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -425,7 +425,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	struct rsvp_filter *f, **fp;
 	struct rsvp_session *s, **sp;
 	struct tc_rsvp_pinfo *pinfo = NULL;
-	struct nlattr *opt = tca[TCA_OPTIONS-1];
+	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_RSVP_MAX + 1];
 	struct tcf_exts e;
 	unsigned int h1, h2;
@@ -439,7 +439,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	if (err < 0)
 		return err;
 
-	err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &rsvp_ext_map);
+	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
 	if (err < 0)
 		return err;
 
@@ -449,8 +449,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 		if (f->handle != handle && handle)
 			goto errout2;
 
-		if (tb[TCA_RSVP_CLASSID-1]) {
-			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+		if (tb[TCA_RSVP_CLASSID]) {
+			f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 			tcf_bind_filter(tp, &f->res, base);
 		}
 
@@ -462,7 +462,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 	err = -EINVAL;
 	if (handle)
 		goto errout2;
-	if (tb[TCA_RSVP_DST-1] == NULL)
+	if (tb[TCA_RSVP_DST] == NULL)
 		goto errout2;
 
 	err = -ENOBUFS;
@@ -471,19 +471,19 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
 		goto errout2;
 
 	h2 = 16;
-	if (tb[TCA_RSVP_SRC-1]) {
-		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC-1]), sizeof(f->src));
+	if (tb[TCA_RSVP_SRC]) {
+		memcpy(f->src, nla_data(tb[TCA_RSVP_SRC]), sizeof(f->src));
 		h2 = hash_src(f->src);
 	}
-	if (tb[TCA_RSVP_PINFO-1]) {
-		pinfo = nla_data(tb[TCA_RSVP_PINFO-1]);
+	if (tb[TCA_RSVP_PINFO]) {
+		pinfo = nla_data(tb[TCA_RSVP_PINFO]);
 		f->spi = pinfo->spi;
 		f->tunnelhdr = pinfo->tunnelhdr;
 	}
-	if (tb[TCA_RSVP_CLASSID-1])
-		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID-1]);
+	if (tb[TCA_RSVP_CLASSID])
+		f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
 
-	dst = nla_data(tb[TCA_RSVP_DST-1]);
+	dst = nla_data(tb[TCA_RSVP_DST]);
 	h1 = hash_dst(dst, pinfo ? pinfo->protocol : 0, pinfo ? pinfo->tunnelid : 0);
 
 	err = -ENOMEM;
@@ -642,8 +642,7 @@ nla_put_failure:
 	return -1;
 }
 
-static struct tcf_proto_ops RSVP_OPS = {
-	.next		= NULL,
+static struct tcf_proto_ops RSVP_OPS __read_mostly = {
 	.kind		= RSVP_ID,
 	.classify	= rsvp_classify,
 	.init		= rsvp_init,
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 167c880cf8d..76388b083f2 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1689,6 +1689,11 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 		case SCTP_CMD_PURGE_ASCONF_QUEUE:
 			sctp_asconf_queue_teardown(asoc);
 			break;
+
+		case SCTP_CMD_SET_ASOC:
+			asoc = cmd->obj.asoc;
+			break;
+
 		default:
 			pr_warn("Impossible command: %u, %p\n",
 				cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 49b847b00f9..a0f31e6c1c6 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2047,6 +2047,12 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep,
 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
 	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
+	/* Restore association pointer to provide SCTP command interpeter
+	 * with a valid context in case it needs to manipulate
+	 * the queues */
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+			 SCTP_ASOC((struct sctp_association *)asoc));
+
 	return retval;
 
 nomem:
diff --git a/net/socket.c b/net/socket.c
index 24a77400b65..ffe92ca32f2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 	 * used_address->name_len is initialized to UINT_MAX so that the first
 	 * destination address never matches.
 	 */
-	if (used_address && used_address->name_len == msg_sys->msg_namelen &&
-	    !memcmp(&used_address->name, msg->msg_name,
+	if (used_address && msg_sys->msg_name &&
+	    used_address->name_len == msg_sys->msg_namelen &&
+	    !memcmp(&used_address->name, msg_sys->msg_name,
 		    used_address->name_len)) {
 		err = sock_sendmsg_nosec(sock, msg_sys, total_len);
 		goto out_freectl;
@@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
 	 */
 	if (used_address && err >= 0) {
 		used_address->name_len = msg_sys->msg_namelen;
-		memcpy(&used_address->name, msg->msg_name,
-		       used_address->name_len);
+		if (msg_sys->msg_name)
+			memcpy(&used_address->name, msg_sys->msg_name,
+			       used_address->name_len);
 	}
 
 out_freectl:
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 645437cfc46..c14865172da 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -616,6 +616,9 @@ int wiphy_register(struct wiphy *wiphy)
 	if (res)
 		goto out_rm_dev;
 
+	rtnl_lock();
+	rdev->wiphy.registered = true;
+	rtnl_unlock();
 	return 0;
 
  out_rm_dev:
@@ -647,6 +650,10 @@ void wiphy_unregister(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
+	rtnl_lock();
+	rdev->wiphy.registered = false;
+	rtnl_unlock();
+
 	rfkill_unregister(rdev->rfkill);
 
 	/* protect the device list */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 02751dbc5a9..68a471ba193 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -852,6 +852,7 @@ static void handle_channel(struct wiphy *wiphy,
 		return;
 	}
 
+	chan->beacon_found = false;
 	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
 	chan->max_antenna_gain = min(chan->orig_mag,
 				     (int) MBI_TO_DBI(power_rule->max_antenna_gain));
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index b7b6ff8be55..dec0fa28372 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -118,6 +118,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
 			     i++, j++)
 				request->channels[i] =
 					&wdev->wiphy->bands[band]->channels[j];
+			request->rates[band] =
+				(1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
 		}
 	}
 	request->n_channels = n_channels;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index c6e4ca6a7d2..ff574597a85 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -93,7 +93,8 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
 	if (rdev->ops->suspend) {
 		rtnl_lock();
-		ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
+		if (rdev->wiphy.registered)
+			ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
 		rtnl_unlock();
 	}
 
@@ -112,7 +113,8 @@ static int wiphy_resume(struct device *dev)
 	if (rdev->ops->resume) {
 		rtnl_lock();
-		ret = rdev->ops->resume(&rdev->wiphy);
+		if (rdev->wiphy.registered)
+			ret = rdev->ops->resume(&rdev->wiphy);
 		rtnl_unlock();
 	}
 
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index a026b0ef244..54a0dc2e2f8 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,6 +212,11 @@ resume:
 		/* only the first xfrm gets the encap type */
 		encap_type = 0;
 
+		if (async && x->repl->check(x, skb, seq)) {
+			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
+			goto drop_unlock;
+		}
+
 		x->repl->advance(x, seq);
 
 		x->curlft.bytes += skb->len;