Diffstat (limited to 'net')
32 files changed, 273 insertions, 143 deletions
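Several hunks below (net/core/dev.c, net/mac80211/wme.c, net/sched/sch_generic.c) convert callers from qdisc_root_lock() to qdisc_lock(). As a rough sketch of the distinction these conversions rely on (reconstructed from the usage here, not quoted from the tree): qdisc_lock() is the per-qdisc queue lock, while qdisc_root_lock() first resolves the root qdisc of the tx queue:

/* Sketch only; the real accessors live in include/net/sch_generic.h. */
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;		/* lock of this qdisc's own queue */
}

static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);	/* root qdisc of the tx queue */

	return qdisc_lock(root);
}

A caller that already holds a pointer to the qdisc it is enqueuing to, running, or resetting only needs that qdisc's own lock, which is what the conversions below exploit.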
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index f597987b242..f288fc4aef9 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -36,6 +36,7 @@ static struct ctl_path ax25_path[] = { { .procname = "ax25", .ctl_name = NET_AX25, }, { } }; + static const ctl_table ax25_param_table[] = { { .ctl_name = NET_AX25_IP_DEFAULT_MODE, @@ -167,6 +168,7 @@ static const ctl_table ax25_param_table[] = { .extra1 = &min_proto, .extra2 = &max_proto }, +#ifdef CONFIG_AX25_DAMA_SLAVE { .ctl_name = NET_AX25_DAMA_SLAVE_TIMEOUT, .procname = "dama_slave_timeout", @@ -177,6 +179,8 @@ static const ctl_table ax25_param_table[] = { .extra1 = &min_ds_timeout, .extra2 = &max_ds_timeout }, +#endif + { .ctl_name = 0 } /* that's all, folks! */ }; @@ -210,16 +214,6 @@ void ax25_register_sysctl(void) ax25_table[n].procname = ax25_dev->dev->name; ax25_table[n].mode = 0555; -#ifndef CONFIG_AX25_DAMA_SLAVE - /* - * We do not wish to have a representation of this parameter - * in /proc/sys/ when configured *not* to include the - * AX.25 DAMA slave code, do we? - */ - - child[AX25_VALUES_DS_TIMEOUT].procname = NULL; -#endif - child[AX25_MAX_VALUES].ctl_name = 0; /* just in case... */ for (k = 0; k < AX25_MAX_VALUES; k++) diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 6e280a8a31e..6a9a6cd74b1 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -113,7 +113,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) struct rtable *rt = &br->fake_rtable; atomic_set(&rt->u.dst.__refcnt, 1); - rt->u.dst.dev = &br->dev; + rt->u.dst.dev = br->dev; rt->u.dst.path = &rt->u.dst; rt->u.dst.metrics[RTAX_MTU - 1] = 1500; rt->u.dst.flags = DST_NOXFRM; diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 921bbe5cb94..6e63ec3f1fc 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -368,14 +368,25 @@ static void br_make_blocking(struct net_bridge_port *p) /* called under bridge lock */ static void br_make_forwarding(struct net_bridge_port *p) { - if (p->state == BR_STATE_BLOCKING) { - if (p->br->stp_enabled == BR_KERNEL_STP) - p->state = BR_STATE_LISTENING; - else - p->state = BR_STATE_LEARNING; + struct net_bridge *br = p->br; - br_log_state(p); - mod_timer(&p->forward_delay_timer, jiffies + p->br->forward_delay); } + if (p->state != BR_STATE_BLOCKING) + return; + + if (br->forward_delay == 0) { + p->state = BR_STATE_FORWARDING; + br_topology_change_detection(br); + del_timer(&p->forward_delay_timer); + } + else if (p->br->stp_enabled == BR_KERNEL_STP) + p->state = BR_STATE_LISTENING; + else + p->state = BR_STATE_LEARNING; + + br_log_state(p); + + if (br->forward_delay != 0) + mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay); } /* called under bridge lock */ diff --git a/net/core/dev.c b/net/core/dev.c index 69320a56a08..01993ad74e7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1796,7 +1796,7 @@ gso: skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS); #endif if (q->enqueue) { - spinlock_t *root_lock = qdisc_root_lock(q); + spinlock_t *root_lock = qdisc_lock(q); spin_lock(root_lock); @@ -1805,7 +1805,6 @@ gso: spin_unlock(root_lock); - rc = rc == NET_XMIT_BYPASS ? 
NET_XMIT_SUCCESS : rc; goto out; } @@ -1909,7 +1908,6 @@ int netif_rx(struct sk_buff *skb) if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { if (queue->input_pkt_queue.qlen) { enqueue: - dev_hold(skb->dev); __skb_queue_tail(&queue->input_pkt_queue, skb); local_irq_restore(flags); return NET_RX_SUCCESS; @@ -1995,7 +1993,7 @@ static void net_tx_action(struct softirq_action *h) smp_mb__before_clear_bit(); clear_bit(__QDISC_STATE_SCHED, &q->state); - root_lock = qdisc_root_lock(q); + root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { qdisc_run(q); spin_unlock(root_lock); @@ -2270,6 +2268,20 @@ out: return ret; } +/* Network device is going away, flush any packets still pending */ +static void flush_backlog(void *arg) +{ + struct net_device *dev = arg; + struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) + if (skb->dev == dev) { + __skb_unlink(skb, &queue->input_pkt_queue); + kfree_skb(skb); + } +} + static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; @@ -2279,7 +2291,6 @@ static int process_backlog(struct napi_struct *napi, int quota) napi->weight = weight_p; do { struct sk_buff *skb; - struct net_device *dev; local_irq_disable(); skb = __skb_dequeue(&queue->input_pkt_queue); @@ -2288,14 +2299,9 @@ static int process_backlog(struct napi_struct *napi, int quota) local_irq_enable(); break; } - local_irq_enable(); - dev = skb->dev; - netif_receive_skb(skb); - - dev_put(dev); } while (++work < quota && jiffies == start_time); return work; @@ -3988,6 +3994,10 @@ int register_netdevice(struct net_device *dev) } } + /* Enable software GSO if SG is supported. */ + if (dev->features & NETIF_F_SG) + dev->features |= NETIF_F_GSO; + netdev_initialize_kobject(dev); ret = netdev_register_kobject(dev); if (ret) @@ -4165,6 +4175,8 @@ void netdev_run_todo(void) dev->reg_state = NETREG_UNREGISTERED; + on_each_cpu(flush_backlog, dev, 1); + netdev_wait_allrefs(dev); /* paranoia */ diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f62c8af85d3..9d92e41826e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2281,6 +2281,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos) struct neighbour *n = neigh_get_first(seq); if (n) { + --(*pos); while (*pos) { n = neigh_get_next(seq, n, pos); if (!n) @@ -2341,6 +2342,7 @@ static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos) struct pneigh_entry *pn = pneigh_get_first(seq); if (pn) { + --(*pos); while (*pos) { pn = pneigh_get_next(seq, pn, pos); if (!pn) @@ -2354,10 +2356,11 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos) { struct neigh_seq_state *state = seq->private; void *rc; + loff_t idxpos = *pos; - rc = neigh_get_idx(seq, pos); + rc = neigh_get_idx(seq, &idxpos); if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY)) - rc = pneigh_get_idx(seq, pos); + rc = pneigh_get_idx(seq, &idxpos); return rc; } @@ -2366,7 +2369,6 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl __acquires(tbl->lock) { struct neigh_seq_state *state = seq->private; - loff_t pos_minus_one; state->tbl = tbl; state->bucket = 0; @@ -2374,8 +2376,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl read_lock_bh(&tbl->lock); - pos_minus_one = *pos - 1; - return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN; + return *pos ? 
neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN; } EXPORT_SYMBOL(neigh_seq_start); @@ -2385,7 +2386,7 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos) void *rc; if (v == SEQ_START_TOKEN) { - rc = neigh_get_idx(seq, pos); + rc = neigh_get_first(seq); goto out; } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 3284605f2ec..2498cdaf8cb 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2085,15 +2085,19 @@ static inline int f_pick(struct pktgen_dev *pkt_dev) if (pkt_dev->flows[flow].count >= pkt_dev->lflow) { /* reset time */ pkt_dev->flows[flow].count = 0; + pkt_dev->flows[flow].flags = 0; pkt_dev->curfl += 1; if (pkt_dev->curfl >= pkt_dev->cflows) pkt_dev->curfl = 0; /*reset */ } } else { flow = random32() % pkt_dev->cflows; + pkt_dev->curfl = flow; - if (pkt_dev->flows[flow].count > pkt_dev->lflow) + if (pkt_dev->flows[flow].count > pkt_dev->lflow) { pkt_dev->flows[flow].count = 0; + pkt_dev->flows[flow].flags = 0; + } } return pkt_dev->curfl; @@ -2162,7 +2166,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) mc = random32() % pkt_dev->src_mac_count; else { mc = pkt_dev->cur_src_mac_offset++; - if (pkt_dev->cur_src_mac_offset > + if (pkt_dev->cur_src_mac_offset >= pkt_dev->src_mac_count) pkt_dev->cur_src_mac_offset = 0; } @@ -2189,7 +2193,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) else { mc = pkt_dev->cur_dst_mac_offset++; - if (pkt_dev->cur_dst_mac_offset > + if (pkt_dev->cur_dst_mac_offset >= pkt_dev->dst_mac_count) { pkt_dev->cur_dst_mac_offset = 0; } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 770d827f5ab..e0689fd7b79 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -232,6 +232,7 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = &ipv4_doint_and_flush, .strategy = &ipv4_doint_and_flush_strategy, + .extra2 = &init_net, }, { .ctl_name = NET_IPV4_NO_PMTU_DISC, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index a027003d69a..a4402de425d 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -269,7 +269,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, skb->mark = sk->sk_mark; mtu = dst_mtu(dst); - if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { + if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_OUTREQUESTS); return NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ea33b26512c..741cfcd96f8 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -346,6 +346,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, */ if (optlen == 0) optval = NULL; + else if (optval == NULL) + goto e_inval; else if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) goto e_inval; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index a46badd1082..ec394cf5a19 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -199,10 +199,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq6 = inet6_rsk(req); treq = tcp_rsk(req); - if (security_inet_conn_request(sk, skb, req)) { - reqsk_free(req); - goto out; - } + if (security_inet_conn_request(sk, skb, req)) + goto out_free; req->mss = mss; ireq->rmt_port = th->source; @@ -255,14 +253,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) fl.fl_ip_dport = inet_rsk(req)->rmt_port; fl.fl_ip_sport = inet_sk(sk)->sport; 
security_req_classify_flow(req, &fl); - if (ip6_dst_lookup(sk, &dst, &fl)) { - reqsk_free(req); - goto out; - } + if (ip6_dst_lookup(sk, &dst, &fl)) + goto out_free; + if (final_p) ipv6_addr_copy(&fl.fl6_dst, final_p); if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0) - goto out; + goto out_free; } req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); @@ -273,7 +270,10 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->rcv_wscale = rcv_wscale; ret = get_cookie_sock(sk, skb, req, dst); - -out: return ret; +out: + return ret; +out_free: + reqsk_free(req); + return NULL; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a4f9a832722..ec59345af65 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -82,6 +82,7 @@ struct ieee80211_sta_bss { u8 bssid[ETH_ALEN]; u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 dtim_period; u16 capability; /* host byte order */ enum ieee80211_band band; int freq; @@ -586,6 +587,7 @@ struct ieee80211_local { struct timer_list sta_cleanup; unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; + unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES]; struct tasklet_struct tx_pending_tasklet; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index a4c5b90de76..0c02c471bca 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -1689,6 +1689,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (local->hw.conf.beacon_int < 10) local->hw.conf.beacon_int = 100; + if (local->hw.max_listen_interval == 0) + local->hw.max_listen_interval = 1; + + local->hw.conf.listen_interval = local->hw.max_listen_interval; + local->wstats_flags |= local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC | IEEE80211_HW_SIGNAL_DB | IEEE80211_HW_SIGNAL_DBM) ? 
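The max_listen_interval plumbing added above feeds the association code in the next hunk, which stops hard-coding a listen interval of 1. A driver would advertise its limit before registration roughly as follows (my_priv and my_ops are placeholder names, and the value 10 is illustrative only):

/* Hypothetical driver probe fragment. */
struct ieee80211_hw *hw = ieee80211_alloc_hw(sizeof(struct my_priv), &my_ops);
int err;

hw->max_listen_interval = 10;		/* in units of beacon intervals */
err = ieee80211_register_hw(hw);	/* also sets conf.listen_interval */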
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index acb04133a95..e1d11c9b672 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -551,6 +551,7 @@ static void ieee80211_set_associated(struct net_device *dev, /* set timing information */ sdata->bss_conf.beacon_int = bss->beacon_int; sdata->bss_conf.timestamp = bss->timestamp; + sdata->bss_conf.dtim_period = bss->dtim_period; changed |= ieee80211_handle_bss_capability(sdata, bss); @@ -773,7 +774,8 @@ static void ieee80211_send_assoc(struct net_device *dev, mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, IEEE80211_STYPE_REASSOC_REQ); mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); - mgmt->u.reassoc_req.listen_interval = cpu_to_le16(1); + mgmt->u.reassoc_req.listen_interval = + cpu_to_le16(local->hw.conf.listen_interval); memcpy(mgmt->u.reassoc_req.current_ap, ifsta->prev_bssid, ETH_ALEN); } else { @@ -781,7 +783,8 @@ static void ieee80211_send_assoc(struct net_device *dev, mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, IEEE80211_STYPE_ASSOC_REQ); mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); - mgmt->u.assoc_req.listen_interval = cpu_to_le16(1); + mgmt->u.assoc_req.listen_interval = + cpu_to_le16(local->hw.conf.listen_interval); } /* SSID */ @@ -2688,6 +2691,16 @@ static void ieee80211_rx_bss_info(struct net_device *dev, bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); + if (elems->tim) { + struct ieee80211_tim_ie *tim_ie = + (struct ieee80211_tim_ie *)elems->tim; + bss->dtim_period = tim_ie->dtim_period; + } + + /* set default value for buggy APs */ + if (!elems->tim || bss->dtim_period == 0) + bss->dtim_period = 1; + bss->supp_rates_len = 0; if (elems->supp_rates) { clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len; @@ -3650,11 +3663,21 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, "%s\n", print_mac(mac, bssid), print_mac(mac2, ifsta->bssid)); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ - if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0 && - (bss = ieee80211_rx_bss_get(dev, bssid, - local->hw.conf.channel->center_freq, - ifsta->ssid, ifsta->ssid_len))) { + + if (found && memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { int ret; + int search_freq; + + if (ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) + search_freq = bss->freq; + else + search_freq = local->hw.conf.channel->center_freq; + + bss = ieee80211_rx_bss_get(dev, bssid, search_freq, + ifsta->ssid, ifsta->ssid_len); + if (!bss) + goto dont_join; + printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" " based on configured SSID\n", dev->name, print_mac(mac, bssid)); @@ -3662,6 +3685,8 @@ static int ieee80211_sta_find_ibss(struct net_device *dev, ieee80211_rx_bss_put(local, bss); return ret; } + +dont_join: #ifdef CONFIG_MAC80211_IBSS_DEBUG printk(KERN_DEBUG " did not try to join ibss\n"); #endif /* CONFIG_MAC80211_IBSS_DEBUG */ @@ -3895,7 +3920,7 @@ done: if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { struct ieee80211_if_sta *ifsta = &sdata->u.sta; if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) || - (!ifsta->state == IEEE80211_IBSS_JOINED && + (!(ifsta->state == IEEE80211_IBSS_JOINED) && !ieee80211_sta_active_ibss(dev))) ieee80211_sta_find_ibss(dev, ifsta); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 69019e94387..771ec68b848 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1060,13 +1060,14 @@ static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 
struct ieee80211_tx_data *tx) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *info; int ret, i; - if (netif_subqueue_stopped(local->mdev, skb)) - return IEEE80211_TX_AGAIN; - if (skb) { + if (netif_subqueue_stopped(local->mdev, skb)) + return IEEE80211_TX_AGAIN; + info = IEEE80211_SKB_CB(skb); + ieee80211_dump_frame(wiphy_name(local->hw.wiphy), "TX to low-level driver", skb); ret = local->ops->tx(local_to_hw(local), skb); @@ -1215,6 +1216,7 @@ retry: if (ret == IEEE80211_TX_FRAG_AGAIN) skb = NULL; + set_bit(queue, local->queues_pending); smp_mb(); /* @@ -1708,14 +1710,19 @@ void ieee80211_tx_pending(unsigned long data) netif_tx_lock_bh(dev); for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { /* Check that this queue is ok */ - if (__netif_subqueue_stopped(local->mdev, i)) + if (__netif_subqueue_stopped(local->mdev, i) && + !test_bit(i, local->queues_pending_run)) continue; if (!test_bit(i, local->queues_pending)) { + clear_bit(i, local->queues_pending_run); ieee80211_wake_queue(&local->hw, i); continue; } + clear_bit(i, local->queues_pending_run); + netif_start_subqueue(local->mdev, i); + store = &local->pending_packet[i]; tx.extra_frag = store->extra_frag; tx.num_extra_frag = store->num_extra_frag; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 19f85e1b369..0d463c80c40 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -361,6 +361,7 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue) struct ieee80211_local *local = hw_to_local(hw); if (test_bit(queue, local->queues_pending)) { + set_bit(queue, local->queues_pending_run); tasklet_schedule(&local->tx_pending_tasklet); } else { netif_wake_subqueue(local->mdev, queue); diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 28437f0001d..4310e2f6566 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -241,12 +241,14 @@ void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local, } else { struct netdev_queue *txq; spinlock_t *root_lock; + struct Qdisc *q; txq = netdev_get_tx_queue(local->mdev, agg_queue); - root_lock = qdisc_root_lock(txq->qdisc); + q = rcu_dereference(txq->qdisc); + root_lock = qdisc_lock(q); spin_lock_bh(root_lock); - qdisc_reset(txq->qdisc); + qdisc_reset(q); spin_unlock_bh(root_lock); } } diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c index 8aa82273014..e5b69556bb5 100644 --- a/net/rfkill/rfkill-input.c +++ b/net/rfkill/rfkill-input.c @@ -109,6 +109,25 @@ static DEFINE_RFKILL_TASK(rfkill_uwb, RFKILL_TYPE_UWB); static DEFINE_RFKILL_TASK(rfkill_wimax, RFKILL_TYPE_WIMAX); static DEFINE_RFKILL_TASK(rfkill_wwan, RFKILL_TYPE_WWAN); +static void rfkill_schedule_evsw_rfkillall(int state) +{ + /* EVERY radio type. state != 0 means radios ON */ + /* handle EPO (emergency power off) through shortcut */ + if (state) { + rfkill_schedule_set(&rfkill_wwan, + RFKILL_STATE_UNBLOCKED); + rfkill_schedule_set(&rfkill_wimax, + RFKILL_STATE_UNBLOCKED); + rfkill_schedule_set(&rfkill_uwb, + RFKILL_STATE_UNBLOCKED); + rfkill_schedule_set(&rfkill_bt, + RFKILL_STATE_UNBLOCKED); + rfkill_schedule_set(&rfkill_wlan, + RFKILL_STATE_UNBLOCKED); + } else + rfkill_schedule_epo(); +} + static void rfkill_event(struct input_handle *handle, unsigned int type, unsigned int code, int data) { @@ -132,21 +151,7 @@ static void rfkill_event(struct input_handle *handle, unsigned int type, } else if (type == EV_SW) { switch (code) { case SW_RFKILL_ALL: - /* EVERY radio type. 
data != 0 means radios ON */ - /* handle EPO (emergency power off) through shortcut */ - if (data) { - rfkill_schedule_set(&rfkill_wwan, - RFKILL_STATE_UNBLOCKED); - rfkill_schedule_set(&rfkill_wimax, - RFKILL_STATE_UNBLOCKED); - rfkill_schedule_set(&rfkill_uwb, - RFKILL_STATE_UNBLOCKED); - rfkill_schedule_set(&rfkill_bt, - RFKILL_STATE_UNBLOCKED); - rfkill_schedule_set(&rfkill_wlan, - RFKILL_STATE_UNBLOCKED); - } else - rfkill_schedule_epo(); + rfkill_schedule_evsw_rfkillall(data); break; default: break; @@ -168,6 +173,7 @@ static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, handle->handler = handler; handle->name = "rfkill"; + /* causes rfkill_start() to be called */ error = input_register_handle(handle); if (error) goto err_free_handle; @@ -185,6 +191,23 @@ static int rfkill_connect(struct input_handler *handler, struct input_dev *dev, return error; } +static void rfkill_start(struct input_handle *handle) +{ + /* Take event_lock to guard against configuration changes, we + * should be able to deal with concurrency with rfkill_event() + * just fine (which event_lock will also avoid). */ + spin_lock_irq(&handle->dev->event_lock); + + if (test_bit(EV_SW, handle->dev->evbit)) { + if (test_bit(SW_RFKILL_ALL, handle->dev->swbit)) + rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL, + handle->dev->sw)); + /* add resync for further EV_SW events here */ + } + + spin_unlock_irq(&handle->dev->event_lock); +} + static void rfkill_disconnect(struct input_handle *handle) { input_close_device(handle); @@ -225,6 +248,7 @@ static struct input_handler rfkill_handler = { .event = rfkill_event, .connect = rfkill_connect, .disconnect = rfkill_disconnect, + .start = rfkill_start, .name = "rfkill", .id_table = rfkill_ids, }; diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index c6f2f388cb7..d2d45655cd1 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -105,6 +105,16 @@ static void rfkill_led_trigger(struct rfkill *rfkill, #endif /* CONFIG_RFKILL_LEDS */ } +#ifdef CONFIG_RFKILL_LEDS +static void rfkill_led_trigger_activate(struct led_classdev *led) +{ + struct rfkill *rfkill = container_of(led->trigger, + struct rfkill, led_trigger); + + rfkill_led_trigger(rfkill, rfkill->state); +} +#endif /* CONFIG_RFKILL_LEDS */ + static void notify_rfkill_state_change(struct rfkill *rfkill) { blocking_notifier_call_chain(&rfkill_notifier_list, @@ -589,7 +599,10 @@ static void rfkill_led_trigger_register(struct rfkill *rfkill) #ifdef CONFIG_RFKILL_LEDS int error; - rfkill->led_trigger.name = rfkill->dev.bus_id; + if (!rfkill->led_trigger.name) + rfkill->led_trigger.name = rfkill->dev.bus_id; + if (!rfkill->led_trigger.activate) + rfkill->led_trigger.activate = rfkill_led_trigger_activate; error = led_trigger_register(&rfkill->led_trigger); if (error) rfkill->led_trigger.name = NULL; diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 6b517b9dac5..43d37256c15 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) case TC_ACT_QUEUED: case TC_ACT_STOLEN: kfree_skb(skb); - return NET_XMIT_SUCCESS; + return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: kfree_skb(skb); goto drop; @@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) ret = qdisc_enqueue(skb, flow->q); if (ret != 0) { drop: __maybe_unused - sch->qstats.drops++; - if (flow) - flow->qstats.drops++; + if (net_xmit_drop_count(ret)) { + sch->qstats.drops++; + if (flow) + 
flow->qstats.drops++; + } return ret; } sch->bstats.bytes += qdisc_pkt_len(skb); @@ -455,7 +457,7 @@ drop: __maybe_unused return 0; } tasklet_schedule(&p->task); - return NET_XMIT_BYPASS; + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } /* @@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch) if (!ret) { sch->q.qlen++; sch->qstats.requeues++; - } else { + } else if (net_xmit_drop_count(ret)) { sch->qstats.drops++; p->link.qstats.drops++; } diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 14954bf4a68..4e261ce62f4 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) (cl = cbq_class_lookup(q, prio)) != NULL) return cl; - *qerr = NET_XMIT_BYPASS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; for (;;) { int result = 0; defmap = head->defaults; @@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: - *qerr = NET_XMIT_SUCCESS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; case TC_ACT_RECLASSIFY: @@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) q->rx_class = cl; #endif if (cl == NULL) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; @@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) return ret; } - sch->qstats.drops++; - cbq_mark_toplevel(q, cl); - cl->qstats.drops++; + if (net_xmit_drop_count(ret)) { + sch->qstats.drops++; + cbq_mark_toplevel(q, cl); + cl->qstats.drops++; + } return ret; } @@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch) cbq_activate_class(cl); return 0; } - sch->qstats.drops++; - cl->qstats.drops++; + if (net_xmit_drop_count(ret)) { + sch->qstats.drops++; + cl->qstats.drops++; + } return ret; } @@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) q->rx_class = NULL; if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) { + int ret; cbq_mark_toplevel(q, cl); q->rx_class = cl; cl->q->__parent = sch; - if (qdisc_enqueue(skb, cl->q) == 0) { + ret = qdisc_enqueue(skb, cl->q); + if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; sch->bstats.packets++; sch->bstats.bytes += qdisc_pkt_len(skb); @@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child) cbq_activate_class(cl); return 0; } - sch->qstats.drops++; + if (net_xmit_drop_count(ret)) + sch->qstats.drops++; return 0; } diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index a935676987e..edd1298f85f 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) case TC_ACT_QUEUED: case TC_ACT_STOLEN: kfree_skb(skb); - return NET_XMIT_SUCCESS; + return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: goto drop; @@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) err = qdisc_enqueue(skb, p->q); if (err != NET_XMIT_SUCCESS) { - sch->qstats.drops++; + if (net_xmit_drop_count(err)) + sch->qstats.drops++; return err; } @@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) drop: kfree_skb(skb); sch->qstats.drops++; - return NET_XMIT_BYPASS; + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } static struct sk_buff *dsmark_dequeue(struct Qdisc *sch) @@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch) 
err = p->q->ops->requeue(skb, p->q); if (err != NET_XMIT_SUCCESS) { - sch->qstats.drops++; + if (net_xmit_drop_count(err)) + sch->qstats.drops++; return err; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 9c9cd4d9489..7cf83b37459 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -29,7 +29,7 @@ /* Main transmission queue. */ /* Modifications to data participating in scheduling must be protected with - * qdisc_root_lock(qdisc) spinlock. + * qdisc_lock(qdisc) spinlock. * * The idea is the following: * - enqueue, dequeue are serialized via qdisc root lock @@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q) if (unlikely((skb = dequeue_skb(q)) == NULL)) return 0; - root_lock = qdisc_root_lock(q); + root_lock = qdisc_lock(q); /* And release qdisc */ spin_unlock(root_lock); @@ -507,7 +507,7 @@ errout: } EXPORT_SYMBOL(qdisc_create_dflt); -/* Under qdisc_root_lock(qdisc) and BH! */ +/* Under qdisc_lock(qdisc) and BH! */ void qdisc_reset(struct Qdisc *qdisc) { @@ -543,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head) kfree((char *) qdisc - qdisc->padded); } -/* Under qdisc_root_lock(qdisc) and BH! */ +/* Under qdisc_lock(qdisc) and BH! */ void qdisc_destroy(struct Qdisc *qdisc) { @@ -659,7 +659,7 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock) dev_queue = netdev_get_tx_queue(dev, i); q = dev_queue->qdisc; - root_lock = qdisc_root_lock(q); + root_lock = qdisc_lock(q); if (lock) spin_lock_bh(root_lock); @@ -735,7 +735,7 @@ static void shutdown_scheduler_queue(struct net_device *dev, struct Qdisc *qdisc_default = _qdisc_default; if (qdisc) { - spinlock_t *root_lock = qdisc_root_lock(qdisc); + spinlock_t *root_lock = qdisc_lock(qdisc); dev_queue->qdisc = qdisc_default; dev_queue->qdisc_sleeping = qdisc_default; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 0ae7d19dcba..c2b8d9cce3d 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) if (cl->level == 0) return cl; - *qerr = NET_XMIT_BYPASS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; tcf = q->root.filter_list; while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: - *qerr = NET_XMIT_SUCCESS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } @@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) cl = hfsc_classify(skb, sch, &err); if (cl == NULL) { - if (err == NET_XMIT_BYPASS) + if (err & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return err; @@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) err = qdisc_enqueue(skb, cl->qdisc); if (unlikely(err != NET_XMIT_SUCCESS)) { - cl->qstats.drops++; - sch->qstats.drops++; + if (net_xmit_drop_count(err)) { + cl->qstats.drops++; + sch->qstats.drops++; + } return err; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 75a40951c4f..be35422711a 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0) return cl; - *qerr = NET_XMIT_BYPASS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; tcf = q->filter_list; while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: case TC_ACT_STOLEN: - 
*qerr = NET_XMIT_SUCCESS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } @@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; #endif - } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) { - sch->qstats.drops++; - cl->qstats.drops++; + } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { + if (net_xmit_drop_count(ret)) { + sch->qstats.drops++; + cl->qstats.drops++; + } return NET_XMIT_DROP; } else { cl->bstats.packets += @@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch) } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; #endif - } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != + } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { - sch->qstats.drops++; - cl->qstats.drops++; + if (net_xmit_drop_count(ret)) { + sch->qstats.drops++; + cl->qstats.drops++; + } return NET_XMIT_DROP; } else htb_activate(q, cl); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index a5908570067..fb0294d0b55 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (count == 0) { sch->qstats.drops++; kfree_skb(skb); - return NET_XMIT_BYPASS; + return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; } skb_orphan(skb); @@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) sch->q.qlen++; sch->bstats.bytes += qdisc_pkt_len(skb); sch->bstats.packets++; - } else + } else if (net_xmit_drop_count(ret)) { sch->qstats.drops++; + } pr_debug("netem: enqueue ret %d\n", ret); return ret; diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index f849243eb09..eac197610ed 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) struct tcf_result res; int err; - *qerr = NET_XMIT_BYPASS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { err = tc_classify(skb, q->filter_list, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: - *qerr = NET_XMIT_SUCCESS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } @@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) #ifdef CONFIG_NET_CLS_ACT if (qdisc == NULL) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; @@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) sch->q.qlen++; return NET_XMIT_SUCCESS; } - sch->qstats.drops++; + if (net_xmit_drop_count(ret)) + sch->qstats.drops++; return ret; } @@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch) qdisc = prio_classify(skb, sch, &ret); #ifdef CONFIG_NET_CLS_ACT if (qdisc == NULL) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; @@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch) sch->qstats.requeues++; return 0; } - sch->qstats.drops++; + if (net_xmit_drop_count(ret)) + sch->qstats.drops++; return NET_XMIT_DROP; } diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 
3f2d1d7f3bb..5da05839e22 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) sch->bstats.bytes += qdisc_pkt_len(skb); sch->bstats.packets++; sch->q.qlen++; - } else { + } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; sch->qstats.drops++; } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 8589da66656..6e041d10dbd 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, if (!q->filter_list) return sfq_hash(q, skb) + 1; - *qerr = NET_XMIT_BYPASS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; result = tc_classify(skb, q->filter_list, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: - *qerr = NET_XMIT_SUCCESS; + *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return 0; } @@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) hash = sfq_classify(skb, sch, &ret); if (hash == 0) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; @@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch) hash = sfq_classify(skb, sch, &ret); if (hash == 0) { - if (ret == NET_XMIT_BYPASS) + if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index b296672f763..7d3b7ff3bf0 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) ret = qdisc_enqueue(skb, q->qdisc); if (ret != 0) { - sch->qstats.drops++; + if (net_xmit_drop_count(ret)) + sch->qstats.drops++; return ret; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index a238d6834b3..483a01d0740 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -195,8 +195,7 @@ out: } /* Based on tcp_v6_xmit() in tcp_ipv6.c. */ -static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport, - int ipfragok) +static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) { struct sock *sk = skb->sk; struct ipv6_pinfo *np = inet6_sk(sk); @@ -231,7 +230,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport, SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); - return ip6_xmit(sk, skb, &fl, np->opt, ipfragok); + if (!(transport->param_flags & SPP_PMTUD_ENABLE)) + skb->local_df = 1; + + return ip6_xmit(sk, skb, &fl, np->opt, 0); } /* Returns the dst cache entry for the given source and destination ip diff --git a/net/sctp/output.c b/net/sctp/output.c index 45684646b1d..0dc4a7dfb23 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -586,10 +586,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n", nskb->len); - if (tp->param_flags & SPP_PMTUD_ENABLE) - (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok); - else - (*tp->af_specific->sctp_xmit)(nskb, tp, 1); + nskb->local_df = packet->ipfragok; + (*tp->af_specific->sctp_xmit)(nskb, tp); out: packet->size = packet->overhead; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index a6e0818bcff..0b65354aaf6 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -862,16 +862,21 @@ static int sctp_inet_supported_addrs(const struct sctp_sock *opt, /* Wrapper routine that calls the ip transmit routine. 
*/ static inline int sctp_v4_xmit(struct sk_buff *skb, - struct sctp_transport *transport, int ipfragok) + struct sctp_transport *transport) { + struct inet_sock *inet = inet_sk(skb->sk); + SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n", __func__, skb, skb->len, NIPQUAD(skb->rtable->rt_src), NIPQUAD(skb->rtable->rt_dst)); + inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? + IP_PMTUDISC_DO : IP_PMTUDISC_DONT; + SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); - return ip_queue_xmit(skb, ipfragok); + return ip_queue_xmit(skb, 0); } static struct sctp_af sctp_af_inet;
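Most of the net/sched hunks above replace NET_XMIT_BYPASS returns with flag bits OR'ed into NET_XMIT_SUCCESS, and gate the drop counters on net_xmit_drop_count(). A minimal sketch of the scheme they assume (the real definitions accompany these patches in include/net/sch_generic.h; treat the exact values as illustrative):

enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,	/* a classifier action consumed the skb */
	__NET_XMIT_BYPASS = 0x00020000,	/* packet bypassed the normal queue */
};

#ifdef CONFIG_NET_CLS_ACT
/* A stolen packet was delivered elsewhere, not dropped, so it must
 * not bump qstats.drops. */
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

Keeping the flags above the low status byte means code that only tests for a nonzero return still sees success, while the qdiscs above can tell a genuine drop apart from a stolen or bypassed packet when updating their statistics.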