From 9446f3efc53512e5ad9e0966539021a2a41fe5a0 Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Tue, 17 Jan 2012 10:32:01 +0100
Subject: mac80211: fix debugfs key->station symlink

Since stations moved into a virtual interface subdirectory, this link
has been broken. Fix it.

Signed-off-by: Johannes Berg
Signed-off-by: John W. Linville
---
 net/mac80211/debugfs_key.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 38e6101190d..59edcd95a58 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -225,9 +225,9 @@ KEY_OPS(key);
         key, &key_##name##_ops);
 
 void ieee80211_debugfs_key_add(struct ieee80211_key *key)
-  {
+{
     static int keycount;
-    char buf[50];
+    char buf[100];
     struct sta_info *sta;
 
     if (!key->local->debugfs.keys)
@@ -244,7 +244,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 
     sta = key->sta;
     if (sta) {
-        sprintf(buf, "../../stations/%pM", sta->sta.addr);
+        sprintf(buf, "../../netdev:%s/stations/%pM",
+            sta->sdata->name, sta->sta.addr);
         key->debugfs.stalink =
             debugfs_create_symlink("station", key->debugfs.dir, buf);
     }
--
cgit v1.2.3-70-g09d2

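The link target passed to debugfs_create_symlink() is resolved relative
to the directory containing the link, which is why only the relative
path needed the extra netdev:<ifname> component. A minimal sketch of the
resulting layout (the concrete paths are illustrative, and snprintf() is
used here as a defensive variant of the patch's sprintf()):

    /* key dir:     ieee80211/phy0/keys/<n>
     * station dir: ieee80211/phy0/netdev:wlan0/stations/00:11:22:33:44:55
     * old target:  ../../stations/00:11:22:33:44:55  (no longer exists)
     * new target:  ../../netdev:wlan0/stations/00:11:22:33:44:55
     */
    char buf[100];

    snprintf(buf, sizeof(buf), "../../netdev:%s/stations/%pM",
             sta->sdata->name, sta->sta.addr);
    key->debugfs.stalink =
        debugfs_create_symlink("station", key->debugfs.dir, buf);
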
From 65e8b0ccb6cf176f8eddb1b05534be46580da9dd Mon Sep 17 00:00:00 2001
From: Javier Cardona
Date: Tue, 17 Jan 2012 18:17:46 -0800
Subject: mac80211: Use the right headroom size for mesh mgmt frames

Use local->tx_headroom instead of local->hw.extra_tx_headroom.
local->tx_headroom is the max of the hw.extra_tx_headroom required by
the driver and the headroom required by mac80211 for status reporting.
On drivers where hw.extra_tx_headroom is smaller than what mac80211
requires (e.g. ath5k), we would not reserve sufficient buffer space to
report tx status. Also, don't reserve local->tx_headroom +
local->hw.extra_tx_headroom.

Reported-by: Simon Morgenthaler
Reported-by: Kai Scharwies
Signed-off-by: Javier Cardona
Signed-off-by: John W. Linville
---
 net/mac80211/mesh_hwmp.c  | 8 ++++----
 net/mac80211/mesh_plink.c | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 73abb7524b2..54df1b2bafd 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -119,12 +119,12 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
     int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
               sizeof(mgmt->u.action.u.mesh_action);
 
-    skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+    skb = dev_alloc_skb(local->tx_headroom +
                 hdr_len +
                 2 + 37); /* max HWMP IE */
     if (!skb)
         return -1;
-    skb_reserve(skb, local->hw.extra_tx_headroom);
+    skb_reserve(skb, local->tx_headroom);
     mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
     memset(mgmt, 0, hdr_len);
     mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
@@ -250,12 +250,12 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
     if (time_before(jiffies, ifmsh->next_perr))
         return -EAGAIN;
 
-    skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+    skb = dev_alloc_skb(local->tx_headroom +
                 hdr_len +
                 2 + 15 /* PERR IE */);
     if (!skb)
         return -1;
-    skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
+    skb_reserve(skb, local->tx_headroom);
     mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
     memset(mgmt, 0, hdr_len);
     mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 41ef1b47644..a17251730b9 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
     int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
               sizeof(mgmt->u.action.u.self_prot);
 
-    skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+    skb = dev_alloc_skb(local->tx_headroom +
                 hdr_len +
                 2 + /* capability info */
                 2 + /* AID */
@@ -186,7 +186,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                 sdata->u.mesh.ie_len);
     if (!skb)
         return -1;
-    skb_reserve(skb, local->hw.extra_tx_headroom);
+    skb_reserve(skb, local->tx_headroom);
     mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
     memset(mgmt, 0, hdr_len);
     mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
--
cgit v1.2.3-70-g09d2

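The invariant the patch relies on is that local->tx_headroom already
covers both consumers. A minimal sketch of that relationship (the exact
expression lives in mac80211's registration path and may differ across
kernel versions):

    /* tx_headroom must satisfy the larger of the two consumers: the
     * driver's extra_tx_headroom and mac80211's own TX status
     * reporting headroom.
     */
    local->tx_headroom = max_t(unsigned int,
                               local->hw.extra_tx_headroom,
                               IEEE80211_TX_STATUS_HEADROOM);

Reserving local->tx_headroom at allocation time therefore always leaves
enough room for both, which reserving only hw.extra_tx_headroom did not.
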
From bc4934bc61d0a11fd62c5187ff83645628f8be8b Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Wed, 18 Jan 2012 14:10:25 +0100
Subject: mac80211: fix work removal on deauth request

When deauth is requested while an auth or assoc work item is in
progress, we currently delete it without regard for any state it might
need to clean up. Fix it by cleaning up for those items.

In the case Pontus found, the problem manifested itself as such:

    authenticate with 00:23:69:aa:dd:7b (try 1)
    authenticated
    failed to insert Dummy STA entry for the AP (error -17)
    deauthenticating from 00:23:69:aa:dd:7b by local choice (reason=2)

It could also happen differently if the driver uses the tx_sync
callback.

We can't just call the ->done() method of the work items because that
will lock up due to the locking in cfg80211. This fix isn't very clean,
but that seems acceptable since I have patches pending to remove this
code completely.

Cc: stable@vger.kernel.org
Reported-by: Pontus Fuchs
Tested-by: Pontus Fuchs
Signed-off-by: Johannes Berg
Signed-off-by: John W. Linville
---
 net/mac80211/mlme.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ecb4c84c1bb..295be92f7c7 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2750,7 +2750,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 {
     struct ieee80211_local *local = sdata->local;
     struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-    struct ieee80211_work *wk;
     u8 bssid[ETH_ALEN];
     bool assoc_bss = false;
 
@@ -2763,30 +2762,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
         assoc_bss = true;
     } else {
         bool not_auth_yet = false;
+        struct ieee80211_work *tmp, *wk = NULL;
 
         mutex_unlock(&ifmgd->mtx);
 
         mutex_lock(&local->mtx);
-        list_for_each_entry(wk, &local->work_list, list) {
-            if (wk->sdata != sdata)
+        list_for_each_entry(tmp, &local->work_list, list) {
+            if (tmp->sdata != sdata)
                 continue;
 
-            if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
-                wk->type != IEEE80211_WORK_AUTH &&
-                wk->type != IEEE80211_WORK_ASSOC &&
-                wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
+            if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
+                tmp->type != IEEE80211_WORK_AUTH &&
+                tmp->type != IEEE80211_WORK_ASSOC &&
+                tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
                 continue;
 
-            if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+            if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
                 continue;
 
-            not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
-            list_del_rcu(&wk->list);
-            free_work(wk);
+            not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
+            list_del_rcu(&tmp->list);
+            synchronize_rcu();
+            wk = tmp;
             break;
         }
         mutex_unlock(&local->mtx);
 
+        if (wk && wk->type == IEEE80211_WORK_ASSOC) {
+            /* clean up dummy sta & TX sync */
+            sta_info_destroy_addr(wk->sdata, wk->filter_ta);
+            if (wk->assoc.synced)
+                drv_finish_tx_sync(local, wk->sdata,
+                           wk->filter_ta,
+                           IEEE80211_TX_SYNC_ASSOC);
+        } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
+            if (wk->probe_auth.synced)
+                drv_finish_tx_sync(local, wk->sdata,
+                           wk->filter_ta,
+                           IEEE80211_TX_SYNC_AUTH);
+        }
+        kfree(wk);
+
         /*
          * If somebody requests authentication and we haven't
          * sent out an auth frame yet there's no need to send
--
cgit v1.2.3-70-g09d2

From 013d97e9da1877f1334aa8ff3a19921ebbfe99b5 Mon Sep 17 00:00:00 2001
From: Francesco Ruggeri
Date: Mon, 16 Jan 2012 10:40:10 +0000
Subject: net: race condition in ipv6 forwarding and disable_ipv6 parameters

There is a race condition in addrconf_sysctl_forward() and
addrconf_sysctl_disable(). These functions change idev->cnf.forwarding
(resp. idev->cnf.disable_ipv6) and then try to grab the rtnl lock
before performing any actions. If that fails they restore the original
value and restart the syscall. This creates race windows if ipv6 code
accesses these parameters concurrently, or if multiple instances try
to do the same operation.

As an example of the former, if __ipv6_ifa_notify() finds a 0 in
idev->cnf.forwarding when invoked by addrconf_ifdown(), it may not
free anycast addresses, ultimately resulting in the net_device never
being freed.

This patch reads the user parameters into a temporary location and
only writes the actual parameters once the rtnl lock is acquired.

Tested in 2.6.38.8.

Signed-off-by: Francesco Ruggeri
Signed-off-by: David S. Miller
---
 net/ipv6/addrconf.c | 61 +++++++++++++++++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 21 deletions(-)

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a225d5ee3c2..c02280a4d12 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
     rcu_read_unlock();
 }
 
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 {
     struct net *net;
+    int old;
+
+    if (!rtnl_trylock())
+        return restart_syscall();
 
     net = (struct net *)table->extra2;
-    if (p == &net->ipv6.devconf_dflt->forwarding)
-        return 0;
+    old = *p;
+    *p = newf;
 
-    if (!rtnl_trylock()) {
-        /* Restore the original values before restarting */
-        *p = old;
-        return restart_syscall();
+    if (p == &net->ipv6.devconf_dflt->forwarding) {
+        rtnl_unlock();
+        return 0;
     }
 
     if (p == &net->ipv6.devconf_all->forwarding) {
-        __s32 newf = net->ipv6.devconf_all->forwarding;
         net->ipv6.devconf_dflt->forwarding = newf;
         addrconf_forward_change(net, newf);
-    } else if ((!*p) ^ (!old))
+    } else if ((!newf) ^ (!old))
         dev_forward_change((struct inet6_dev *)table->extra1);
     rtnl_unlock();
 
-    if (*p)
+    if (newf)
         rt6_purge_dflt_routers(net);
     return 1;
 }
@@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
     int *valp = ctl->data;
     int val = *valp;
     loff_t pos = *ppos;
+    ctl_table lctl;
     int ret;
 
-    ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+    /*
+     * ctl->data points to idev->cnf.forwarding, we should
+     * not modify it until we get the rtnl lock.
+     */
+    lctl = *ctl;
+    lctl.data = &val;
+
+    ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
     if (write)
         ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
     rcu_read_unlock();
 }
 
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
+static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 {
     struct net *net;
+    int old;
+
+    if (!rtnl_trylock())
+        return restart_syscall();
 
     net = (struct net *)table->extra2;
+    old = *p;
+    *p = newf;
 
-    if (p == &net->ipv6.devconf_dflt->disable_ipv6)
+    if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+        rtnl_unlock();
         return 0;
-
-    if (!rtnl_trylock()) {
-        /* Restore the original values before restarting */
-        *p = old;
-        return restart_syscall();
     }
 
     if (p == &net->ipv6.devconf_all->disable_ipv6) {
-        __s32 newf = net->ipv6.devconf_all->disable_ipv6;
         net->ipv6.devconf_dflt->disable_ipv6 = newf;
         addrconf_disable_change(net, newf);
-    } else if ((!*p) ^ (!old))
+    } else if ((!newf) ^ (!old))
         dev_disable_change((struct inet6_dev *)table->extra1);
 
     rtnl_unlock();
@@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
     int *valp = ctl->data;
     int val = *valp;
     loff_t pos = *ppos;
+    ctl_table lctl;
    int ret;
 
-    ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+    /*
+     * ctl->data points to idev->cnf.disable_ipv6, we should
+     * not modify it until we get the rtnl lock.
+     */
+    lctl = *ctl;
+    lctl.data = &val;
+
+    ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
     if (write)
         ret = addrconf_disable_ipv6(ctl, valp, val);
--
cgit v1.2.3-70-g09d2

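The fix generalizes to a reusable idiom: let proc_dointvec() parse into
a stack variable through a shallow copy of the ctl_table, and publish
the value only under the lock. A sketch of the pattern, where
commit_under_rtnl() is a hypothetical placeholder for an
addrconf_fixup_forwarding()-style commit function:

    static int my_sysctl_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
    {
            int *valp = ctl->data;          /* shared location        */
            int val = *valp;                /* private scratch copy   */
            struct ctl_table lctl = *ctl;   /* shallow copy is enough */
            int ret;

            lctl.data = &val;               /* parse into the copy    */
            ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);

            if (write && ret == 0)
                    ret = commit_under_rtnl(ctl, valp, val); /* hypothetical */
            return ret;
    }

Until the commit function takes the rtnl lock and writes *valp,
concurrent readers only ever observe the old, consistent value.
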
From fc16dcd8c2e1e9bc91ed765957e1f2bbf334253e Mon Sep 17 00:00:00 2001
From: Neal Cardwell
Date: Wed, 18 Jan 2012 17:47:58 +0000
Subject: tcp: fix undo after RTO for BIC

This patch fixes BIC so that cwnd reductions made during RTOs can be
undone (just as they already can be undone when using the default/Reno
behavior).

When undoing cwnd reductions, BIC-derived congestion control modules
were restoring the cwnd from last_max_cwnd. There were two problems
with using last_max_cwnd to restore a cwnd during undo:

(a) last_max_cwnd was set to 0 on state transitions into TCP_CA_Loss
    (by calling the module's reset() functions), so cwnd reductions
    from RTOs could not be undone.

(b) when fast_convergence is enabled (which it is by default),
    last_max_cwnd does not actually hold the value of snd_cwnd before
    the loss; instead, it holds a scaled-down version of snd_cwnd.

This patch makes the following changes:

(1) upon undo, revert snd_cwnd to ca->loss_cwnd, which is already, as
    the existing comment notes, the "congestion window at last loss"

(2) stop forgetting ca->loss_cwnd on TCP_CA_Loss events

(3) use ca->last_max_cwnd to check if we're in slow start

Signed-off-by: Neal Cardwell
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_bic.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 6187eb4d1dc..f45e1c24244 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
     ca->cnt = 0;
     ca->last_max_cwnd = 0;
-    ca->loss_cwnd = 0;
     ca->last_cwnd = 0;
     ca->last_time = 0;
     ca->epoch_start = 0;
@@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
 
 static void bictcp_init(struct sock *sk)
 {
-    bictcp_reset(inet_csk_ca(sk));
+    struct bictcp *ca = inet_csk_ca(sk);
+
+    bictcp_reset(ca);
+    ca->loss_cwnd = 0;
+
     if (initial_ssthresh)
         tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
@@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
     }
 
     /* if in slow start or link utilization is very low */
-    if (ca->loss_cwnd == 0) {
+    if (ca->last_max_cwnd == 0) {
         if (ca->cnt > 20) /* increase cwnd 5% per RTT */
             ca->cnt = 20;
     }
@@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
     const struct tcp_sock *tp = tcp_sk(sk);
     const struct bictcp *ca = inet_csk_ca(sk);
-    return max(tp->snd_cwnd, ca->last_max_cwnd);
+    return max(tp->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
--
cgit v1.2.3-70-g09d2

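A small numeric illustration of problem (b), using CUBIC's default
fixed-point beta of roughly 717/1024 (the constants are approximate and
for illustration only):

    u32 cwnd = 100;                 /* snd_cwnd just before the loss */
    /* fast convergence scales the recorded maximum down: */
    u32 last_max_cwnd = cwnd * (1024 + 717) / (2 * 1024);   /* ~85 */
    u32 loss_cwnd = cwnd;           /* exact cwnd at the loss: 100  */

    /* Undoing from last_max_cwnd restores ~85 instead of 100;
     * undoing from loss_cwnd restores the true pre-loss window.
     */
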
From 5a45f0086a2dcf50db7e6a0bf5be933880f85127 Mon Sep 17 00:00:00 2001
From: Neal Cardwell
Date: Wed, 18 Jan 2012 17:47:59 +0000
Subject: tcp: fix undo after RTO for CUBIC

This patch fixes CUBIC so that cwnd reductions made during RTOs can be
undone (just as they already can be undone when using the default/Reno
behavior).

When undoing cwnd reductions, BIC-derived congestion control modules
were restoring the cwnd from last_max_cwnd. There were two problems
with using last_max_cwnd to restore a cwnd during undo:

(a) last_max_cwnd was set to 0 on state transitions into TCP_CA_Loss
    (by calling the module's reset() functions), so cwnd reductions
    from RTOs could not be undone.

(b) when fast_convergence is enabled (which it is by default),
    last_max_cwnd does not actually hold the value of snd_cwnd before
    the loss; instead, it holds a scaled-down version of snd_cwnd.

This patch makes the following changes:

(1) upon undo, revert snd_cwnd to ca->loss_cwnd, which is already, as
    the existing comment notes, the "congestion window at last loss"

(2) stop forgetting ca->loss_cwnd on TCP_CA_Loss events

(3) use ca->last_max_cwnd to check if we're in slow start

Signed-off-by: Neal Cardwell
Acked-by: Stephen Hemminger
Acked-by: Sangtae Ha
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_cubic.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index f376b05cca8..a9077f441cb 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
     ca->cnt = 0;
     ca->last_max_cwnd = 0;
-    ca->loss_cwnd = 0;
     ca->last_cwnd = 0;
     ca->last_time = 0;
     ca->bic_origin_point = 0;
@@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
 
 static void bictcp_init(struct sock *sk)
 {
-    bictcp_reset(inet_csk_ca(sk));
+    struct bictcp *ca = inet_csk_ca(sk);
+
+    bictcp_reset(ca);
+    ca->loss_cwnd = 0;
 
     if (hystart)
         bictcp_hystart_reset(sk);
@@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
      * The initial growth of cubic function may be too conservative
      * when the available bandwidth is still unknown.
      */
-    if (ca->loss_cwnd == 0 && ca->cnt > 20)
+    if (ca->last_max_cwnd == 0 && ca->cnt > 20)
         ca->cnt = 20;   /* increase cwnd 5% per RTT */
 
     /* TCP Friendly */
@@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
     struct bictcp *ca = inet_csk_ca(sk);
 
-    return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
+    return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
--
cgit v1.2.3-70-g09d2

From a42b4799c683723e8c464de4026af085b2ebd5fa Mon Sep 17 00:00:00 2001
From: Vijay Subramanian
Date: Thu, 19 Jan 2012 10:20:59 +0000
Subject: netem: Fix off-by-one bug in reordering

With netem reordering, a gap of N is supposed to reorder every Nth
packet with the given reorder probability. However, the code currently
skips N packets and reorders every (N+1)th packet.

Signed-off-by: Vijay Subramanian
Signed-off-by: David S. Miller
---
 net/sched/sch_netem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e7e1d0b57b3..2776012132e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -419,7 +419,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     cb = netem_skb_cb(skb);
     if (q->gap == 0 ||      /* not doing reordering */
-        q->counter < q->gap ||  /* inside last reordering gap */
+        q->counter < q->gap - 1 ||  /* inside last reordering gap */
         q->reorder < get_crandom(&q->reorder_cor)) {
         psched_time_t now;
         psched_tdiff_t delay;
 
--
cgit v1.2.3-70-g09d2

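A short trace makes the off-by-one concrete (illustrative values;
q->counter is incremented on the delayed path and reset to 0 whenever a
packet is sent immediately, i.e. reordered):

    /* gap N = 3, reorder probability hit on every opportunity:
     *
     * old test (q->counter < q->gap):
     *   counter 0,1,2 -> delayed path; counter 3 -> reordered
     *   => every 4th packet, i.e. the (N+1)th
     *
     * new test (q->counter < q->gap - 1):
     *   counter 0,1 -> delayed path; counter 2 -> reordered
     *   => every 3rd packet, i.e. the Nth, as documented
     */
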
From 974c12360dfe6ab01201fe9e708e7755c413f8b6 Mon Sep 17 00:00:00 2001
From: Yuchung Cheng
Date: Thu, 19 Jan 2012 14:42:21 +0000
Subject: tcp: detect loss above high_seq in recovery

Correctly implement a loss detection heuristic: new sequences (above
high_seq) sent during fast recovery are deemed lost when higher
sequences are SACKed. The current code does not catch these losses
because tcp_mark_head_lost() does not check packets beyond high_seq.
The fix is straightforward: check packets up to the highest SACKed
packet.

In addition, all the FLAG_DATA_LOST logic is ineffective and redundant
and can be removed. Update the loss heuristic comments. The algorithm
above is documented as heuristic B, but it is redundant too, because
heuristic A already covers B.

Note that this change only marks some forward-retransmitted packets
LOST. It does NOT forbid TCP performing further CWR on new losses. A
potential follow-up patch under preparation is to perform another CWR
on "new" losses, such as

1) a sequence above high_seq is lost (by resetting high_seq to snd_nxt)
2) a retransmission is lost.

Signed-off-by: Yuchung Cheng
Signed-off-by: David S. Miller
---
 include/linux/snmp.h |  1 -
 net/ipv4/proc.c      |  1 -
 net/ipv4/tcp_input.c | 41 +++++++++++++++--------------------------
 3 files changed, 15 insertions(+), 28 deletions(-)

diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index e16557a357e..c1241c42817 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -192,7 +192,6 @@ enum
     LINUX_MIB_TCPPARTIALUNDO,       /* TCPPartialUndo */
     LINUX_MIB_TCPDSACKUNDO,         /* TCPDSACKUndo */
     LINUX_MIB_TCPLOSSUNDO,          /* TCPLossUndo */
-    LINUX_MIB_TCPLOSS,          /* TCPLoss */
     LINUX_MIB_TCPLOSTRETRANSMIT,        /* TCPLostRetransmit */
     LINUX_MIB_TCPRENOFAILURES,      /* TCPRenoFailures */
     LINUX_MIB_TCPSACKFAILURES,      /* TCPSackFailures */
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3569d8ecaea..6afc807ee2a 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = {
     SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
     SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
     SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
-    SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
     SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
     SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
     SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2877c3e0958..976034f8232 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SYN_ACKED      0x10 /* This ACK acknowledged SYN. */
 #define FLAG_DATA_SACKED    0x20 /* New SACK. */
 #define FLAG_ECE        0x40 /* ECE in this ACK */
-#define FLAG_DATA_LOST      0x80 /* SACK detected data lossage. */
 #define FLAG_SLOWPATH       0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED   0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED   0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * These 6 states form finite state machine, controlled by the following events:
  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
- * 3. Loss detection event of one of three flavors:
+ * 3. Loss detection event of two flavors:
  *  A. Scoreboard estimator decided the packet is lost.
  *     A'. Reno "three dupacks" marks head of queue lost.
- *     A''. Its FACK modfication, head until snd.fack is lost.
- *  B. SACK arrives sacking data transmitted after never retransmitted
- *     hole was sent out.
- *  C. SACK arrives sacking SND.NXT at the moment, when the
+ *     A''. Its FACK modification, head until snd.fack is lost.
+ *  B. SACK arrives sacking SND.NXT at the moment, when the
  *     segment was retransmitted.
  * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
@@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 }
 
 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "C". Later note: FACK people cheated me again 8), we have to account
+ * Event "B". Later note: FACK people cheated me again 8), we have to account
  * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
@@ -1844,10 +1841,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
         if (found_dup_sack && ((i + 1) == first_sack_index))
             next_dup = &sp[i + 1];
 
-        /* Event "B" in the comment above. */
-        if (after(end_seq, tp->high_seq))
-            state.flag |= FLAG_DATA_LOST;
-
         /* Skip too early cached blocks */
         while (tcp_sack_cache_ok(tp, cache) &&
                !before(start_seq, cache->end_seq))
@@ -2515,8 +2508,11 @@ static void tcp_timeout_skbs(struct sock *sk)
     tcp_verify_left_out(tp);
 }
 
-/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
- * is against sacked "cnt", otherwise it's against facked "cnt"
+/* Detect loss in event "A" above by marking head of queue up as lost.
+ * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
+ * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * has at least tp->reordering SACKed seqments above it; "packets" refers to
+ * the maximum SACKed segments to pass before reaching this limit.
  */
 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
@@ -2525,6 +2521,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
     int cnt, oldcnt;
     int err;
     unsigned int mss;
+    /* Use SACK to deduce losses of new sequences sent during recovery */
+    const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;
 
     WARN_ON(packets > tp->packets_out);
     if (tp->lost_skb_hint) {
@@ -2546,7 +2544,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
             tp->lost_skb_hint = skb;
             tp->lost_cnt_hint = cnt;
 
-        if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+        if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
             break;
 
         oldcnt = cnt;
@@ -3033,19 +3031,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
     if (tcp_check_sack_reneging(sk, flag))
         return;
 
-    /* C. Process data loss notification, provided it is valid. */
-    if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
-        before(tp->snd_una, tp->high_seq) &&
-        icsk->icsk_ca_state != TCP_CA_Open &&
-        tp->fackets_out > tp->reordering) {
-        tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
-        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
-    }
-
-    /* D. Check consistency of the current state. */
+    /* C. Check consistency of the current state. */
     tcp_verify_left_out(tp);
 
-    /* E. Check state exit conditions. State can be terminated
+    /* D. Check state exit conditions. State can be terminated
      * when high_seq is ACKed. */
     if (icsk->icsk_ca_state == TCP_CA_Open) {
         WARN_ON(tp->retrans_out != 0);
@@ -3077,7 +3066,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
         }
     }
 
-    /* F. Process state. */
+    /* E. Process state. */
     switch (icsk->icsk_ca_state) {
     case TCP_CA_Recovery:
         if (!(flag & FLAG_SND_UNA_ADVANCED)) {
--
cgit v1.2.3-70-g09d2

From bf0813bd282f6ded0a5efca3db238287f7a3dbe8 Mon Sep 17 00:00:00 2001
From: Paul Gortmaker
Date: Thu, 19 Jan 2012 15:40:06 +0000
Subject: pktgen: Fix unsigned function that is returning negative vals

Every call to num_arg() immediately checks the return value for less
than zero, as it will return -EFAULT for a failed get_user() call. So
it makes no sense for the function to be declared as an unsigned long.

Signed-off-by: Paul Gortmaker
Signed-off-by: David S. Miller
---
 net/core/pktgen.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 65f80c7b165..4d8ce93cd50 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -767,8 +767,8 @@ done:
     return i;
 }
 
-static unsigned long num_arg(const char __user * user_buffer,
-                 unsigned long maxlen, unsigned long *num)
+static long num_arg(const char __user *user_buffer, unsigned long maxlen,
+            unsigned long *num)
 {
     int i;
     *num = 0;
--
cgit v1.2.3-70-g09d2

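The bug class is easy to reproduce outside the kernel: a comparison
against an unsigned return value can never see a negative error code.
A standalone sketch (not kernel code):

    #include <stdio.h>

    /* A negative errno pushed through an unsigned type wraps to a huge
     * positive value, so the usual `< 0` error check is always false
     * (most compilers warn about the tautological comparison).
     */
    static unsigned long num_arg_unsigned(void) { return -14; /* "-EFAULT" */ }
    static long num_arg_signed(void) { return -14; }

    int main(void)
    {
            if (num_arg_unsigned() < 0)     /* never true */
                    puts("unsigned: error caught");
            if (num_arg_signed() < 0)       /* fires as intended */
                    puts("signed: error caught");
            return 0;
    }
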
From 8a622e71f58ec9f092fc99eacae0e6cf14f6e742 Mon Sep 17 00:00:00 2001
From: shawnlu
Date: Fri, 20 Jan 2012 12:22:04 +0000
Subject: tcp: md5: using remote address for md5 lookup in rst packet

The md5 key is added to a socket keyed by the remote address, so the
remote address should be used when looking up the md5 key for an
outgoing reset packet.

Signed-off-by: shawnlu
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_ipv4.c | 2 +-
 net/ipv6/tcp_ipv6.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1eb4ad57670..337ba4cca05 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
     arg.iov[0].iov_len  = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-    key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+    key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
     if (key) {
         rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                    (TCPOPT_NOP << 16) |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 906c7ca4354..3edd05ae438 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
     if (sk)
-        key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+        key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
 #endif
 
     if (th->ack)
--
cgit v1.2.3-70-g09d2

From 0e90b31f4ba77027a7c21cbfc66404df0851ca21 Mon Sep 17 00:00:00 2001
From: Glauber Costa
Date: Fri, 20 Jan 2012 04:57:16 +0000
Subject: net: introduce res_counter_charge_nofail() for socket allocations

There is a case in __sk_mem_schedule() where an allocation is beyond
the maximum, but yet we are allowed to proceed. It happens under the
following condition:

    sk->sk_wmem_queued + size >= sk->sk_sndbuf

The network code won't revert the allocation in this case, meaning
that at some point later it'll try to do it. Since this is never
communicated to the underlying res_counter code, there is an imbalance
in the res_counter uncharge operation.

I see two ways of fixing this:

1) storing the information about those allocations somewhere in memcg,
   and then deducting from that first, before we start draining the
   res_counter,
2) providing a slightly different allocation function for the
   res_counter, that matches the original behavior of the network code
   more closely.

I decided to go for #2 here, believing it to be more elegant, since
#1 would require us to do basically that, but in a more obscure way.

Signed-off-by: Glauber Costa
Cc: KAMEZAWA Hiroyuki
Cc: Johannes Weiner
Cc: Michal Hocko
CC: Tejun Heo
CC: Li Zefan
CC: Laurent Chavey
Acked-by: Tejun Heo
Signed-off-by: David S. Miller
---
 include/linux/res_counter.h |  6 ++++++
 include/net/sock.h          | 10 ++++------
 kernel/res_counter.c        | 25 +++++++++++++++++++++++++
 net/core/sock.c             |  4 ++--
 4 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index d06d014afda..da81af086ea 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -109,12 +109,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
 *
 * returns 0 on success and <0 if the counter->usage will exceed the
 * counter->limit _locked call expects the counter->lock to be taken
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if the after the current
+ * charge we are over limit.
 */
 
 int __must_check res_counter_charge_locked(struct res_counter *counter,
         unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
         unsigned long val, struct res_counter **limit_fail_at);
+int __must_check res_counter_charge_nofail(struct res_counter *counter,
+        unsigned long val, struct res_counter **limit_fail_at);
 
 /*
 * uncharge - tell that some portion of the resource is released
diff --git a/include/net/sock.h b/include/net/sock.h
index 0e7a9b05f92..4c69ac165e6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1008,9 +1008,8 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
     struct res_counter *fail;
     int ret;
 
-    ret = res_counter_charge(prot->memory_allocated,
-                 amt << PAGE_SHIFT, &fail);
-
+    ret = res_counter_charge_nofail(prot->memory_allocated,
+                    amt << PAGE_SHIFT, &fail);
     if (ret < 0)
         *parent_status = OVER_LIMIT;
 }
@@ -1054,12 +1053,11 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+sk_memory_allocated_sub(struct sock *sk, int amt)
 {
     struct proto *prot = sk->sk_prot;
 
-    if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-        parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+    if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
         memcg_memory_allocated_sub(sk->sk_cgrp, amt);
 
     atomic_long_sub(amt, prot->memory_allocated);
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index 6d269cce7aa..d508363858b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -66,6 +66,31 @@ done:
     return ret;
 }
 
+int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
+                  struct res_counter **limit_fail_at)
+{
+    int ret, r;
+    unsigned long flags;
+    struct res_counter *c;
+
+    r = ret = 0;
+    *limit_fail_at = NULL;
+    local_irq_save(flags);
+    for (c = counter; c != NULL; c = c->parent) {
+        spin_lock(&c->lock);
+        r = res_counter_charge_locked(c, val);
+        if (r)
+            c->usage += val;
+        spin_unlock(&c->lock);
+        if (r < 0 && ret == 0) {
+            *limit_fail_at = c;
+            ret = r;
+        }
+    }
+    local_irq_restore(flags);
+
+    return ret;
+}
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
     if (WARN_ON(counter->usage < val))
diff --git a/net/core/sock.c b/net/core/sock.c
index 5c5af9988f9..3e81fd2e3c7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1827,7 +1827,7 @@ suppress_allocation:
 
     /* Alas. Undo changes. */
     sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-    sk_memory_allocated_sub(sk, amt, parent_status);
+    sk_memory_allocated_sub(sk, amt);
     return 0;
 }
 
@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL(__sk_mem_schedule);
 void __sk_mem_reclaim(struct sock *sk)
 {
     sk_memory_allocated_sub(sk,
-                sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
+                sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
     sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
     if (sk_under_memory_pressure(sk) &&
--
cgit v1.2.3-70-g09d2

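The intended calling convention for the new helper, as used by
memcg_memory_allocated_add() above, is charge-first, report-later
(a simplified sketch; the variable names echo the sock.h hunk):

    struct res_counter *fail;
    int parent_status = 0;

    /* Charge unconditionally so the later uncharge of the same amount
     * always balances, even when this charge pushes usage over the
     * limit; being over limit is reported, not rejected.
     */
    if (res_counter_charge_nofail(counter, bytes, &fail) < 0)
            parent_status = OVER_LIMIT;

    /* ... the allocation proceeds either way; the eventual release is
     * symmetric and needs no special case:
     */
    res_counter_uncharge(counter, bytes);
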
From b1cc16b8e643096adb92bbcb76c6c4c564141c40 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Sun, 22 Jan 2012 14:45:14 -0500
Subject: bluetooth: hci: Fix type of "enable_hs" to bool.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixes:

    net/bluetooth/hci_core.c: In function ‘__check_enable_hs’:
    net/bluetooth/hci_core.c:2587:1: warning: return from incompatible pointer type [enabled by default]

Signed-off-by: David S. Miller
---
 include/net/bluetooth/hci.h | 2 +-
 net/bluetooth/hci_core.c    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5b2fed5eebf..00596e816b4 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1388,6 +1388,6 @@ struct hci_inquiry_req {
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
-extern int enable_hs;
+extern bool enable_hs;
 
 #endif /* __HCI_H */
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 845da3ee56a..9de93714213 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -55,7 +55,7 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
-int enable_hs;
+bool enable_hs;
 
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
--
cgit v1.2.3-70-g09d2

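The warning comes from module_param()'s type-checking helper: declaring
the parameter as "bool" generates a hidden __check_enable_hs() function
whose return type is bool *, and returning the address of an int from it
is the incompatible-pointer-type return gcc complains about. A rough
sketch of the mechanism (the expansion is paraphrased, not the exact
kernel macro text):

    /* module_param(enable_hs, bool, 0644) expands to, roughly:
     *
     *     static inline bool *__check_enable_hs(void)
     *     { return &enable_hs; }        // bool * expected here
     *
     * With `int enable_hs;` the return statement converts int * to
     * bool *, producing the warning; matching the declaration fixes it.
     */
    bool enable_hs;
    module_param(enable_hs, bool, 0644);
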
From f80400a26a2e8bff541de12834a1134358bb6642 Mon Sep 17 00:00:00 2001
From: Michał Mirosław
Date: Sun, 22 Jan 2012 00:20:40 +0000
Subject: ethtool: allow ETHTOOL_GSSET_INFO for users
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow the ETHTOOL_GSSET_INFO ethtool ioctl() for unprivileged users.
ETHTOOL_GSTRINGS is already allowed, but is unusable without this one.

Signed-off-by: Michał Mirosław
Acked-by: Ben Hutchings
Signed-off-by: David S. Miller
---
 net/core/ethtool.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 921aa2b4b41..369b4189452 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1311,6 +1311,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
     case ETHTOOL_GRXCSUM:
     case ETHTOOL_GTXCSUM:
     case ETHTOOL_GSG:
+    case ETHTOOL_GSSET_INFO:
     case ETHTOOL_GSTRINGS:
     case ETHTOOL_GTSO:
     case ETHTOOL_GPERMADDR:
--
cgit v1.2.3-70-g09d2

From 56ac11cf2f21366ad48b356f7a0d1af8cff3588e Mon Sep 17 00:00:00 2001
From: Radu Iliescu
Date: Thu, 19 Jan 2012 03:57:57 +0000
Subject: llc: Fix race condition in llc_ui_recvmsg

There is a race on sk_receive_queue between llc_ui_recvmsg and
sock_queue_rcv_skb. Our current solution is to protect the sk_eat_skb
calls in llc_ui_recvmsg with the receive queue spinlock.

Signed-off-by: Radu Iliescu
Signed-off-by: David S. Miller
---
 net/llc/af_llc.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index a18e6c3d36e..b9bef2c7502 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
     struct sk_buff *skb = NULL;
     struct sock *sk = sock->sk;
     struct llc_sock *llc = llc_sk(sk);
+    unsigned long cpu_flags;
     size_t copied = 0;
     u32 peek_seq = 0;
     u32 *seq;
@@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
             goto copy_uaddr;
 
         if (!(flags & MSG_PEEK)) {
+            spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
             sk_eat_skb(sk, skb, 0);
+            spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
             *seq = 0;
         }
 
@@ -859,7 +862,9 @@ copy_uaddr:
         llc_cmsg_rcv(msg, skb);
 
     if (!(flags & MSG_PEEK)) {
+        spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
         sk_eat_skb(sk, skb, 0);
+        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
         *seq = 0;
     }
--
cgit v1.2.3-70-g09d2

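The interleaving being closed off, sketched informally (exact call
chains abbreviated):

    /* CPU0: llc_ui_recvmsg()          CPU1: LLC input path
     * ----------------------          -----------------------------
     * finds skb at queue head         sock_queue_rcv_skb()
     * sk_eat_skb() unlinks skb          takes sk_receive_queue.lock
     *   WITHOUT the queue lock          links new skb at the tail
     *                                   drops the lock
     *
     * Two writers manipulating sk_receive_queue concurrently can
     * corrupt the list. TCP avoids this differently (its input path
     * runs under the socket lock), but LLC's producer takes only the
     * queue spinlock, so the consumer must take it too.
     */
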
From c452ed70771cea3af73d21a5914989137fbd28b8 Mon Sep 17 00:00:00 2001
From: Jesper Dangaard Brouer
Date: Tue, 24 Jan 2012 16:03:33 -0500
Subject: net: flow_dissector.c missing include linux/export.h

The file net/core/flow_dissector.c seems to be missing the include of
linux/export.h. Add it.

Signed-off-by: Jesper Dangaard Brouer
Signed-off-by: David S. Miller
---
 net/core/flow_dissector.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0985b9b14b8..a225089df5b 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1,4 +1,5 @@
 #include <linux/skbuff.h>
+#include <linux/export.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/if_vlan.h>
--
cgit v1.2.3-70-g09d2

From efc3dbc37412c027e363736b4f4c74ee5e8ecffc Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Tue, 24 Jan 2012 17:03:44 -0500
Subject: rds: Make rds_sock_lock BH rather than IRQ safe.

rds_sock_info() triggers locking warnings because we try to perform a
local_bh_enable() (via sock_i_ino()) while hardware interrupts are
disabled (via taking rds_sock_lock). There is no reason for
rds_sock_lock to be a hardware IRQ disabling lock; none of these
access paths run in hardware interrupt context. Therefore making it a
BH disabling lock is safe and sufficient to fix this bug.

Reported-by: Kumar Sanghvi
Reported-by: Josh Boyer
Signed-off-by: David S. Miller
---
 net/rds/af_rds.c | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)

diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index bb6ad81b671..424ff622ab5 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
     struct sock *sk = sock->sk;
     struct rds_sock *rs;
-    unsigned long flags;
 
     if (!sk)
         goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
     rds_rdma_drop_keys(rs);
     rds_notify_queue_get(rs, NULL);
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
     list_del_init(&rs->rs_item);
     rds_sock_count--;
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 
     rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-    unsigned long flags;
     struct rds_sock *rs;
 
     sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
     spin_lock_init(&rs->rs_rdma_lock);
     rs->rs_rdma_keys = RB_ROOT;
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
     list_add_tail(&rs->rs_item, &rds_sock_list);
     rds_sock_count++;
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 
     return 0;
 }
@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 {
     struct rds_sock *rs;
     struct rds_incoming *inc;
-    unsigned long flags;
     unsigned int total = 0;
 
     len /= sizeof(struct rds_info_message);
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
 
     list_for_each_entry(rs, &rds_sock_list, rs_item) {
         read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
         read_unlock(&rs->rs_recv_lock);
     }
 
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 
     lens->nr = total;
     lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
     struct rds_info_socket sinfo;
     struct rds_sock *rs;
-    unsigned long flags;
 
     len /= sizeof(struct rds_info_socket);
 
-    spin_lock_irqsave(&rds_sock_lock, flags);
+    spin_lock_bh(&rds_sock_lock);
 
     if (len < rds_sock_count)
         goto out;
@@ -529,7 +525,7 @@ out:
     lens->nr = rds_sock_count;
     lens->each = sizeof(struct rds_info_socket);
 
-    spin_unlock_irqrestore(&rds_sock_lock, flags);
+    spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
--
cgit v1.2.3-70-g09d2

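The two locking flavors contrasted, for reference (a sketch;
rds_sock_lock stands in for any lock with the same usage pattern):

    unsigned long flags;

    /* IRQ-safe form: masks hardware interrupts across the section.
     * Needed only when the lock can also be taken in hardirq context.
     */
    spin_lock_irqsave(&rds_sock_lock, flags);
    /* ... critical section ... */
    spin_unlock_irqrestore(&rds_sock_lock, flags);

    /* BH-safe form: masks only softirqs. Sufficient here because
     * rds_sock_lock is taken from process and softirq context only,
     * and it keeps the local_bh_enable() reached via sock_i_ino()
     * legal inside the critical section.
     */
    spin_lock_bh(&rds_sock_lock);
    /* ... critical section ... */
    spin_unlock_bh(&rds_sock_lock);
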