Diffstat (limited to 'net/rds')
-rw-r--r--   net/rds/tcp_connect.c   4
-rw-r--r--   net/rds/tcp_listen.c    4
-rw-r--r--   net/rds/tcp_recv.c      4
-rw-r--r--   net/rds/tcp_send.c      4
4 files changed, 8 insertions, 8 deletions
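
All four hunks below make the same change: the RDS/TCP socket callbacks stop using the _bh variants when taking sk->sk_callback_lock as readers. A plausible rationale (not stated in this diff) is that TCP invokes these callbacks from softirq context, where bottom halves are already disabled, so the extra disable/enable pair is unnecessary. The following is a minimal, hypothetical sketch of the callback shape the hunks touch; example_sock_state, example_data_ready and the saved-callback field are illustrative stand-ins, not RDS code.

/*
 * Hypothetical sketch (not RDS source): the general shape of a socket
 * callback that takes sk->sk_callback_lock as a reader.  The hunks below
 * switch the marked lines from the _bh variants to plain read_lock()/
 * read_unlock().
 */
#include <net/sock.h>
#include <linux/workqueue.h>

/* Stand-in for struct rds_tcp_connection: remembers the original callback. */
struct example_sock_state {
	void (*saved_data_ready)(struct sock *sk, int bytes);
	struct work_struct recv_work;
};

static void example_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);
	struct example_sock_state *st;

	read_lock(&sk->sk_callback_lock);	/* was read_lock_bh() */
	st = sk->sk_user_data;
	if (!st) {			/* teardown race: callbacks already restored */
		ready = sk->sk_data_ready;
		goto out;
	}
	ready = st->saved_data_ready;
	schedule_work(&st->recv_work);	/* defer the real receive processing */
out:
	read_unlock(&sk->sk_callback_lock);	/* was read_unlock_bh() */
	ready(sk, bytes);		/* chain to the underlying TCP callback */
}
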
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index af95c8e058f..a65ee78db0c 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
 		break;
 	}
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 	state_change(sk);
 }
 
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 72981375f47..7787537e9c2 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
 	rdsdebug("listen data ready sk %p\n", sk);
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	ready = sk->sk_user_data;
 	if (!ready) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 		queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 	ready(sk, bytes);
 }
 
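
The "check for teardown race" branches above fall back to the stock TCP callback when sk_user_data has already been cleared. A hedged sketch of the other side of that race, reusing the hypothetical names from the sketch after the diffstat: teardown restores the original callback and clears sk_user_data under the write lock, so a reader that still gets in sees NULL and degrades gracefully.

/*
 * Hypothetical teardown counterpart (not RDS source): the write side uses
 * write_lock_bh(), so a callback holding the read lock has either seen the
 * old sk_user_data or will see NULL and take the fallback path.
 */
static void example_restore_callbacks(struct sock *sk,
				      struct example_sock_state *st)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;		/* readers now take the fallback path */
	sk->sk_data_ready = st->saved_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
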
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 6243258f840..4fac4f2bb9d 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -322,7 +322,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
 	rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) { /* check for teardown race */
 		ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 	if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
 		queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 	ready(sk, bytes);
 }
 
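
The second tcp_recv.c hunk keeps the existing -ENOMEM handling: the receive path is driven with GFP_ATOMIC from the callback and, if an allocation fails, the work is re-queued so the workqueue can retry later. A small hypothetical sketch of that pattern, again using the stand-in names from the earlier sketch (example_read_sock() merely stands in for rds_tcp_read_sock()):

/* Hypothetical stand-in for rds_tcp_read_sock(): drain the socket with the
 * given allocation mask, returning -ENOMEM if an allocation fails. */
static int example_read_sock(struct sock *sk, gfp_t gfp)
{
	/* real code would call tcp_read_sock() with a recv_actor here */
	return 0;
}

static void example_kick_receive(struct sock *sk, struct example_sock_state *st)
{
	/* Only atomic allocations are allowed in softirq context; on failure,
	 * defer to the workqueue, which can retry under less pressure. */
	if (example_read_sock(sk, GFP_ATOMIC) == -ENOMEM)
		schedule_work(&st->recv_work);
}
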
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 1b4fd68f0c7..81cf5a4c5e4 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
 	struct rds_connection *conn;
 	struct rds_tcp_connection *tc;
 
-	read_lock_bh(&sk->sk_callback_lock);
+	read_lock(&sk->sk_callback_lock);
 	conn = sk->sk_user_data;
 	if (!conn) {
 		write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
 		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-	read_unlock_bh(&sk->sk_callback_lock);
+	read_unlock(&sk->sk_callback_lock);
 
 	/*
 	 * write_space is only called when data leaves tcp's send queue if