author     Trond Myklebust <Trond.Myklebust@netapp.com>  2012-01-17 22:57:37 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2012-01-31 19:28:08 -0500
commit     961a828df64979d2a9faeeeee043391670a193b9 (patch)
tree       4b7ddaf1a19c589e3c8ec96b6c732faa507f2899 /fs
parent     2aeb98f498ce37742b743080fdc6c8cf64053599 (diff)
SUNRPC: Fix potential races in xprt_lock_write_next()
We have to ensure that the wake up from the waitqueue and the assignment
of xprt->snd_task are atomic. We can do this by assigning the snd_task
while under the waitqueue spinlock.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
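As a rough sketch of the locking pattern this relies on (illustration only: the
xprt_lock_write_next() change itself lives in net/sunrpc/xprt.c and is outside
this fs-limited diff, and the example_* names below are hypothetical), the
callback handed to rpc_wake_up_first() runs with the wait queue spinlock held,
so the snd_task assignment cannot race with the wake-up:

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprt.h>

/* Hypothetical helper: runs under the wait queue's spinlock, so the task
 * that is woken is recorded as the lock owner in the same critical section. */
static bool example_assign_snd_task(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;	/* true tells rpc_wake_up_first() to wake this task */
}

static void example_lock_write_next(struct rpc_xprt *xprt)
{
	/* rpc_wake_up_first() picks the first queued task, calls the
	 * callback while holding the queue lock, and wakes the task
	 * only if the callback returned true. */
	rpc_wake_up_first(&xprt->sending, example_assign_snd_task, xprt);
}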
Diffstat (limited to 'fs')
-rw-r--r--   fs/nfs/nfs4_fs.h     1
-rw-r--r--   fs/nfs/nfs4proc.c   13
-rw-r--r--   fs/nfs/nfs4state.c  17
3 files changed, 17 insertions, 14 deletions
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index df3d02c3e8c..c45c21a5470 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -222,6 +222,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser
 	return server->nfs_client->cl_session;
 }
 
+extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy);
 extern int nfs4_setup_sequence(const struct nfs_server *server,
 		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 		struct rpc_task *task);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 360240cc1e9..828a76590af 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -385,17 +385,20 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
 		free_slotid, tbl->highest_used_slotid);
 }
 
+bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
+{
+	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+	return true;
+}
+
 /*
  * Signal state manager thread if session fore channel is drained
  */
 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
 {
-	struct rpc_task *task;
-
 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
-		if (task)
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
+				nfs4_set_task_privileged, NULL);
 		return;
 	}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index a42e60d3ee5..f0e9881c2aa 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -190,23 +190,22 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
+	struct nfs4_slot_table *tbl;
 	int max_slots;
 
 	if (ses == NULL)
 		return;
+	tbl = &ses->fc_slot_table;
 	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
-		max_slots = ses->fc_slot_table.max_slots;
+		spin_lock(&tbl->slot_tbl_lock);
+		max_slots = tbl->max_slots;
 		while (max_slots--) {
-			struct rpc_task *task;
-
-			task = rpc_wake_up_next(&ses->fc_slot_table.
-						slot_tbl_waitq);
-			if (!task)
+			if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
+					nfs4_set_task_privileged,
+					NULL) == NULL)
 				break;
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
 		}
-		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
+		spin_unlock(&tbl->slot_tbl_lock);
 	}
 }