author    Trond Myklebust <Trond.Myklebust@netapp.com>    2008-12-23 15:21:38 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2008-12-23 15:21:38 -0500
commit    343104308a33c4f1e23c8e841ede95e97b870842 (patch)
tree      e7772538627a0a2176bd23cd3b2f1acbacd24592 /fs/nfs/delegation.c
parent    0cb2659b818eca99235e17c04291cfa9985c14f7 (diff)
NFSv4: Fix up another delegation related race
When we call update_open_stateid(), we need to be certain that we don't race with a delegation return. While we could do this by grabbing the nfs_client->cl_lock, a dedicated spin lock in the delegation structure will scale better.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
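The locking pattern the patch introduces -- a dedicated per-delegation spin lock instead of the client-wide nfs_client->cl_lock -- can be sketched outside the kernel. Below is a minimal userspace model, not kernel code: the names (struct delegation, detach_delegation(), update_stateid()) and the use of pthread spinlocks are illustrative assumptions, chosen only to show how taking the same per-object lock in both the check-and-detach path and the stateid-update path closes the race without serializing on one global lock.

/* Minimal userspace model of the per-delegation lock.  The names and types
 * here are illustrative, not the kernel's.  Build with: cc -pthread model.c */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define STATEID_SIZE 16

struct delegation {
        unsigned char stateid[STATEID_SIZE];
        pthread_spinlock_t lock;        /* per-delegation lock, as in the patch */
        int attached;
};

/* Detach only if the caller's stateid still matches; models the
 * memcmp-under-lock added to nfs_detach_delegation_locked(). */
static int detach_delegation(struct delegation *d, const unsigned char *stateid)
{
        int detached = 0;

        pthread_spin_lock(&d->lock);
        if (d->attached && memcmp(d->stateid, stateid, STATEID_SIZE) == 0) {
                d->attached = 0;
                detached = 1;
        }
        pthread_spin_unlock(&d->lock);
        return detached;
}

/* Update the stateid under the same lock, so a concurrent detach either sees
 * the old stateid (and detaches) or the new one (and leaves it alone). */
static void update_stateid(struct delegation *d, const unsigned char *stateid)
{
        pthread_spin_lock(&d->lock);
        if (d->attached)
                memcpy(d->stateid, stateid, STATEID_SIZE);
        pthread_spin_unlock(&d->lock);
}

int main(void)
{
        struct delegation d = { .attached = 1 };
        unsigned char old_id[STATEID_SIZE] = "old-stateid";
        unsigned char new_id[STATEID_SIZE] = "new-stateid";

        pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
        memcpy(d.stateid, old_id, STATEID_SIZE);

        update_stateid(&d, new_id);             /* racing stateid update */
        printf("detach with stale id: %d\n", detach_delegation(&d, old_id));
        printf("detach with current id: %d\n", detach_delegation(&d, new_id));

        pthread_spin_destroy(&d.lock);
        return 0;
}

In the patch itself that role is played by delegation->lock: it is initialized with spin_lock_init() in nfs_inode_set_delegation() (second hunk) and taken around the stateid comparison and list removal in nfs_detach_delegation_locked() (first hunk), while the existing clp->cl_lock is still taken where it was before.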
Diffstat (limited to 'fs/nfs/delegation.c')
-rw-r--r--  fs/nfs/delegation.c  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index cc563cfa694..e0cb4ee3b23 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -140,13 +140,17 @@ static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfs
         if (delegation == NULL)
                 goto nomatch;
+        spin_lock(&delegation->lock);
         if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
                         sizeof(delegation->stateid.data)) != 0)
-                goto nomatch;
+                goto nomatch_unlock;
         list_del_rcu(&delegation->super_list);
         nfsi->delegation_state = 0;
         rcu_assign_pointer(nfsi->delegation, NULL);
+        spin_unlock(&delegation->lock);
         return delegation;
+nomatch_unlock:
+        spin_unlock(&delegation->lock);
 nomatch:
         return NULL;
 }
@@ -172,6 +176,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
         delegation->change_attr = nfsi->change_attr;
         delegation->cred = get_rpccred(cred);
         delegation->inode = inode;
+        spin_lock_init(&delegation->lock);
         spin_lock(&clp->cl_lock);
         if (rcu_dereference(nfsi->delegation) != NULL) {
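Note that this diffstat covers fs/nfs/delegation.c only, so the page shows just part of the change: the spinlock_t field initialized above is presumably declared in struct nfs_delegation (fs/nfs/delegation.h), and the update_open_stateid() path named in the commit message would take the same delegation->lock in its own file so that it cannot race with the check-and-detach in the first hunk.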