author    Trond Myklebust <Trond.Myklebust@netapp.com>    2012-11-29 17:27:47 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2012-12-06 00:30:52 +0100
commit    b75ad4cda5a6cd3431b1c65c2739c5ebd2c4b9da (patch)
tree      6ba6dd80fc8e118067ca70bd67345864835fe98a /fs/nfs
parent    62ae082d883d167cdaa7895cf2972d85e178228a (diff)
NFSv4.1: Ensure smooth handover of slots from one task to the next waiting
Currently, we see a lot of bouncing for the value of highest_used_slotid due to the fact that slots are getting freed, instead of getting instantly transmitted to the next waiting task.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
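Before this change, a task releasing its slot would free it and then wake the next waiter, which had to re-allocate a slot on its own; the brief window in which the slot sits "free" is what makes highest_used_slotid bounce. The patch instead hands the still-allocated slot directly to the first queued task via rpc_wake_up_first() and a per-task assignment callback (nfs41_assign_slot). The snippet below is a simplified, user-space sketch of that handover pattern, not part of the patch; all identifiers in it are illustrative.

/*
 * Illustrative model of the handover: the releasing task passes its
 * still-allocated slot straight to the head of the wait queue, so the
 * slot never transits through a "free" state while someone is waiting.
 * All names here are hypothetical; the kernel code does the equivalent
 * with rpc_wake_up_first() on tbl->slot_tbl_waitq and nfs41_assign_slot().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct slot { unsigned int slot_nr; bool in_use; };

struct waiter {
	const char *name;
	struct slot *assigned;		/* filled in by the handover */
	struct waiter *next;
};

/* Hand the slot to the first waiter; return false if nobody is waiting. */
static bool wake_and_assign_slot(struct waiter **queue, struct slot *slot)
{
	struct waiter *w = *queue;

	if (w == NULL)
		return false;
	*queue = w->next;
	w->assigned = slot;		/* slot stays allocated end to end */
	printf("%s takes over slot %u\n", w->name, slot->slot_nr);
	return true;
}

static void free_slot(struct slot *slot)
{
	slot->in_use = false;		/* only reached when no task was waiting */
}

int main(void)
{
	struct slot s = { .slot_nr = 3, .in_use = true };
	struct waiter b = { .name = "task B" };
	struct waiter a = { .name = "task A", .next = &b };
	struct waiter *queue = &a;

	if (!wake_and_assign_slot(&queue, &s))	/* releasing task's path */
		free_slot(&s);
	return 0;
}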
Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/nfs4proc.c    | 12
-rw-r--r--  fs/nfs/nfs4session.c | 59
-rw-r--r--  fs/nfs/nfs4session.h |  4
-rw-r--r--  fs/nfs/nfs4state.c   |  6
4 files changed, 69 insertions, 12 deletions
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 99d99a5a3f6..992233561db 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -401,14 +401,15 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
if (tbl->highest_used_slotid > tbl->target_highest_slotid)
send_new_highest_used_slotid = true;
+ if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
+ send_new_highest_used_slotid = false;
+ goto out_unlock;
+ }
nfs4_free_slot(tbl, res->sr_slot);
if (tbl->highest_used_slotid != NFS4_NO_SLOT)
send_new_highest_used_slotid = false;
- if (!nfs4_session_draining(session)) {
- if (rpc_wake_up_next(&tbl->slot_tbl_waitq) != NULL)
- send_new_highest_used_slotid = false;
- }
+out_unlock:
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
if (send_new_highest_used_slotid)
@@ -1465,6 +1466,7 @@ unlock_no_action:
rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
+ nfs4_sequence_done(task, &data->o_res.seq_res);
}
static void nfs4_open_done(struct rpc_task *task, void *calldata)
@@ -2135,6 +2137,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
+ nfs4_sequence_done(task, &calldata->res.seq_res);
goto out;
}
@@ -4384,6 +4387,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
+ nfs4_sequence_done(task, &calldata->res.seq_res);
return;
}
calldata->timestamp = jiffies;
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index 701170293ce..066cfa101b4 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -217,11 +217,65 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session)
nfs4_shrink_slot_table(&session->bc_slot_table, 0);
}
+static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
+{
+ struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
+ struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
+ struct nfs4_slot *slot = pslot;
+ struct nfs4_slot_table *tbl = slot->table;
+
+ if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+ return false;
+ slot->renewal_time = jiffies;
+ slot->generation = tbl->generation;
+ args->sa_slot = slot;
+ res->sr_slot = slot;
+ res->sr_status_flags = 0;
+ res->sr_status = 1;
+ return true;
+}
+
+static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot)
+{
+ if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
+ return true;
+ return false;
+}
+
+bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot)
+{
+ if (slot->slot_nr > tbl->max_slotid)
+ return false;
+ return __nfs41_wake_and_assign_slot(tbl, slot);
+}
+
+static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
+{
+ struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
+ if (!IS_ERR(slot)) {
+ bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
+ if (ret)
+ return ret;
+ nfs4_free_slot(tbl, slot);
+ }
+ return false;
+}
+
+void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
+{
+ for (;;) {
+ if (!nfs41_try_wake_next_slot_table_entry(tbl))
+ break;
+ }
+}
+
/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
u32 target_highest_slotid)
{
- unsigned int max_slotid, i;
+ unsigned int max_slotid;
if (tbl->target_highest_slotid == target_highest_slotid)
return;
@@ -229,9 +283,8 @@ static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
tbl->generation++;
max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, tbl->target_highest_slotid);
- for (i = tbl->max_slotid + 1; i <= max_slotid; i++)
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
tbl->max_slotid = max_slotid;
+ nfs41_wake_slot_table(tbl);
}
void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index bdd14a60722..7db73937016 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -94,6 +94,10 @@ static inline bool nfs4_session_draining(struct nfs4_session *session)
return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
}
+bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *slot);
+void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
+
/*
* Determine if sessions are in use.
*/
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7d73df5a05d..78e90a80fc3 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -255,17 +255,13 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
struct nfs4_slot_table *tbl;
- unsigned int i;
if (ses == NULL)
return;
tbl = &ses->fc_slot_table;
if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
spin_lock(&tbl->slot_tbl_lock);
- for (i = 0; i <= tbl->max_slotid; i++) {
- if (rpc_wake_up_next(&tbl->slot_tbl_waitq) == NULL)
- break;
- }
+ nfs41_wake_slot_table(tbl);
spin_unlock(&tbl->slot_tbl_lock);
}
}