author    Trond Myklebust <Trond.Myklebust@netapp.com>    2006-11-13 16:23:44 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-06 10:46:26 -0500
commit    8aca67f0ae2d8811165c22326825a645cc8e1b48 (patch)
tree      19e82f4bc7b4f865a9dcf4744e7c224ea517ba10 /net/sunrpc
parent    e6b3c4db6fbcd0d33720696f37790d6b8be12313 (diff)
SUNRPC: Fix a potential race in rpc_wake_up_task()
Use RCU to ensure that we can safely call rpc_finish_wakeup after we've called __rpc_do_wake_up_task. If not, there is a theoretical race in which the rpc_task finishes executing and gets freed first.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
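In effect the patch pairs a reader-side RCU-bh critical section in every wake-up path with a deferred, RCU-delayed free in rpc_put_task(). As a rough illustration of the reader side, here is a minimal sketch built around a hypothetical struct obj (not the actual rpc_task; all names below are invented for illustration):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct obj {
        spinlock_t lock;
        int queued;
        struct rcu_head rcu;            /* plays the role of u.tk_rcu */
};

/*
 * Waker: can race with the final put of *o on another CPU. The
 * rcu_read_lock_bh() section pins *o in memory, because the free side
 * (see obj_put() after the diff) defers reclamation via call_rcu_bh().
 * rcu_read_lock_bh() also disables softirqs, which is why the patch can
 * downgrade spin_lock_bh() to a plain spin_lock() inside the section.
 */
static void obj_wake(struct obj *o)
{
        rcu_read_lock_bh();
        if (o->queued) {
                spin_lock(&o->lock);    /* already bh-safe here */
                /* ... dequeue and wake ... */
                o->queued = 0;
                spin_unlock(&o->lock);
        }
        rcu_read_unlock_bh();
}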
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/sched.c | 30
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 66d01365f3a..6b808c03fb7 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -427,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task)
  */
 void rpc_wake_up_task(struct rpc_task *task)
 {
+        rcu_read_lock_bh();
         if (rpc_start_wakeup(task)) {
                 if (RPC_IS_QUEUED(task)) {
                         struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;

-                        spin_lock_bh(&queue->lock);
+                        /* Note: we're already in a bh-safe context */
+                        spin_lock(&queue->lock);
                         __rpc_do_wake_up_task(task);
-                        spin_unlock_bh(&queue->lock);
+                        spin_unlock(&queue->lock);
                 }
                 rpc_finish_wakeup(task);
         }
+        rcu_read_unlock_bh();
 }

 /*
@@ -499,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
         struct rpc_task *task = NULL;

         dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
-        spin_lock_bh(&queue->lock);
+        rcu_read_lock_bh();
+        spin_lock(&queue->lock);
         if (RPC_IS_PRIORITY(queue))
                 task = __rpc_wake_up_next_priority(queue);
         else {
                 task_for_first(task, &queue->tasks[0])
                         __rpc_wake_up_task(task);
         }
-        spin_unlock_bh(&queue->lock);
+        spin_unlock(&queue->lock);
+        rcu_read_unlock_bh();

         return task;
 }
@@ -522,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
         struct rpc_task *task, *next;
         struct list_head *head;

-        spin_lock_bh(&queue->lock);
+        rcu_read_lock_bh();
+        spin_lock(&queue->lock);
         head = &queue->tasks[queue->maxpriority];
         for (;;) {
                 list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -531,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
                         break;
                 head--;
         }
-        spin_unlock_bh(&queue->lock);
+        spin_unlock(&queue->lock);
+        rcu_read_unlock_bh();
 }

 /**
@@ -546,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
         struct rpc_task *task, *next;
         struct list_head *head;

-        spin_lock_bh(&queue->lock);
+        rcu_read_lock_bh();
+        spin_lock(&queue->lock);
         head = &queue->tasks[queue->maxpriority];
         for (;;) {
                 list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -557,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
                         break;
                 head--;
         }
-        spin_unlock_bh(&queue->lock);
+        spin_unlock(&queue->lock);
+        rcu_read_unlock_bh();
 }

 static void __rpc_atrun(struct rpc_task *task)
@@ -817,8 +826,9 @@ rpc_alloc_task(void)
         return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }

-static void rpc_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rcu_head *rcu)
 {
+        struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
         dprintk("RPC: %4d freeing task\n", task->tk_pid);
         mempool_free(task, rpc_task_mempool);
 }
@@ -872,7 +882,7 @@ void rpc_put_task(struct rpc_task *task)
                 task->tk_client = NULL;
         }
         if (task->tk_flags & RPC_TASK_DYNAMIC)
-                rpc_free_task(task);
+                call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
         if (tk_ops->rpc_release)
                 tk_ops->rpc_release(calldata);
 }
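And the matching free side: a minimal sketch, continuing the hypothetical struct obj from the sketch above (again, not the kernel source), of the deferred-reclamation idiom the last two hunks apply. The object embeds an rcu_head, the callback recovers the object with container_of(), and call_rcu_bh() delays the free until every RCU-bh read-side section already in flight has finished.

#include <linux/rcupdate.h>
#include <linux/slab.h>

static void obj_free_rcu(struct rcu_head *head)
{
        /* Mirrors rpc_free_task(): recover the object from its embedded
         * rcu_head. This runs only after all rcu_read_lock_bh() readers
         * that might still reference *o have completed. */
        struct obj *o = container_of(head, struct obj, rcu);
        kfree(o);
}

static void obj_put(struct obj *o)
{
        /* Analogue of call_rcu_bh(&task->u.tk_rcu, rpc_free_task): a
         * waker still inside obj_wake()'s read-side section can no
         * longer see *o freed underneath it. */
        call_rcu_bh(&o->rcu, obj_free_rcu);
}

The _bh flavour is what pairs with rcu_read_lock_bh() on kernels of this vintage, so the grace period only has to wait for softirq-disabled readers; later kernels folded the RCU-bh flavour into plain call_rcu().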