| author | Tejun Heo <tj@kernel.org> | 2012-03-05 13:15:01 -0800 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 21:27:22 +0100 |
| commit | 2a7f124414b35645049e9c1b125a6f0b470aa5ae (patch) | |
| tree | 298d9bc310dc46baed69baf88f083c6db35f0964 /block | |
| parent | 72e06c255181537d0b3e1f657a9ed81655d745b1 (diff) | |
blkcg: move rcu_read_lock() outside of blkio_group get functions
rcu_read_lock() in throtl_get_tg() and cfq_get_cfqg() holds onto
@blkcg while looking up the blkg. As an API cleanup, the next patch
will make the caller responsible for determining the @blkcg to look
the blkg up from and will let it pass the blkcg in as a parameter.
Move the RCU read locking out to the callers to prepare for that
change.
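As a rough sketch of the calling convention this prepares for (kernel-style pseudocode assuming the 3.3-era blk-throttle internals in block/blk-throttle.c; the function below is illustrative, not part of the patch):

```c
/*
 * Illustrative only: after this patch the caller enters the RCU read
 * section itself, so the follow-up patch can let it resolve @blkcg
 * once and pass it down as a parameter.
 */
static struct throtl_grp *caller_side_lookup(struct throtl_data *td)
{
	struct blkio_cgroup *blkcg;
	struct throtl_grp *tg;

	rcu_read_lock();			/* pins @blkcg, not @blkg */
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);		/* lookup no longer locks */
	rcu_read_unlock();
	return tg;
}
```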
-v2: This patch was originally described as a fix for an RCU
     read-locking bug around @blkg, which Vivek pointed out to be
     incorrect. The description stemmed from a misunderstanding of
     the RCU locking as protecting @blkg rather than @blkcg. Patch
     description updated.
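In other words (an illustrative, comment-only summary of the 3.3-era semantics, not text from the patch):

```c
/*
 * What the RCU read section actually protects here (simplified):
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *		current's cgroup association is RCU-managed, so
 *		@blkcg is only guaranteed to stay valid inside the
 *		read section -- this is what RCU pins.
 *	tg = throtl_find_tg(td, blkcg);
 *		the returned blkg/tg is kept alive by q->queue_lock
 *		and refcounting, not by this RCU section.
 *	rcu_read_unlock();
 */
```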
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
| -rw-r--r-- | block/blk-throttle.c | 18 |
| -rw-r--r-- | block/cfq-iosched.c | 11 |

2 files changed, 11 insertions(+), 18 deletions(-)
```diff
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3699ab40d49..9beaac7fb39 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -313,25 +313,23 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
 
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
-	if (tg) {
-		rcu_read_unlock();
+	if (tg)
 		return tg;
-	}
 
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
 	 * we need to drop rcu lock and queue_lock before we call alloc.
 	 */
-	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
+	rcu_read_unlock();
 
 	tg = throtl_alloc_tg(td);
 
 	/* Group allocated and queue is still alive. take the lock */
+	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
@@ -343,7 +341,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 
 	/*
@@ -354,7 +351,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 
 	if (__tg) {
 		kfree(tg);
-		rcu_read_unlock();
 		return __tg;
 	}
 
@@ -365,7 +361,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	}
 
 	throtl_init_add_tg_lists(td, tg, blkcg);
-	rcu_read_unlock();
 	return tg;
 }
 
@@ -1150,7 +1145,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	 * basic fields like stats and io rates. If a group has no rules,
 	 * just update the dispatch stats in lockless manner and return.
 	 */
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -1160,11 +1154,9 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 		if (tg_no_rule_group(tg, rw)) {
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
-			rcu_read_unlock();
-			goto out;
+			goto out_unlock_rcu;
 		}
 	}
-	rcu_read_unlock();
 
 	/*
 	 * Either group has not been allocated yet or it is not an unlimited
@@ -1222,6 +1214,8 @@ queue_bio:
 
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
+out_unlock_rcu:
+	rcu_read_unlock();
 out:
 	return throttled;
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 61693d3404d..6063c4482b8 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1128,13 +1128,10 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 	struct cfq_group *cfqg = NULL, *__cfqg = NULL;
 	struct request_queue *q = cfqd->queue;
 
-	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	cfqg = cfq_find_cfqg(cfqd, blkcg);
-	if (cfqg) {
-		rcu_read_unlock();
+	if (cfqg)
 		return cfqg;
-	}
 
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
@@ -1164,7 +1161,6 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 
 	if (__cfqg) {
 		kfree(cfqg);
-		rcu_read_unlock();
 		return __cfqg;
 	}
 
@@ -1172,7 +1168,6 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
 		cfqg = &cfqd->root_group;
 
 	cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
-	rcu_read_unlock();
 	return cfqg;
 }
 
@@ -2870,6 +2865,8 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
 	struct cfq_group *cfqg;
 
 retry:
+	rcu_read_lock();
+
 	cfqg = cfq_get_cfqg(cfqd);
 	cic = cfq_cic_lookup(cfqd, ioc);
 	/* cic always exists here */
@@ -2885,6 +2882,7 @@ retry:
 		cfqq = new_cfqq;
 		new_cfqq = NULL;
 	} else if (gfp_mask & __GFP_WAIT) {
+		rcu_read_unlock();
 		spin_unlock_irq(cfqd->queue->queue_lock);
 		new_cfqq = kmem_cache_alloc_node(cfq_pool,
 						 gfp_mask | __GFP_ZERO,
@@ -2910,6 +2908,7 @@ retry:
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
+	rcu_read_unlock();
 	return cfqq;
 }
```
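Read together, the hunks in throtl_get_tg() establish a consistent nesting: the RCU read section (now entered by the caller) encloses q->queue_lock, and both are dropped, inner lock first, around the blocking allocation. A condensed sketch of the resulting shape (simplified from the patched function; error paths and the racy re-lookup are elided, and the function name is made up for illustration):

```c
/* Condensed, illustrative shape of throtl_get_tg() after this patch. */
static struct throtl_grp *throtl_get_tg_sketch(struct throtl_data *td,
					       struct request_queue *q)
{
	struct blkio_cgroup *blkcg;
	struct throtl_grp *tg;

	/* entered with rcu_read_lock() and q->queue_lock both held */
	blkcg = task_blkio_cgroup(current);
	tg = throtl_find_tg(td, blkcg);
	if (tg)
		return tg;

	/* allocation may block: drop the inner lock first, then leave RCU */
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	tg = throtl_alloc_tg(td);

	/* re-enter in the same order: RCU section first, then queue_lock */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	/* after sleeping, @blkcg may have changed -- read it again */
	blkcg = task_blkio_cgroup(current);

	/* ... re-lookup race handling elided ... */
	throtl_init_add_tg_lists(td, tg, blkcg);
	return tg;
}
```

The same ordering shows up in cfq_find_alloc_queue(): rcu_read_lock() is taken at the retry: label before cfq_get_cfqg(), and released ahead of dropping queue_lock for the blocking kmem_cache_alloc_node() call, as well as on the final return path.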