Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9beaac7fb39..c252df9169d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,21 +303,23 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
return tg;
}
-static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+static struct throtl_grp *throtl_get_tg(struct throtl_data *td,
+ struct blkio_cgroup *blkcg)
{
struct throtl_grp *tg = NULL, *__tg = NULL;
- struct blkio_cgroup *blkcg;
struct request_queue *q = td->queue;
/* no throttling for dead queue */
if (unlikely(blk_queue_bypass(q)))
return NULL;
- blkcg = task_blkio_cgroup(current);
tg = throtl_find_tg(td, blkcg);
if (tg)
return tg;
+ if (!css_tryget(&blkcg->css))
+ return NULL;
+
/*
* Need to allocate a group. Allocation of group also needs allocation
* of per cpu stats which in-turn takes a mutex() and can block. Hence
@@ -331,6 +333,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
/* Group allocated and queue is still alive. take the lock */
rcu_read_lock();
spin_lock_irq(q->queue_lock);
+ css_put(&blkcg->css);
/* Make sure @q is still alive */
if (unlikely(blk_queue_bypass(q))) {
@@ -339,11 +342,6 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
}
/*
- * Initialize the new group. After sleeping, read the blkcg again.
- */
- blkcg = task_blkio_cgroup(current);
-
- /*
* If some other thread already allocated the group while we were
* not holding queue lock, free up the group
*/
@@ -1163,7 +1161,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
* IO group
*/
spin_lock_irq(q->queue_lock);
- tg = throtl_get_tg(td);
+ tg = throtl_get_tg(td, blkcg);
if (unlikely(!tg))
goto out_unlock;
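
For context, here is a minimal sketch of the caller pattern this change implies
(a simplified illustration, not the verbatim upstream blk_throtl_bio(); the
throttling logic and error handling are elided, and the lockless fast path is
assumed to go through the throtl_find_tg() shown in the first hunk):

	bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
	{
		struct throtl_data *td = q->td;
		struct throtl_grp *tg;
		struct blkio_cgroup *blkcg;
		bool throttled = false;

		/*
		 * Resolve the issuing task's blkio_cgroup once, under RCU,
		 * and reuse it for both the lockless lookup and the
		 * (possibly sleeping) allocation path.
		 */
		rcu_read_lock();
		blkcg = task_blkio_cgroup(current);

		/* ... lockless fast path via throtl_find_tg(td, blkcg) ... */

		spin_lock_irq(q->queue_lock);
		tg = throtl_get_tg(td, blkcg);	/* takes its own css ref if it must drop the lock */
		if (unlikely(!tg))
			goto out_unlock;

		/* ... queue or dispatch the bio against tg ... */

	out_unlock:
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();
		return throttled;
	}

Because the caller now pins blkcg (via RCU on the fast path, and via the
css_tryget()/css_put() pair inside throtl_get_tg() across the blocking
allocation), the re-read of task_blkio_cgroup(current) after sleeping is no
longer needed, which is what the hunk removed at @@ -339,11 above reflects.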