Diffstat (limited to 'block')
-rw-r--r-- | block/Kconfig      |  2 | +-
-rw-r--r-- | block/blk-cgroup.c | 25 | +++++++++++++++++--------
-rw-r--r-- | block/blk-core.c   |  3 | ++-
-rw-r--r-- | block/blk-exec.c   |  8 | +++++++-
4 files changed, 27 insertions, 11 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 09acf1b3990..a7e40a7c821 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -89,7 +89,7 @@ config BLK_DEV_INTEGRITY
 
 config BLK_DEV_THROTTLING
 	bool "Block layer bio throttling support"
-	depends on BLK_CGROUP=y && EXPERIMENTAL
+	depends on BLK_CGROUP=y
 	default n
 	---help---
 	  Block layer bio throttling support. It can be used to limit
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index cafcd743118..3f6d39d23bb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -285,6 +285,13 @@ static void blkg_destroy_all(struct request_queue *q)
 		blkg_destroy(blkg);
 		spin_unlock(&blkcg->lock);
 	}
+
+	/*
+	 * root blkg is destroyed.  Just clear the pointer since
+	 * root_rl does not take reference on root blkg.
+	 */
+	q->root_blkg = NULL;
+	q->root_rl.blkg = NULL;
 }
 
 static void blkg_rcu_free(struct rcu_head *rcu_head)
@@ -326,6 +333,9 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 	 */
 	if (rl == &q->root_rl) {
 		ent = &q->blkg_list;
+		/* There are no more block groups, hence no request lists */
+		if (list_empty(ent))
+			return NULL;
 	} else {
 		blkg = container_of(rl, struct blkcg_gq, rl);
 		ent = &blkg->q_node;
@@ -590,7 +600,7 @@ struct cftype blkcg_files[] = {
 };
 
 /**
- * blkcg_pre_destroy - cgroup pre_destroy callback
+ * blkcg_css_offline - cgroup css_offline callback
  * @cgroup: cgroup of interest
  *
 * This function is called when @cgroup is about to go away and responsible
@@ -600,7 +610,7 @@ struct cftype blkcg_files[] = {
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
-static int blkcg_pre_destroy(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup *cgroup)
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
@@ -622,10 +632,9 @@ static int blkcg_pre_destroy(struct cgroup *cgroup)
 	}
 
 	spin_unlock_irq(&blkcg->lock);
-	return 0;
 }
 
-static void blkcg_destroy(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup *cgroup)
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 
@@ -633,7 +642,7 @@ static void blkcg_destroy(struct cgroup *cgroup)
 	kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkcg *blkcg;
@@ -730,10 +739,10 @@ static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 
 struct cgroup_subsys blkio_subsys = {
 	.name = "blkio",
-	.create = blkcg_create,
+	.css_alloc = blkcg_css_alloc,
+	.css_offline = blkcg_css_offline,
+	.css_free = blkcg_css_free,
 	.can_attach = blkcg_can_attach,
-	.pre_destroy = blkcg_pre_destroy,
-	.destroy = blkcg_destroy,
 	.subsys_id = blkio_subsys_id,
 	.base_cftypes = blkcg_files,
 	.module = THIS_MODULE,
diff --git a/block/blk-core.c b/block/blk-core.c
index a33870b1847..3c95c4d6e31 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2868,7 +2868,8 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct request *rqa = container_of(a, struct request, queuelist);
 	struct request *rqb = container_of(b, struct request, queuelist);
 
-	return !(rqa->q <= rqb->q);
+	return !(rqa->q < rqb->q ||
+		(rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
 }
 
 /*
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5bd4dd..f71eac35c1b 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
+	/*
+	 * need to check this before __blk_run_queue(), because rq can
+	 * be freed before that returns.
+	 */
+	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
-	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+	if (is_pm_resume)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }
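
The subtlest change above is the blk-exec.c hunk: __blk_run_queue() can complete the
request and free it, so rq->cmd_type must be copied into a local variable before the
queue is run. The standalone userspace sketch below models that pattern only; it is
not kernel code, and the names fake_request, run_queue and execute_nowait are
invented for illustration.

/*
 * Sketch of the use-after-free fix in blk-exec.c: anything needed
 * from a request after it may have been completed and freed must
 * be read into a local first.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum cmd_type { CMD_NORMAL, CMD_PM_RESUME };

struct fake_request {
	enum cmd_type cmd_type;
};

/* Models __blk_run_queue(): may complete and free the request. */
static void run_queue(struct fake_request *rq)
{
	free(rq);			/* rq is dangling after this */
}

static void execute_nowait(struct fake_request *rq)
{
	/*
	 * Read the flag *before* run_queue(), as the patch reads
	 * rq->cmd_type before __blk_run_queue(); reading it
	 * afterwards would dereference freed memory.
	 */
	bool is_pm_resume = rq->cmd_type == CMD_PM_RESUME;

	run_queue(rq);

	if (is_pm_resume)		/* safe: local copy only */
		printf("PM resume path taken\n");
}

int main(void)
{
	struct fake_request *rq = malloc(sizeof(*rq));

	if (!rq)
		return 1;
	rq->cmd_type = CMD_PM_RESUME;
	execute_nowait(rq);
	return 0;
}

The blk-core.c hunk changes plug_rq_cmp() from the broken !(rqa->q <= rqb->q) to a
proper two-key ordering: requests are sorted first by queue pointer, then by starting
sector (blk_rq_pos()), so requests flushed from a plug list reach each queue in
ascending LBA order. A hypothetical qsort()-style equivalent of that comparison
(struct req and req_cmp are invented names, not kernel API):

#include <stdint.h>
#include <stdlib.h>

struct req {
	const void *q;		/* stands in for rq->q */
	uint64_t sector;	/* stands in for blk_rq_pos(rq) */
};

static int req_cmp(const void *pa, const void *pb)
{
	const struct req *a = pa, *b = pb;

	/* Primary key: queue pointer; secondary key: start sector. */
	if (a->q != b->q)
		return a->q < b->q ? -1 : 1;
	if (a->sector != b->sector)
		return a->sector < b->sector ? -1 : 1;
	return 0;
}

Usage would be qsort(reqs, n, sizeof(*reqs), req_cmp). The kernel's list_sort()
comparator returns nonzero/zero rather than -1/0/1, but the ordering it induces is
the same.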