Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |  4
-rw-r--r--  block/blk-core.c     | 11
-rw-r--r--  block/blk-ioc.c      |  5
-rw-r--r--  block/blk-settings.c | 39
-rw-r--r--  block/cfq-iosched.c  | 57
-rw-r--r--  block/genhd.c        |  2
6 files changed, 63 insertions(+), 55 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1fa2654db0a..e7dbbaf5fb3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,16 +147,16 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		return -EINVAL;
 
 	blkcg = cgroup_to_blkio_cgroup(cgroup);
+	spin_lock(&blkio_list_lock);
 	spin_lock_irq(&blkcg->lock);
 	blkcg->weight = (unsigned int)val;
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		spin_lock(&blkio_list_lock);
 		list_for_each_entry(blkiop, &blkio_list, list)
 			blkiop->ops.blkio_update_group_weight_fn(blkg,
 					blkcg->weight);
-		spin_unlock(&blkio_list_lock);
 	}
 	spin_unlock_irq(&blkcg->lock);
+	spin_unlock(&blkio_list_lock);
 	return 0;
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 718897e6d37..d1a9a0a64f9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq)) {
+	if (blk_account_rq(rq))
 		q->in_flight[rq_is_sync(rq)]++;
-		/*
-		 * Mark this device as supporting hardware queuing, if
-		 * we have more IOs in flight than 4.
-		 */
-		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-	}
 }
 
 /**
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index cbdabb0dd6d..98e6bf61b0a 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
 
 	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
-		if (ioc->aic && ioc->aic->dtor)
-			ioc->aic->dtor(ioc->aic);
 		cfq_dtor(ioc);
 		rcu_read_unlock();
 
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	if (atomic_dec_and_test(&ioc->nr_tasks)) {
-		if (ioc->aic && ioc->aic->exit)
-			ioc->aic->exit(ioc->aic);
 		cfq_exit(ioc);
 	}
 
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 		ret->ioprio = 0;
 		ret->last_waited = jiffies; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
-		ret->aic = NULL;
 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 		INIT_HLIST_HEAD(&ret->cic_list);
 		ret->ioc_data = NULL;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d52d4adc440..5eeb9e0d256 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -528,7 +528,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		     sector_t offset)
 {
 	sector_t alignment;
-	unsigned int top, bottom;
+	unsigned int top, bottom, ret = 0;
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -546,6 +546,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
 
+	t->misaligned |= b->misaligned;
+
 	alignment = queue_limit_alignment_offset(b, offset);
 
 	/* Bottom device has different alignment.  Check that it is
@@ -558,8 +560,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		bottom = max(b->physical_block_size, b->io_min) + alignment;
 
 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if (max(top, bottom) & (min(top, bottom) - 1)) {
 			t->misaligned = 1;
+			ret = -1;
+		}
 	}
 
 	t->logical_block_size = max(t->logical_block_size,
@@ -578,18 +582,21 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Minimum I/O a multiple of the physical block size? */
 	if (t->io_min & (t->physical_block_size - 1)) {
 		t->io_min = t->physical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Optimal I/O a multiple of the physical block size? */
 	if (t->io_opt & (t->physical_block_size - 1)) {
 		t->io_opt = 0;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Find lowest common alignment_offset */
@@ -597,8 +604,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		& (max(t->physical_block_size, t->io_min) - 1);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
-	if (t->alignment_offset & (t->logical_block_size - 1))
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
 		t->misaligned = 1;
+		ret = -1;
+	}
 
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
@@ -626,11 +635,33 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			(t->discard_granularity - 1);
 	}
 
-	return t->misaligned ? -1 : 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
 /**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t:	the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+		      sector_t start)
+{
+	struct request_queue *bq = bdev_get_queue(bdev);
+
+	start += get_start_sect(bdev);
+
+	return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
  * disk_stack_limits - adjust queue limits for stacked drivers
  * @disk:  MD/DM gendisk (top)
  * @bdev:  the underlying block device (bottom)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 918c7fd9aeb..023f4e69a33 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT		(2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT		(HZ)
-
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT	12
 
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
 	u64 seek_total;
 	sector_t seek_mean;
 	sector_t last_request_pos;
-	unsigned long seeky_start;
 
 	pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_clear_cfqq_wait_busy(cfqq);
 
 	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance.  If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+		cfq_mark_cfqq_split_coop(cfqq);
+
+	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
 	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 		return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
@@ -1803,7 +1807,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	return service_tree->count == 1;
+	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	total = cfqq->seek_total + (cfqq->seek_samples/2);
 	do_div(total, cfqq->seek_samples);
 	cfqq->seek_mean = (sector_t)total;
-
-	/*
-	 * If this cfqq is shared between multiple processes, check to
-	 * make sure that those processes are still issuing I/Os within
-	 * the mean seek distance.  If not, it may be time to break the
-	 * queues apart again.
-	 */
-	if (cfq_cfqq_coop(cfqq)) {
-		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-			cfqq->seeky_start = jiffies;
-		else if (!CFQQ_SEEKY(cfqq))
-			cfqq->seeky_start = 0;
-	}
 }
 
 /*
@@ -3077,6 +3068,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return true;
 
 	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
@@ -3447,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-	if (cfqq->seeky_start &&
-	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-		return 1;
-	return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
  */
@@ -3463,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
 	if (cfqq_process_refs(cfqq) == 1) {
-		cfqq->seeky_start = 0;
 		cfqq->pid = current->pid;
 		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
 		return cfqq;
 	}
@@ -3504,7 +3493,7 @@ new_queue:
 		/*
 		 * If the queue was seeky for too long, break it apart.
 		 */
-		if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
 			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
 			cfqq = split_cfqq(cic, cfqq);
 			if (!cfqq)
diff --git a/block/genhd.c b/block/genhd.c
index b11a4ad7d57..d13ba76a169 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
-	return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
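
A note on the new blk-settings.c helper: bdev_stack_limits() exists so a stacking driver can merge a component device's limits without resolving the partition offset itself; get_start_sect() is added inside the helper. The following is a minimal sketch of how a caller might use it. Only bdev_stack_limits() comes from the patch above; the function and variable names (stack_component, combined, data_start) are illustrative assumptions, loosely modeled on how device-mapper merges per-target limits.

	#include <linux/fs.h>
	#include <linux/blkdev.h>

	/*
	 * Hypothetical stacking-driver helper: fold one component device's
	 * queue limits into a combined limits structure, warning when the
	 * combination ends up misaligned.
	 */
	static void stack_component(struct queue_limits *combined,
				    struct block_device *bdev,
				    sector_t data_start)
	{
		char b[BDEVNAME_SIZE];

		/*
		 * data_start is the first data sector within the component
		 * device; the partition offset is added inside
		 * bdev_stack_limits() via get_start_sect().
		 */
		if (bdev_stack_limits(combined, bdev, data_start) < 0)
			printk(KERN_WARNING "%s: adding %s caused misalignment\n",
			       __func__, bdevname(bdev, b));
	}

Note the interplay with the ret changes above: because blk_stack_limits() now returns the locally accumulated ret instead of t->misaligned, and inherits prior misalignment via t->misaligned |= b->misaligned, a caller like this only sees -1 when the device it just added introduced the misalignment, not when the table was already misaligned beforehand.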
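The "intervals line up" test in blk_stack_limits() is the usual power-of-two trick: x & (y - 1) equals x % y when y is a power of two, so max(top, bottom) & (min(top, bottom) - 1) is non-zero exactly when the larger interval is not a multiple of the smaller. A stand-alone user-space illustration with invented values (the kernel version operates on block-size-derived intervals, where the smaller value being a power of two is the common case this trick assumes):

	#include <stdio.h>

	/*
	 * Mirror of the check in blk_stack_limits(): non-zero means the
	 * top and bottom I/O intervals cannot be tiled against one
	 * another, i.e. the stacked device is misaligned.
	 */
	static int intervals_misaligned(unsigned int top, unsigned int bottom)
	{
		unsigned int lo = top < bottom ? top : bottom;
		unsigned int hi = top < bottom ? bottom : top;

		return (hi & (lo - 1)) != 0;
	}

	int main(void)
	{
		printf("%d\n", intervals_misaligned(4096, 512)); /* 0: 4096 = 8 * 512 */
		printf("%d\n", intervals_misaligned(1280, 512)); /* 1: 1280 = 2.5 * 512 */
		return 0;
	}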
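Finally, the genhd.c one-liner is a signedness fix: queue_discard_alignment() returns an int, which can come out negative for a partition that starts off-granule, and printing it with %u turns that into a huge unsigned number in sysfs. A small user-space rendition of the failure mode, with an invented alignment value:

	#include <stdio.h>

	int main(void)
	{
		/* Invented value: a discard alignment of -512 bytes. */
		int discard_alignment = -512;

		printf("%u\n", discard_alignment); /* 4294966784 -- garbage */
		printf("%d\n", discard_alignment); /* -512 -- the intended output */
		return 0;
	}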