author	Jens Axboe <axboe@fb.com>	2014-06-05 13:38:39 -0600
committer	Jens Axboe <axboe@fb.com>	2014-06-05 13:38:39 -0600
commit	762380ad9322951cea4ce9d24864265f9c66a916 (patch)
tree	9ec3fe551583dcb9243d02a9728511e99216dcdc /include/linux
parent	046f153343e33dcad1be7f6249ea6ff1c6fd9b58 (diff)
block: add notion of a chunk size for request merging
Some drivers have different limits on what size a request should optimally be, depending on the offset of the request. This is similar to dividing a device into chunks. Add a setting that allows the driver to inform the block layer of such a chunk size. The block layer will then prevent merging across the chunks.

This is needed to optimally support NVMe with a non-zero stripe size.

Signed-off-by: Jens Axboe <axboe@fb.com>
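The blk_queue_chunk_sectors() setter declared below is implemented outside this header, so the following driver-side sketch is hypothetical: the function name, the stripe-size parameter and the 512-byte sector conversion are illustrative only. Note that the mask arithmetic in blk_max_size_offset() implies the chunk size is a power of two.

	/* Hypothetical sketch: advertise a device stripe size as the merge chunk. */
	static void example_advertise_stripe(struct request_queue *q,
					     unsigned int stripe_bytes)
	{
		/* chunk_sectors is in 512-byte sectors; 0 means "no chunking" */
		blk_queue_chunk_sectors(q, stripe_bytes >> 9);
	}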
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blkdev.h	22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3cd426e971d..dc2c703f05f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -280,6 +280,7 @@ struct queue_limits {
unsigned long seg_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int chunk_sectors;
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
@@ -910,6 +911,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
return q->limits.max_sectors;
}
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+ sector_t offset)
+{
+ if (!q->limits.chunk_sectors)
+ return q->limits.max_hw_sectors;
+
+ return q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1));
+}
+
static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -917,7 +932,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
return q->limits.max_hw_sectors;
- return blk_queue_get_max_sectors(q, rq->cmd_flags);
+ if (!q->limits.chunk_sectors)
+ return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+ return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+ blk_queue_get_max_sectors(q, rq->cmd_flags));
}
static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -983,6 +1002,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
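
For reference, the new arithmetic can be exercised in userspace. The sketch below mirrors blk_max_size_offset(); blk_rq_get_max_sectors() then takes the min of this value and the usual per-queue limit. The chunk size, hardware limit and offsets are made-up example values, not taken from any driver.

	#include <stdio.h>

	/* Sectors remaining in the current chunk, or the hardware limit when
	 * no chunk size is configured (chunk size assumed power of two). */
	static unsigned int max_size_offset(unsigned int chunk_sectors,
					    unsigned int max_hw_sectors,
					    unsigned long long offset)
	{
		if (!chunk_sectors)
			return max_hw_sectors;
		return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
	}

	int main(void)
	{
		unsigned int chunk = 128, hw_max = 255;	/* 64 KiB chunks, 512-byte sectors */

		/* Request at sector 200: 200 & 127 = 72, so only 56 sectors fit
		 * before the next chunk boundary; min(56, 255) = 56, so the
		 * hardware limit does not bind here. */
		printf("%u\n", max_size_offset(chunk, hw_max, 200));	/* 56 */

		/* A request aligned to a chunk boundary gets a full chunk. */
		printf("%u\n", max_size_offset(chunk, hw_max, 256));	/* 128 */

		/* chunk_sectors == 0 disables the chunk cap entirely. */
		printf("%u\n", max_size_offset(0, hw_max, 200));	/* 255 */
		return 0;
	}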