From a7f557923441186a3cdbabc54f1bcacf42b63bf5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:17 +0900 Subject: block: kill blk_start_queueing() blk_start_queueing() is identical to __blk_run_queue() except that it doesn't check for recursion. None of the current users depends on blk_start_queueing() running request_fn directly. Replace usages of blk_start_queueing() with [__]blk_run_queue() and kill it. [ Impact: removal of mostly duplicate interface function ] Signed-off-by: Tejun Heo --- include/linux/blkdev.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2755d5c6da2..12e20de44b6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -797,7 +797,6 @@ extern void blk_sync_queue(struct request_queue *q); extern void __blk_stop_queue(struct request_queue *q); extern void __blk_run_queue(struct request_queue *); extern void blk_run_queue(struct request_queue *); -extern void blk_start_queueing(struct request_queue *); extern int blk_rq_map_user(struct request_queue *, struct request *, struct rq_map_data *, void __user *, unsigned long, gfp_t); -- cgit v1.2.3-70-g09d2 From 5efccd17ceb0fc43837a331297c2c407969d7201 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:18 +0900 Subject: block: reorder request completion functions Reorder request completion functions such that * All request completion functions are located together. * Functions which are used by only one caller are put right above the caller. * end_request() is put after other completion functions but before blk_update_request(). This change is for completion function cleanup which will follow. [ Impact: cleanup, code reorganization ] Signed-off-by: Tejun Heo --- block/blk-core.c | 144 ++++++++++++++++++++++++------------------------- include/linux/blkdev.h | 16 +++--- 2 files changed, 80 insertions(+), 80 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index cf10dfcda99..406a93e526b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1683,6 +1683,35 @@ static void blk_account_io_done(struct request *req) } } +/** + * blk_rq_bytes - Returns bytes left to complete in the entire request + * @rq: the request being processed + **/ +unsigned int blk_rq_bytes(struct request *rq) +{ + if (blk_fs_request(rq)) + return rq->hard_nr_sectors << 9; + + return rq->data_len; +} +EXPORT_SYMBOL_GPL(blk_rq_bytes); + +/** + * blk_rq_cur_bytes - Returns bytes left to complete in the current segment + * @rq: the request being processed + **/ +unsigned int blk_rq_cur_bytes(struct request *rq) +{ + if (blk_fs_request(rq)) + return rq->current_nr_sectors << 9; + + if (rq->bio) + return rq->bio->bi_size; + + return rq->data_len; +} +EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); + /** * __end_that_request_first - end I/O on a request * @req: the request being processed @@ -1797,6 +1826,22 @@ static int __end_that_request_first(struct request *req, int error, return 1; } +static int end_that_request_data(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) +{ + if (rq->bio) { + if (__end_that_request_first(rq, error, nr_bytes)) + return 1; + + /* Bidi request must be completed as a whole */ + if (blk_bidi_rq(rq) && + __end_that_request_first(rq->next_rq, error, bidi_bytes)) + return 1; + } + + return 0; +} + /* * queue lock must be held */ @@ -1825,78 +1870,6 @@ static void end_that_request_last(struct request *req, int error) } } -/** -
* blk_rq_bytes - Returns bytes left to complete in the entire request - * @rq: the request being processed - **/ -unsigned int blk_rq_bytes(struct request *rq) -{ - if (blk_fs_request(rq)) - return rq->hard_nr_sectors << 9; - - return rq->data_len; -} -EXPORT_SYMBOL_GPL(blk_rq_bytes); - -/** - * blk_rq_cur_bytes - Returns bytes left to complete in the current segment - * @rq: the request being processed - **/ -unsigned int blk_rq_cur_bytes(struct request *rq) -{ - if (blk_fs_request(rq)) - return rq->current_nr_sectors << 9; - - if (rq->bio) - return rq->bio->bi_size; - - return rq->data_len; -} -EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); - -/** - * end_request - end I/O on the current segment of the request - * @req: the request being processed - * @uptodate: error value or %0/%1 uptodate flag - * - * Description: - * Ends I/O on the current segment of a request. If that is the only - * remaining segment, the request is also completed and freed. - * - * This is a remnant of how older block drivers handled I/O completions. - * Modern drivers typically end I/O on the full request in one go, unless - * they have a residual value to account for. For that case this function - * isn't really useful, unless the residual just happens to be the - * full current segment. In other words, don't use this function in new - * code. Use blk_end_request() or __blk_end_request() to end a request. - **/ -void end_request(struct request *req, int uptodate) -{ - int error = 0; - - if (uptodate <= 0) - error = uptodate ? uptodate : -EIO; - - __blk_end_request(req, error, req->hard_cur_sectors << 9); -} -EXPORT_SYMBOL(end_request); - -static int end_that_request_data(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes) -{ - if (rq->bio) { - if (__end_that_request_first(rq, error, nr_bytes)) - return 1; - - /* Bidi request must be completed as a whole */ - if (blk_bidi_rq(rq) && - __end_that_request_first(rq->next_rq, error, bidi_bytes)) - return 1; - } - - return 0; -} - /** * blk_end_io - Generic end_io function to complete a request. * @rq: the request being processed @@ -2006,6 +1979,33 @@ int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, } EXPORT_SYMBOL_GPL(blk_end_bidi_request); +/** + * end_request - end I/O on the current segment of the request + * @req: the request being processed + * @uptodate: error value or %0/%1 uptodate flag + * + * Description: + * Ends I/O on the current segment of a request. If that is the only + * remaining segment, the request is also completed and freed. + * + * This is a remnant of how older block drivers handled I/O completions. + * Modern drivers typically end I/O on the full request in one go, unless + * they have a residual value to account for. For that case this function + * isn't really useful, unless the residual just happens to be the + * full current segment. In other words, don't use this function in new + * code. Use blk_end_request() or __blk_end_request() to end a request. + **/ +void end_request(struct request *req, int uptodate) +{ + int error = 0; + + if (uptodate <= 0) + error = uptodate ? 
uptodate : -EIO; + + __blk_end_request(req, error, req->hard_cur_sectors << 9); +} +EXPORT_SYMBOL(end_request); + /** * blk_update_request - Special helper function for request stacking drivers * @rq: the request being processed diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 12e20de44b6..156ffd9de96 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -831,6 +831,14 @@ static inline void blk_run_address_space(struct address_space *mapping) extern void blkdev_dequeue_request(struct request *req); +/* + * blk_end_request() takes bytes instead of sectors as a complete size. + * blk_rq_bytes() returns bytes left to complete in the entire request. + * blk_rq_cur_bytes() returns bytes left to complete in the current segment. + */ +extern unsigned int blk_rq_bytes(struct request *rq); +extern unsigned int blk_rq_cur_bytes(struct request *rq); + /* * blk_end_request() and friends. * __blk_end_request() and end_request() must be called with @@ -857,14 +865,6 @@ extern void blk_abort_queue(struct request_queue *); extern void blk_update_request(struct request *rq, int error, unsigned int nr_bytes); -/* - * blk_end_request() takes bytes instead of sectors as a complete size. - * blk_rq_bytes() returns bytes left to complete in the entire request. - * blk_rq_cur_bytes() returns bytes left to complete in the current segment. - */ -extern unsigned int blk_rq_bytes(struct request *rq); -extern unsigned int blk_rq_cur_bytes(struct request *rq); - /* * Access functions for manipulating queue properties */ -- cgit v1.2.3-70-g09d2 From 0b302d5aa7975006fa2ec3d66386610b9b36c669 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:18 +0900 Subject: block: kill blk_end_request_callback() With recent IDE updates, blk_end_request_callback() doesn't have any user now. Kill it. [ Impact: removal of unused convoluted interface ] Signed-off-by: Tejun Heo --- block/blk-core.c | 48 +++--------------------------------------------- include/linux/blkdev.h | 3 --- 2 files changed, 3 insertions(+), 48 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 678ede23ed0..2f277ea0e59 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1971,10 +1971,6 @@ static void end_that_request_last(struct request *req, int error) * @error: %0 for success, < %0 for error * @nr_bytes: number of bytes to complete @rq * @bidi_bytes: number of bytes to complete @rq->next_rq - * @drv_callback: function called between completion of bios in the request - * and completion of the request. - * If the callback returns non %0, this helper returns without - * completion of the request. * * Description: * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. @@ -1985,8 +1981,7 @@ static void end_that_request_last(struct request *req, int error) * %1 - this request is not freed yet, it still has pending buffers. 
**/ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, - unsigned int bidi_bytes, - int (drv_callback)(struct request *)) + unsigned int bidi_bytes) { struct request_queue *q = rq->q; unsigned long flags = 0UL; @@ -1994,10 +1989,6 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, if (end_that_request_data(rq, error, nr_bytes, bidi_bytes)) return 1; - /* Special feature for tricky drivers */ - if (drv_callback && drv_callback(rq)) - return 1; - add_disk_randomness(rq->rq_disk); spin_lock_irqsave(q->queue_lock, flags); @@ -2023,7 +2014,7 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, **/ int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { - return blk_end_io(rq, error, nr_bytes, 0, NULL); + return blk_end_io(rq, error, nr_bytes, 0); } EXPORT_SYMBOL_GPL(blk_end_request); @@ -2070,7 +2061,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request); int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes) { - return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); + return blk_end_io(rq, error, nr_bytes, bidi_bytes); } EXPORT_SYMBOL_GPL(blk_end_bidi_request); @@ -2131,39 +2122,6 @@ void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) } EXPORT_SYMBOL_GPL(blk_update_request); -/** - * blk_end_request_callback - Special helper function for tricky drivers - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete - * @drv_callback: function called between completion of bios in the request - * and completion of the request. - * If the callback returns non %0, this helper returns without - * completion of the request. - * - * Description: - * Ends I/O on a number of bytes attached to @rq. - * If @rq has leftover, sets it up for the next range of segments. - * - * This special helper function is used only for existing tricky drivers. - * (e.g. cdrom_newpc_intr() of ide-cd) - * This interface will be removed when such drivers are rewritten. - * Don't use this interface in other places anymore. - * - * Return: - * %0 - we are done with this request - * %1 - this request is not freed yet. - * this request still has pending buffers or - * the driver doesn't want to finish this request yet. 
- **/ -int blk_end_request_callback(struct request *rq, int error, - unsigned int nr_bytes, - int (drv_callback)(struct request *)) -{ - return blk_end_io(rq, error, nr_bytes, 0, drv_callback); -} -EXPORT_SYMBOL_GPL(blk_end_request_callback); - void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 156ffd9de96..1fa9dcf9aa6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -855,9 +855,6 @@ extern int __blk_end_request(struct request *rq, int error, extern int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes); extern void end_request(struct request *, int); -extern int blk_end_request_callback(struct request *rq, int error, - unsigned int nr_bytes, - int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); -- cgit v1.2.3-70-g09d2 From 2e60e02297cf54e367567f2d85b2ca56b1c4a906 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:18 +0900 Subject: block: clean up request completion API Request completion has gone through several changes and became a bit messy over time. Clean it up. 1. end_that_request_data() is a thin wrapper around __end_that_request_first() which checks whether bio is NULL before doing anything and handles bidi completion. blk_update_request() is a thin wrapper around end_that_request_data() which clears nr_sectors on the last iteration but doesn't use the bidi completion. Clean it up by moving the initial bio NULL check and nr_sectors clearing on the last iteration into __end_that_request_first() and renaming it to blk_update_request(), which makes blk_end_io() the only user of end_that_request_data(). Collapse end_that_request_data() into blk_end_io(). 2. There are four visible completion variants - blk_end_request(), __blk_end_request(), blk_end_bidi_request() and end_request(). blk_end_request() and blk_end_bidi_request() use blk_end_io() as the backend but __blk_end_request() and end_request() use a separate implementation in __blk_end_request() due to different locking rules. blk_end_bidi_request() is identical to blk_end_io(). Collapse blk_end_io() into blk_end_bidi_request(), separate out request update into internal helper blk_update_bidi_request() and add __blk_end_bidi_request(). Redefine [__]blk_end_request() as thin inline wrappers around [__]blk_end_bidi_request(). 3. As the whole request issue/completion usages are about to be modified and audited, it's a good chance to convert the completion functions to return bool, which better indicates the intended meaning of the return values. 4. The function name end_that_request_last() is from the days when it was a public interface and slightly confusing. Give it a proper internal name - blk_finish_request(). 5. Add a description explaining that blk_end_bidi_request() can be safely used for uni requests as suggested by Boaz Harrosh. The only visible behavior change is from #1. nr_sectors counts are cleared after the final iteration no matter which function is used to complete the request. I couldn't find any place where the code assumes those nr_sectors counters contain the values for the last segment, and this change is good as it makes the API much more consistent: the end result is now the same whether a request is completed using [__]blk_end_request() alone or in combination with blk_update_request().
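To make the new bool convention concrete, here is a minimal driver-side sketch (my_dev, its fields and my_dev_start_next() are hypothetical; only blk_end_request() and its return convention come from this series):

/*
 * Sketch of a completion path under the new API: blk_end_request()
 * returns true while buffers are still pending and false once the
 * request has been fully completed and freed.  blk_end_request()
 * takes the queue lock itself, so no locking is needed here.
 */
static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;		/* hypothetical driver state */
	struct request *rq = dev->current_rq;

	if (blk_end_request(rq, dev->error, dev->done_bytes))
		return IRQ_HANDLED;		/* partial: segments remain */

	dev->current_rq = NULL;			/* request done and freed */
	my_dev_start_next(dev);			/* hypothetical: issue next */
	return IRQ_HANDLED;
}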
API further cleaned up per Christoph's suggestion. [ Impact: cleanup, rq->*nr_sectors always updated after req completion ] Signed-off-by: Tejun Heo Reviewed-by: Boaz Harrosh Cc: Christoph Hellwig --- block/blk-core.c | 226 ++++++++++++++++--------------------------------- include/linux/blkdev.h | 94 +++++++++++++++++--- 2 files changed, 157 insertions(+), 163 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 2f277ea0e59..89cc05d9a7a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1808,25 +1808,35 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) } /** - * __end_that_request_first - end I/O on a request - * @req: the request being processed + * blk_update_request - Special helper function for request stacking drivers + * @rq: the request being processed * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete + * @nr_bytes: number of bytes to complete @rq * * Description: - * Ends I/O on a number of bytes attached to @req, and sets it up - * for the next range of segments (if any) in the cluster. + * Ends I/O on a number of bytes attached to @rq, but doesn't complete + * the request structure even if @rq doesn't have leftover. + * If @rq has leftover, sets it up for the next range of segments. + * + * This special helper function is only for request stacking drivers + * (e.g. request-based dm) so that they can handle partial completion. + * Actual device drivers should use blk_end_request instead. + * + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees + * %false return from this function. * * Return: - * %0 - we are done with this request, call end_that_request_last() - * %1 - still buffers pending for this request + * %false - this request doesn't have any more data + * %true - this request has more data **/ -static int __end_that_request_first(struct request *req, int error, - int nr_bytes) +bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) { int total_bytes, bio_nbytes, next_idx = 0; struct bio *bio; + if (!req->bio) + return false; + trace_block_rq_complete(req->q, req); /* @@ -1903,8 +1913,16 @@ static int __end_that_request_first(struct request *req, int error, /* * completely done */ - if (!req->bio) - return 0; + if (!req->bio) { + /* + * Reset counters so that the request stacking driver + * can find how many bytes remain in the request + * later. 
+ */ + req->nr_sectors = req->hard_nr_sectors = 0; + req->current_nr_sectors = req->hard_cur_sectors = 0; + return false; + } /* * if the request wasn't completed, update state @@ -1918,29 +1936,31 @@ static int __end_that_request_first(struct request *req, int error, blk_recalc_rq_sectors(req, total_bytes >> 9); blk_recalc_rq_segments(req); - return 1; + return true; } +EXPORT_SYMBOL_GPL(blk_update_request); -static int end_that_request_data(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes) +static bool blk_update_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, + unsigned int bidi_bytes) { - if (rq->bio) { - if (__end_that_request_first(rq, error, nr_bytes)) - return 1; + if (blk_update_request(rq, error, nr_bytes)) + return true; - /* Bidi request must be completed as a whole */ - if (blk_bidi_rq(rq) && - __end_that_request_first(rq->next_rq, error, bidi_bytes)) - return 1; - } + /* Bidi request must be completed as a whole */ + if (unlikely(blk_bidi_rq(rq)) && + blk_update_request(rq->next_rq, error, bidi_bytes)) + return true; - return 0; + add_disk_randomness(rq->rq_disk); + + return false; } /* * queue lock must be held */ -static void end_that_request_last(struct request *req, int error) +static void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -1966,161 +1986,65 @@ static void end_that_request_last(struct request *req, int error) } /** - * blk_end_io - Generic end_io function to complete a request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete @rq - * @bidi_bytes: number of bytes to complete @rq->next_rq + * blk_end_bidi_request - Complete a bidi request + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq * * Description: * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. - * If @rq has leftover, sets it up for the next range of segments. + * Drivers that supports bidi can safely call this member for any + * type of request, bidi or uni. In the later case @bidi_bytes is + * just ignored. * * Return: - * %0 - we are done with this request - * %1 - this request is not freed yet, it still has pending buffers. + * %false - we are done with this request + * %true - still buffers pending for this request **/ -static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, - unsigned int bidi_bytes) +bool blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { struct request_queue *q = rq->q; - unsigned long flags = 0UL; - - if (end_that_request_data(rq, error, nr_bytes, bidi_bytes)) - return 1; + unsigned long flags; - add_disk_randomness(rq->rq_disk); + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; spin_lock_irqsave(q->queue_lock, flags); - end_that_request_last(rq, error); + blk_finish_request(rq, error); spin_unlock_irqrestore(q->queue_lock, flags); - return 0; -} - -/** - * blk_end_request - Helper function for drivers to complete the request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete - * - * Description: - * Ends I/O on a number of bytes attached to @rq. - * If @rq has leftover, sets it up for the next range of segments. 
- * - * Return: - * %0 - we are done with this request - * %1 - still buffers pending for this request - **/ -int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) -{ - return blk_end_io(rq, error, nr_bytes, 0); -} -EXPORT_SYMBOL_GPL(blk_end_request); - -/** - * __blk_end_request - Helper function for drivers to complete the request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete - * - * Description: - * Must be called with queue lock held unlike blk_end_request(). - * - * Return: - * %0 - we are done with this request - * %1 - still buffers pending for this request - **/ -int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) -{ - if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) - return 1; - - add_disk_randomness(rq->rq_disk); - - end_that_request_last(rq, error); - - return 0; + return false; } -EXPORT_SYMBOL_GPL(__blk_end_request); +EXPORT_SYMBOL_GPL(blk_end_bidi_request); /** - * blk_end_bidi_request - Helper function for drivers to complete bidi request. - * @rq: the bidi request being processed + * __blk_end_bidi_request - Complete a bidi request with queue lock held + * @rq: the request to complete * @error: %0 for success, < %0 for error * @nr_bytes: number of bytes to complete @rq * @bidi_bytes: number of bytes to complete @rq->next_rq * * Description: - * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. + * Identical to blk_end_bidi_request() except that queue lock is + * assumed to be locked on entry and remains so on return. * * Return: - * %0 - we are done with this request - * %1 - still buffers pending for this request - **/ -int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, - unsigned int bidi_bytes) -{ - return blk_end_io(rq, error, nr_bytes, bidi_bytes); -} -EXPORT_SYMBOL_GPL(blk_end_bidi_request); - -/** - * end_request - end I/O on the current segment of the request - * @req: the request being processed - * @uptodate: error value or %0/%1 uptodate flag - * - * Description: - * Ends I/O on the current segment of a request. If that is the only - * remaining segment, the request is also completed and freed. - * - * This is a remnant of how older block drivers handled I/O completions. - * Modern drivers typically end I/O on the full request in one go, unless - * they have a residual value to account for. For that case this function - * isn't really useful, unless the residual just happens to be the - * full current segment. In other words, don't use this function in new - * code. Use blk_end_request() or __blk_end_request() to end a request. + * %false - we are done with this request + * %true - still buffers pending for this request **/ -void end_request(struct request *req, int uptodate) +bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { - int error = 0; + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; - if (uptodate <= 0) - error = uptodate ? 
uptodate : -EIO; + blk_finish_request(rq, error); - __blk_end_request(req, error, req->hard_cur_sectors << 9); -} -EXPORT_SYMBOL(end_request); - -/** - * blk_update_request - Special helper function for request stacking drivers - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete @rq - * - * Description: - * Ends I/O on a number of bytes attached to @rq, but doesn't complete - * the request structure even if @rq doesn't have leftover. - * If @rq has leftover, sets it up for the next range of segments. - * - * This special helper function is only for request stacking drivers - * (e.g. request-based dm) so that they can handle partial completion. - * Actual device drivers should use blk_end_request instead. - */ -void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) -{ - if (!end_that_request_data(rq, error, nr_bytes, 0)) { - /* - * These members are not updated in end_that_request_data() - * when all bios are completed. - * Update them so that the request stacking driver can find - * how many bytes remain in the request later. - */ - rq->nr_sectors = rq->hard_nr_sectors = 0; - rq->current_nr_sectors = rq->hard_cur_sectors = 0; - } + return false; } -EXPORT_SYMBOL_GPL(blk_update_request); +EXPORT_SYMBOL_GPL(__blk_end_bidi_request); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1fa9dcf9aa6..501f6845cc7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -840,27 +840,97 @@ extern unsigned int blk_rq_bytes(struct request *rq); extern unsigned int blk_rq_cur_bytes(struct request *rq); /* - * blk_end_request() and friends. - * __blk_end_request() and end_request() must be called with - * the request queue spinlock acquired. + * Request completion related functions. + * + * blk_update_request() completes given number of bytes and updates + * the request without completing it. + * + * blk_end_request() and friends. __blk_end_request() and + * end_request() must be called with the request queue spinlock + * acquired. * * Several drivers define their own end_request and call * blk_end_request() for parts of the original function. * This prevents code duplication in drivers. */ -extern int blk_end_request(struct request *rq, int error, - unsigned int nr_bytes); -extern int __blk_end_request(struct request *rq, int error, - unsigned int nr_bytes); -extern int blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes); -extern void end_request(struct request *, int); +extern bool blk_update_request(struct request *rq, int error, + unsigned int nr_bytes); +extern bool blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, + unsigned int bidi_bytes); +extern bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, + unsigned int bidi_bytes); + +/** + * blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Ends I/O on a number of bytes attached to @rq. + * If @rq has leftover, sets it up for the next range of segments. 
+ * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +static inline bool blk_end_request(struct request *rq, int error, + unsigned int nr_bytes) +{ + return blk_end_bidi_request(rq, error, nr_bytes, 0); +} + +/** + * __blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Must be called with queue lock held unlike blk_end_request(). + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +static inline bool __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes) +{ + return __blk_end_bidi_request(rq, error, nr_bytes, 0); +} + +/** + * end_request - end I/O on the current segment of the request + * @rq: the request being processed + * @uptodate: error value or %0/%1 uptodate flag + * + * Description: + * Ends I/O on the current segment of a request. If that is the only + * remaining segment, the request is also completed and freed. + * + * This is a remnant of how older block drivers handled I/O completions. + * Modern drivers typically end I/O on the full request in one go, unless + * they have a residual value to account for. For that case this function + * isn't really useful, unless the residual just happens to be the + * full current segment. In other words, don't use this function in new + * code. Use blk_end_request() or __blk_end_request() to end a request. + **/ +static inline void end_request(struct request *rq, int uptodate) +{ + int error = 0; + + if (uptodate <= 0) + error = uptodate ? uptodate : -EIO; + + __blk_end_bidi_request(rq, error, rq->hard_cur_sectors << 9, 0); +} + extern void blk_complete_request(struct request *); extern void __blk_complete_request(struct request *); extern void blk_abort_request(struct request *); extern void blk_abort_queue(struct request_queue *); -extern void blk_update_request(struct request *rq, int error, - unsigned int nr_bytes); /* * Access functions for manipulating queue properties -- cgit v1.2.3-70-g09d2 From 40cbbb781d3eba5d6ac0860db078af490e5c7c6b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:19 +0900 Subject: block: implement and use [__]blk_end_request_all() There are many [__]blk_end_request() call sites which call it with full request length and expect full completion. Many of them ensure that the request actually completes by doing BUG_ON() on the return value, which is awkward and error-prone. This patch adds [__]blk_end_request_all() which takes @rq and @error and fully completes the request. BUG_ON() is added to ensure that this actually happens. Most conversions are simple but there are a few noteworthy ones. * cdrom/viocd: viocd_end_request() replaced with direct calls to __blk_end_request_all(). * s390/block/dasd: dasd_end_request() replaced with direct calls to __blk_end_request_all(). * s390/char/tape_block: tapeblock_end_request() replaced with direct calls to blk_end_request_all().
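For reference, the conversion this series performs at each call site follows the pattern below (sketch; rq and error stand for whatever the surrounding driver code provides):

/* before: full completion asserted by hand at the call site */
if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
	BUG();

/*
 * after: the helper completes the whole request and carries the
 * BUG_ON() internally
 */
__blk_end_request_all(rq, error);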
[ Impact: cleanup ] Signed-off-by: Tejun Heo Cc: Russell King Cc: Stephen Rothwell Cc: Mike Miller Cc: Martin Schwidefsky Cc: Jeff Garzik Cc: Rusty Russell Cc: Jeremy Fitzhardinge Cc: Alex Dubov Cc: James Bottomley --- arch/arm/plat-omap/mailbox.c | 11 +++-------- block/blk-barrier.c | 9 ++------- block/blk-core.c | 2 +- block/elevator.c | 2 +- drivers/block/cpqarray.c | 3 +-- drivers/block/sx8.c | 3 +-- drivers/block/virtio_blk.c | 2 +- drivers/block/xen-blkfront.c | 4 +--- drivers/cdrom/gdrom.c | 2 +- drivers/cdrom/viocd.c | 25 ++++--------------------- drivers/memstick/core/mspro_block.c | 2 +- drivers/s390/block/dasd.c | 17 ++++------------- drivers/s390/char/tape_block.c | 15 ++++----------- drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 32 ++++++++++++++++++++++++++++++++ 15 files changed, 58 insertions(+), 73 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 0abfbaa5987..cf81bad8aec 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -192,8 +192,7 @@ static void mbox_tx_work(struct work_struct *work) } spin_lock(q->queue_lock); - if (__blk_end_request(rq, 0, 0)) - BUG(); + __blk_end_request_all(rq, 0); spin_unlock(q->queue_lock); } } @@ -224,10 +223,7 @@ static void mbox_rx_work(struct work_struct *work) break; msg = (mbox_msg_t) rq->data; - - if (blk_end_request(rq, 0, 0)) - BUG(); - + blk_end_request_all(rq, 0); mbox->rxq->callback((void *)msg); } } @@ -337,8 +333,7 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf) *p = (mbox_msg_t) rq->data; - if (blk_end_request(rq, 0, 0)) - BUG(); + blk_end_request_all(rq, 0); if (unlikely(mbox_seq_test(mbox, *p))) { pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p); diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 20b4111fa05..c8d087655ef 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) */ q->ordseq = 0; rq = q->orig_bar_rq; - - if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) - BUG(); - + __blk_end_request_all(rq, q->orderr); return true; } @@ -252,9 +249,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp) * with prejudice. 
*/ elv_dequeue_request(q, rq); - if (__blk_end_request(rq, -EOPNOTSUPP, - blk_rq_bytes(rq))) - BUG(); + __blk_end_request_all(rq, -EOPNOTSUPP); *rqp = NULL; return false; } diff --git a/block/blk-core.c b/block/blk-core.c index b84250d3019..0520cc70458 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1780,7 +1780,7 @@ struct request *elv_next_request(struct request_queue *q) break; } else if (ret == BLKPREP_KILL) { rq->cmd_flags |= REQ_QUIET; - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); + __blk_end_request_all(rq, -EIO); } else { printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); break; diff --git a/block/elevator.c b/block/elevator.c index b03b8752e18..1af5d9f04af 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -810,7 +810,7 @@ void elv_abort_queue(struct request_queue *q) rq = list_entry_rq(q->queue_head.next); rq->cmd_flags |= REQ_QUIET; trace_block_rq_abort(q, rq); - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); + __blk_end_request_all(rq, -EIO); } } EXPORT_SYMBOL(elv_abort_queue); diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index ca268ca1115..488a8f4a60a 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -1024,8 +1024,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout) cmd->req.sg[i].size, ddir); DBGPX(printk("Done with %p\n", rq);); - if (__blk_end_request(rq, error, blk_rq_bytes(rq))) - BUG(); + __blk_end_request_all(rq, error); } /* diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index ff0448e4bf0..60e85bb6f79 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host, struct request *req = crq->rq; int rc; - rc = __blk_end_request(req, error, blk_rq_bytes(req)); - assert(rc == 0); + __blk_end_request_all(req, error); rc = carm_put_request(host, crq); assert(rc == 0); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5d34764c8a8..50745e64414 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -62,7 +62,7 @@ static void blk_done(struct virtqueue *vq) break; } - __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req)); + __blk_end_request_all(vbr->req, error); list_del(&vbr->list); mempool_free(vbr, vblk->pool); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8f905089b72..cd6cfe3b51e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) for (i = info->ring.rsp_cons; i != rp; i++) { unsigned long id; - int ret; bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; @@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - ret = __blk_end_request(req, error, blk_rq_bytes(req)); - BUG_ON(ret); + __blk_end_request_all(req, error); break; default: BUG(); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 2eecb779437..fee9a9e83fc 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -632,7 +632,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) * before handling ending the request */ spin_lock(&gdrom_lock); list_del_init(&req->queuelist); - __blk_end_request(req, err, blk_rq_bytes(req)); + __blk_end_request_all(req, err); } spin_unlock(&gdrom_lock); kfree(read_command); diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 13929356135..cc3efa096e1 100644 --- 
a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -291,23 +291,6 @@ static int send_request(struct request *req) return 0; } -static void viocd_end_request(struct request *req, int error) -{ - int nsectors = req->hard_nr_sectors; - - /* - * Make sure it's fully ended, and ensure that we process - * at least one sector. - */ - if (blk_pc_request(req)) - nsectors = (req->data_len + 511) >> 9; - if (!nsectors) - nsectors = 1; - - if (__blk_end_request(req, error, nsectors << 9)) - BUG(); -} - static int rwreq; static void do_viocd_request(struct request_queue *q) @@ -316,11 +299,11 @@ static void do_viocd_request(struct request_queue *q) while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { if (!blk_fs_request(req)) - viocd_end_request(req, -EIO); + __blk_end_request_all(req, -EIO); else if (send_request(req) < 0) { printk(VIOCD_KERN_WARNING "unable to send message to OS/400!"); - viocd_end_request(req, -EIO); + __blk_end_request_all(req, -EIO); } else rwreq++; } @@ -531,9 +514,9 @@ return_complete: "with rc %d:0x%04X: %s\n", req, event->xRc, bevent->sub_result, err->msg); - viocd_end_request(req, -EIO); + __blk_end_request_all(req, -EIO); } else - viocd_end_request(req, 0); + __blk_end_request_all(req, 0); /* restart handling of incoming requests */ spin_unlock_irqrestore(&viocd_reqlock, flags); diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index de143deb06f..a41634699f8 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -826,7 +826,7 @@ static void mspro_block_submit_req(struct request_queue *q) if (msb->eject) { while ((req = elv_next_request(q)) != NULL) - __blk_end_request(req, -ENODEV, blk_rq_bytes(req)); + __blk_end_request_all(req, -ENODEV); return; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index d1815272c43..fabec95686b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1613,15 +1613,6 @@ void dasd_block_clear_timer(struct dasd_block *block) del_timer(&block->timer); } -/* - * posts the buffer_cache about a finalized request - */ -static inline void dasd_end_request(struct request *req, int error) -{ - if (__blk_end_request(req, error, blk_rq_bytes(req))) - BUG(); -} - /* * Process finished error recovery ccw. */ @@ -1676,7 +1667,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) "Rejecting write request %p", req); blkdev_dequeue_request(req); - dasd_end_request(req, -EIO); + __blk_end_request_all(req, -EIO); continue; } cqr = basedev->discipline->build_cp(basedev, block, req); @@ -1705,7 +1696,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) "on request %p", PTR_ERR(cqr), req); blkdev_dequeue_request(req); - dasd_end_request(req, -EIO); + __blk_end_request_all(req, -EIO); continue; } /* @@ -1731,7 +1722,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) status = cqr->block->base->discipline->free_cp(cqr, req); if (status <= 0) error = status ? 
status : -EIO; - dasd_end_request(req, error); + __blk_end_request_all(req, error); } /* @@ -2040,7 +2031,7 @@ static void dasd_flush_request_queue(struct dasd_block *block) spin_lock_irq(&block->request_queue_lock); while ((req = elv_next_request(block->request_queue))) { blkdev_dequeue_request(req); - dasd_end_request(req, -EIO); + __blk_end_request_all(req, -EIO); } spin_unlock_irq(&block->request_queue_lock); } diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index f32e89e7c4f..86596d3813b 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -73,13 +73,6 @@ tapeblock_trigger_requeue(struct tape_device *device) /* * Post finished request. */ -static void -tapeblock_end_request(struct request *req, int error) -{ - if (blk_end_request(req, error, blk_rq_bytes(req))) - BUG(); -} - static void __tapeblock_end_request(struct tape_request *ccw_req, void *data) { @@ -90,7 +83,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data) device = ccw_req->device; req = (struct request *) data; - tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); + blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO); if (ccw_req->rc == 0) /* Update position. */ device->blk_data.block_position = @@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req) ccw_req = device->discipline->bread(device, req); if (IS_ERR(ccw_req)) { DBF_EVENT(1, "TBLOCK: bread failed\n"); - tapeblock_end_request(req, -EIO); + blk_end_request_all(req, -EIO); return PTR_ERR(ccw_req); } ccw_req->callback = __tapeblock_end_request; @@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req) * Start/enqueueing failed. No retries in * this case. */ - tapeblock_end_request(req, -EIO); + blk_end_request_all(req, -EIO); device->discipline->free_bread(ccw_req); } @@ -177,7 +170,7 @@ tapeblock_requeue(struct work_struct *work) { DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); blkdev_dequeue_request(req); spin_unlock_irq(&device->blk_data.request_queue_lock); - tapeblock_end_request(req, -EIO); + blk_end_request_all(req, -EIO); spin_lock_irq(&device->blk_data.request_queue_lock); continue; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d1cb64ad1a3..756ac7c93de 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -922,7 +922,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (driver_byte(result) & DRIVER_SENSE) scsi_print_sense("", cmd); } - blk_end_request(req, -EIO, blk_rq_bytes(req)); + blk_end_request_all(req, -EIO); scsi_next_command(cmd); break; case ACTION_REPREP: diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 501f6845cc7..e33c8356b3d 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -882,6 +882,22 @@ static inline bool blk_end_request(struct request *rq, int error, return blk_end_bidi_request(rq, error, nr_bytes, 0); } +/** + * blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @err: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. + */ +static inline void blk_end_request_all(struct request *rq, int error) +{ + bool pending; + + pending = blk_end_request(rq, error, blk_rq_bytes(rq)); + BUG_ON(pending); +} + /** * __blk_end_request - Helper function for drivers to complete the request. 
* @rq: the request being processed @@ -901,6 +917,22 @@ static inline bool __blk_end_request(struct request *rq, int error, return __blk_end_bidi_request(rq, error, nr_bytes, 0); } +/** + * __blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @err: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. Must be called with queue lock held. + */ +static inline void __blk_end_request_all(struct request *rq, int error) +{ + bool pending; + + pending = __blk_end_request(rq, error, blk_rq_bytes(rq)); + BUG_ON(pending); +} + /** * end_request - end I/O on the current segment of the request * @rq: the request being processed -- cgit v1.2.3-70-g09d2 From f06d9a2b52e246a66b606130cea3f0d7b7be17a7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:19 +0900 Subject: block: replace end_request() with [__]blk_end_request_cur() end_request() has been kept around for backward compatibility; however, it's about time for it to go away. * There aren't too many users left. * Its use of @uptodate is pretty confusing. * In some cases, newer code ends up using a mixture of end_request() and [__]blk_end_request[_all](), which is way too confusing. So, add [__]blk_end_request_cur() and replace end_request() with it. Most conversions are straightforward. Noteworthy ones are... * paride/pcd: next_request() updated to take 0/-errno instead of 1/0. * paride/pf: pf_end_request() and next_request() updated to take 0/-errno instead of 1/0. * xd: xd_readwrite() updated to return 0/-errno instead of 1/0. * mtd/mtd_blkdevs: blktrans_discard_request() updated to return 0/-errno instead of 1/0. Unnecessary local variable res initialization removed from mtd_blktrans_thread(). [ Impact: cleanup ] Signed-off-by: Tejun Heo Acked-by: Joerg Dorchain Acked-by: Geert Uytterhoeven Acked-by: Grant Likely Acked-by: Laurent Vivier Cc: Tim Waugh Cc: Stephen Rothwell Cc: Paul Mackerras Cc: Jeremy Fitzhardinge Cc: Markus Lidel Cc: David Woodhouse Cc: Pete Zaitcev Cc: unsik Kim --- drivers/block/amiflop.c | 10 ++++----- drivers/block/ataflop.c | 14 ++++++------- drivers/block/hd.c | 14 ++++++------- drivers/block/mg_disk.c | 16 +++++++------- drivers/block/paride/pcd.c | 12 +++++------ drivers/block/paride/pd.c | 5 +++-- drivers/block/paride/pf.c | 28 ++++++++++++------------- drivers/block/ps3disk.c | 6 +++--- drivers/block/swim.c | 14 ++++++------- drivers/block/swim3.c | 26 +++++++++++------------ drivers/block/xd.c | 15 +++++++------- drivers/block/xen-blkfront.c | 2 +- drivers/block/xsysace.c | 4 ++-- drivers/block/z2ram.c | 4 ++-- drivers/cdrom/gdrom.c | 6 +++--- drivers/message/i2o/i2o_block.c | 2 +- drivers/mtd/mtd_blkdevs.c | 22 ++++++++++---------- drivers/sbus/char/jsflash.c | 8 +++---- include/linux/blkdev.h | 46 ++++++++++++++++++++--------------------- 19 files changed, 128 insertions(+), 126 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 8df436ff706..b99a2a606d0 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1359,7 +1359,7 @@ static void redo_fd_request(void) #endif block = CURRENT->sector + cnt; if ((int)block > floppy->blocks) { - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } @@ -1373,11 +1373,11 @@ static void redo_fd_request(void) if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) { printk(KERN_WARNING "do_fd_request: unknown command\n"); - end_request(CURRENT, 0); + 
__blk_end_request_cur(CURRENT, -EIO); goto repeat; } if (get_track(drive, track) == -1) { - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } @@ -1391,7 +1391,7 @@ static void redo_fd_request(void) /* keep the drive spinning while writes are scheduled */ if (!fd_motor_on(drive)) { - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } /* @@ -1410,7 +1410,7 @@ static void redo_fd_request(void) CURRENT->nr_sectors -= CURRENT->current_nr_sectors; CURRENT->sector += CURRENT->current_nr_sectors; - end_request(CURRENT, 1); + __blk_end_request_cur(CURRENT, 0); goto repeat; } diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 4234c11c1e4..44a8702136a 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -612,7 +612,7 @@ static void fd_error( void ) CURRENT->errors++; if (CURRENT->errors >= MAX_ERRORS) { printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); } else if (CURRENT->errors == RECALIBRATE_ERRORS) { printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); @@ -734,7 +734,7 @@ static void do_fd_action( int drive ) /* all sectors finished */ CURRENT->nr_sectors -= CURRENT->current_nr_sectors; CURRENT->sector += CURRENT->current_nr_sectors; - end_request(CURRENT, 1); + __blk_end_request_cur(CURRENT, 0); redo_fd_request(); return; } @@ -1141,7 +1141,7 @@ static void fd_rwsec_done1(int status) /* all sectors finished */ CURRENT->nr_sectors -= CURRENT->current_nr_sectors; CURRENT->sector += CURRENT->current_nr_sectors; - end_request(CURRENT, 1); + __blk_end_request_cur(CURRENT, 0); redo_fd_request(); } return; @@ -1414,7 +1414,7 @@ repeat: if (!UD.connected) { /* drive not connected */ printk(KERN_ERR "Unknown Device: fd%d\n", drive ); - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } @@ -1430,12 +1430,12 @@ repeat: /* user supplied disk type */ if (--type >= NUM_DISK_MINORS) { printk(KERN_WARNING "fd%d: invalid disk format", drive ); - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } if (minor2disktype[type].drive_types > DriveType) { printk(KERN_WARNING "fd%d: unsupported disk format", drive ); - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } type = minor2disktype[type].index; @@ -1445,7 +1445,7 @@ repeat: } if (CURRENT->sector + 1 > UDT->blocks) { - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); goto repeat; } diff --git a/drivers/block/hd.c b/drivers/block/hd.c index baaa9e486e5..5cb300b81c6 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -410,7 +410,7 @@ static void bad_rw_intr(void) if (req != NULL) { struct hd_i_struct *disk = req->rq_disk->private_data; if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); disk->special_op = disk->recalibrate = 1; } else if (req->errors % RESET_FREQ == 0) reset = 1; @@ -466,7 +466,7 @@ ok_to_read: req->buffer+512); #endif if (req->current_nr_sectors <= 0) - end_request(req, 1); + __blk_end_request_cur(req, 0); if (i > 0) { SET_HANDLER(&read_intr); return; @@ -505,7 +505,7 @@ ok_to_write: --req->current_nr_sectors; req->buffer += 512; if (!i || (req->bio && req->current_nr_sectors <= 0)) - end_request(req, 1); + __blk_end_request_cur(req, 0); if (i > 0) { SET_HANDLER(&write_intr); outsw(HD_DATA, req->buffer, 256); @@ -548,7 +548,7 @@ static void hd_times_out(unsigned long dummy) #ifdef DEBUG printk("%s: 
too many errors\n", name); #endif - end_request(CURRENT, 0); + __blk_end_request_cur(CURRENT, -EIO); } hd_request(); spin_unlock_irq(hd_queue->queue_lock); @@ -563,7 +563,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req) } if (disk->head > 16) { printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } disk->special_op = 0; return 1; @@ -607,7 +607,7 @@ repeat: ((block+nsect) > get_capacity(req->rq_disk))) { printk("%s: bad access: block=%d, count=%d\n", req->rq_disk->disk_name, block, nsect); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); goto repeat; } @@ -647,7 +647,7 @@ repeat: break; default: printk("unknown hd-command\n"); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); break; } } diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index f3898353d0a..408c2bd8a43 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -285,7 +285,7 @@ static void mg_bad_rw_intr(struct mg_host *host) if (req != NULL) if (++req->errors >= MG_MAX_ERRORS || host->error == MG_ERR_TIMEOUT) - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } static unsigned int mg_out(struct mg_host *host, @@ -351,7 +351,7 @@ static void mg_read(struct request *req) if (req->current_nr_sectors <= 0) { MG_DBG("remain : %d sects\n", remains); - end_request(req, 1); + __blk_end_request_cur(req, 0); if (remains > 0) req = elv_next_request(host->breq); } @@ -395,7 +395,7 @@ static void mg_write(struct request *req) if (req->current_nr_sectors <= 0) { MG_DBG("remain : %d sects\n", remains); - end_request(req, 1); + __blk_end_request_cur(req, 0); if (remains > 0) req = elv_next_request(host->breq); } @@ -448,7 +448,7 @@ ok_to_read: /* let know if current segment done */ if (req->current_nr_sectors <= 0) - end_request(req, 1); + __blk_end_request_cur(req, 0); /* set handler if read remains */ if (i > 0) { @@ -497,7 +497,7 @@ ok_to_write: /* let know if current segment or all done */ if (!i || (req->bio && req->current_nr_sectors <= 0)) - end_request(req, 1); + __blk_end_request_cur(req, 0); /* write 1 sector and set handler if remains */ if (i > 0) { @@ -563,7 +563,7 @@ static void mg_request_poll(struct request_queue *q) default: printk(KERN_WARNING "%s:%d unknown command\n", __func__, __LINE__); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); break; } } @@ -617,7 +617,7 @@ static unsigned int mg_issue_req(struct request *req, default: printk(KERN_WARNING "%s:%d unknown command\n", __func__, __LINE__); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); break; } return MG_ERR_NONE; @@ -655,7 +655,7 @@ static void mg_request(struct request_queue *q) "%s: bad access: sector=%d, count=%d\n", req->rq_disk->disk_name, sect_num, sect_cnt); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index e91d4b4b014..9fd57c2aa46 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -735,16 +735,16 @@ static void do_pcd_request(struct request_queue * q) ps_set_intr(do_pcd_read, NULL, 0, nice); return; } else - end_request(pcd_req, 0); + __blk_end_request_cur(pcd_req, -EIO); } } -static inline void next_request(int success) +static inline void next_request(int err) { unsigned long saved_flags; spin_lock_irqsave(&pcd_lock, saved_flags); - end_request(pcd_req, success); + __blk_end_request_cur(pcd_req, err); pcd_busy = 0; 
do_pcd_request(pcd_queue); spin_unlock_irqrestore(&pcd_lock, saved_flags); @@ -781,7 +781,7 @@ static void pcd_start(void) if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { pcd_bufblk = -1; - next_request(0); + next_request(-EIO); return; } @@ -796,7 +796,7 @@ static void do_pcd_read(void) pcd_retries = 0; pcd_transfer(); if (!pcd_count) { - next_request(1); + next_request(0); return; } @@ -815,7 +815,7 @@ static void do_pcd_read_drq(void) return; } pcd_bufblk = -1; - next_request(0); + next_request(-EIO); return; } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 9299455b0af..0732df4e901 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -410,7 +410,8 @@ static void run_fsm(void) pd_claimed = 0; phase = NULL; spin_lock_irqsave(&pd_lock, saved_flags); - end_request(pd_req, res); + __blk_end_request_cur(pd_req, + res == Ok ? 0 : -EIO); pd_req = elv_next_request(pd_queue); if (!pd_req) stop = 1; @@ -477,7 +478,7 @@ static int pd_next_buf(void) if (pd_count) return 0; spin_lock_irqsave(&pd_lock, saved_flags); - end_request(pd_req, 1); + __blk_end_request_cur(pd_req, 0); pd_count = pd_req->current_nr_sectors; pd_buf = pd_req->buffer; spin_unlock_irqrestore(&pd_lock, saved_flags); diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index bef3b997ba3..3871e3586d6 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -750,10 +750,10 @@ static int pf_ready(void) static struct request_queue *pf_queue; -static void pf_end_request(int uptodate) +static void pf_end_request(int err) { if (pf_req) { - end_request(pf_req, uptodate); + __blk_end_request_cur(pf_req, err); pf_req = NULL; } } @@ -773,7 +773,7 @@ repeat: pf_count = pf_req->current_nr_sectors; if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { - pf_end_request(0); + pf_end_request(-EIO); goto repeat; } @@ -788,7 +788,7 @@ repeat: pi_do_claimed(pf_current->pi, do_pf_write); else { pf_busy = 0; - pf_end_request(0); + pf_end_request(-EIO); goto repeat; } } @@ -805,7 +805,7 @@ static int pf_next_buf(void) return 1; if (!pf_count) { spin_lock_irqsave(&pf_spin_lock, saved_flags); - pf_end_request(1); + pf_end_request(0); pf_req = elv_next_request(pf_queue); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); if (!pf_req) @@ -816,12 +816,12 @@ static int pf_next_buf(void) return 0; } -static inline void next_request(int success) +static inline void next_request(int err) { unsigned long saved_flags; spin_lock_irqsave(&pf_spin_lock, saved_flags); - pf_end_request(success); + pf_end_request(err); pf_busy = 0; do_pf_request(pf_queue); spin_unlock_irqrestore(&pf_spin_lock, saved_flags); @@ -844,7 +844,7 @@ static void do_pf_read_start(void) pi_do_claimed(pf_current->pi, do_pf_read_start); return; } - next_request(0); + next_request(-EIO); return; } pf_mask = STAT_DRQ; @@ -863,7 +863,7 @@ static void do_pf_read_drq(void) pi_do_claimed(pf_current->pi, do_pf_read_start); return; } - next_request(0); + next_request(-EIO); return; } pi_read_block(pf_current->pi, pf_buf, 512); @@ -871,7 +871,7 @@ static void do_pf_read_drq(void) break; } pi_disconnect(pf_current->pi); - next_request(1); + next_request(0); } static void do_pf_write(void) @@ -890,7 +890,7 @@ static void do_pf_write_start(void) pi_do_claimed(pf_current->pi, do_pf_write_start); return; } - next_request(0); + next_request(-EIO); return; } @@ -903,7 +903,7 @@ static void do_pf_write_start(void) pi_do_claimed(pf_current->pi, do_pf_write_start); return; } - next_request(0); + next_request(-EIO); return; 
} pi_write_block(pf_current->pi, pf_buf, 512); @@ -923,11 +923,11 @@ static void do_pf_write_done(void) pi_do_claimed(pf_current->pi, do_pf_write_start); return; } - next_request(0); + next_request(-EIO); return; } pi_disconnect(pf_current->pi); - next_request(1); + next_request(0); } static int __init pf_init(void) diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index bccc42bb921..d23b54bc2f5 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, if (res) { dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, __LINE__, op, res); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); return 0; } @@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev, if (res) { dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", __func__, __LINE__, res); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); return 0; } @@ -205,7 +205,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, break; } else { blk_dump_rq_flags(req, DEVICE_NAME " bad request"); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } } diff --git a/drivers/block/swim.c b/drivers/block/swim.c index d22cc385693..6544a7b06bf 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -532,39 +532,39 @@ static void redo_fd_request(struct request_queue *q) fs = req->rq_disk->private_data; if (req->sector < 0 || req->sector >= fs->total_secs) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if (req->current_nr_sectors == 0) { - end_request(req, 1); + __blk_end_request_cur(req, 0); continue; } if (!fs->disk_in) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if (rq_data_dir(req) == WRITE) { if (fs->write_protected) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } } switch (rq_data_dir(req)) { case WRITE: /* NOT IMPLEMENTED */ - end_request(req, 0); + __blk_end_request_cur(req, -EIO); break; case READ: if (floppy_read_sectors(fs, req->sector, req->current_nr_sectors, req->buffer)) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } req->nr_sectors -= req->current_nr_sectors; req->sector += req->current_nr_sectors; req->buffer += req->current_nr_sectors * 512; - end_request(req, 1); + __blk_end_request_cur(req, 0); break; } } diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 612965307ba..5904f7b73c6 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -320,15 +320,15 @@ static void start_request(struct floppy_state *fs) #endif if (req->sector < 0 || req->sector >= fs->total_secs) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if (req->current_nr_sectors == 0) { - end_request(req, 1); + __blk_end_request_cur(req, 0); continue; } if (fs->ejected) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } @@ -336,7 +336,7 @@ static void start_request(struct floppy_state *fs) if (fs->write_prot < 0) fs->write_prot = swim3_readbit(fs, WRITE_PROT); if (fs->write_prot) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } } @@ -508,7 +508,7 @@ static void act(struct floppy_state *fs) case do_transfer: if (fs->cur_cyl != fs->req_cyl) { if (fs->retries > 5) { - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; return; } @@ -540,7 +540,7 @@ static void scan_timeout(unsigned long data) out_8(&sw->intr_enable, 0); fs->cur_cyl = -1; if 
(fs->retries > 5) { - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; start_request(fs); } else { @@ -559,7 +559,7 @@ static void seek_timeout(unsigned long data) out_8(&sw->select, RELAX); out_8(&sw->intr_enable, 0); printk(KERN_ERR "swim3: seek timeout\n"); - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; start_request(fs); } @@ -583,7 +583,7 @@ static void settle_timeout(unsigned long data) return; } printk(KERN_ERR "swim3: seek settle timeout\n"); - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; start_request(fs); } @@ -615,7 +615,7 @@ static void xfer_timeout(unsigned long data) fd_req->current_nr_sectors -= s; printk(KERN_ERR "swim3: timeout %sing sector %ld\n", (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; start_request(fs); } @@ -646,7 +646,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); fs->cur_cyl = -1; if (fs->retries > 5) { - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; start_request(fs); } else { @@ -731,7 +731,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) printk("swim3: error %sing block %ld (err=%x)\n", rq_data_dir(fd_req) == WRITE? "writ": "read", (long)fd_req->sector, err); - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; } } else { @@ -740,7 +740,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", fs->state, rq_data_dir(fd_req), intr, err); - end_request(fd_req, 0); + __blk_end_request_cur(fd_req, -EIO); fs->state = idle; start_request(fs); break; @@ -749,7 +749,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id) fd_req->current_nr_sectors -= fs->scount; fd_req->buffer += fs->scount * 512; if (fd_req->current_nr_sectors <= 0) { - end_request(fd_req, 1); + __blk_end_request_cur(fd_req, 0); fs->state = idle; } else { fs->req_sector += fs->scount; diff --git a/drivers/block/xd.c b/drivers/block/xd.c index 64b496fce98..6f6ad82ec0c 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -314,21 +314,22 @@ static void do_xd_request (struct request_queue * q) int retry; if (!blk_fs_request(req)) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if (block + count > get_capacity(req->rq_disk)) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if (rw != READ && rw != WRITE) { printk("do_xd_request: unknown request\n"); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } for (retry = 0; (retry < XD_RETRIES) && !res; retry++) res = xd_readwrite(rw, disk, req->buffer, block, count); - end_request(req, res); /* wrap up, 0 = fail, 1 = success */ + /* wrap up, 0 = success, -errno = fail */ + __blk_end_request_cur(req, res); } } @@ -418,7 +419,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_ printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); xd_recalibrate(drive); spin_lock_irq(&xd_lock); - return (0); + return -EIO; case 2: if (sense[0] & 0x30) { printk("xd%c: %s - ",'a'+drive,(operation == READ ? 
"reading" : "writing")); @@ -439,7 +440,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_ else printk(" - no valid disk address\n"); spin_lock_irq(&xd_lock); - return (0); + return -EIO; } if (xd_dma_buffer) for (i=0; i < (temp * 0x200); i++) @@ -448,7 +449,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_ count -= temp, buffer += temp * 0x200, block += temp; } spin_lock_irq(&xd_lock); - return (1); + return 0; } /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index cd6cfe3b51e..b4564479f64 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -302,7 +302,7 @@ static void do_blkif_request(struct request_queue *rq) while ((req = elv_next_request(rq)) != NULL) { info = req->rq_disk->private_data; if (!blk_fs_request(req)) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 4aecf5dc6a9..b1e1d7e5ab1 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -466,7 +466,7 @@ struct request *ace_get_next_request(struct request_queue * q) while ((req = elv_next_request(q)) != NULL) { if (blk_fs_request(req)) break; - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } return req; } @@ -494,7 +494,7 @@ static void ace_fsm_dostate(struct ace_device *ace) /* Drop all pending requests */ while ((req = elv_next_request(ace->queue)) != NULL) - end_request(req, 0); + __blk_end_request_cur(req, -EIO); /* Drop back to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 80754cdd311..b66ad58a3c3 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -77,7 +77,7 @@ static void do_z2_request(struct request_queue *q) if (start + len > z2ram_size) { printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", req->sector, req->current_nr_sectors); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } while (len) { @@ -93,7 +93,7 @@ static void do_z2_request(struct request_queue *q) start += size; len -= size; } - end_request(req, 1); + __blk_end_request_cur(req, 0); } } diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index fee9a9e83fc..cab2b1fb2fe 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -654,17 +654,17 @@ static void gdrom_request(struct request_queue *rq) while ((req = elv_next_request(rq)) != NULL) { if (!blk_fs_request(req)) { printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } if (rq_data_dir(req) != READ) { printk(KERN_NOTICE "GDROM: Read only device -"); printk(" write request ignored\n"); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } if (req->nr_sectors) gdrom_request_handler_dma(req); else - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } } diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index a443e136dc4..221317e6a00 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -923,7 +923,7 @@ static void i2o_block_request_fn(struct request_queue *q) break; } } else - end_request(req, 0); + __blk_end_request_cur(req, -EIO); } }; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index a49a9c8f2cb..76c4c8d1307 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ 
-54,33 +54,33 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && req->cmd[0] == REQ_LB_OP_DISCARD) - return !tr->discard(dev, block, nsect); + return tr->discard(dev, block, nsect); if (!blk_fs_request(req)) - return 0; + return -EIO; if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) - return 0; + return -EIO; switch(rq_data_dir(req)) { case READ: for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->readsect(dev, block, buf)) - return 0; - return 1; + return -EIO; + return 0; case WRITE: if (!tr->writesect) - return 0; + return -EIO; for (; nsect > 0; nsect--, block++, buf += tr->blksize) if (tr->writesect(dev, block, buf)) - return 0; - return 1; + return -EIO; + return 0; default: printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); - return 0; + return -EIO; } } @@ -96,7 +96,7 @@ static int mtd_blktrans_thread(void *arg) while (!kthread_should_stop()) { struct request *req; struct mtd_blktrans_dev *dev; - int res = 0; + int res; req = elv_next_request(rq); @@ -119,7 +119,7 @@ static int mtd_blktrans_thread(void *arg) spin_lock_irq(rq->queue_lock); - end_request(req, res); + __blk_end_request_cur(req, res); } spin_unlock_irq(rq->queue_lock); diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index a85ad05e854..09617884a50 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -192,25 +192,25 @@ static void jsfd_do_request(struct request_queue *q) size_t len = req->current_nr_sectors << 9; if ((offset + len) > jdp->dsize) { - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if (rq_data_dir(req) != READ) { printk(KERN_ERR "jsfd: write\n"); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } if ((jdp->dbase & 0xff000000) != 0x20000000) { printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); - end_request(req, 0); + __blk_end_request_cur(req, -EIO); continue; } jsfd_read(req->buffer, jdp->dbase + offset, len); - end_request(req, 1); + __blk_end_request_cur(req, 0); } } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e33c8356b3d..cfeb3c2feb2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -845,9 +845,8 @@ extern unsigned int blk_rq_cur_bytes(struct request *rq); * blk_update_request() completes given number of bytes and updates * the request without completing it. * - * blk_end_request() and friends. __blk_end_request() and - * end_request() must be called with the request queue spinlock - * acquired. + * blk_end_request() and friends. __blk_end_request() must be called + * with the request queue spinlock acquired. * * Several drivers define their own end_request and call * blk_end_request() for parts of the original function. @@ -898,6 +897,19 @@ static inline void blk_end_request_all(struct request *rq, int error) BUG_ON(pending); } +/** + * blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @err: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. + */ +static inline void blk_end_request_cur(struct request *rq, int error) +{ + blk_end_request(rq, error, rq->hard_cur_sectors << 9); +} + /** * __blk_end_request - Helper function for drivers to complete the request. 
* @rq: the request being processed @@ -934,29 +946,17 @@ static inline void __blk_end_request_all(struct request *rq, int error) } /** - * end_request - end I/O on the current segment of the request - * @rq: the request being processed - * @uptodate: error value or %0/%1 uptodate flag + * __blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @err: %0 for success, < %0 for error * * Description: - * Ends I/O on the current segment of a request. If that is the only - * remaining segment, the request is also completed and freed. - * - * This is a remnant of how older block drivers handled I/O completions. - * Modern drivers typically end I/O on the full request in one go, unless - * they have a residual value to account for. For that case this function - * isn't really useful, unless the residual just happens to be the - * full current segment. In other words, don't use this function in new - * code. Use blk_end_request() or __blk_end_request() to end a request. - **/ -static inline void end_request(struct request *rq, int uptodate) + * Complete the current consecutively mapped chunk from @rq. Must + * be called with queue lock held. + */ +static inline void __blk_end_request_cur(struct request *rq, int error) { - int error = 0; - - if (uptodate <= 0) - error = uptodate ? uptodate : -EIO; - - __blk_end_bidi_request(rq, error, rq->hard_cur_sectors << 9, 0); + __blk_end_request(rq, error, rq->hard_cur_sectors << 9); } extern void blk_complete_request(struct request *); -- cgit v1.2.3-70-g09d2 From 731ec497e5888c6792ad62613ae9be97eebcd7ca Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 23 Apr 2009 11:05:20 +0900 Subject: block: kill rq->data Now that all block request data transfer is done via bio, rq->data isn't used. Kill it. While at it, make the roles of rq->special and buffer clear. 
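To make the new division of labor concrete: once rq->data is gone, a kernel buffer reaches a request only by being mapped into a bio. Below is a minimal sketch under that assumption - hypothetical driver code, not part of the patch; send_cmd() and struct my_ctx are invented names, while blk_get_request(), blk_rq_map_kern() and blk_execute_rq() are the interfaces of this era (the blk-map.c hunks below show blk_rq_map_kern() now clearing only rq->buffer):

#include <linux/blkdev.h>

struct my_ctx;	/* invented driver-private type */

/*
 * blk_rq_map_kern() builds a bio for @buf and points rq->buffer at the
 * current segment; rq->special stays free as the opaque LLD pointer.
 */
static int send_cmd(struct request_queue *q, struct gendisk *disk,
		    void *buf, unsigned int len, struct my_ctx *ctx)
{
	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
	int err;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* rq->cmd[] setup omitted */
	rq->special = ctx;			/* driver-private context */

	err = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);	/* waits for I/O */

	blk_put_request(rq);
	return err;
}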
[ Impact: drop now unnecessary field from struct request ] Signed-off-by: Tejun Heo Cc: Boaz Harrosh --- block/blk-core.c | 5 ++--- block/blk-map.c | 6 +++--- block/scsi_ioctl.c | 1 - drivers/scsi/scsi_lib.c | 1 - include/linux/blkdev.h | 5 ++--- 5 files changed, 7 insertions(+), 11 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 0520cc70458..6dd180cf15d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -189,10 +189,9 @@ void blk_dump_rq_flags(struct request *rq, char *msg) (unsigned long long)rq->sector, rq->nr_sectors, rq->current_nr_sectors); - printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", + printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", rq->bio, rq->biotail, - rq->buffer, rq->data, - rq->data_len); + rq->buffer, rq->data_len); if (blk_pc_request(rq)) { printk(KERN_INFO " cdb: "); diff --git a/block/blk-map.c b/block/blk-map.c index f103729b462..694fefad34e 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -156,7 +156,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; - rq->buffer = rq->data = NULL; + rq->buffer = NULL; return 0; unmap_rq: blk_rq_unmap_user(bio); @@ -235,7 +235,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, blk_queue_bounce(q, &bio); bio_get(bio); blk_rq_bio_prep(q, rq, bio); - rq->buffer = rq->data = NULL; + rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -313,7 +313,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, blk_rq_bio_prep(q, rq, bio); blk_queue_bounce(q, &rq->bio); - rq->buffer = rq->data = NULL; + rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 82a0ca2f672..bb9aa43551b 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -500,7 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, rq = blk_get_request(q, WRITE, __GFP_WAIT); rq->cmd_type = REQ_TYPE_BLOCK_PC; - rq->data = NULL; rq->data_len = 0; rq->extra_len = 0; rq->timeout = BLK_DEFAULT_SG_TIMEOUT; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 756ac7c93de..aa9fc572e45 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1088,7 +1088,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) return ret; } else { BUG_ON(req->data_len); - BUG_ON(req->data); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); req->buffer = NULL; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cfeb3c2feb2..12c545e2737 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -211,8 +211,8 @@ struct request { unsigned short ioprio; - void *special; - char *buffer; + void *special; /* opaque pointer available for LLD use */ + char *buffer; /* kaddr of the current segment if available */ int tag; int errors; @@ -229,7 +229,6 @@ struct request { unsigned int data_len; unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; - void *data; void *sense; unsigned long deadline; -- cgit v1.2.3-70-g09d2 From 9fd8d0e1bcb848257968d9a7d73ca4d890ea8bd1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 28 Apr 2009 13:06:04 +0900 Subject: block: make blk_end_request_cur() return bool In the process of mindlessly copying [__]blk_end_request_all(), [__]blk_end_request_cur() ended up returning void even though they're partial completion functions.
Fix it. [ Impact: fix braindead API ] Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 12c545e2737..3a5b1bd6582 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -903,10 +903,14 @@ static inline void blk_end_request_all(struct request *rq, int error) * * Description: * Complete the current consecutively mapped chunk from @rq. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request */ -static inline void blk_end_request_cur(struct request *rq, int error) +static inline bool blk_end_request_cur(struct request *rq, int error) { - blk_end_request(rq, error, rq->hard_cur_sectors << 9); + return blk_end_request(rq, error, rq->hard_cur_sectors << 9); } /** @@ -952,10 +956,14 @@ static inline void __blk_end_request_all(struct request *rq, int error) * Description: * Complete the current consecutively mapped chunk from @rq. Must * be called with queue lock held. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request */ -static inline void __blk_end_request_cur(struct request *rq, int error) +static inline bool __blk_end_request_cur(struct request *rq, int error) { - __blk_end_request(rq, error, rq->hard_cur_sectors << 9); + return __blk_end_request(rq, error, rq->hard_cur_sectors << 9); } extern void blk_complete_request(struct request *); -- cgit v1.2.3-70-g09d2 From c3a4d78c580de4edc9ef0f7c59812fb02ceb037f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 7 May 2009 22:24:37 +0900 Subject: block: add rq->resid_len rq->data_len served two purposes - the length of the data buffer on issue and the residual count on completion. This duality creates some headaches. First of all, the block layer and low level drivers can't really determine what rq->data_len contains while a request is executing. It could be the total request length or it could be anything else one of the lower layers is using to keep track of the residual count. This complicates things because blk_rq_bytes() and thus [__]blk_end_request_all() rely on rq->data_len for PC commands. Drivers which want to report a residual count have to first cache the total request length, update rq->data_len and then complete the request with the cached data length. Secondly, it makes requests default to reporting the full residual count, i.e. reporting that no data transfer occurred. The residual count is an exception, not the norm; a driver has to clear rq->data_len to zero to signify the normal case, while leaving it alone means that no data was transferred at all. This reverse default behavior complicates code unnecessarily and renders block PC on some drivers (ide-tape/floppy) unusable. This patch adds rq->resid_len which is used only for the residual count. While at it, remove the now unnecessary blk_rq_bytes() caching in ide_pc_intr() as rq->data_len is not changed anymore. Boaz: spotted missing conversion in osd Sergei: spotted too early conversion to blk_rq_bytes() in ide-tape [ Impact: cleanup residual count handling, report 0 resid by default ] Signed-off-by: Tejun Heo Cc: James Bottomley Cc: Bartlomiej Zolnierkiewicz Cc: Borislav Petkov Cc: Sergei Shtylyov Cc: Mike Miller Cc: Eric Moore Cc: Alan Stern Cc: FUJITA Tomonori Cc: Doug Gilbert Cc: Mike Miller Cc: Eric Moore Cc: Darrick J.
Wong Cc: Pete Zaitcev Cc: Boaz Harrosh Signed-off-by: Jens Axboe --- block/bsg.c | 8 +++---- block/scsi_ioctl.c | 2 +- drivers/block/cciss.c | 13 ++++------- drivers/block/ub.c | 6 ++--- drivers/ide/ide-atapi.c | 9 +------- drivers/ide/ide-cd.c | 13 +++++------ drivers/ide/ide-tape.c | 4 ++-- drivers/message/fusion/mptsas.c | 3 +-- drivers/scsi/libsas/sas_expander.c | 6 +---- drivers/scsi/libsas/sas_host_smp.c | 38 +++++++++++++++++--------------- drivers/scsi/mpt2sas/mpt2sas_transport.c | 4 +--- drivers/scsi/scsi_lib.c | 24 ++++++++++---------- drivers/scsi/sg.c | 2 +- drivers/scsi/st.c | 2 +- fs/exofs/osd.c | 4 ++-- include/linux/blkdev.h | 1 + 16 files changed, 59 insertions(+), 80 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/bsg.c b/block/bsg.c index 206060e795d..2d746e34f4c 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -445,14 +445,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, } if (rq->next_rq) { - hdr->dout_resid = rq->data_len; - hdr->din_resid = rq->next_rq->data_len; + hdr->dout_resid = rq->resid_len; + hdr->din_resid = rq->next_rq->resid_len; blk_rq_unmap_user(bidi_bio); blk_put_request(rq->next_rq); } else if (rq_data_dir(rq) == READ) - hdr->din_resid = rq->data_len; + hdr->din_resid = rq->resid_len; else - hdr->dout_resid = rq->data_len; + hdr->dout_resid = rq->resid_len; /* * If the request generated a negative error number, return it diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 58cf4560f74..a9670dd4b5d 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, hdr->info = 0; if (hdr->masked_status || hdr->host_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; - hdr->resid = rq->data_len; + hdr->resid = rq->resid_len; hdr->sb_len_wr = 0; if (rq->sense_len && hdr->sbp) { diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 4d4d5e0d3fa..f22d4932433 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1299,7 +1299,6 @@ static void cciss_softirq_done(struct request *rq) { CommandList_struct *cmd = rq->completion_data; ctlr_info_t *h = hba[cmd->ctlr]; - unsigned int nr_bytes; unsigned long flags; u64bit temp64; int i, ddir; @@ -1321,15 +1320,11 @@ static void cciss_softirq_done(struct request *rq) printk("Done with %p\n", rq); #endif /* CCISS_DEBUG */ - /* - * Store the full size and set the residual count for pc requests - */ - nr_bytes = blk_rq_bytes(rq); + /* set the residual count for pc requests */ if (blk_pc_request(rq)) - rq->data_len = cmd->err_info->ResidualCnt; + rq->resid_len = cmd->err_info->ResidualCnt; - if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes)) - BUG(); + blk_end_request_all(rq, (rq->errors == 0) ? 
0 : -EIO); spin_lock_irqsave(&h->lock, flags); cmd_free(h, cmd, 1); @@ -2691,7 +2686,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, printk(KERN_WARNING "cciss: cmd %p has" " completed with data underrun " "reported\n", cmd); - cmd->rq->data_len = cmd->err_info->ResidualCnt; + cmd->rq->resid_len = cmd->err_info->ResidualCnt; } break; case CMD_DATA_OVERRUN: diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 689cd27ac89..8c2cc71327e 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -783,10 +783,8 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) if (cmd->error == 0) { if (blk_pc_request(rq)) { - if (cmd->act_len >= rq->data_len) - rq->data_len = 0; - else - rq->data_len -= cmd->act_len; + if (cmd->act_len < rq->data_len) + rq->resid_len = rq->data_len - cmd->act_len; scsi_status = 0; } else { if (cmd->act_len != cmd->len) { diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index afe5a432387..e4a02a05fc8 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -367,7 +367,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) /* No more interrupts */ if ((stat & ATA_DRQ) == 0) { int uptodate, error; - unsigned int done; debug_log("Packet command completed, %d bytes transferred\n", pc->xferred); @@ -406,12 +405,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0) dsc = 1; - /* - * ->pc_callback() might change rq->data_len for - * residual count, cache total length. - */ - done = blk_rq_bytes(rq); - /* Command finished - Call the callback function */ uptodate = drive->pc_callback(drive, dsc); @@ -431,7 +424,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) error = uptodate ? 0 : -EIO; } - ide_complete_rq(drive, error, done); + ide_complete_rq(drive, error, blk_rq_bytes(rq)); return ide_stopped; } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 673628790f1..8bbe222c5e4 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -519,7 +519,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, error = blk_execute_rq(drive->queue, info->disk, rq, 0); if (buffer) - *bufflen = rq->data_len; + *bufflen = rq->resid_len; flags = rq->cmd_flags; blk_put_request(rq); @@ -707,11 +707,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) out_end: if (blk_pc_request(rq) && rc == 0) { - unsigned int dlen = rq->data_len; - - rq->data_len = 0; - - if (blk_end_request(rq, 0, dlen)) + if (blk_end_request(rq, 0, rq->data_len)) BUG(); hwif->rq = NULL; @@ -740,9 +736,10 @@ out_end: nsectors = 1; if (blk_fs_request(rq) == 0) { - rq->data_len -= (cmd->nbytes - cmd->nleft); + rq->resid_len = rq->data_len - + (cmd->nbytes - cmd->nleft); if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) - rq->data_len += cmd->last_xfer_len; + rq->resid_len += cmd->last_xfer_len; } ide_complete_rq(drive, uptodate ? 
0 : -EIO, nsectors << 9); diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 7149224d1fe..65c5b705883 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -380,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc) } tape->first_frame += blocks; - rq->data_len -= blocks * tape->blk_size; + rq->resid_len = rq->data_len - blocks * tape->blk_size; if (pc->error) { uptodate = 0; @@ -903,7 +903,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size) blk_execute_rq(drive->queue, tape->disk, rq, 0); /* calculate the number of transferred bytes and update buffer state */ - size -= rq->data_len; + size -= rq->resid_len; tape->cur = tape->buf; if (cmd == REQ_IDETAPE_READ) tape->valid = size; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index a9019f081b9..5d5f34715de 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1357,8 +1357,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; memcpy(req->sense, smprep, sizeof(*smprep)); req->sense_len = sizeof(*smprep); - req->data_len = 0; - rsp->data_len -= smprep->ResponseDataLength; + rsp->resid_len = rsp->data_len - smprep->ResponseDataLength; } else { printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", ioc->name, __func__); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 3da02e43678..6605ec905cc 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1936,12 +1936,8 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, bio_data(rsp->bio), rsp->data_len); if (ret > 0) { /* positive number is the untransferred residual */ - rsp->data_len = ret; - req->data_len = 0; + rsp->resid_len = ret; ret = 0; - } else if (ret == 0) { - rsp->data_len = 0; - req->data_len = 0; } return ret; diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c index d110a366c48..89952edd0be 100644 --- a/drivers/scsi/libsas/sas_host_smp.c +++ b/drivers/scsi/libsas/sas_host_smp.c @@ -134,7 +134,7 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, { u8 *req_data = NULL, *resp_data = NULL, *buf; struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); - int error = -EINVAL, resp_data_len = rsp->data_len; + int error = -EINVAL; /* eight is the minimum size for request and response frames */ if (req->data_len < 8 || rsp->data_len < 8) @@ -176,17 +176,20 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, resp_data[1] = req_data[1]; resp_data[2] = SMP_RESP_FUNC_UNK; + req->resid_len = req->data_len; + rsp->resid_len = rsp->data_len; + switch (req_data[1]) { case SMP_REPORT_GENERAL: - req->data_len -= 8; - resp_data_len -= 32; + req->resid_len -= 8; + rsp->resid_len -= 32; resp_data[2] = SMP_RESP_FUNC_ACC; resp_data[9] = sas_ha->num_phys; break; case SMP_REPORT_MANUF_INFO: - req->data_len -= 8; - resp_data_len -= 64; + req->resid_len -= 8; + rsp->resid_len -= 64; resp_data[2] = SMP_RESP_FUNC_ACC; memcpy(resp_data + 12, shost->hostt->name, SAS_EXPANDER_VENDOR_ID_LEN); @@ -199,13 +202,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_DISCOVER: - req->data_len -= 16; - if ((int)req->data_len < 0) { - req->data_len = 0; + req->resid_len -= 16; + if ((int)req->resid_len < 0) { + req->resid_len = 0; error = -EINVAL; goto out; } - resp_data_len -= 56; + 
rsp->resid_len -= 56; sas_host_smp_discover(sas_ha, resp_data, req_data[9]); break; @@ -215,13 +218,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_REPORT_PHY_SATA: - req->data_len -= 16; - if ((int)req->data_len < 0) { - req->data_len = 0; + req->resid_len -= 16; + if ((int)req->resid_len < 0) { + req->resid_len = 0; error = -EINVAL; goto out; } - resp_data_len -= 60; + rsp->resid_len -= 60; sas_report_phy_sata(sas_ha, resp_data, req_data[9]); break; @@ -238,13 +241,13 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, break; case SMP_PHY_CONTROL: - req->data_len -= 44; - if ((int)req->data_len < 0) { - req->data_len = 0; + req->resid_len -= 44; + if ((int)req->resid_len < 0) { + req->resid_len = 0; error = -EINVAL; goto out; } - resp_data_len -= 8; + rsp->resid_len -= 8; sas_phy_control(sas_ha, req_data[9], req_data[10], req_data[32] >> 4, req_data[33] >> 4, resp_data); @@ -265,7 +268,6 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, flush_kernel_dcache_page(bio_page(rsp->bio)); kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); local_irq_enable(); - rsp->data_len = resp_data_len; out: kfree(req_data); diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index e03dc0b1e1a..53759c566bf 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -1170,9 +1170,7 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); req->sense_len = sizeof(*mpi_reply); - req->data_len = 0; - rsp->data_len -= mpi_reply->ResponseDataLength; - + rsp->resid_len = rsp->data_len - mpi_reply->ResponseDataLength; } else { dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - no reply\n", ioc->name, __func__)); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index aa9fc572e45..7d49ef589f3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -240,11 +240,11 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, * is invalid. Prevent the garbage from being misinterpreted * and prevent security leaks by zeroing out the excess data. */ - if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) - memset(buffer + (bufflen - req->data_len), 0, req->data_len); + if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen)) + memset(buffer + (bufflen - req->resid_len), 0, req->resid_len); if (resid) - *resid = req->data_len; + *resid = req->resid_len; ret = req->errors; out: blk_put_request(req); @@ -549,7 +549,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, int leftover = (req->hard_nr_sectors << 9); if (blk_pc_request(req)) - leftover = req->data_len; + leftover = req->resid_len; /* kill remainder if no retrys */ if (error && scsi_noretry_cmd(cmd)) @@ -673,11 +673,11 @@ void scsi_release_buffers(struct scsi_cmnd *cmd) EXPORT_SYMBOL(scsi_release_buffers); /* - * Bidi commands Must be complete as a whole, both sides at once. - * If part of the bytes were written and lld returned - * scsi_in()->resid and/or scsi_out()->resid this information will be left - * in req->data_len and req->next_rq->data_len. The upper-layer driver can - * decide what to do with this information. + * Bidi commands Must be complete as a whole, both sides at once. 
If + * part of the bytes were written and lld returned scsi_in()->resid + * and/or scsi_out()->resid this information will be left in + * req->resid_len and req->next_rq->resid_len. The upper-layer driver + * can decide what to do with this information. */ static void scsi_end_bidi_request(struct scsi_cmnd *cmd) { @@ -685,8 +685,8 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd) unsigned int dlen = req->data_len; unsigned int next_dlen = req->next_rq->data_len; - req->data_len = scsi_out(cmd)->resid; - req->next_rq->data_len = scsi_in(cmd)->resid; + req->resid_len = scsi_out(cmd)->resid; + req->next_rq->resid_len = scsi_in(cmd)->resid; /* The req and req->next_rq have not been completed */ BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen)); @@ -778,7 +778,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_end_bidi_request(cmd); return; } - req->data_len = scsi_get_resid(cmd); + req->resid_len = scsi_get_resid(cmd); } BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 82312df9b0b..dec4c70677d 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1260,7 +1260,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate) sense = rq->sense; result = rq->errors; - resid = rq->data_len; + resid = rq->resid_len; SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", sdp->disk->disk_name, srp->header.pack_id, result)); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index eb24efea8f1..8681b708344 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -463,7 +463,7 @@ static void st_scsi_execute_end(struct request *req, int uptodate) struct scsi_tape *STp = SRpnt->stp; STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; - STp->buffer->cmdstat.residual = req->data_len; + STp->buffer->cmdstat.residual = req->resid_len; if (SRpnt->waiting) complete(SRpnt->waiting); diff --git a/fs/exofs/osd.c b/fs/exofs/osd.c index b249ae97fb1..06ca92672eb 100644 --- a/fs/exofs/osd.c +++ b/fs/exofs/osd.c @@ -50,10 +50,10 @@ int exofs_check_ok_resid(struct osd_request *or, u64 *in_resid, u64 *out_resid) /* FIXME: should be include in osd_sense_info */ if (in_resid) - *in_resid = or->in.req ? or->in.req->data_len : 0; + *in_resid = or->in.req ? or->in.req->resid_len : 0; if (out_resid) - *out_resid = or->out.req ? or->out.req->data_len : 0; + *out_resid = or->out.req ? or->out.req->resid_len : 0; return ret; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3a5b1bd6582..6a967cad89f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -229,6 +229,7 @@ struct request { unsigned int data_len; unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; + unsigned int resid_len; /* residual count */ void *sense; unsigned long deadline; -- cgit v1.2.3-70-g09d2 From 5b93629b4509c03ffa87a9316412fedf6f58cb37 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 7 May 2009 22:24:38 +0900 Subject: block: implement blk_rq_pos/[cur_]sectors() and convert obvious ones Implement accessors - blk_rq_pos(), blk_rq_sectors() and blk_rq_cur_sectors() which return rq->hard_sector, rq->hard_nr_sectors and rq->hard_cur_sectors respectively and convert direct references of the said fields to the accessors. This is in preparation of request data length handling cleanup. 
Geert: suggested adding const to struct request * parameter to accessors Sergei: spotted error in patch description [ Impact: cleanup ] Signed-off-by: Tejun Heo Acked-by: Geert Uytterhoeven Acked-by: Stephen Rothwell Tested-by: Grant Likely Acked-by: Grant Likely Acked-by: Sergei Shtylyov Cc: Bartlomiej Zolnierkiewicz Cc: Borislav Petkov Cc: James Bottomley Signed-off-by: Jens Axboe --- block/blk-barrier.c | 2 +- block/blk-core.c | 2 +- block/cfq-iosched.c | 2 +- drivers/block/ps3disk.c | 2 +- drivers/block/viodasd.c | 6 +++--- drivers/block/xsysace.c | 10 +++++----- drivers/ide/ide-cd.c | 8 ++++---- drivers/ide/ide-io.c | 4 ++-- drivers/message/i2o/i2o_block.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 23 ++++++++++++++++++++--- kernel/trace/blktrace.c | 4 ++-- 12 files changed, 42 insertions(+), 25 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index c8d087655ef..c167de5b9ef 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -163,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) * For an empty barrier, there's no actual BAR request, which * in turn makes POSTFLUSH unnecessary. Mask them off. */ - if (!rq->hard_nr_sectors) { + if (!blk_rq_sectors(rq)) { q->ordered &= ~(QUEUE_ORDERED_DO_BAR | QUEUE_ORDERED_DO_POSTFLUSH); /* diff --git a/block/blk-core.c b/block/blk-core.c index 394c5bd8127..895e55b74a4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1683,7 +1683,7 @@ static void blk_account_io_done(struct request *req) unsigned int blk_rq_bytes(struct request *rq) { if (blk_fs_request(rq)) - return rq->hard_nr_sectors << 9; + return blk_rq_sectors(rq) << 9; return rq->data_len; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index def0c698a4b..575083a9ffe 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq) cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", cfqd->rq_in_driver); - cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; + cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); } static void cfq_deactivate_request(struct request_queue *q, struct request *rq) diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index f6586e4d351..c2388673684 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -136,7 +136,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", __func__, __LINE__, op, n, req->nr_sectors, - req->hard_nr_sectors); + blk_rq_sectors(req)); #endif start_sector = req->sector * priv->blocking_factor; diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index ecccf65dce2..e821eed7132 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c @@ -368,12 +368,12 @@ static void do_viodasd_request(struct request_queue *q) blkdev_dequeue_request(req); /* check that request contains a valid command */ if (!blk_fs_request(req)) { - viodasd_end_request(req, -EIO, req->hard_nr_sectors); + viodasd_end_request(req, -EIO, blk_rq_sectors(req)); continue; } /* Try sending the request */ if (send_request(req) != 0) - viodasd_end_request(req, -EIO, blk_rq_sectors(req)); } } @@ -590,7 +590,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent) err = vio_lookup_rc(viodasd_err_table,
bevent->sub_result); printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", event->xRc, bevent->sub_result, err->msg); - num_sect = req->hard_nr_sectors; + num_sect = blk_rq_sectors(req); } qlock = req->q->queue_lock; spin_lock_irqsave(qlock, irq_flags); diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index b1e1d7e5ab1..5722931d14c 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -645,8 +645,8 @@ static void ace_fsm_dostate(struct ace_device *ace) /* Okay, it's a data request, set it up for transfer */ dev_dbg(ace->dev, - "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n", - (unsigned long long) req->sector, req->hard_nr_sectors, + "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", + (unsigned long long) req->sector, blk_rq_sectors(req), req->current_nr_sectors, rq_data_dir(req)); ace->req = req; @@ -654,7 +654,7 @@ static void ace_fsm_dostate(struct ace_device *ace) ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF); - count = req->hard_nr_sectors; + count = blk_rq_sectors(req); if (rq_data_dir(req)) { /* Kick off write request */ dev_dbg(ace->dev, "write data\n"); @@ -719,8 +719,8 @@ static void ace_fsm_dostate(struct ace_device *ace) /* bio finished; is there another one? */ if (__blk_end_request(ace->req, 0, blk_rq_cur_bytes(ace->req))) { - /* dev_dbg(ace->dev, "next block; h=%li c=%i\n", - * ace->req->hard_nr_sectors, + /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", + * blk_rq_sectors(ace->req), * ace->req->current_nr_sectors); */ ace->data_ptr = ace->req->buffer; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 8bbe222c5e4..182320dd6ea 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -730,7 +730,7 @@ out_end: if (blk_pc_request(rq)) nsectors = (rq->data_len + 511) >> 9; else - nsectors = rq->hard_nr_sectors; + nsectors = blk_rq_sectors(rq); if (nsectors == 0) nsectors = 1; @@ -875,7 +875,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, return ide_issue_pc(drive, &cmd); out_end: - nsectors = rq->hard_nr_sectors; + nsectors = blk_rq_sectors(rq); if (nsectors == 0) nsectors = 1; @@ -1359,8 +1359,8 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) { int hard_sect = queue_hardsect_size(q); - long block = (long)rq->hard_sector / (hard_sect >> 9); - unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); + long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); + unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); memset(rq->cmd, 0, BLK_MAX_CDB); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index a0309ea661a..df23bcbd94b 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -118,7 +118,7 @@ unsigned int ide_rq_bytes(struct request *rq) if (blk_pc_request(rq)) return rq->data_len; else - return rq->hard_cur_sectors << 9; + return blk_rq_cur_sectors(rq) << 9; } EXPORT_SYMBOL_GPL(ide_rq_bytes); @@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes) * and complete the whole request right now */ if (blk_noretry_request(rq) && error <= 0) - nr_bytes = rq->hard_nr_sectors << 9; + nr_bytes = blk_rq_sectors(rq) << 9; rc = ide_end_rq(drive, rq, error, nr_bytes); if (rc == 0) diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 221317e6a00..56e60f0d531 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -427,7 
+427,7 @@ static void i2o_block_end_request(struct request *req, int error, unsigned long flags; if (blk_end_request(req, error, nr_bytes)) { - int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT); + int leftover = (blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT); if (blk_pc_request(req)) leftover = req->data_len; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7d49ef589f3..9ff0ca9988a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -546,7 +546,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, * to queue the remainder of them. */ if (blk_end_request(req, error, bytes)) { - int leftover = (req->hard_nr_sectors << 9); + int leftover = blk_rq_sectors(req) << 9; if (blk_pc_request(req)) leftover = req->resid_len; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6a967cad89f..4e5f8559872 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -832,13 +832,30 @@ static inline void blk_run_address_space(struct address_space *mapping) extern void blkdev_dequeue_request(struct request *req); /* - * blk_end_request() takes bytes instead of sectors as a complete size. - * blk_rq_bytes() returns bytes left to complete in the entire request. - * blk_rq_cur_bytes() returns bytes left to complete in the current segment. + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request + * blk_rq_cur_bytes() : bytes left in the current segment + * blk_rq_sectors() : sectors left in the entire request + * blk_rq_cur_sectors() : sectors left in the current segment */ +static inline sector_t blk_rq_pos(const struct request *rq) +{ + return rq->hard_sector; +} + extern unsigned int blk_rq_bytes(struct request *rq); extern unsigned int blk_rq_cur_bytes(struct request *rq); +static inline unsigned int blk_rq_sectors(const struct request *rq) +{ + return rq->hard_nr_sectors; +} + +static inline unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return rq->hard_cur_sectors; +} + /* * Request completion related functions. * diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 921ef5d1f0b..42f1c11e754 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -646,7 +646,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, rq->cmd_len, rq->cmd); } else { what |= BLK_TC_ACT(BLK_TC_FS); - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9, rw, what, rq->errors, 0, NULL); } } @@ -857,7 +857,7 @@ void blk_add_driver_data(struct request_queue *q, __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, rq->errors, len, data); else - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9, 0, BLK_TA_DRV_DATA, rq->errors, len, data); } EXPORT_SYMBOL_GPL(blk_add_driver_data); -- cgit v1.2.3-70-g09d2 From 2e46e8b27aa57c6bd34b3102b40ee4d0144b4fab Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 7 May 2009 22:24:41 +0900 Subject: block: drop request->hard_* and *nr_sectors struct request has had a few different ways to represent some properties of a request. ->hard_* represent the block layer's view of the request progress (completion cursor) and the ones without the prefix are supposed to represent the issue cursor and are allowed to be updated as necessary by the low level drivers.
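To see the two cursors in action, here is the old driver-side idiom in sketch form, modeled on the swim.c hunk earlier in this log (my_issue_segment() and my_hw_submit() are invented names):

/*
 * Pre-patch convention: the driver advances the issue cursor
 * (rq->sector, rq->nr_sectors, rq->buffer) by hand after each segment,
 * while the block layer keeps the hard_* completion cursor in sync.
 */
static void my_issue_segment(struct request *req)
{
	unsigned int n = req->current_nr_sectors;

	my_hw_submit(req->buffer, req->sector, n);	/* invented hw call */

	req->sector += n;		/* issue cursor, driver-owned */
	req->nr_sectors -= n;
	req->buffer += n * 512;
	/* req->hard_sector, req->hard_nr_sectors: block-layer-owned */
}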
The thing is that as the block layer supports partial completion, the two cursors really aren't necessary and only cause confusion. In addition, manual management of request details from low level drivers is cumbersome and error-prone at the very least. Another interesting set of duplicate fields is rq->[hard_]nr_sectors and rq->{hard_cur|current}_nr_sectors against rq->data_len and rq->bio->bi_size. This is more convoluted than the hard_ case. rq->[hard_]nr_sectors are initialized for requests with a bio but blk_rq_bytes() uses them only for !pc requests. rq->data_len is initialized for all requests but blk_rq_bytes() uses it only for pc requests. This causes a good amount of confusion throughout the block layer and its drivers, and determining the request length has been a bit of black magic which may or may not work depending on circumstances and on what the specific LLD is actually doing. rq->{hard_cur|current}_nr_sectors represent the number of sectors in the contiguous data area at the front. This is mainly used by drivers which transfer data by walking the request segment-by-segment. This value always equals rq->bio->bi_size >> 9. However, the data length for pc requests may not be a multiple of 512 bytes, so using this field becomes a bit confusing. In general, having multiple fields to represent the same property leads only to confusion and subtle bugs. With recent block low level driver cleanups, no driver is accessing or manipulating these duplicate fields directly. Drop all the duplicates. Now rq->sector means the current sector, rq->data_len the current total length and rq->bio->bi_size the current segment length. Everything else is defined in terms of these three and available only through accessors. * blk_recalc_rq_sectors() is collapsed into blk_update_request() and now handles pc and fs requests equally other than the rq->sector update. This means that pc requests can now use partial completion too (no in-kernel user yet, though). * bio_cur_sectors() is replaced with bio_cur_bytes() as the block layer now uses byte count as the primary data length. * blk_rq_pos() is now guaranteed to be always correct. In-block users converted. * blk_rq_bytes() is now guaranteed to be always valid, as is blk_rq_sectors(). In-block users converted. * blk_rq_sectors() is now guaranteed to equal blk_rq_bytes() >> 9. The more convenient one is used. * blk_rq_bytes() and blk_rq_cur_bytes() are now inlined and take a const pointer to the request.
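Taken together, a segment-walking driver needs none of the removed fields afterwards. A sketch assuming this patch is applied (my_xfer() and my_hw_xfer() are invented names; the queue lock is assumed held, as __blk_end_request() requires):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Position, total length and segment length are derived from
 * rq->sector, rq->data_len and rq->bio->bi_size via the accessors;
 * __blk_end_request() advances all three as bytes complete.
 */
static void my_xfer(struct request *rq)
{
	bool more = true;

	while (more) {
		unsigned int bytes = blk_rq_cur_bytes(rq);

		my_hw_xfer(bio_data(rq->bio), blk_rq_pos(rq), bytes);
		more = __blk_end_request(rq, 0, bytes);	/* queue lock held */
	}
}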
[ Impact: API cleanup, single way to represent one property of a request ] Signed-off-by: Tejun Heo Cc: Boaz Harrosh Signed-off-by: Jens Axboe --- block/blk-core.c | 81 ++++++++++++++++++------------------------------ block/blk-merge.c | 36 +++------------------ block/blk.h | 1 - block/cfq-iosched.c | 10 +++--- include/linux/bio.h | 6 ++-- include/linux/blkdev.h | 37 ++++++++++------------ include/linux/elevator.h | 2 +- kernel/trace/blktrace.c | 16 +++++----- 8 files changed, 67 insertions(+), 122 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 82dc20621c0..3596ca71909 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_LIST_HEAD(&rq->timeout_list); rq->cpu = -1; rq->q = q; - rq->sector = rq->hard_sector = (sector_t) -1; + rq->sector = (sector_t) -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->cmd = rq->__cmd; @@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", - rq->bio, rq->biotail, - rq->buffer, rq->data_len); + rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); if (blk_pc_request(rq)) { printk(KERN_INFO " cdb: "); @@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_NOIDLE; req->errors = 0; - req->hard_sector = req->sector = bio->bi_sector; + req->sector = bio->bi_sector; req->ioprio = bio_prio(bio); blk_rq_bio_prep(req->q, req, bio); } @@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q) static int __make_request(struct request_queue *q, struct bio *bio) { struct request *req; - int el_ret, nr_sectors; + int el_ret; + unsigned int bytes = bio->bi_size; const unsigned short prio = bio_prio(bio); const int sync = bio_sync(bio); const int unplug = bio_unplug(bio); int rw_flags; - nr_sectors = bio_sectors(bio); - /* * low level driver can indicate that it wants pages above a * certain limit bounced to low memory (ie for highmem, or even @@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) req->biotail->bi_next = bio; req->biotail = bio; - req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio) * not touch req->buffer either... 
*/ req->buffer = bio_data(bio); - req->current_nr_sectors = bio_cur_sectors(bio); - req->hard_cur_sectors = req->current_nr_sectors; - req->sector = req->hard_sector = bio->bi_sector; - req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->sector = bio->bi_sector; + req->data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio); int blk_rq_check_limits(struct request_queue *q, struct request *rq) { if (blk_rq_sectors(rq) > q->max_sectors || - rq->data_len > q->max_hw_sectors << 9) { + blk_rq_bytes(rq) > q->max_hw_sectors << 9) { printk(KERN_ERR "%s: over max size limit.\n", __func__); return -EIO; } @@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req) } } -/** - * blk_rq_bytes - Returns bytes left to complete in the entire request - * @rq: the request being processed - **/ -unsigned int blk_rq_bytes(struct request *rq) -{ - if (blk_fs_request(rq)) - return blk_rq_sectors(rq) << 9; - - return rq->data_len; -} -EXPORT_SYMBOL_GPL(blk_rq_bytes); - -/** - * blk_rq_cur_bytes - Returns bytes left to complete in the current segment - * @rq: the request being processed - **/ -unsigned int blk_rq_cur_bytes(struct request *rq) -{ - if (blk_fs_request(rq)) - return rq->current_nr_sectors << 9; - - if (rq->bio) - return rq->bio->bi_size; - - return rq->data_len; -} -EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); - struct request *elv_next_request(struct request_queue *q) { struct request *rq; @@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q) if (rq->cmd_flags & REQ_DONTPREP) break; - if (q->dma_drain_size && rq->data_len) { + if (q->dma_drain_size && blk_rq_bytes(rq)) { /* * make sure space for the drain appears we * know we can do this because max_hw_segments @@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q) * avoid resource deadlock. REQ_STARTED will * prevent other fs requests from passing this one. */ - if (q->dma_drain_size && rq->data_len && + if (q->dma_drain_size && blk_rq_bytes(rq) && !(rq->cmd_flags & REQ_DONTPREP)) { /* * remove the space for the drain we added @@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) * can find how many bytes remain in the request * later. */ - req->nr_sectors = req->hard_nr_sectors = 0; - req->current_nr_sectors = req->hard_cur_sectors = 0; + req->data_len = 0; return false; } @@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) bio_iovec(bio)->bv_len -= nr_bytes; } - blk_recalc_rq_sectors(req, total_bytes >> 9); + req->data_len -= total_bytes; + req->buffer = bio_data(req->bio); + + /* update sector only for requests with clear definition of sector */ + if (blk_fs_request(req) || blk_discard_rq(req)) + req->sector += total_bytes >> 9; + + /* + * If total number of sectors is less than the first segment + * size, something has gone terribly wrong. 
+ */ + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { + printk(KERN_ERR "blk: request botched\n"); + req->data_len = blk_rq_cur_bytes(req); + } + + /* recalculate the number of segments */ blk_recalc_rq_segments(req); + return true; } EXPORT_SYMBOL_GPL(blk_update_request); @@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->nr_phys_segments = bio_phys_segments(q, bio); rq->buffer = bio_data(bio); } - rq->current_nr_sectors = bio_cur_sectors(bio); - rq->hard_cur_sectors = rq->current_nr_sectors; - rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); rq->data_len = bio->bi_size; - rq->bio = rq->biotail = bio; if (bio->bi_bdev) diff --git a/block/blk-merge.c b/block/blk-merge.c index bf62a87a9da..b8df66aef0f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -9,35 +9,6 @@ #include "blk.h" -void blk_recalc_rq_sectors(struct request *rq, int nsect) -{ - if (blk_fs_request(rq) || blk_discard_rq(rq)) { - rq->hard_sector += nsect; - rq->hard_nr_sectors -= nsect; - - /* - * Move the I/O submission pointers ahead if required. - */ - if ((rq->nr_sectors >= rq->hard_nr_sectors) && - (rq->sector <= rq->hard_sector)) { - rq->sector = rq->hard_sector; - rq->nr_sectors = rq->hard_nr_sectors; - rq->hard_cur_sectors = bio_cur_sectors(rq->bio); - rq->current_nr_sectors = rq->hard_cur_sectors; - rq->buffer = bio_data(rq->bio); - } - - /* - * if total number of sectors is less than the first segment - * size, something has gone terribly wrong - */ - if (rq->nr_sectors < rq->current_nr_sectors) { - printk(KERN_ERR "blk: request botched\n"); - rq->nr_sectors = rq->current_nr_sectors; - } - } -} - static unsigned int __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio) { @@ -199,8 +170,9 @@ new_segment: if (unlikely(rq->cmd_flags & REQ_COPY_USER) && - (rq->data_len & q->dma_pad_mask)) { - unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; + (blk_rq_bytes(rq) & q->dma_pad_mask)) { + unsigned int pad_len = + (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; sg->length += pad_len; rq->extra_len += pad_len; @@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req, req->biotail->bi_next = next->bio; req->biotail = next->biotail; - req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; + req->data_len += blk_rq_bytes(next); elv_merge_requests(q, req, next); diff --git a/block/blk.h b/block/blk.h index 51115599df9..ab54529103c 100644 --- a/block/blk.h +++ b/block/blk.h @@ -101,7 +101,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, int attempt_back_merge(struct request_queue *q, struct request *rq); int attempt_front_merge(struct request_queue *q, struct request *rq); void blk_recalc_rq_segments(struct request *rq); -void blk_recalc_rq_sectors(struct request *rq, int nsect); void blk_queue_congestion_threshold(struct request_queue *q); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index db4d990a66b..99ac4304d71 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, * Sort strictly based on sector. Smallest to the left, * largest to the right. 
*/ - if (sector > cfqq->next_rq->sector) + if (sector > blk_rq_pos(cfqq->next_rq)) n = &(*p)->rb_right; - else if (sector < cfqq->next_rq->sector) + else if (sector < blk_rq_pos(cfqq->next_rq)) n = &(*p)->rb_left; else break; @@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) return; cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; - __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector, - &parent, &p); + __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, + blk_rq_pos(cfqq->next_rq), &parent, &p); if (!__cfqq) { rb_link_node(&cfqq->p_node, parent, p); rb_insert_color(&cfqq->p_node, cfqq->p_root); @@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, if (cfq_rq_close(cfqd, __cfqq->next_rq)) return __cfqq; - if (__cfqq->next_rq->sector < sector) + if (blk_rq_pos(__cfqq->next_rq) < sector) node = rb_next(&__cfqq->p_node); else node = rb_prev(&__cfqq->p_node); diff --git a/include/linux/bio.h b/include/linux/bio.h index f37ca8c726b..d30ec6f30dd 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -218,12 +218,12 @@ struct bio { #define bio_sectors(bio) ((bio)->bi_size >> 9) #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) -static inline unsigned int bio_cur_sectors(struct bio *bio) +static inline unsigned int bio_cur_bytes(struct bio *bio) { if (bio->bi_vcnt) - return bio_iovec(bio)->bv_len >> 9; + return bio_iovec(bio)->bv_len; else /* dataless requests such as discard */ - return bio->bi_size >> 9; + return bio->bi_size; } static inline void *bio_data(struct bio *bio) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4e5f8559872..ce2bf5efa9b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -166,19 +166,8 @@ struct request { enum rq_cmd_type_bits cmd_type; unsigned long atomic_flags; - /* Maintain bio traversal state for part by part I/O submission. - * hard_* are block layer internals, no driver should touch them! - */ - - sector_t sector; /* next sector to submit */ - sector_t hard_sector; /* next sector to complete */ - unsigned long nr_sectors; /* no. of sectors left to submit */ - unsigned long hard_nr_sectors; /* no. of sectors left to complete */ - /* no. of sectors left to submit in the current segment */ - unsigned int current_nr_sectors; - - /* no. of sectors left to complete in the current segment */ - unsigned int hard_cur_sectors; + sector_t sector; /* sector cursor */ + unsigned int data_len; /* total data len, don't access directly */ struct bio *bio; struct bio *biotail; @@ -226,7 +215,6 @@ struct request { unsigned char __cmd[BLK_MAX_CDB]; unsigned char *cmd; - unsigned int data_len; unsigned int extra_len; /* length of alignment and padding */ unsigned int sense_len; unsigned int resid_len; /* residual count */ @@ -840,20 +828,27 @@ extern void blkdev_dequeue_request(struct request *req); */ static inline sector_t blk_rq_pos(const struct request *rq) { - return rq->hard_sector; + return rq->sector; +} + +static inline unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->data_len; } -extern unsigned int blk_rq_bytes(struct request *rq); -extern unsigned int blk_rq_cur_bytes(struct request *rq); +static inline int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? 
bio_cur_bytes(rq->bio) : 0; +} static inline unsigned int blk_rq_sectors(const struct request *rq) { - return rq->hard_nr_sectors; + return blk_rq_bytes(rq) >> 9; } static inline unsigned int blk_rq_cur_sectors(const struct request *rq) { - return rq->hard_cur_sectors; + return blk_rq_cur_bytes(rq) >> 9; } /* @@ -928,7 +923,7 @@ static inline void blk_end_request_all(struct request *rq, int error) */ static inline bool blk_end_request_cur(struct request *rq, int error) { - return blk_end_request(rq, error, rq->hard_cur_sectors << 9); + return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); } /** @@ -981,7 +976,7 @@ static inline void __blk_end_request_all(struct request *rq, int error) */ static inline bool __blk_end_request_cur(struct request *rq, int error) { - return __blk_end_request(rq, error, rq->hard_cur_sectors << 9); + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); } extern void blk_complete_request(struct request *); diff --git a/include/linux/elevator.h b/include/linux/elevator.h index c59b769f62b..4e462878c9c 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -171,7 +171,7 @@ enum { ELV_MQUEUE_MUST, }; -#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) +#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) /* diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 42f1c11e754..5708a14bee5 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -642,12 +642,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, if (blk_pc_request(rq)) { what |= BLK_TC_ACT(BLK_TC_PC); - __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, - rq->cmd_len, rq->cmd); + __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, + what, rq->errors, rq->cmd_len, rq->cmd); } else { what |= BLK_TC_ACT(BLK_TC_FS); - __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9, - rw, what, rq->errors, 0, NULL); + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw, + what, rq->errors, 0, NULL); } } @@ -854,11 +854,11 @@ void blk_add_driver_data(struct request_queue *q, return; if (blk_pc_request(rq)) - __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, - rq->errors, len, data); + __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, + BLK_TA_DRV_DATA, rq->errors, len, data); else - __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9, - 0, BLK_TA_DRV_DATA, rq->errors, len, data); + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, + BLK_TA_DRV_DATA, rq->errors, len, data); } EXPORT_SYMBOL_GPL(blk_add_driver_data); -- cgit v1.2.3-70-g09d2 From a2dec7b36364a5cc564c4d76cf16d2e7d33f5c05 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 7 May 2009 22:24:44 +0900 Subject: block: hide request sector and data_len Block low level drivers have for some reason been pretty good at abusing the block layer API. In particular, struct request's fields tend to get violated in all possible ways. Make it clear that low level drivers MUST NOT access or manipulate rq->sector and rq->data_len directly by prefixing them with double underscores. This change is also necessary to break the build of out-of-tree code which assumes the previous block API, where internal fields could be manipulated and rq->data_len carried the residual count on completion.
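To make the intended usage concrete, here is a minimal, hypothetical LLD fragment (not part of this patch) that reads request geometry only through the accessor helpers; example_issue() and the log format are illustrative assumptions:

#include <linux/blkdev.h>

/* Sketch only: read position/length via the accessors instead of
 * poking the now-hidden rq->__sector / rq->__data_len fields. */
static void example_issue(struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);			/* current sector */
	unsigned int left = blk_rq_bytes(rq);		/* bytes left in request */
	unsigned int seg = blk_rq_cur_bytes(rq);	/* bytes left in segment */

	pr_info("issue: sector %llu, %u bytes (%u in first segment)\n",
		(unsigned long long)pos, left, seg);
}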
[ Impact: hide internal fields, block API change ] Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-core.c | 20 ++++++++++---------- block/blk-map.c | 2 +- block/blk-merge.c | 2 +- include/linux/blkdev.h | 9 +++++---- 4 files changed, 17 insertions(+), 16 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 3596ca71909..6226a380fb6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_LIST_HEAD(&rq->timeout_list); rq->cpu = -1; rq->q = q; - rq->sector = (sector_t) -1; + rq->__sector = (sector_t) -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->cmd = rq->__cmd; @@ -1095,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_NOIDLE; req->errors = 0; - req->sector = bio->bi_sector; + req->__sector = bio->bi_sector; req->ioprio = bio_prio(bio); blk_rq_bio_prep(req->q, req, bio); } @@ -1143,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) req->biotail->bi_next = bio; req->biotail = bio; - req->data_len += bytes; + req->__data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1169,8 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio) * not touch req->buffer either... */ req->buffer = bio_data(bio); - req->sector = bio->bi_sector; - req->data_len += bytes; + req->__sector = bio->bi_sector; + req->__data_len += bytes; req->ioprio = ioprio_best(req->ioprio, prio); if (!blk_rq_cpu_valid(req)) req->cpu = bio->bi_comp_cpu; @@ -1878,7 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) * can find how many bytes remain in the request * later. 
*/ - req->data_len = 0; + req->__data_len = 0; return false; } @@ -1892,12 +1892,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) bio_iovec(bio)->bv_len -= nr_bytes; } - req->data_len -= total_bytes; + req->__data_len -= total_bytes; req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ if (blk_fs_request(req) || blk_discard_rq(req)) - req->sector += total_bytes >> 9; + req->__sector += total_bytes >> 9; /* * If total number of sectors is less than the first segment @@ -1905,7 +1905,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) */ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { printk(KERN_ERR "blk: request botched\n"); - req->data_len = blk_rq_cur_bytes(req); + req->__data_len = blk_rq_cur_bytes(req); } /* recalculate the number of segments */ @@ -2032,7 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, rq->nr_phys_segments = bio_phys_segments(q, bio); rq->buffer = bio_data(bio); } - rq->data_len = bio->bi_size; + rq->__data_len = bio->bi_size; rq->bio = rq->biotail = bio; if (bio->bi_bdev) diff --git a/block/blk-map.c b/block/blk-map.c index 694fefad34e..56082bea450 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, rq->biotail->bi_next = bio; rq->biotail = bio; - rq->data_len += bio->bi_size; + rq->__data_len += bio->bi_size; } return 0; } diff --git a/block/blk-merge.c b/block/blk-merge.c index b8df66aef0f..4974dd5767e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -370,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req, req->biotail->bi_next = next->bio; req->biotail = next->biotail; - req->data_len += blk_rq_bytes(next); + req->__data_len += blk_rq_bytes(next); elv_merge_requests(q, req, next); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ce2bf5efa9b..c7558034570 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -166,8 +166,9 @@ struct request { enum rq_cmd_type_bits cmd_type; unsigned long atomic_flags; - sector_t sector; /* sector cursor */ - unsigned int data_len; /* total data len, don't access directly */ + /* the following two fields are internal, NEVER access directly */ + sector_t __sector; /* sector cursor */ + unsigned int __data_len; /* total data len */ struct bio *bio; struct bio *biotail; @@ -828,12 +829,12 @@ extern void blkdev_dequeue_request(struct request *req); */ static inline sector_t blk_rq_pos(const struct request *rq) { - return rq->sector; + return rq->__sector; } static inline unsigned int blk_rq_bytes(const struct request *rq) { - return rq->data_len; + return rq->__data_len; } static inline int blk_rq_cur_bytes(const struct request *rq) -- cgit v1.2.3-70-g09d2 From 9934c8c04561413609d2bc38c6b9f268cba774a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 8 May 2009 11:54:16 +0900 Subject: block: implement and enforce request peek/start/fetch Until now, the block layer has allowed two separate modes of request execution. A request is always acquired from the request queue via elv_next_request(); after that, drivers are free either to dequeue it or to process it without dequeueing. Dequeueing allows elv_next_request() to return the next request so that multiple requests can be in flight, as sketched below.
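For illustration, the two pre-patch modes look roughly like this (hypothetical driver fragments using the old API names; issue_to_hw() and handle_one_segment() are assumed helpers):

/* Mode 1: dequeueing -- several requests may be in flight at once. */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);
		issue_to_hw(rq);			/* assumed helper */
	}
}

/* Mode 2: in-place -- the request stays at the head of the queue and is
 * completed piecemeal, e.g. via __blk_end_request_cur(). */
static void example_simple_request_fn(struct request_queue *q)
{
	struct request *rq = elv_next_request(q);

	if (rq)
		handle_one_segment(rq);			/* assumed helper */
}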
Executing requests without dequeueing is mainly useful for drivers of simpler devices which can't do scatter/gather: it lets them deal with segments only, without considering request boundaries. However, the benefit this brings is dubious and declining, while the cost of the API ambiguity keeps increasing. Segment-based drivers usually serve very old or limited devices, and since converting them to the dequeueing model isn't difficult, the mode doesn't justify the API overhead it puts on the block layer and its more modern users. Previous patches converted all block low level drivers to the dequeueing model. This patch completes the API transition by...

* renaming elv_next_request() to blk_peek_request()
* renaming blkdev_dequeue_request() to blk_start_request()
* adding blk_fetch_request() which is a combination of peek and start
* disallowing completion of queued (not started) requests
* applying the new API to all LLDs

The renamings are for consistency and to break out-of-tree code, so that it's apparent that out-of-tree drivers need updating.

[ Impact: block request issue API cleanup, no functional change ]

Signed-off-by: Tejun Heo Cc: Rusty Russell Cc: James Bottomley Cc: Mike Miller Cc: unsik Kim Cc: Paul Clements Cc: Tim Waugh Cc: Geert Uytterhoeven Cc: David S. Miller Cc: Laurent Vivier Cc: Jeff Garzik Cc: Jeremy Fitzhardinge Cc: Grant Likely Cc: Adrian McMenamin Cc: Stephen Rothwell Cc: Bartlomiej Zolnierkiewicz Cc: Borislav Petkov Cc: Sergei Shtylyov Cc: Alex Dubov Cc: Pierre Ossman Cc: David Woodhouse Cc: Markus Lidel Cc: Stefan Weinhuber Cc: Martin Schwidefsky Cc: Pete Zaitcev Cc: FUJITA Tomonori Signed-off-by: Jens Axboe --- arch/arm/plat-omap/mailbox.c | 12 ++--- arch/um/drivers/ubd_kern.c | 3 +- block/blk-barrier.c | 4 +- block/blk-core.c | 105 ++++++++++++++++++++++++++---------- block/blk-tag.c | 2 +- block/blk.h | 1 + drivers/block/DAC960.c | 4 +- drivers/block/amiflop.c | 3 +- drivers/block/ataflop.c | 3 +- drivers/block/cciss.c | 4 +- drivers/block/cpqarray.c | 4 +- drivers/block/floppy.c | 6 +-- drivers/block/hd.c | 3 +- drivers/block/mg_disk.c | 12 ++--- drivers/block/nbd.c | 4 +- drivers/block/paride/pcd.c | 3 +- drivers/block/paride/pd.c | 7 +-- drivers/block/paride/pf.c | 3 +- drivers/block/ps3disk.c | 4 +- drivers/block/sunvdc.c | 3 +- drivers/block/swim.c | 12 ++--- drivers/block/swim3.c | 3 +- drivers/block/sx8.c | 8 ++- drivers/block/ub.c | 8 +-- drivers/block/viodasd.c | 4 +- drivers/block/virtio_blk.c | 4 +- drivers/block/xd.c | 12 ++--- drivers/block/xen-blkfront.c | 4 +- drivers/block/xsysace.c | 10 ++-- drivers/block/z2ram.c | 12 ++--- drivers/cdrom/gdrom.c | 4 +- drivers/cdrom/viocd.c | 4 +- drivers/ide/ide-atapi.c | 2 +- drivers/ide/ide-io.c | 9 ++-- drivers/memstick/core/mspro_block.c | 9 ++-- drivers/message/i2o/i2o_block.c | 6 +-- drivers/mmc/card/queue.c | 11 ++-- drivers/mtd/mtd_blkdevs.c | 7 +-- drivers/s390/block/dasd.c | 16 ++---- drivers/s390/char/tape_block.c | 7 +-- drivers/sbus/char/jsflash.c | 12 ++--- drivers/scsi/scsi_lib.c | 10 ++-- drivers/scsi/scsi_transport_sas.c | 4 +- include/linux/blkdev.h | 9 +++- include/linux/elevator.h | 2 - 45 files changed, 172 insertions(+), 207 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 7a1f5c25fd1..40424edae93 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c @@ -197,9 +197,7 @@ static void mbox_tx_work(struct work_struct *work) struct omap_msg_tx_data *tx_data; spin_lock(q->queue_lock); - rq = elv_next_request(q); - if (rq) -
blkdev_dequeue_request(rq); + rq = blk_fetch_request(q); spin_unlock(q->queue_lock); if (!rq) @@ -242,9 +240,7 @@ static void mbox_rx_work(struct work_struct *work) while (1) { spin_lock_irqsave(q->queue_lock, flags); - rq = elv_next_request(q); - if (rq) - blkdev_dequeue_request(rq); + rq = blk_fetch_request(q); spin_unlock_irqrestore(q->queue_lock, flags); if (!rq) break; @@ -351,9 +347,7 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf) while (1) { spin_lock_irqsave(q->queue_lock, flags); - rq = elv_next_request(q); - if (rq) - blkdev_dequeue_request(rq); + rq = blk_fetch_request(q); spin_unlock_irqrestore(q->queue_lock, flags); if (!rq) diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 402ba8f70fc..aa9e926e13d 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -1228,12 +1228,11 @@ static void do_ubd_request(struct request_queue *q) while(1){ struct ubd *dev = q->queuedata; if(dev->end_sg == 0){ - struct request *req = elv_next_request(q); + struct request *req = blk_fetch_request(q); if(req == NULL) return; dev->request = req; - blkdev_dequeue_request(req); dev->start_sg = 0; dev->end_sg = blk_rq_map_sg(q, req, dev->sg); } diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 8713c2fbc4f..0ab81a0a750 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -180,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) } /* stash away the original request */ - elv_dequeue_request(q, rq); + blk_dequeue_request(rq); q->orig_bar_rq = rq; rq = NULL; @@ -248,7 +248,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp) * Queue ordering not supported. Terminate * with prejudice. */ - elv_dequeue_request(q, rq); + blk_dequeue_request(rq); __blk_end_request_all(rq, -EOPNOTSUPP); *rqp = NULL; return false; diff --git a/block/blk-core.c b/block/blk-core.c index 6226a380fb6..93691d2ac5a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -902,6 +902,8 @@ EXPORT_SYMBOL(blk_get_request); */ void blk_requeue_request(struct request_queue *q, struct request *rq) { + BUG_ON(blk_queued_rq(rq)); + blk_delete_timer(rq); blk_clear_rq_complete(rq); trace_block_rq_requeue(q, rq); @@ -1610,28 +1612,6 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); -/** - * blkdev_dequeue_request - dequeue request and start timeout timer - * @req: request to dequeue - * - * Dequeue @req and start timeout timer on it. This hands off the - * request to the driver. - * - * Block internal functions which don't want to start timer should - * call elv_dequeue_request(). - */ -void blkdev_dequeue_request(struct request *req) -{ - elv_dequeue_request(req->q, req); - - /* - * We are now handing the request to the hardware, add the - * timeout handler. - */ - blk_add_timer(req); -} -EXPORT_SYMBOL(blkdev_dequeue_request); - static void blk_account_io_completion(struct request *req, unsigned int bytes) { if (blk_do_io_stat(req)) { @@ -1671,7 +1651,23 @@ static void blk_account_io_done(struct request *req) } } -struct request *elv_next_request(struct request_queue *q) +/** + * blk_peek_request - peek at the top of a request queue + * @q: request queue to peek at + * + * Description: + * Return the request at the top of @q. The returned request + * should be started using blk_start_request() before LLD starts + * processing it. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. 
+ * + * Context: + * queue_lock must be held. + */ +struct request *blk_peek_request(struct request_queue *q) { struct request *rq; int ret; @@ -1748,10 +1744,12 @@ struct request *elv_next_request(struct request_queue *q) return rq; } -EXPORT_SYMBOL(elv_next_request); +EXPORT_SYMBOL(blk_peek_request); -void elv_dequeue_request(struct request_queue *q, struct request *rq) +void blk_dequeue_request(struct request *rq) { + struct request_queue *q = rq->q; + BUG_ON(list_empty(&rq->queuelist)); BUG_ON(ELV_ON_HASH(rq)); @@ -1766,6 +1764,58 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) q->in_flight++; } +/** + * blk_start_request - start request processing on the driver + * @req: request to dequeue + * + * Description: + * Dequeue @req and start timeout timer on it. This hands off the + * request to the driver. + * + * Block internal functions which don't want to start timer should + * call blk_dequeue_request(). + * + * Context: + * queue_lock must be held. + */ +void blk_start_request(struct request *req) +{ + blk_dequeue_request(req); + + /* + * We are now handing the request to the hardware, add the + * timeout handler. + */ + blk_add_timer(req); +} +EXPORT_SYMBOL(blk_start_request); + +/** + * blk_fetch_request - fetch a request from a request queue + * @q: request queue to fetch a request from + * + * Description: + * Return the request at the top of @q. The request is started on + * return and LLD can start processing it immediately. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_fetch_request(struct request_queue *q) +{ + struct request *rq; + + rq = blk_peek_request(q); + if (rq) + blk_start_request(rq); + return rq; +} +EXPORT_SYMBOL(blk_fetch_request); + /** * blk_update_request - Special helper function for request stacking drivers * @rq: the request being processed @@ -1937,12 +1987,11 @@ static bool blk_update_bidi_request(struct request *rq, int error, */ static void blk_finish_request(struct request *req, int error) { + BUG_ON(blk_queued_rq(req)); + if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); - if (blk_queued_rq(req)) - elv_dequeue_request(req->q, req); - if (unlikely(laptop_mode) && blk_fs_request(req)) laptop_io_completion(); diff --git a/block/blk-tag.c b/block/blk-tag.c index 3c518e3303a..c260f7c30dd 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -374,7 +374,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) rq->cmd_flags |= REQ_QUEUED; rq->tag = tag; bqt->tag_index[tag] = rq; - blkdev_dequeue_request(rq); + blk_start_request(rq); list_add(&rq->queuelist, &q->tag_busy_list); return 0; } diff --git a/block/blk.h b/block/blk.h index ab54529103c..9e0042ca949 100644 --- a/block/blk.h +++ b/block/blk.h @@ -13,6 +13,7 @@ extern struct kobj_type blk_queue_ktype; void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); +void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); void blk_unplug_work(struct work_struct *work); diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 774ab05973a..668dc234b8e 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_ DAC960_Command_T *Command; while(1) { - Request = elv_next_request(req_q); + Request = 
blk_peek_request(req_q); if (!Request) return 1; @@ -3341,7 +3341,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_ Command->BlockNumber = blk_rq_pos(Request); Command->BlockCount = blk_rq_sectors(Request); Command->Request = Request; - blkdev_dequeue_request(Request); + blk_start_request(Request); Command->SegmentCount = blk_rq_map_sg(req_q, Command->Request, Command->cmd_sglist); /* pci_map_sg MAY change the value of SegCount */ diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 80a68b2e045..9c6e5b0fe89 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1342,12 +1342,11 @@ static void redo_fd_request(void) int err; next_req: - rq = elv_next_request(floppy_queue); + rq = blk_fetch_request(floppy_queue); if (!rq) { /* Nothing left to do */ return; } - blkdev_dequeue_request(rq); floppy = rq->rq_disk->private_data; drive = floppy - unit; diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 89a591d9c83..f5e7180d7f4 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1404,10 +1404,9 @@ static void redo_fd_request(void) repeat: if (!fd_request) { - fd_request = elv_next_request(floppy_queue); + fd_request = blk_fetch_request(floppy_queue); if (!fd_request) goto the_end; - blkdev_dequeue_request(fd_request); } floppy = fd_request->rq_disk->private_data; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index ab7b04c0db7..e714e7cce6f 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -2801,7 +2801,7 @@ static void do_cciss_request(struct request_queue *q) goto startio; queue: - creq = elv_next_request(q); + creq = blk_peek_request(q); if (!creq) goto startio; @@ -2810,7 +2810,7 @@ static void do_cciss_request(struct request_queue *q) if ((c = cmd_alloc(h, 1)) == NULL) goto full; - blkdev_dequeue_request(creq); + blk_start_request(creq); spin_unlock_irq(q->queue_lock); diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index a5caeff4718..a02dcfc00f1 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q) goto startio; queue_next: - creq = elv_next_request(q); + creq = blk_peek_request(q); if (!creq) goto startio; @@ -912,7 +912,7 @@ queue_next: if ((c = cmd_alloc(h,1)) == NULL) goto startio; - blkdev_dequeue_request(creq); + blk_start_request(creq); c->ctlr = h->ctlr; c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index e2c70d2085a..90877fee0ee 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -931,7 +931,7 @@ static inline void unlock_fdc(void) del_timer(&fd_timeout); cont = NULL; clear_bit(0, &fdc_busy); - if (current_req || elv_next_request(floppy_queue)) + if (current_req || blk_peek_request(floppy_queue)) do_fd_request(floppy_queue); spin_unlock_irqrestore(&floppy_lock, flags); wake_up(&fdc_wait); @@ -2912,9 +2912,7 @@ static void redo_fd_request(void) struct request *req; spin_lock_irq(floppy_queue->queue_lock); - req = elv_next_request(floppy_queue); - if (req) - blkdev_dequeue_request(req); + req = blk_fetch_request(floppy_queue); spin_unlock_irq(floppy_queue->queue_lock); if (!req) { do_floppy = NULL; diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 288ab63c102..961de56d00a 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -592,12 +592,11 @@ repeat: del_timer(&device_timer); if (!hd_req) { - hd_req = elv_next_request(hd_queue); + hd_req = 
blk_fetch_request(hd_queue); if (!hd_req) { do_hd = NULL; return; } - blkdev_dequeue_request(hd_req); } req = hd_req; diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 1ca5d1423fa..c0cd0a03f69 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -671,10 +671,8 @@ static void mg_request_poll(struct request_queue *q) while (1) { if (!host->req) { - host->req = elv_next_request(q); - if (host->req) - blkdev_dequeue_request(host->req); - else + host->req = blk_fetch_request(q); + if (!host->req) break; } @@ -744,10 +742,8 @@ static void mg_request(struct request_queue *q) while (1) { if (!host->req) { - host->req = elv_next_request(q); - if (host->req) - blkdev_dequeue_request(host->req); - else + host->req = blk_fetch_request(q); + if (!host->req) break; } req = host->req; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index fad167de23b..5d23ffad7c7 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q) { struct request *req; - while ((req = elv_next_request(q)) != NULL) { + while ((req = blk_fetch_request(q)) != NULL) { struct nbd_device *lo; - blkdev_dequeue_request(req); - spin_unlock_irq(q->queue_lock); dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 425f81586a3..911dfd98d81 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -720,10 +720,9 @@ static void do_pcd_request(struct request_queue * q) return; while (1) { if (!pcd_req) { - pcd_req = elv_next_request(q); + pcd_req = blk_fetch_request(q); if (!pcd_req) return; - blkdev_dequeue_request(pcd_req); } if (rq_data_dir(pcd_req) == READ) { diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index d2ca3f55206..bf5955b3d87 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -412,11 +412,9 @@ static void run_fsm(void) spin_lock_irqsave(&pd_lock, saved_flags); if (!__blk_end_request_cur(pd_req, res == Ok ? 
0 : -EIO)) { - pd_req = elv_next_request(pd_queue); + pd_req = blk_fetch_request(pd_queue); if (!pd_req) stop = 1; - else - blkdev_dequeue_request(pd_req); } spin_unlock_irqrestore(&pd_lock, saved_flags); if (stop) @@ -706,10 +704,9 @@ static void do_pd_request(struct request_queue * q) { if (pd_req) return; - pd_req = elv_next_request(q); + pd_req = blk_fetch_request(q); if (!pd_req) return; - blkdev_dequeue_request(pd_req); schedule_fsm(); } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index d6f7bd84ed3..68a90834e99 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -762,10 +762,9 @@ static void do_pf_request(struct request_queue * q) return; repeat: if (!pf_req) { - pf_req = elv_next_request(q); + pf_req = blk_fetch_request(q); if (!pf_req) return; - blkdev_dequeue_request(pf_req); } pf_current = pf_req->rq_disk->private_data; diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index f4d8db944e7..338cee4cc0b 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -194,9 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev, dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); - while ((req = elv_next_request(q))) { - blkdev_dequeue_request(req); - + while ((req = blk_fetch_request(q))) { if (blk_fs_request(req)) { if (ps3disk_submit_request_sg(dev, req)) break; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 9f351bfa15e..cbfd9c0aef0 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -441,12 +441,11 @@ out: static void do_vdc_request(struct request_queue *q) { while (1) { - struct request *req = elv_next_request(q); + struct request *req = blk_fetch_request(q); if (!req) break; - blkdev_dequeue_request(req); if (__send_request(req) < 0) __blk_end_request_all(req, -EIO); } diff --git a/drivers/block/swim.c b/drivers/block/swim.c index dedd4893f5e..cf7877fb8a7 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -528,10 +528,7 @@ static void redo_fd_request(struct request_queue *q) struct request *req; struct floppy_state *fs; - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - + req = blk_fetch_request(q); while (req) { int err = -EIO; @@ -554,11 +551,8 @@ static void redo_fd_request(struct request_queue *q) break; } done: - if (!__blk_end_request_cur(req, err)) { - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - } + if (!__blk_end_request_cur(req, err)) + req = blk_fetch_request(q); } } diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index f48c6dd47e0..80df93e3cdd 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -326,10 +326,9 @@ static void start_request(struct floppy_state *fs) } while (fs->state == idle) { if (!fd_req) { - fd_req = elv_next_request(swim3_queue); + fd_req = blk_fetch_request(swim3_queue); if (!fd_req) break; - blkdev_dequeue_request(fd_req); } req = fd_req; #if 0 diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 087c94c8b2d..da403b6a7f4 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c @@ -810,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q) while (1) { DPRINTK("get req\n"); - rq = elv_next_request(q); + rq = blk_fetch_request(q); if (!rq) break; - blkdev_dequeue_request(rq); - crq = rq->special; assert(crq != NULL); assert(crq->rq == rq); @@ -846,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q) queue_one_request: VPRINTK("get req\n"); - rq = elv_next_request(q); + rq = blk_peek_request(q); if (!rq) return; @@ 
-857,7 +855,7 @@ queue_one_request: } crq->rq = rq; - blkdev_dequeue_request(rq); + blk_start_request(rq); if (rq_data_dir(rq) == WRITE) { writing = 1; diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 40d03cf63f2..178f459a50e 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -627,7 +627,7 @@ static void ub_request_fn(struct request_queue *q) struct ub_lun *lun = q->queuedata; struct request *rq; - while ((rq = elv_next_request(q)) != NULL) { + while ((rq = blk_peek_request(q)) != NULL) { if (ub_request_fn_1(lun, rq) != 0) { blk_stop_queue(q); break; @@ -643,13 +643,13 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) int n_elem; if (atomic_read(&sc->poison)) { - blkdev_dequeue_request(rq); + blk_start_request(rq); ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq)); return 0; } if (lun->changed && !blk_pc_request(rq)) { - blkdev_dequeue_request(rq); + blk_start_request(rq); ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq)); return 0; } @@ -660,7 +660,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) return -1; memset(cmd, 0, sizeof(struct ub_scsi_cmd)); - blkdev_dequeue_request(rq); + blk_start_request(rq); urq = &lun->urq; memset(urq, 0, sizeof(struct ub_request)); diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 2086cb12d3e..390d69bb7c4 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c @@ -361,11 +361,9 @@ static void do_viodasd_request(struct request_queue *q) * back later. */ while (num_req_outstanding < VIOMAXREQ) { - req = elv_next_request(q); + req = blk_fetch_request(q); if (req == NULL) return; - /* dequeue the current request from the queue */ - blkdev_dequeue_request(req); /* check that request contains a valid command */ if (!blk_fs_request(req)) { viodasd_end_request(req, -EIO, blk_rq_sectors(req)); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 1980ab45635..29a9daf4862 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -128,7 +128,7 @@ static void do_virtblk_request(struct request_queue *q) struct request *req; unsigned int issued = 0; - while ((req = elv_next_request(q)) != NULL) { + while ((req = blk_peek_request(q)) != NULL) { vblk = req->rq_disk->private_data; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); @@ -138,7 +138,7 @@ static void do_virtblk_request(struct request_queue *q) blk_stop_queue(q); break; } - blkdev_dequeue_request(req); + blk_start_request(req); issued++; } diff --git a/drivers/block/xd.c b/drivers/block/xd.c index d4c4352354b..ce242921992 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -305,10 +305,7 @@ static void do_xd_request (struct request_queue * q) if (xdc_busy) return; - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - + req = blk_fetch_request(q); while (req) { unsigned block = blk_rq_pos(req); unsigned count = blk_rq_cur_sectors(req); @@ -325,11 +322,8 @@ static void do_xd_request (struct request_queue * q) block, count); done: /* wrap up, 0 = success, -errno = fail */ - if (!__blk_end_request_cur(req, res)) { - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - } + if (!__blk_end_request_cur(req, res)) + req = blk_fetch_request(q); } } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 66f834571b8..6d4ac76c280 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -299,13 +299,13 @@ static void do_blkif_request(struct request_queue *rq) queued = 0; - while ((req = 
elv_next_request(rq)) != NULL) { + while ((req = blk_peek_request(rq)) != NULL) { info = req->rq_disk->private_data; if (RING_FULL(&info->ring)) goto wait; - blkdev_dequeue_request(req); + blk_start_request(req); if (!blk_fs_request(req)) { __blk_end_request_all(req, -EIO); diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index edf137b6c37..3a4397edab7 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -463,10 +463,10 @@ struct request *ace_get_next_request(struct request_queue * q) { struct request *req; - while ((req = elv_next_request(q)) != NULL) { + while ((req = blk_peek_request(q)) != NULL) { if (blk_fs_request(req)) break; - blkdev_dequeue_request(req); + blk_start_request(req); __blk_end_request_all(req, -EIO); } return req; @@ -498,10 +498,8 @@ static void ace_fsm_dostate(struct ace_device *ace) __blk_end_request_all(ace->req, -EIO); ace->req = NULL; } - while ((req = elv_next_request(ace->queue)) != NULL) { - blkdev_dequeue_request(req); + while ((req = blk_fetch_request(ace->queue)) != NULL) __blk_end_request_all(req, -EIO); - } /* Drop back to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; @@ -649,7 +647,7 @@ static void ace_fsm_dostate(struct ace_device *ace) ace->fsm_state = ACE_FSM_STATE_IDLE; break; } - blkdev_dequeue_request(req); + blk_start_request(req); /* Okay, it's a data request, set it up for transfer */ dev_dbg(ace->dev, diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index c909c1a3f65..4575171e5be 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -71,10 +71,7 @@ static void do_z2_request(struct request_queue *q) { struct request *req; - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - + req = blk_fetch_request(q); while (req) { unsigned long start = blk_rq_pos(req) << 9; unsigned long len = blk_rq_cur_bytes(req); @@ -100,11 +97,8 @@ static void do_z2_request(struct request_queue *q) len -= size; } done: - if (!__blk_end_request_cur(req, err)) { - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - } + if (!__blk_end_request_cur(req, err)) + req = blk_fetch_request(q); } } diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 3cc02bfe828..1e366ad8f68 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -642,9 +642,7 @@ static void gdrom_request(struct request_queue *rq) { struct request *req; - while ((req = elv_next_request(rq)) != NULL) { - blkdev_dequeue_request(req); - + while ((req = blk_fetch_request(rq)) != NULL) { if (!blk_fs_request(req)) { printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); __blk_end_request_all(req, -EIO); diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index bbe9f086734..ca741c21e4a 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -297,9 +297,7 @@ static void do_viocd_request(struct request_queue *q) { struct request *req; - while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { - blkdev_dequeue_request(req); - + while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) { if (!blk_fs_request(req)) __blk_end_request_all(req, -EIO); else if (send_request(req) < 0) { diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 2874c3d703a..8a894fa37b5 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -269,7 +269,7 @@ void ide_retry_pc(ide_drive_t *drive) blk_requeue_request(failed_rq->q, failed_rq); drive->hwif->rq = NULL; if (ide_queue_sense_rq(drive, pc)) { - blkdev_dequeue_request(failed_rq); + 
blk_start_request(failed_rq); ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); } } diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index abda7337b3f..e4e3a0e3201 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -519,11 +519,8 @@ repeat: * we know that the queue isn't empty, but this can happen * if the q->prep_rq_fn() decides to kill a request */ - if (!rq) { - rq = elv_next_request(drive->queue); - if (rq) - blkdev_dequeue_request(rq); - } + if (!rq) + rq = blk_fetch_request(drive->queue); spin_unlock_irq(q->queue_lock); spin_lock_irq(&hwif->lock); @@ -536,7 +533,7 @@ repeat: /* * Sanity: don't accept a request that isn't a PM request * if we are currently power managed. This is very important as - * blk_stop_queue() doesn't prevent the elv_next_request() + * blk_stop_queue() doesn't prevent the blk_fetch_request() * above to return us whatever is in the queue. Since we call * ide_do_request() ourselves, we end up taking requests while * the queue is blocked... diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 58f5be8cd69..c0bebc6a2f2 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -704,13 +704,12 @@ try_again: return 0; } - dev_dbg(&card->dev, "elv_next\n"); - msb->block_req = elv_next_request(msb->queue); + dev_dbg(&card->dev, "blk_fetch\n"); + msb->block_req = blk_fetch_request(msb->queue); if (!msb->block_req) { dev_dbg(&card->dev, "issue end\n"); return -EAGAIN; } - blkdev_dequeue_request(msb->block_req); dev_dbg(&card->dev, "trying again\n"); chunk = 1; @@ -825,10 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q) return; if (msb->eject) { - while ((req = elv_next_request(q)) != NULL) { - blkdev_dequeue_request(req); + while ((req = blk_fetch_request(q)) != NULL) __blk_end_request_all(req, -ENODEV); - } return; } diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 8b5cbfc3ba9..6573ef4408f 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -877,7 +877,7 @@ static void i2o_block_request_fn(struct request_queue *q) struct request *req; while (!blk_queue_plugged(q)) { - req = elv_next_request(q); + req = blk_peek_request(q); if (!req) break; @@ -890,7 +890,7 @@ static void i2o_block_request_fn(struct request_queue *q) if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { if (!i2o_block_transfer(req)) { - blkdev_dequeue_request(req); + blk_start_request(req); continue; } else osm_info("transfer error\n"); @@ -917,7 +917,7 @@ static void i2o_block_request_fn(struct request_queue *q) break; } } else { - blkdev_dequeue_request(req); + blk_start_request(req); __blk_end_request_all(req, -EIO); } } diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 4b70f1e2834..49e582356c6 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -54,11 +54,8 @@ static int mmc_queue_thread(void *d) spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); - if (!blk_queue_plugged(q)) { - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - } + if (!blk_queue_plugged(q)) + req = blk_fetch_request(q); mq->req = req; spin_unlock_irq(q->queue_lock); @@ -94,10 +91,8 @@ static void mmc_request(struct request_queue *q) if (!mq) { printk(KERN_ERR "MMC: killing requests for dead queue\n"); - while ((req = elv_next_request(q)) != NULL) { - blkdev_dequeue_request(req); + while ((req = blk_fetch_request(q)) != NULL) __blk_end_request_all(req, -EIO); - } return; } 
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 3e10442615d..502622f628b 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -100,12 +100,7 @@ static int mtd_blktrans_thread(void *arg) struct mtd_blktrans_dev *dev; int res; - if (!req) { - req = elv_next_request(rq); - if (req) - blkdev_dequeue_request(req); - } - if (!req) { + if (!req && !(req = blk_fetch_request(rq))) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irq(rq->queue_lock); schedule(); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 7df03c7aea0..e64f62d5e0f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1656,17 +1656,13 @@ static void __dasd_process_request_queue(struct dasd_block *block) if (basedev->state < DASD_STATE_READY) return; /* Now we try to fetch requests from the request queue */ - while (!blk_queue_plugged(queue) && - elv_next_request(queue)) { - - req = elv_next_request(queue); - + while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { if (basedev->features & DASD_FEATURE_READONLY && rq_data_dir(req) == WRITE) { DBF_DEV_EVENT(DBF_ERR, basedev, "Rejecting write request %p", req); - blkdev_dequeue_request(req); + blk_start_request(req); __blk_end_request_all(req, -EIO); continue; } @@ -1695,7 +1691,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) "CCW creation failed (rc=%ld) " "on request %p", PTR_ERR(cqr), req); - blkdev_dequeue_request(req); + blk_start_request(req); __blk_end_request_all(req, -EIO); continue; } @@ -1705,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block) */ cqr->callback_data = (void *) req; cqr->status = DASD_CQR_FILLED; - blkdev_dequeue_request(req); + blk_start_request(req); list_add_tail(&cqr->blocklist, &block->ccw_queue); dasd_profile_start(block, cqr, req); } @@ -2029,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block) return; spin_lock_irq(&block->request_queue_lock); - while ((req = elv_next_request(block->request_queue))) { - blkdev_dequeue_request(req); + while ((req = blk_fetch_request(block->request_queue))) __blk_end_request_all(req, -EIO); - } spin_unlock_irq(&block->request_queue_lock); } diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 5d035e4939d..1e796767598 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -93,7 +93,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data) device->blk_data.block_position = -1; device->discipline->free_bread(ccw_req); if (!list_empty(&device->req_queue) || - elv_next_request(device->blk_data.request_queue)) + blk_peek_request(device->blk_data.request_queue)) tapeblock_trigger_requeue(device); } @@ -162,19 +162,16 @@ tapeblock_requeue(struct work_struct *work) { spin_lock_irq(&device->blk_data.request_queue_lock); while ( !blk_queue_plugged(queue) && - elv_next_request(queue) && + (req = blk_fetch_request(queue)) && nr_queued < TAPEBLOCK_MIN_REQUEUE ) { - req = elv_next_request(queue); if (rq_data_dir(req) == WRITE) { DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); - blkdev_dequeue_request(req); spin_unlock_irq(&device->blk_data.request_queue_lock); blk_end_request_all(req, -EIO); spin_lock_irq(&device->blk_data.request_queue_lock); continue; } - blkdev_dequeue_request(req); nr_queued++; spin_unlock_irq(&device->blk_data.request_queue_lock); rc = tapeblock_start_request(device, req); diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index 
f572a4a1d14..6d465168468 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -186,10 +186,7 @@ static void jsfd_do_request(struct request_queue *q) { struct request *req; - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - + req = blk_fetch_request(q); while (req) { struct jsfd_part *jdp = req->rq_disk->private_data; unsigned long offset = blk_rq_pos(req) << 9; @@ -212,11 +209,8 @@ static void jsfd_do_request(struct request_queue *q) jsfd_read(req->buffer, jdp->dbase + offset, len); err = 0; end: - if (!__blk_end_request_cur(req, err)) { - req = elv_next_request(q); - if (req) - blkdev_dequeue_request(req); - } + if (!__blk_end_request_cur(req, err)) + req = blk_fetch_request(q); } } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ee308f6f798..b12750f8216 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1207,7 +1207,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) break; case BLKPREP_DEFER: /* - * If we defer, the elv_next_request() returns NULL, but the + * If we defer, the blk_peek_request() returns NULL, but the * queue must be restarted, so we plug here if no returning * command will automatically do that. */ @@ -1385,7 +1385,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) struct scsi_target *starget = scsi_target(sdev); struct Scsi_Host *shost = sdev->host; - blkdev_dequeue_request(req); + blk_start_request(req); if (unlikely(cmd == NULL)) { printk(KERN_CRIT "impossible request in %s.\n", @@ -1477,7 +1477,7 @@ static void scsi_request_fn(struct request_queue *q) if (!sdev) { printk("scsi: killing requests for dead queue\n"); - while ((req = elv_next_request(q)) != NULL) + while ((req = blk_peek_request(q)) != NULL) scsi_kill_request(req, q); return; } @@ -1498,7 +1498,7 @@ static void scsi_request_fn(struct request_queue *q) * that the request is fully prepared even if we cannot * accept it. */ - req = elv_next_request(q); + req = blk_peek_request(q); if (!req || !scsi_dev_queue_ready(q, sdev)) break; @@ -1514,7 +1514,7 @@ static void scsi_request_fn(struct request_queue *q) * Remove the request from the request list. 
*/ if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) - blkdev_dequeue_request(req); + blk_start_request(req); sdev->device_busy++; spin_unlock(q->queue_lock); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 50988cbf7b2..d606452297c 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -163,12 +163,10 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost, int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); while (!blk_queue_plugged(q)) { - req = elv_next_request(q); + req = blk_fetch_request(q); if (!req) break; - blkdev_dequeue_request(req); - spin_unlock_irq(q->queue_lock); handler = to_sas_internal(shost->transportt)->f->smp_handler; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c7558034570..6e59d3b92ff 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -818,8 +818,6 @@ static inline void blk_run_address_space(struct address_space *mapping) blk_run_backing_dev(mapping->backing_dev_info, NULL); } -extern void blkdev_dequeue_request(struct request *req); - /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -852,6 +850,13 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) return blk_rq_cur_bytes(rq) >> 9; } +/* + * Request issue related functions. + */ +extern struct request *blk_peek_request(struct request_queue *q); +extern void blk_start_request(struct request *rq); +extern struct request *blk_fetch_request(struct request_queue *q); + /* * Request completion related functions. * diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 4e462878c9c..1cb3372e65d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *); extern void elv_merge_requests(struct request_queue *, struct request *, struct request *); extern void elv_merged_request(struct request_queue *, struct request *, int); -extern void elv_dequeue_request(struct request_queue *, struct request *); extern void elv_requeue_request(struct request_queue *, struct request *); extern int elv_queue_empty(struct request_queue *); -extern struct request *elv_next_request(struct request_queue *q); extern struct request *elv_former_request(struct request_queue *, struct request *); extern struct request *elv_latter_request(struct request_queue *, struct request *); extern int elv_register_queue(struct request_queue *q); -- cgit v1.2.3-70-g09d2 From 1822952ba2b9f22f79019d07ebbeca31dc14b718 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 11 May 2009 17:56:07 +0900 Subject: block: let blk_end_request_all handle bidi requests blk_end_request_all() and __blk_end_request_all() should finish all bytes, including the bidi part, by definition. That's what all bidi users need: bidi requests must be completed as a whole (partial completion is impossible).
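As a concrete sketch (hypothetical driver completion path, not from this patch), the whole-request helper now covers the bidi case too:

/* Illustrative only: blk_end_request_all() finishes rq completely and,
 * when blk_bidi_rq(rq) is true, accounts rq->next_rq's bytes as well,
 * so a bidi-aware driver no longer special-cases its completion. */
static void example_complete(struct request *rq, int error)
{
	blk_end_request_all(rq, error);
}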
Signed-off-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6e59d3b92ff..1069f4483c6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -910,8 +910,12 @@ static inline bool blk_end_request(struct request *rq, int error, static inline void blk_end_request_all(struct request *rq, int error) { bool pending; + unsigned int bidi_bytes = 0; - pending = blk_end_request(rq, error, blk_rq_bytes(rq)); + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); BUG_ON(pending); } @@ -962,8 +966,12 @@ static inline bool __blk_end_request(struct request *rq, int error, static inline void __blk_end_request_all(struct request *rq, int error) { bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); - pending = __blk_end_request(rq, error, blk_rq_bytes(rq)); + pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); BUG_ON(pending); } -- cgit v1.2.3-70-g09d2 From b1f744937f1be3e6d3009382a755679133cf782d Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 11 May 2009 17:56:09 +0900 Subject: block: move completion related functions back to blk-core.c Let's put the completion-related functions back in block/blk-core.c, where they used to live. We can also unexport blk_end_bidi_request() and __blk_end_bidi_request(), which nobody uses. Signed-off-by: FUJITA Tomonori Signed-off-by: Jens Axboe --- block/blk-core.c | 128 ++++++++++++++++++++++++++++++++++++++++++++++--- include/linux/blkdev.h | 128 ++++--------------------------------------------- 2 files changed, 130 insertions(+), 126 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index 93691d2ac5a..a2d97de1a12 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2026,8 +2026,8 @@ static void blk_finish_request(struct request *req, int error) * %false - we are done with this request * %true - still buffers pending for this request **/ -bool blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes) +static bool blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { struct request_queue *q = rq->q; unsigned long flags; @@ -2041,7 +2041,6 @@ bool blk_end_bidi_request(struct request *rq, int error, return false; } -EXPORT_SYMBOL_GPL(blk_end_bidi_request); /** * __blk_end_bidi_request - Complete a bidi request with queue lock held * @@ -2058,8 +2057,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request); * %false - we are done with this request * %true - still buffers pending for this request **/ -bool __blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, unsigned int bidi_bytes) +static bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) { if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) return true; @@ -2068,7 +2067,124 @@ bool __blk_end_bidi_request(struct request *rq, int error, return false; } -EXPORT_SYMBOL_GPL(__blk_end_bidi_request); + +/** + * blk_end_request - Helper function for drivers to complete the request.
+ * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Ends I/O on a number of bytes attached to @rq. + * If @rq has leftover, sets it up for the next range of segments. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +{ + return blk_end_bidi_request(rq, error, nr_bytes, 0); +} +EXPORT_SYMBOL_GPL(blk_end_request); + +/** + * blk_end_request_all - Helper function for drivers to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. + */ +void blk_end_request_all(struct request *rq, int error) +{ + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL_GPL(blk_end_request_all); + +/** + * blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool blk_end_request_cur(struct request *rq, int error) +{ + return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL_GPL(blk_end_request_cur); + +/** + * __blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Must be called with queue lock held unlike blk_end_request(). + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +{ + return __blk_end_bidi_request(rq, error, nr_bytes, 0); +} +EXPORT_SYMBOL_GPL(__blk_end_request); + +/** + * __blk_end_request_all - Helper function for drivers to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. Must be called with queue lock held. + */ +void __blk_end_request_all(struct request *rq, int error) +{ + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL_GPL(__blk_end_request_all); + +/** + * __blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. Must + * be called with queue lock held.
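+ *
+ * A minimal caller sketch (hypothetical driver code, for illustration;
+ * a %false return means the whole request has been completed):
+ *
+ *	spin_lock_irqsave(q->queue_lock, flags);
+ *	if (!__blk_end_request_cur(rq, error))
+ *		rq = NULL;
+ *	spin_unlock_irqrestore(q->queue_lock, flags);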
+ * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_cur(struct request *rq, int error) +{ + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL_GPL(__blk_end_request_cur); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1069f4483c6..f9d60a78c08 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -872,126 +872,14 @@ extern struct request *blk_fetch_request(struct request_queue *q); */ extern bool blk_update_request(struct request *rq, int error, unsigned int nr_bytes); -extern bool blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, - unsigned int bidi_bytes); -extern bool __blk_end_bidi_request(struct request *rq, int error, - unsigned int nr_bytes, - unsigned int bidi_bytes); - -/** - * blk_end_request - Helper function for drivers to complete the request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete - * - * Description: - * Ends I/O on a number of bytes attached to @rq. - * If @rq has leftover, sets it up for the next range of segments. - * - * Return: - * %false - we are done with this request - * %true - still buffers pending for this request - **/ -static inline bool blk_end_request(struct request *rq, int error, - unsigned int nr_bytes) -{ - return blk_end_bidi_request(rq, error, nr_bytes, 0); -} - -/** - * blk_end_request_all - Helper function for drivers to finish the request. - * @rq: the request to finish - * @error: %0 for success, < %0 for error - * - * Description: - * Completely finish @rq. - */ -static inline void blk_end_request_all(struct request *rq, int error) -{ - bool pending; - unsigned int bidi_bytes = 0; - - if (unlikely(blk_bidi_rq(rq))) - bidi_bytes = blk_rq_bytes(rq->next_rq); - - pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); - BUG_ON(pending); -} - -/** - * blk_end_request_cur - Helper function to finish the current request chunk. - * @rq: the request to finish the current chunk for - * @error: %0 for success, < %0 for error - * - * Description: - * Complete the current consecutively mapped chunk from @rq. - * - * Return: - * %false - we are done with this request - * %true - still buffers pending for this request - */ -static inline bool blk_end_request_cur(struct request *rq, int error) -{ - return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); -} - -/** - * __blk_end_request - Helper function for drivers to complete the request. - * @rq: the request being processed - * @error: %0 for success, < %0 for error - * @nr_bytes: number of bytes to complete - * - * Description: - * Must be called with queue lock held unlike blk_end_request(). - * - * Return: - * %false - we are done with this request - * %true - still buffers pending for this request - **/ -static inline bool __blk_end_request(struct request *rq, int error, - unsigned int nr_bytes) -{ - return __blk_end_bidi_request(rq, error, nr_bytes, 0); -} - -/** - * __blk_end_request_all - Helper function for drivers to finish the request. - * @rq: the request to finish - * @error: %0 for success, < %0 for error - * - * Description: - * Completely finish @rq. Must be called with queue lock held.
- */ -static inline void __blk_end_request_all(struct request *rq, int error) -{ - bool pending; - unsigned int bidi_bytes = 0; - - if (unlikely(blk_bidi_rq(rq))) - bidi_bytes = blk_rq_bytes(rq->next_rq); - - pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); - BUG_ON(pending); -} - -/** - * __blk_end_request_cur - Helper function to finish the current request chunk. - * @rq: the request to finish the current chunk for - * @error: %0 for success, < %0 for error - * - * Description: - * Complete the current consecutively mapped chunk from @rq. Must - * be called with queue lock held. - * - * Return: - * %false - we are done with this request - * %true - still buffers pending for this request - */ -static inline bool __blk_end_request_cur(struct request *rq, int error) -{ - return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); -} +extern bool blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void blk_end_request_all(struct request *rq, int error); +extern bool blk_end_request_cur(struct request *rq, int error); +extern bool __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern void __blk_end_request_all(struct request *rq, int error); +extern bool __blk_end_request_cur(struct request *rq, int error); extern void blk_complete_request(struct request *); extern void __blk_complete_request(struct request *); -- cgit v1.2.3-70-g09d2 From 79eb63e9e5875b84341a3a05f8e6ae9cdb4bb6f6 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 17 May 2009 18:57:15 +0300 Subject: block: Add blk_make_request(), takes bio, returns a request New block API: given a struct bio, allocate a new request. This is the parallel of generic_make_request() for users of BLOCK_PC commands. The passed bio may be a chained bio, and it is bounced if needed inside the call. This is part of the effort to un-export blk_rq_append_bio(). Signed-off-by: Boaz Harrosh CC: Jeff Garzik Signed-off-by: Jens Axboe --- block/blk-core.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/blkdev.h | 2 ++ 2 files changed, 47 insertions(+) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-core.c b/block/blk-core.c index e3f7e6a3a09..bec1d69952d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -890,6 +890,51 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) } EXPORT_SYMBOL(blk_get_request); +/** + * blk_make_request - given a bio, allocate a corresponding struct request. + * + * @bio: The bio describing the memory mappings that will be submitted for IO. + * It may be a chained bio properly constructed by the block/bio layer. + * + * blk_make_request is the parallel of generic_make_request for BLOCK_PC + * type commands, where the struct request needs to be further initialized by + * the caller. It is passed a &struct bio, which describes the memory info of + * the I/O transfer. + * + * The caller of blk_make_request must make sure that bi_io_vec + * is set to describe the memory buffers, and that bio_data_dir() returns + * the needed direction of the request. (All bios in the passed bio-chain + * must be set up accordingly.) + * + * If called under non-sleepable conditions, mapped bio buffers must not + * need bouncing, by calling the appropriate masked or flagged allocator, + * suitable for the target device. Otherwise the call to blk_queue_bounce will + * BUG.
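+ *
+ * A sketch of the intended call pattern (illustrative only; assumes a
+ * sleepable context and a fully built bio chain):
+ *
+ *	rq = blk_make_request(q, bio, GFP_KERNEL);
+ *	if (IS_ERR(rq))
+ *		return PTR_ERR(rq);
+ *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ *	(fill in rq->cmd[], rq->timeout etc., then execute the request)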
+ */ +struct request *blk_make_request(struct request_queue *q, struct bio *bio, + gfp_t gfp_mask) +{ + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + + if (unlikely(!rq)) + return ERR_PTR(-ENOMEM); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + int ret; + + blk_queue_bounce(q, &bounce_bio); + ret = blk_rq_append_bio(q, rq, bounce_bio); + if (unlikely(ret)) { + blk_put_request(rq); + return ERR_PTR(ret); + } + } + + return rq; +} +EXPORT_SYMBOL(blk_make_request); + /** * blk_requeue_request - put a request back on queue * @q: request queue where request should be inserted diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f9d60a78c08..88a83e112c9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -740,6 +740,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); +extern struct request *blk_make_request(struct request_queue *, struct bio *, + gfp_t); extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); -- cgit v1.2.3-70-g09d2 From a411f4bbb89f1f08687b344064d6775bce1e4658 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Sun, 17 May 2009 19:00:01 +0300 Subject: block: Un-export blk_rq_append_bio OSD was the last in-tree user of blk_rq_append_bio(). Now that it is fixed, blk_rq_append_bio() is un-exported and only used internally by the block layer. Signed-off-by: Boaz Harrosh Signed-off-by: Jens Axboe --- block/blk-map.c | 1 - block/blk.h | 2 ++ include/linux/blkdev.h | 6 ------ 3 files changed, 2 insertions(+), 7 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-map.c b/block/blk-map.c index caa05a66774..ef2492adca7 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -24,7 +24,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, } return 0; } -EXPORT_SYMBOL(blk_rq_append_bio); static int __blk_rq_unmap_user(struct bio *bio) { diff --git a/block/blk.h b/block/blk.h index 9e0042ca949..c863ec2281e 100644 --- a/block/blk.h +++ b/block/blk.h @@ -13,6 +13,8 @@ extern struct kobj_type blk_queue_ktype; void init_request_from_bio(struct request *req, struct bio *bio); void blk_rq_bio_prep(struct request_queue *q, struct request *rq, struct bio *bio); +int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio); void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 88a83e112c9..564445be7a6 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -757,12 +757,6 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, struct scsi_ioctl_command __user *); -/* - * Temporary export, until SCSI gets fixed up. - */ -extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, - struct bio *bio); - /* * A queue has just exited congestion.
Note this in the global counter of * congested queues, and wake up anyone who was waiting for requests to be -- cgit v1.2.3-70-g09d2 From 0a7ae2ff0d29bb3b327edff4c8ab67b3834fa811 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 20 May 2009 08:54:31 +0200 Subject: block: change the tag sync vs async restriction logic Make sync and async requests fully share the tag space, but disallow async requests from using the last two slots. Signed-off-by: Jens Axboe --- block/blk-barrier.c | 2 +- block/blk-core.c | 2 +- block/blk-tag.c | 15 +++++++++------ block/elevator.c | 8 ++++---- include/linux/blkdev.h | 7 ++++++- 5 files changed, 21 insertions(+), 13 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 0ab81a0a750..0d98054cdbd 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -218,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp) } else skip |= QUEUE_ORDSEQ_PREFLUSH; - if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) + if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q)) rq = NULL; else skip |= QUEUE_ORDSEQ_DRAIN; diff --git a/block/blk-core.c b/block/blk-core.c index 49065075d46..1c748403882 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1815,7 +1815,7 @@ void blk_dequeue_request(struct request *rq) * the driver side. */ if (blk_account_rq(rq)) - q->in_flight++; + q->in_flight[rq_is_sync(rq)]++; } /** diff --git a/block/blk-tag.c b/block/blk-tag.c index c260f7c30dd..2e5cfeb5933 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag); int blk_queue_start_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; - unsigned max_depth, offset; + unsigned max_depth; int tag; if (unlikely((rq->cmd_flags & REQ_QUEUED))) { @@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) * to starve sync IO on behalf of flooding async IO.
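 * For example, with bqt->max_depth == 8 an async request may only
 * claim one of tags 0..5 and is deferred once q->in_flight[0]
 * exceeds 6, while sync requests can still use all 8 tags.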
*/ max_depth = bqt->max_depth; - if (rq_is_sync(rq)) - offset = 0; - else - offset = max_depth >> 2; + if (!rq_is_sync(rq) && max_depth > 1) { + max_depth -= 2; + if (!max_depth) + max_depth = 1; + if (q->in_flight[0] > max_depth) + return 1; + } do { - tag = find_next_zero_bit(bqt->tag_map, max_depth, offset); + tag = find_first_zero_bit(bqt->tag_map, max_depth); if (tag >= max_depth) return 1; diff --git a/block/elevator.c b/block/elevator.c index 918920056e4..ebee948293e 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -546,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) * in_flight count again */ if (blk_account_rq(rq)) { - q->in_flight--; + q->in_flight[rq_is_sync(rq)]--; if (blk_sorted_rq(rq)) elv_deactivate_rq(q, rq); } @@ -685,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) if (unplug_it && blk_queue_plugged(q)) { int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] - - q->in_flight; + - queue_in_flight(q); if (nrq >= q->unplug_thresh) __generic_unplug_device(q); @@ -823,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq) * request is released from the driver, io must be done */ if (blk_account_rq(rq)) { - q->in_flight--; + q->in_flight[rq_is_sync(rq)]--; if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) e->ops->elevator_completed_req_fn(q, rq); } @@ -838,7 +838,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq) if (!list_empty(&q->queue_head)) next = list_entry_rq(q->queue_head.next); - if (!q->in_flight && + if (!queue_in_flight(q) && blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 564445be7a6..a967dd775db 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -404,7 +404,7 @@ struct request_queue struct list_head tag_busy_list; unsigned int nr_sorted; - unsigned int in_flight; + unsigned int in_flight[2]; unsigned int rq_timeout; struct timer_list timeout; @@ -511,6 +511,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag, __clear_bit(flag, &q->queue_flags); } +static inline int queue_in_flight(struct request_queue *q) +{ + return q->in_flight[0] + q->in_flight[1]; +} + static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) { WARN_ON_ONCE(!queue_is_locked(q)); -- cgit v1.2.3-70-g09d2 From e1defc4ff0cf57aca6c5e3ff99fa503f5943c1f1 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 22 May 2009 17:17:49 -0400 Subject: block: Do away with the notion of hardsect_size Until now we have had a 1:1 mapping between the storage device physical block size and the logical block size used when addressing the device. With SATA 4KB drives coming out, that will no longer be the case. The sector size will be 4KB but the logical block size will remain 512 bytes. Hence we need to distinguish between the physical block size and its logical counterpart. This patch renames hardsect_size to logical_block_size. Signed-off-by: Martin K.
Petersen Signed-off-by: Jens Axboe --- arch/powerpc/sysdev/axonram.c | 2 +- block/blk-integrity.c | 2 +- block/blk-settings.c | 21 ++++++++++----------- block/blk-sysfs.c | 12 +++++++++--- block/compat_ioctl.c | 2 +- block/ioctl.c | 2 +- drivers/block/cciss.c | 6 +++--- drivers/block/cpqarray.c | 4 ++-- drivers/block/hd.c | 2 +- drivers/block/mg_disk.c | 2 +- drivers/block/pktcdvd.c | 2 +- drivers/block/ps3disk.c | 2 +- drivers/block/ub.c | 6 +++--- drivers/block/virtio_blk.c | 2 +- drivers/block/xen-blkfront.c | 2 +- drivers/block/xsysace.c | 2 +- drivers/cdrom/gdrom.c | 2 +- drivers/cdrom/viocd.c | 4 ++-- drivers/char/raw.c | 2 +- drivers/ide/ide-cd.c | 12 ++++++------ drivers/md/bitmap.c | 4 ++-- drivers/md/dm-exception-store.c | 2 +- drivers/md/dm-log.c | 3 ++- drivers/md/dm-snap-persistent.c | 2 +- drivers/md/dm-table.c | 12 +++++++----- drivers/md/md.c | 2 +- drivers/memstick/core/mspro_block.c | 2 +- drivers/message/i2o/i2o_block.c | 5 +++-- drivers/mmc/card/block.c | 2 +- drivers/mtd/mtd_blkdevs.c | 2 +- drivers/s390/block/dasd.c | 2 +- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/block/xpram.c | 2 +- drivers/s390/char/tape_block.c | 2 +- drivers/scsi/sd.c | 2 +- drivers/scsi/sr.c | 2 +- fs/bio.c | 3 ++- fs/block_dev.c | 6 +++--- fs/buffer.c | 6 +++--- fs/direct-io.c | 2 +- fs/ext3/super.c | 4 ++-- fs/ext4/super.c | 2 +- fs/gfs2/ops_fstype.c | 4 ++-- fs/gfs2/rgrp.c | 2 +- fs/nilfs2/the_nilfs.c | 2 +- fs/ntfs/super.c | 6 +++--- fs/ocfs2/cluster/heartbeat.c | 2 +- fs/ocfs2/super.c | 2 +- fs/partitions/ibm.c | 2 +- fs/partitions/msdos.c | 4 ++-- fs/udf/super.c | 2 +- fs/xfs/linux-2.6/xfs_buf.c | 2 +- include/linux/blkdev.h | 14 +++++++------- include/linux/device-mapper.h | 2 +- 54 files changed, 108 insertions(+), 98 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 9e105cbc5e5..a4779912a5c 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id) set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); blk_queue_make_request(bank->disk->queue, axon_ram_make_request); - blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); + blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); add_disk(bank->disk); bank->irq_id = irq_of_parse_and_map(device->node, 0); diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 91fa8e06b6a..73e28d35568 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) kobject_uevent(&bi->kobj, KOBJ_ADD); bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; - bi->sector_size = disk->queue->hardsect_size; + bi->sector_size = queue_logical_block_size(disk->queue); disk->integrity = bi; } else bi = disk->integrity; diff --git a/block/blk-settings.c b/block/blk-settings.c index 57af728d94b..15c3164537b 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) q->backing_dev_info.state = 0; q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; blk_queue_max_sectors(q, SAFE_MAX_SECTORS); - blk_queue_hardsect_size(q, 512); + blk_queue_logical_block_size(q, 512); blk_queue_dma_alignment(q, 511); blk_queue_congestion_threshold(q); q->nr_batching = BLK_BATCH_REQ; @@ -288,21 +288,20 @@ void blk_queue_max_segment_size(struct 
request_queue *q, unsigned int max_size) EXPORT_SYMBOL(blk_queue_max_segment_size); /** - * blk_queue_hardsect_size - set hardware sector size for the queue + * blk_queue_logical_block_size - set logical block size for the queue * @q: the request queue for the device - * @size: the hardware sector size, in bytes + * @size: the logical block size, in bytes * * Description: - * This should typically be set to the lowest possible sector size - * that the hardware can operate on (possible without reverting to - * even internal read-modify-write operations). Usually the default - * of 512 covers most hardware. + * This should be set to the lowest possible block size that the + * storage device can address. The default of 512 covers most + * hardware. **/ -void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) +void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) { - q->hardsect_size = size; + q->logical_block_size = size; } -EXPORT_SYMBOL(blk_queue_hardsect_size); +EXPORT_SYMBOL(blk_queue_logical_block_size); /* * Returns the minimum that is _not_ zero, unless both are zero. @@ -324,7 +323,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); - t->hardsect_size = max(t->hardsect_size, b->hardsect_size); + t->logical_block_size = max(t->logical_block_size, b->logical_block_size); if (!t->queue_lock) WARN_ON_ONCE(1); else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3ff9bba3379..13d38b7e4d0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -100,9 +100,9 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) return queue_var_show(max_sectors_kb, (page)); } -static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page) +static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) { - return queue_var_show(q->hardsect_size, page); + return queue_var_show(queue_logical_block_size(q), page); } static ssize_t @@ -249,7 +249,12 @@ static struct queue_sysfs_entry queue_iosched_entry = { static struct queue_sysfs_entry queue_hw_sector_size_entry = { .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, - .show = queue_hw_sector_size_show, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_logical_block_size_entry = { + .attr = {.name = "logical_block_size", .mode = S_IRUGO }, + .show = queue_logical_block_size_show, }; static struct queue_sysfs_entry queue_nonrot_entry = { @@ -283,6 +288,7 @@ static struct attribute *default_attrs[] = { &queue_max_sectors_entry.attr, &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, + &queue_logical_block_size_entry.attr, &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index f87615dea46..9eaa1940273 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -763,7 +763,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKBSZGET_32: /* get the logical block size (cf. 
BLKSSZGET) */ return compat_put_int(arg, block_size(bdev)); case BLKSSZGET: /* get block device hardware sector size */ - return compat_put_int(arg, bdev_hardsect_size(bdev)); + return compat_put_int(arg, bdev_logical_block_size(bdev)); case BLKSECTGET: return compat_put_ushort(arg, bdev_get_queue(bdev)->max_sectors); diff --git a/block/ioctl.c b/block/ioctl.c index ad474d4bbcc..7aa97f65da8 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -311,7 +311,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ return put_int(arg, block_size(bdev)); case BLKSSZGET: /* get block device hardware sector size */ - return put_int(arg, bdev_hardsect_size(bdev)); + return put_int(arg, bdev_logical_block_size(bdev)); case BLKSECTGET: return put_ushort(arg, bdev_get_queue(bdev)->max_sectors); case BLKRASET: diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index e714e7cce6f..94474f5f8bc 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1389,8 +1389,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, disk->queue->queuedata = h; - blk_queue_hardsect_size(disk->queue, - h->drv[drv_index].block_size); + blk_queue_logical_block_size(disk->queue, + h->drv[drv_index].block_size); /* Make sure all queue data is written out before */ /* setting h->drv[drv_index].queue, as setting this */ @@ -2298,7 +2298,7 @@ static int cciss_revalidate(struct gendisk *disk) cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv); - blk_queue_hardsect_size(drv->queue, drv->block_size); + blk_queue_logical_block_size(drv->queue, drv->block_size); set_capacity(disk, drv->nr_blocks); kfree(inq_buff); diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index a02dcfc00f1..44fa2018f6b 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) disk->fops = &ida_fops; if (j && !drv->nr_blks) continue; - blk_queue_hardsect_size(hba[i]->queue, drv->blk_size); + blk_queue_logical_block_size(hba[i]->queue, drv->blk_size); set_capacity(disk, drv->nr_blks); disk->queue = hba[i]->queue; disk->private_data = drv; @@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host) drv_info_t *drv = &host->drv[i]; if (i && !drv->nr_blks) continue; - blk_queue_hardsect_size(host->queue, drv->blk_size); + blk_queue_logical_block_size(host->queue, drv->blk_size); set_capacity(disk, drv->nr_blks); disk->queue = host->queue; disk->private_data = drv; diff --git a/drivers/block/hd.c b/drivers/block/hd.c index 961de56d00a..f65b3f369eb 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -724,7 +724,7 @@ static int __init hd_init(void) blk_queue_max_sectors(hd_queue, 255); init_timer(&device_timer); device_timer.function = hd_times_out; - blk_queue_hardsect_size(hd_queue, 512); + blk_queue_logical_block_size(hd_queue, 512); if (!NR_HD) { /* diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index c0cd0a03f69..60de5a01e71 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -996,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev) goto probe_err_6; } blk_queue_max_sectors(host->breq, MG_MAX_SECTS); - blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE); + blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); init_timer(&host->timer); host->timer.function = mg_times_out; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 
dc7a8c352da..293f5858921 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2657,7 +2657,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd) struct request_queue *q = pd->disk->queue; blk_queue_make_request(q, pkt_make_request); - blk_queue_hardsect_size(q, CD_FRAMESIZE); + blk_queue_logical_block_size(q, CD_FRAMESIZE); blk_queue_max_sectors(q, PACKET_MAX_SECTORS); blk_queue_merge_bvec(q, pkt_merge_bvec); q->queuedata = pd; diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 338cee4cc0b..aaeeb544228 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -477,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) blk_queue_max_sectors(queue, dev->bounce_size >> 9); blk_queue_segment_boundary(queue, -1UL); blk_queue_dma_alignment(queue, dev->blk_size-1); - blk_queue_hardsect_size(queue, dev->blk_size); + blk_queue_logical_block_size(queue, dev->blk_size); blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, ps3disk_prepare_flush); diff --git a/drivers/block/ub.c b/drivers/block/ub.c index e67bbae9547..cc54473b8e7 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -722,7 +722,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, /* * build the command * - * The call to blk_queue_hardsect_size() guarantees that request + * The call to blk_queue_logical_block_size() guarantees that request * is aligned, but it is given in terms of 512 byte units, always. */ block = blk_rq_pos(rq) >> lun->capacity.bshift; @@ -1749,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk) ub_revalidate(lun->udev, lun); /* XXX Support sector size switching like in sr.c */ - blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); + blk_queue_logical_block_size(disk->queue, lun->capacity.bsize); set_capacity(disk, lun->capacity.nsec); // set_disk_ro(sdkp->disk, lun->readonly); @@ -2324,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ blk_queue_max_sectors(q, UB_MAX_SECTORS); - blk_queue_hardsect_size(q, lun->capacity.bsize); + blk_queue_logical_block_size(q, lun->capacity.bsize); lun->disk = disk; q->queuedata = lun; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 511d4ae2d17..c4845b16946 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -347,7 +347,7 @@ static int virtblk_probe(struct virtio_device *vdev) offsetof(struct virtio_blk_config, blk_size), &blk_size); if (!err) - blk_queue_hardsect_size(vblk->disk->queue, blk_size); + blk_queue_logical_block_size(vblk->disk->queue, blk_size); add_disk(vblk->disk); return 0; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 132120ae4bd..c1996829d5e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); /* Hard sector size and max sectors impersonate the equiv. hardware. */ - blk_queue_hardsect_size(rq, sector_size); + blk_queue_logical_block_size(rq, sector_size); blk_queue_max_sectors(rq, 512); /* Each segment in a request is up to an aligned page in size. 
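 * (with the 512-sector cap above, a single request is at most 256 KiB)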
*/ diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 3a4397edab7..f08491a3a81 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -984,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace) ace->queue = blk_init_queue(ace_request, &ace->lock); if (ace->queue == NULL) goto err_blk_initq; - blk_queue_hardsect_size(ace->queue, 512); + blk_queue_logical_block_size(ace->queue, 512); /* * Allocate and initialize GD structure diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 1e366ad8f68..b5621f27c4b 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -739,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void) static int __devinit probe_gdrom_setupqueue(void) { - blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR); + blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR); /* using DMA so memory will need to be contiguous */ blk_queue_max_hw_segments(gd.gdrom_rq, 1); /* set a large max size to get most from DMA */ diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index f177c2d4017..0fff646cc2f 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -469,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event) case viocdopen: if (event->xRc == 0) { di = &viocd_diskinfo[bevent->disk]; - blk_queue_hardsect_size(di->viocd_disk->queue, - bevent->block_size); + blk_queue_logical_block_size(di->viocd_disk->queue, + bevent->block_size); set_capacity(di->viocd_disk, bevent->media_size * bevent->block_size / 512); diff --git a/drivers/char/raw.c b/drivers/char/raw.c index 20d90e6a6e5..db32f0e4c7d 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp) err = bd_claim(bdev, raw_open); if (err) goto out1; - err = set_blocksize(bdev, bdev_hardsect_size(bdev)); + err = set_blocksize(bdev, bdev_logical_block_size(bdev)); if (err) goto out2; filp->f_flags |= O_DIRECT; diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 1799328decf..424140c6c40 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive, (sense->information[2] << 8) | (sense->information[3]); - if (drive->queue->hardsect_size == 2048) + if (queue_logical_block_size(drive->queue) == 2048) /* device sector size is 2K */ sector <<= 2; @@ -737,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) struct request_queue *q = drive->queue; int write = rq_data_dir(rq) == WRITE; unsigned short sectors_per_frame = - queue_hardsect_size(q) >> SECTOR_BITS; + queue_logical_block_size(q) >> SECTOR_BITS; ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " "secs_per_frame: %u", @@ -1021,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) /* save a private copy of the TOC capacity for error handling */ drive->probed_capacity = toc->capacity * sectors_per_frame; - blk_queue_hardsect_size(drive->queue, - sectors_per_frame << SECTOR_BITS); + blk_queue_logical_block_size(drive->queue, + sectors_per_frame << SECTOR_BITS); /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, @@ -1338,7 +1338,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive) /* standard prep_rq_fn that builds 10 byte cmds */ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) { - int hard_sect = queue_hardsect_size(q); + int 
hard_sect = queue_hardsect_size(q); + int
hard_sect = queue_logical_block_size(q); long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); @@ -1543,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive) nslots = ide_cdrom_probe_capabilities(drive); - blk_queue_hardsect_size(q, CD_FRAMESIZE); + blk_queue_logical_block_size(q, CD_FRAMESIZE); if (ide_cdrom_register(drive, nslots)) { printk(KERN_ERR PFX "%s: %s failed to register device with the" diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 47c68bc75a1..06b0ded1ce2 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, target = rdev->sb_start + offset + index * (PAGE_SIZE/512); if (sync_page_io(rdev->bdev, target, - roundup(size, bdev_hardsect_size(rdev->bdev)), + roundup(size, bdev_logical_block_size(rdev->bdev)), page, READ)) { page->index = index; attach_page_buffers(page, NULL); /* so that free_buffer will @@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) int size = PAGE_SIZE; if (page->index == bitmap->file_pages-1) size = roundup(bitmap->last_page_size, - bdev_hardsect_size(rdev->bdev)); + bdev_logical_block_size(rdev->bdev)); /* Just make sure we aren't corrupting data or * metadata */ diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index a2e26c24214..75d8081a904 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store, } /* Validate the chunk size against the device block size */ - if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) { + if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) { *error = "Chunk size is not a multiple of device blocksize"; return -EINVAL; } diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index be233bc4d91..6fa8ccf91c7 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, * Buffer holds both header and bitset. 
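 * e.g. a 4096-byte logical block size rounds the combined buffer up
 * to the next 4 KiB multiple.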
*/ buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + - bitset_size, ti->limits.hardsect_size); + bitset_size, + ti->limits.logical_block_size); if (buf_size > dev->bdev->bd_inode->i_size) { DMWARN("log device %s too small: need %llu bytes", diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index e75c6dd76a9..2662a41337e 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) */ if (!ps->store->chunk_size) { ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, - bdev_hardsect_size(ps->store->cow->bdev) >> 9); + bdev_logical_block_size(ps->store->cow->bdev) >> 9); ps->store->chunk_mask = ps->store->chunk_size - 1; ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; chunk_size_supplied = 0; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 429b50b975d..65e2d975985 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs, lhs->max_hw_segments = min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); - lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); + lhs->logical_block_size = max(lhs->logical_block_size, + rhs->logical_block_size); lhs->max_segment_size = min_not_zero(lhs->max_segment_size, rhs->max_segment_size); @@ -529,7 +530,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) rs->max_hw_segments = min_not_zero(rs->max_hw_segments, q->max_hw_segments); - rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); + rs->logical_block_size = max(rs->logical_block_size, + queue_logical_block_size(q)); rs->max_segment_size = min_not_zero(rs->max_segment_size, q->max_segment_size); @@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs) rs->max_phys_segments = MAX_PHYS_SEGMENTS; if (!rs->max_hw_segments) rs->max_hw_segments = MAX_HW_SEGMENTS; - if (!rs->hardsect_size) - rs->hardsect_size = 1 << SECTOR_SHIFT; + if (!rs->logical_block_size) + rs->logical_block_size = 1 << SECTOR_SHIFT; if (!rs->max_segment_size) rs->max_segment_size = MAX_SEGMENT_SIZE; if (!rs->seg_boundary_mask) @@ -914,7 +916,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) blk_queue_max_sectors(q, t->limits.max_sectors); q->max_phys_segments = t->limits.max_phys_segments; q->max_hw_segments = t->limits.max_hw_segments; - q->hardsect_size = t->limits.hardsect_size; + q->logical_block_size = t->limits.logical_block_size; q->max_segment_size = t->limits.max_segment_size; q->max_hw_sectors = t->limits.max_hw_sectors; q->seg_boundary_mask = t->limits.seg_boundary_mask; diff --git a/drivers/md/md.c b/drivers/md/md.c index fccc8343a25..4cbc19f5c30 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; - bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; if (rdev->sb_size & bmask) rdev->sb_size = (rdev->sb_size | bmask) + 1; diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index c0bebc6a2f2..7847bbc1440 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -1242,7 +1242,7 @@ static int 
mspro_block_init_disk(struct memstick_dev *card) sprintf(msb->disk->disk_name, "mspblk%d", disk_id); - blk_queue_hardsect_size(msb->queue, msb->page_size); + blk_queue_logical_block_size(msb->queue, msb->page_size); capacity = be16_to_cpu(sys_info->user_block_count); capacity *= be16_to_cpu(sys_info->block_size); diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 6573ef4408f..335d4c78a77 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -794,8 +794,9 @@ static int i2o_block_transfer(struct request *req) if (c->adaptec) { u8 cmd[10]; u32 scsi_flags; - u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; + u16 hwsec; + hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; memset(cmd, 0, 10); sgl_offset = SGL_OFFSET_12; @@ -1078,7 +1079,7 @@ static int i2o_block_probe(struct device *dev) */ if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { - blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); + blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); } else osm_warn("unable to get blocksize of %s\n", gd->disk_name); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c5df8654645..98ffc41eaf2 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -521,7 +521,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) sprintf(md->disk->disk_name, "mmcblk%d", devidx); - blk_queue_hardsect_size(md->queue.queue, 512); + blk_queue_logical_block_size(md->queue.queue, 512); if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 502622f628b..aaac3b6800b 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -378,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) } tr->blkcore_priv->rq->queuedata = tr; - blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); + blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); if (tr->discard) blk_queue_set_discard(tr->blkcore_priv->rq, blktrans_discard_request); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e64f62d5e0f..27a1be0cd4d 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1990,7 +1990,7 @@ static void dasd_setup_queue(struct dasd_block *block) { int max; - blk_queue_hardsect_size(block->request_queue, block->bp_block); + blk_queue_logical_block_size(block->request_queue, block->bp_block); max = block->base->discipline->max_blocks << block->s2b_shift; blk_queue_max_sectors(block->request_queue, max); blk_queue_max_phys_segments(block->request_queue, -1L); diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index cfdcf1aed33..a4c7ffcd998 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->private_data = dev_info; dev_info->gd->driverfs_dev = &dev_info->dev; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); - blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); + blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 76814f3e898..0ae0c83ef87 100644 --- a/drivers/s390/block/xpram.c +++ 
b/drivers/s390/block/xpram.c @@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void) goto out; } blk_queue_make_request(xpram_queues[i], xpram_make_request); - blk_queue_hardsect_size(xpram_queues[i], 4096); + blk_queue_logical_block_size(xpram_queues[i], 4096); } /* diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 1e796767598..47ff695255e 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -222,7 +222,7 @@ tapeblock_setup_device(struct tape_device * device) if (rc) goto cleanup_queue; - blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); + blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); blk_queue_max_phys_segments(blkdat->request_queue, -1L); blk_queue_max_hw_segments(blkdat->request_queue, -1L); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 40d2860f235..bcf3bd40bbd 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1510,7 +1510,7 @@ got_data: */ sector_size = 512; } - blk_queue_hardsect_size(sdp->request_queue, sector_size); + blk_queue_logical_block_size(sdp->request_queue, sector_size); { char cap_str_2[10], cap_str_10[10]; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index fddba53c7fe..cd350dfc121 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -727,7 +727,7 @@ static void get_sectorsize(struct scsi_cd *cd) } queue = cd->device->request_queue; - blk_queue_hardsect_size(queue, sector_size); + blk_queue_logical_block_size(queue, sector_size); return; } diff --git a/fs/bio.c b/fs/bio.c index 81dc93e7253..4445c382173 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1490,11 +1490,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) sector_t bio_sector_offset(struct bio *bio, unsigned short index, unsigned int offset) { - unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue); + unsigned int sector_sz; struct bio_vec *bv; sector_t sectors; int i; + sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); sectors = 0; if (index >= bio->bi_idx) diff --git a/fs/block_dev.c b/fs/block_dev.c index a85fe310fc6..a29b4dcc1bc 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -76,7 +76,7 @@ int set_blocksize(struct block_device *bdev, int size) return -EINVAL; /* Size cannot be smaller than the size supported by the device */ - if (size < bdev_hardsect_size(bdev)) + if (size < bdev_logical_block_size(bdev)) return -EINVAL; /* Don't change the size if it is same as current */ @@ -106,7 +106,7 @@ EXPORT_SYMBOL(sb_set_blocksize); int sb_min_blocksize(struct super_block *sb, int size) { - int minsize = bdev_hardsect_size(sb->s_bdev); + int minsize = bdev_logical_block_size(sb->s_bdev); if (size < minsize) size = minsize; return sb_set_blocksize(sb, size); @@ -1117,7 +1117,7 @@ EXPORT_SYMBOL(check_disk_change); void bd_set_size(struct block_device *bdev, loff_t size) { - unsigned bsize = bdev_hardsect_size(bdev); + unsigned bsize = bdev_logical_block_size(bdev); bdev->bd_inode->i_size = size; while (bsize < PAGE_CACHE_SIZE) { diff --git a/fs/buffer.c b/fs/buffer.c index aed297739eb..36e2bbc60ec 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1085,12 +1085,12 @@ static struct buffer_head * __getblk_slow(struct block_device *bdev, sector_t block, int size) { /* Size must be multiple of hard sectorsize */ - if (unlikely(size & (bdev_hardsect_size(bdev)-1) || + if (unlikely(size & (bdev_logical_block_size(bdev)-1) || (size < 512 || size > PAGE_SIZE))) { 
printk(KERN_ERR "getblk(): invalid block size %d requested\n", size); - printk(KERN_ERR "hardsect size: %d\n", - bdev_hardsect_size(bdev)); + printk(KERN_ERR "logical block size: %d\n", + bdev_logical_block_size(bdev)); dump_stack(); return NULL; diff --git a/fs/direct-io.c b/fs/direct-io.c index 05763bbc205..8b10b87dc01 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1127,7 +1127,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, rw = WRITE_ODIRECT; if (bdev) - bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); + bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); if (offset & blocksize_mask) { if (bdev) diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 599dbfe504c..acbb94fdf90 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1696,7 +1696,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) goto failed_mount; } - hblock = bdev_hardsect_size(sb->s_bdev); + hblock = bdev_logical_block_size(sb->s_bdev); if (sb->s_blocksize != blocksize) { /* * Make sure the blocksize for the filesystem is larger @@ -2119,7 +2119,7 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, } blocksize = sb->s_blocksize; - hblock = bdev_hardsect_size(bdev); + hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { printk(KERN_ERR "EXT3-fs: blocksize too small for journal device.\n"); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2958f4e6f22..a30549f7a30 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2962,7 +2962,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, } blocksize = sb->s_blocksize; - hblock = bdev_hardsect_size(bdev); + hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { printk(KERN_ERR "EXT4-fs: blocksize too small for journal device.\n"); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 1ff9473ea75..a3b2ac989fc 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -526,11 +526,11 @@ static int init_sb(struct gfs2_sbd *sdp, int silent) } /* Set up the buffer cache and SB for real */ - if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) { + if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) { ret = -EINVAL; fs_err(sdp, "FS block size (%u) is too small for device " "block size (%u)\n", - sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev)); + sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev)); goto out; } if (sdp->sd_sb.sb_bsize > PAGE_SIZE) { diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 565038243fa..a971d24e10c 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -845,7 +845,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, struct super_block *sb = sdp->sd_vfs; struct block_device *bdev = sb->s_bdev; const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / - bdev_hardsect_size(sb->s_bdev); + bdev_logical_block_size(sb->s_bdev); u64 blk; sector_t start = 0; sector_t nr_sects = 0; diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 7f65b3be4aa..a91f15b8673 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -515,7 +515,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data) blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); if (sb->s_blocksize != blocksize) { - int hw_blocksize = bdev_hardsect_size(sb->s_bdev); + int hw_blocksize = bdev_logical_block_size(sb->s_bdev); if (blocksize < hw_blocksize) { printk(KERN_ERR diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index f76951dcd4a..6aa7c471353 100644 --- a/fs/ntfs/super.c 
+++ b/fs/ntfs/super.c @@ -25,7 +25,7 @@ #include #include #include -#include /* For bdev_hardsect_size(). */ +#include /* For bdev_logical_block_size(). */ #include #include #include @@ -2785,13 +2785,13 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) goto err_out_now; /* We support sector sizes up to the PAGE_CACHE_SIZE. */ - if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) { + if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { if (!silent) ntfs_error(sb, "Device has unsupported sector size " "(%i). The maximum supported sector " "size on this architecture is %lu " "bytes.", - bdev_hardsect_size(sb->s_bdev), + bdev_logical_block_size(sb->s_bdev), PAGE_CACHE_SIZE); goto err_out_now; } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 4f85eceab37..09cc25d0461 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1371,7 +1371,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, bdevname(reg->hr_bdev, reg->hr_dev_name); - sectsize = bdev_hardsect_size(reg->hr_bdev); + sectsize = bdev_logical_block_size(reg->hr_bdev); if (sectsize != reg->hr_block_bytes) { mlog(ML_ERROR, "blocksize %u incorrect for device, expected %d", diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 79ff8d9d37e..5c6163f5503 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -713,7 +713,7 @@ static int ocfs2_sb_probe(struct super_block *sb, *bh = NULL; /* may be > 512 */ - *sector_size = bdev_hardsect_size(sb->s_bdev); + *sector_size = bdev_logical_block_size(sb->s_bdev); if (*sector_size > OCFS2_MAX_BLOCKSIZE) { mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", *sector_size, OCFS2_MAX_BLOCKSIZE); diff --git a/fs/partitions/ibm.c b/fs/partitions/ibm.c index 46297683cd3..fc71aab0846 100644 --- a/fs/partitions/ibm.c +++ b/fs/partitions/ibm.c @@ -76,7 +76,7 @@ ibm_partition(struct parsed_partitions *state, struct block_device *bdev) Sector sect; res = 0; - blocksize = bdev_hardsect_size(bdev); + blocksize = bdev_logical_block_size(bdev); if (blocksize <= 0) goto out_exit; i_size = i_size_read(bdev->bd_inode); diff --git a/fs/partitions/msdos.c b/fs/partitions/msdos.c index 796511886f2..0028d2ef066 100644 --- a/fs/partitions/msdos.c +++ b/fs/partitions/msdos.c @@ -110,7 +110,7 @@ parse_extended(struct parsed_partitions *state, struct block_device *bdev, Sector sect; unsigned char *data; u32 this_sector, this_size; - int sector_size = bdev_hardsect_size(bdev) / 512; + int sector_size = bdev_logical_block_size(bdev) / 512; int loopct = 0; /* number of links followed without finding a data partition */ int i; @@ -415,7 +415,7 @@ static struct { int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) { - int sector_size = bdev_hardsect_size(bdev) / 512; + int sector_size = bdev_logical_block_size(bdev) / 512; Sector sect; unsigned char *data; struct partition *p; diff --git a/fs/udf/super.c b/fs/udf/super.c index 72348cc855a..0ba44107d8f 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1915,7 +1915,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { ret = udf_load_vrs(sb, &uopt, silent, &fileset); } else { - uopt.blocksize = bdev_hardsect_size(sb->s_bdev); + uopt.blocksize = bdev_logical_block_size(sb->s_bdev); ret = udf_load_vrs(sb, &uopt, silent, &fileset); if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { if (!silent) diff --git a/fs/xfs/linux-2.6/xfs_buf.c 
b/fs/xfs/linux-2.6/xfs_buf.c index e28800a9f2b..1418b916fc2 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1501,7 +1501,7 @@ xfs_setsize_buftarg_early( struct block_device *bdev) { return xfs_setsize_buftarg_flags(btp, - PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0); + PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0); } int diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 56ce53fce72..872b78b7a10 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -391,7 +391,7 @@ struct request_queue unsigned int max_hw_sectors; unsigned short max_phys_segments; unsigned short max_hw_segments; - unsigned short hardsect_size; + unsigned short logical_block_size; unsigned int max_segment_size; unsigned long seg_boundary_mask; @@ -901,7 +901,7 @@ extern void blk_queue_max_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); -extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); +extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); @@ -988,19 +988,19 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) -static inline int queue_hardsect_size(struct request_queue *q) +static inline unsigned short queue_logical_block_size(struct request_queue *q) { int retval = 512; - if (q && q->hardsect_size) - retval = q->hardsect_size; + if (q && q->logical_block_size) + retval = q->logical_block_size; return retval; } -static inline int bdev_hardsect_size(struct block_device *bdev) +static inline unsigned short bdev_logical_block_size(struct block_device *bdev) { - return queue_hardsect_size(bdev_get_queue(bdev)); + return queue_logical_block_size(bdev_get_queue(bdev)); } static inline int queue_dma_alignment(struct request_queue *q) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ded2d7c4266..49c2362977f 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -149,7 +149,7 @@ struct io_restrictions { unsigned max_hw_sectors; unsigned max_sectors; unsigned max_segment_size; - unsigned short hardsect_size; + unsigned short logical_block_size; unsigned short max_hw_segments; unsigned short max_phys_segments; unsigned char no_cluster; /* inverted so that 0 is default */ -- cgit v1.2.3-70-g09d2 From ae03bf639a5027d27270123f5f6e3ee6a412781d Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 22 May 2009 17:17:50 -0400 Subject: block: Use accessor functions for queue limits Convert all external users of queue limits to using wrapper functions instead of poking the request queue variables directly. Signed-off-by: Martin K. 
Petersen Signed-off-by: Jens Axboe --- block/blk-barrier.c | 8 ++++---- block/blk-core.c | 16 ++++++++-------- block/blk-map.c | 4 ++-- block/blk-merge.c | 27 ++++++++++++++------------- block/blk-settings.c | 15 ++++++++++++--- block/blk-sysfs.c | 8 ++++---- block/compat_ioctl.c | 2 +- block/ioctl.c | 10 +++++----- block/scsi_ioctl.c | 8 ++++---- drivers/block/pktcdvd.c | 6 ++++-- drivers/cdrom/cdrom.c | 4 ++-- drivers/md/dm-table.c | 28 ++++++++++++++-------------- drivers/md/linear.c | 2 +- drivers/md/multipath.c | 4 ++-- drivers/md/raid0.c | 2 +- drivers/md/raid1.c | 4 ++-- drivers/md/raid10.c | 8 ++++---- drivers/md/raid5.c | 4 ++-- drivers/scsi/sg.c | 15 ++++++++------- drivers/scsi/st.c | 4 ++-- drivers/usb/storage/scsiglue.c | 4 ++-- fs/bio.c | 19 ++++++++++--------- include/linux/bio.h | 2 +- include/linux/blkdev.h | 36 ++++++++++++++++++++++++++++++++++++ mm/bounce.c | 4 ++-- 25 files changed, 147 insertions(+), 97 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 0d98054cdbd..30022b4e2f6 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev, bio->bi_sector = sector; - if (nr_sects > q->max_hw_sectors) { - bio->bi_size = q->max_hw_sectors << 9; - nr_sects -= q->max_hw_sectors; - sector += q->max_hw_sectors; + if (nr_sects > queue_max_hw_sectors(q)) { + bio->bi_size = queue_max_hw_sectors(q) << 9; + nr_sects -= queue_max_hw_sectors(q); + sector += queue_max_hw_sectors(q); } else { bio->bi_size = nr_sects << 9; nr_sects = 0; diff --git a/block/blk-core.c b/block/blk-core.c index 59c4af52311..7a4c40184a6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; } - if (unlikely(nr_sectors > q->max_hw_sectors)) { + if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { printk(KERN_ERR "bio too big device %s (%u > %u)\n", - bdevname(bio->bi_bdev, b), - bio_sectors(bio), - q->max_hw_sectors); + bdevname(bio->bi_bdev, b), + bio_sectors(bio), + queue_max_hw_sectors(q)); goto end_io; } @@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio); */ int blk_rq_check_limits(struct request_queue *q, struct request *rq) { - if (blk_rq_sectors(rq) > q->max_sectors || - blk_rq_bytes(rq) > q->max_hw_sectors << 9) { + if (blk_rq_sectors(rq) > queue_max_sectors(q) || + blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { printk(KERN_ERR "%s: over max size limit.\n", __func__); return -EIO; } @@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq) * limitation. 
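 * (e.g. a request that counted as a single segment on the submitter's
 * queue may count as two segments here if this queue has a smaller
 * seg_boundary_mask)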
*/ blk_recalc_rq_segments(rq); - if (rq->nr_phys_segments > q->max_phys_segments || - rq->nr_phys_segments > q->max_hw_segments) { + if (rq->nr_phys_segments > queue_max_phys_segments(q) || + rq->nr_phys_segments > queue_max_hw_segments(q)) { printk(KERN_ERR "%s: over max segments limit.\n", __func__); return -EIO; } diff --git a/block/blk-map.c b/block/blk-map.c index ef2492adca7..9083cf0180c 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, struct bio *bio = NULL; int ret; - if (len > (q->max_hw_sectors << 9)) + if (len > (queue_max_hw_sectors(q) << 9)) return -EINVAL; if (!len) return -EINVAL; @@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, struct bio *bio; int ret; - if (len > (q->max_hw_sectors << 9)) + if (len > (queue_max_hw_sectors(q) << 9)) return -EINVAL; if (!len || !kbuf) return -EINVAL; diff --git a/block/blk-merge.c b/block/blk-merge.c index 4974dd5767e..39ce64432ba 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, * never considered part of another segment, since that * might change with the bounce page. */ - high = page_to_pfn(bv->bv_page) > q->bounce_pfn; + high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); if (high || highprv) goto new_segment; if (cluster) { - if (seg_size + bv->bv_len > q->max_segment_size) + if (seg_size + bv->bv_len + > queue_max_segment_size(q)) goto new_segment; if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) goto new_segment; @@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, return 0; if (bio->bi_seg_back_size + nxt->bi_seg_front_size > - q->max_segment_size) + queue_max_segment_size(q)) return 0; if (!bio_has_data(bio)) @@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, int nbytes = bvec->bv_len; if (bvprv && cluster) { - if (sg->length + nbytes > q->max_segment_size) + if (sg->length + nbytes > queue_max_segment_size(q)) goto new_segment; if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) @@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q, { int nr_phys_segs = bio_phys_segments(q, bio); - if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments - || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { + if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) || + req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) { req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; @@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, unsigned short max_sectors; if (unlikely(blk_pc_request(req))) - max_sectors = q->max_hw_sectors; + max_sectors = queue_max_hw_sectors(q); else - max_sectors = q->max_sectors; + max_sectors = queue_max_sectors(q); if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { req->cmd_flags |= REQ_NOMERGE; @@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, unsigned short max_sectors; if (unlikely(blk_pc_request(req))) - max_sectors = q->max_hw_sectors; + max_sectors = queue_max_hw_sectors(q); else - max_sectors = q->max_sectors; + max_sectors = queue_max_sectors(q); if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { @@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, /* * Will it become too large? 
*/ - if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors) + if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q)) return 0; total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; @@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, total_phys_segments--; } - if (total_phys_segments > q->max_phys_segments) + if (total_phys_segments > queue_max_phys_segments(q)) return 0; - if (total_phys_segments > q->max_hw_segments) + if (total_phys_segments > queue_max_hw_segments(q)) return 0; /* Merge is OK... */ diff --git a/block/blk-settings.c b/block/blk-settings.c index 15c3164537b..0b32f984eed 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) } EXPORT_SYMBOL(blk_queue_max_sectors); +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) +{ + if (BLK_DEF_MAX_SECTORS > max_sectors) + q->max_hw_sectors = BLK_DEF_MAX_SECTORS; + else + q->max_hw_sectors = max_sectors; +} +EXPORT_SYMBOL(blk_queue_max_hw_sectors); + /** * blk_queue_max_phys_segments - set max phys segments for a request for this queue * @q: the request queue for the device @@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q, dma_drain_needed_fn *dma_drain_needed, void *buf, unsigned int size) { - if (q->max_hw_segments < 2 || q->max_phys_segments < 2) + if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2) return -EINVAL; /* make room for appending the drain */ - --q->max_hw_segments; - --q->max_phys_segments; + blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1); + blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1); q->dma_drain_needed = dma_drain_needed; q->dma_drain_buffer = buf; q->dma_drain_size = size; diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 13d38b7e4d0..142a4acddd4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) { - int max_sectors_kb = q->max_sectors >> 1; + int max_sectors_kb = queue_max_sectors(q) >> 1; return queue_var_show(max_sectors_kb, (page)); } @@ -109,7 +109,7 @@ static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { unsigned long max_sectors_kb, - max_hw_sectors_kb = q->max_hw_sectors >> 1, + max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, page_kb = 1 << (PAGE_CACHE_SHIFT - 10); ssize_t ret = queue_var_store(&max_sectors_kb, page, count); @@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) return -EINVAL; spin_lock_irq(q->queue_lock); - q->max_sectors = max_sectors_kb << 1; + blk_queue_max_sectors(q, max_sectors_kb << 1); spin_unlock_irq(q->queue_lock); return ret; @@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) { - int max_hw_sectors_kb = q->max_hw_sectors >> 1; + int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; return queue_var_show(max_hw_sectors_kb, (page)); } diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index 9eaa1940273..df18a156d01 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) return 
compat_put_int(arg, bdev_logical_block_size(bdev)); case BLKSECTGET: return compat_put_ushort(arg, - bdev_get_queue(bdev)->max_sectors); + queue_max_sectors(bdev_get_queue(bdev))); case BLKRASET: /* compatible, but no compat_ptr (!) */ case BLKFRASET: if (!capable(CAP_SYS_ADMIN)) diff --git a/block/ioctl.c b/block/ioctl.c index 7aa97f65da8..500e4c73cc5 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, bio->bi_private = &wait; bio->bi_sector = start; - if (len > q->max_hw_sectors) { - bio->bi_size = q->max_hw_sectors << 9; - len -= q->max_hw_sectors; - start += q->max_hw_sectors; + if (len > queue_max_hw_sectors(q)) { + bio->bi_size = queue_max_hw_sectors(q) << 9; + len -= queue_max_hw_sectors(q); + start += queue_max_hw_sectors(q); } else { bio->bi_size = len << 9; len = 0; @@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKSSZGET: /* get block device hardware sector size */ return put_int(arg, bdev_logical_block_size(bdev)); case BLKSECTGET: - return put_ushort(arg, bdev_get_queue(bdev)->max_sectors); + return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); case BLKRASET: case BLKFRASET: if(!capable(CAP_SYS_ADMIN)) diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index a9670dd4b5d..5f8e798ede4 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p) static int sg_get_reserved_size(struct request_queue *q, int __user *p) { - unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); + unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9); return put_user(val, p); } @@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p) if (size < 0) return -EINVAL; - if (size > (q->max_sectors << 9)) - size = q->max_sectors << 9; + if (size > (queue_max_sectors(q) << 9)) + size = queue_max_sectors(q) << 9; q->sg_reserved_size = size; return 0; @@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, if (hdr->cmd_len > BLK_MAX_CDB) return -EINVAL; - if (hdr->dxfer_len > (q->max_hw_sectors << 9)) + if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) return -EIO; if (hdr->dxfer_len) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 293f5858921..d57f1175948 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) */ static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) { - if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { + if ((pd->settings.size << 9) / CD_FRAMESIZE + <= queue_max_phys_segments(q)) { /* * The cdrom device can handle one segment/frame */ clear_bit(PACKET_MERGE_SEGS, &pd->flags); return 0; - } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) { + } else if ((pd->settings.size << 9) / PAGE_SIZE + <= queue_max_phys_segments(q)) { /* * We can handle this case at the expense of some extra memory * copies during write operations diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index cceace61ef2..71d1b9bab70 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, nr = nframes; if (cdi->cdda_method == CDDA_BPC_SINGLE) nr = 1; - if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9)) - nr 
= (q->max_sectors << 9) / CD_FRAMESIZE_RAW; + if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9)) + nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW; len = nr * CD_FRAMESIZE_RAW; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 65e2d975985..e9a73bb242b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) * combine_restrictions_low() */ rs->max_sectors = - min_not_zero(rs->max_sectors, q->max_sectors); + min_not_zero(rs->max_sectors, queue_max_sectors(q)); /* * Check if merge fn is supported. @@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) rs->max_phys_segments = min_not_zero(rs->max_phys_segments, - q->max_phys_segments); + queue_max_phys_segments(q)); rs->max_hw_segments = - min_not_zero(rs->max_hw_segments, q->max_hw_segments); + min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q)); rs->logical_block_size = max(rs->logical_block_size, queue_logical_block_size(q)); rs->max_segment_size = - min_not_zero(rs->max_segment_size, q->max_segment_size); + min_not_zero(rs->max_segment_size, queue_max_segment_size(q)); rs->max_hw_sectors = - min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); + min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q)); rs->seg_boundary_mask = min_not_zero(rs->seg_boundary_mask, - q->seg_boundary_mask); + queue_segment_boundary(q)); - rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); + rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q)); rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); } @@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) * restrictions. */ blk_queue_max_sectors(q, t->limits.max_sectors); - q->max_phys_segments = t->limits.max_phys_segments; - q->max_hw_segments = t->limits.max_hw_segments; - q->logical_block_size = t->limits.logical_block_size; - q->max_segment_size = t->limits.max_segment_size; - q->max_hw_sectors = t->limits.max_hw_sectors; - q->seg_boundary_mask = t->limits.seg_boundary_mask; - q->bounce_pfn = t->limits.bounce_pfn; + blk_queue_max_phys_segments(q, t->limits.max_phys_segments); + blk_queue_max_hw_segments(q, t->limits.max_hw_segments); + blk_queue_logical_block_size(q, t->limits.logical_block_size); + blk_queue_max_segment_size(q, t->limits.max_segment_size); + blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors); + blk_queue_segment_boundary(q, t->limits.seg_boundary_mask); + blk_queue_bounce_limit(q, t->limits.bounce_pfn); if (t->limits.no_cluster) queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 7a36e38393a..64f1f3e046e 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) * a one page request is never in violation. */ if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); disk->num_sectors = rdev->sectors; diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 41ced0cbe82..4ee31aa13c4 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) * merge_bvec_fn will be involved in multipath.) 
*/ if (q->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) + queue_max_sectors(q) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); conf->working_disks++; @@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev) * violating it, not that we ever expect a device with * a merge_bvec_fn to be involved in multipath */ if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); if (!test_bit(Faulty, &rdev->flags)) diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c08d7559be5..925507e7d67 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev) */ if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); if (!smallest || (rdev1->sectors < smallest->sectors)) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 36df9109cde..e23758b4a34 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) * a one page request is never in violation. */ if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); p->head_position = 0; @@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev) * a one page request is never in violation. */ if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); disk->head_position = 0; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 499620afb44..750550c1166 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) * a one page request is never in violation. */ if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) - mddev->queue->max_sectors = (PAGE_SIZE>>9); + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); p->head_position = 0; rdev->raid_disk = mirror; @@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev) * a one page request is never in violation. 
*/ if (rdev->bdev->bd_disk->queue->merge_bvec_fn && - mddev->queue->max_sectors > (PAGE_SIZE>>9)) - mddev->queue->max_sectors = (PAGE_SIZE>>9); + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); disk->head_position = 0; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4616bc3a6e7..7970dc8c522 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi) { struct request_queue *q = bdev_get_queue(bi->bi_bdev); - if ((bi->bi_size>>9) > q->max_sectors) + if ((bi->bi_size>>9) > queue_max_sectors(q)) return 0; blk_recount_segments(q, bi); - if (bi->bi_phys_segments > q->max_phys_segments) + if (bi->bi_phys_segments > queue_max_phys_segments(q)) return 0; if (q->merge_bvec_fn) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0fc2c0ae769..9bd407fa98e 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp) if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ sdp->sgdebug = 0; q = sdp->device->request_queue; - sdp->sg_tablesize = min(q->max_hw_segments, - q->max_phys_segments); + sdp->sg_tablesize = min(queue_max_hw_segments(q), + queue_max_phys_segments(q)); } if ((sfp = sg_add_sfp(sdp, dev))) filp->private_data = sfp; @@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp, if (val < 0) return -EINVAL; val = min_t(int, val, - sdp->device->request_queue->max_sectors * 512); + queue_max_sectors(sdp->device->request_queue) * 512); if (val != sfp->reserve.bufflen) { if (sg_res_in_use(sfp) || sfp->mmap_called) return -EBUSY; @@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp, return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, - sdp->device->request_queue->max_sectors * 512); + queue_max_sectors(sdp->device->request_queue) * 512); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); @@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp, return -ENODEV; return scsi_ioctl(sdp->device, cmd_in, p); case BLKSECTGET: - return put_user(sdp->device->request_queue->max_sectors * 512, + return put_user(queue_max_sectors(sdp->device->request_queue) * 512, ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, @@ -1377,7 +1377,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) sdp->device = scsidp; INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->o_excl_wait); - sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); + sdp->sg_tablesize = min(queue_max_hw_segments(q), + queue_max_phys_segments(q)); sdp->index = k; kref_init(&sdp->d_ref); @@ -2055,7 +2056,7 @@ sg_add_sfp(Sg_device * sdp, int dev) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, - sdp->device->request_queue->max_sectors * 512); + queue_max_sectors(sdp->device->request_queue) * 512); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 8681b708344..89bd438e1fe 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev) return -ENODEV; } - i = min(SDp->request_queue->max_hw_segments, - SDp->request_queue->max_phys_segments); + i = min(queue_max_hw_segments(SDp->request_queue), + queue_max_phys_segments(SDp->request_queue)); if (st_max_sg_segs < i) i = 
st_max_sg_segs; buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 4ca3b586064..cfa26d56ce6 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev) if (us->fflags & US_FL_MAX_SECTORS_MIN) max_sectors = PAGE_CACHE_SIZE >> 9; - if (sdev->request_queue->max_sectors > max_sectors) + if (queue_max_sectors(sdev->request_queue) > max_sectors) blk_queue_max_sectors(sdev->request_queue, max_sectors); } else if (sdev->type == TYPE_TAPE) { @@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att { struct scsi_device *sdev = to_scsi_device(dev); - return sprintf(buf, "%u\n", sdev->request_queue->max_sectors); + return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue)); } /* Input routine for the sysfs max_sectors file */ diff --git a/fs/bio.c b/fs/bio.c index 4445c382173..ab423a1024a 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -499,11 +499,11 @@ int bio_get_nr_vecs(struct block_device *bdev) struct request_queue *q = bdev_get_queue(bdev); int nr_pages; - nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (nr_pages > q->max_phys_segments) - nr_pages = q->max_phys_segments; - if (nr_pages > q->max_hw_segments) - nr_pages = q->max_hw_segments; + nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (nr_pages > queue_max_phys_segments(q)) + nr_pages = queue_max_phys_segments(q); + if (nr_pages > queue_max_hw_segments(q)) + nr_pages = queue_max_hw_segments(q); return nr_pages; } @@ -562,8 +562,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page * make this too complex. 
*/ - while (bio->bi_phys_segments >= q->max_phys_segments - || bio->bi_phys_segments >= q->max_hw_segments) { + while (bio->bi_phys_segments >= queue_max_phys_segments(q) + || bio->bi_phys_segments >= queue_max_hw_segments(q)) { if (retried_segments) return 0; @@ -634,7 +634,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { - return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); + return __bio_add_page(q, bio, page, len, offset, + queue_max_hw_sectors(q)); } /** @@ -654,7 +655,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); - return __bio_add_page(q, bio, page, len, offset, q->max_sectors); + return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); } struct bio_map_data { diff --git a/include/linux/bio.h b/include/linux/bio.h index d30ec6f30dd..12737be5860 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio) #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ (((addr1) | (mask)) == (((addr2) - 1) | (mask))) #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ - __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask) + __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) #define BIO_SEG_BOUNDARY(q, b1, b2) \ BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 872b78b7a10..29b48f7b4ba 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -898,6 +898,7 @@ extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); extern void blk_queue_max_sectors(struct request_queue *, unsigned int); +extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); @@ -988,6 +989,41 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) +static inline unsigned long queue_bounce_pfn(struct request_queue *q) +{ + return q->bounce_pfn; +} + +static inline unsigned long queue_segment_boundary(struct request_queue *q) +{ + return q->seg_boundary_mask; +} + +static inline unsigned int queue_max_sectors(struct request_queue *q) +{ + return q->max_sectors; +} + +static inline unsigned int queue_max_hw_sectors(struct request_queue *q) +{ + return q->max_hw_sectors; +} + +static inline unsigned short queue_max_hw_segments(struct request_queue *q) +{ + return q->max_hw_segments; +} + +static inline unsigned short queue_max_phys_segments(struct request_queue *q) +{ + return q->max_phys_segments; +} + +static inline unsigned int queue_max_segment_size(struct request_queue *q) +{ + return q->max_segment_size; +} + static inline unsigned short queue_logical_block_size(struct request_queue *q) { int retval = 512; diff --git a/mm/bounce.c b/mm/bounce.c index e590272fe7a..8dcd4315e01 100644 --- 
a/mm/bounce.c +++ b/mm/bounce.c @@ -192,7 +192,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, /* * is destination page below bounce pfn? */ - if (page_to_pfn(page) <= q->bounce_pfn) + if (page_to_pfn(page) <= queue_bounce_pfn(q)) continue; /* @@ -284,7 +284,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) * don't waste time iterating over bio segments */ if (!(q->bounce_gfp & GFP_DMA)) { - if (q->bounce_pfn >= blk_max_pfn) + if (queue_bounce_pfn(q) >= blk_max_pfn) return; pool = page_pool; } else { -- cgit v1.2.3-70-g09d2 From 025146e13b63483add912706c101fb0fb6f015cc Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 22 May 2009 17:17:51 -0400 Subject: block: Move queue limits to an embedded struct To accommodate stacking drivers that do not have an associated request queue we're moving the limits to a separate, embedded structure. Signed-off-by: Martin K. Petersen Signed-off-by: Jens Axboe --- block/blk-settings.c | 55 +++++++++++++++++++++++++++++++------------------- include/linux/blkdev.h | 44 +++++++++++++++++++++++----------------- 2 files changed, 60 insertions(+), 39 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-settings.c b/block/blk-settings.c index 0b32f984eed..b0f547cecfb 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) */ if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) dma = 1; - q->bounce_pfn = max_low_pfn; + q->limits.bounce_pfn = max_low_pfn; #else if (b_pfn < blk_max_low_pfn) dma = 1; - q->bounce_pfn = b_pfn; + q->limits.bounce_pfn = b_pfn; #endif if (dma) { init_emergency_isa_pool(); q->bounce_gfp = GFP_NOIO | GFP_DMA; - q->bounce_pfn = b_pfn; + q->limits.bounce_pfn = b_pfn; } } EXPORT_SYMBOL(blk_queue_bounce_limit); @@ -211,10 +211,10 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) } if (BLK_DEF_MAX_SECTORS > max_sectors) - q->max_hw_sectors = q->max_sectors = max_sectors; + q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors; else { - q->max_sectors = BLK_DEF_MAX_SECTORS; - q->max_hw_sectors = max_sectors; + q->limits.max_sectors = BLK_DEF_MAX_SECTORS; + q->limits.max_hw_sectors = max_sectors; } } EXPORT_SYMBOL(blk_queue_max_sectors); @@ -222,9 +222,9 @@ EXPORT_SYMBOL(blk_queue_max_sectors); void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) { if (BLK_DEF_MAX_SECTORS > max_sectors) - q->max_hw_sectors = BLK_DEF_MAX_SECTORS; + q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS; else - q->max_hw_sectors = max_sectors; + q->limits.max_hw_sectors = max_sectors; } EXPORT_SYMBOL(blk_queue_max_hw_sectors); @@ -247,7 +247,7 @@ void blk_queue_max_phys_segments(struct request_queue *q, __func__, max_segments); } - q->max_phys_segments = max_segments; + q->limits.max_phys_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_phys_segments); @@ -271,7 +271,7 @@ void blk_queue_max_hw_segments(struct request_queue *q, __func__, max_segments); } - q->max_hw_segments = max_segments; + q->limits.max_hw_segments = max_segments; } EXPORT_SYMBOL(blk_queue_max_hw_segments); @@ -292,7 +292,7 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) __func__, max_size); } - q->max_segment_size = max_size; + q->limits.max_segment_size = max_size; } EXPORT_SYMBOL(blk_queue_max_segment_size); @@ -308,7 +308,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); **/ void 
blk_queue_logical_block_size(struct request_queue *q, unsigned short size) { - q->logical_block_size = size; + q->limits.logical_block_size = size; } EXPORT_SYMBOL(blk_queue_logical_block_size); @@ -325,14 +325,27 @@ EXPORT_SYMBOL(blk_queue_logical_block_size); void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { /* zero is "infinity" */ - t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); - t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); - t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); - - t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); - t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); - t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); - t->logical_block_size = max(t->logical_block_size, b->logical_block_size); + t->limits.max_sectors = min_not_zero(queue_max_sectors(t), + queue_max_sectors(b)); + + t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t), + queue_max_hw_sectors(b)); + + t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t), + queue_segment_boundary(b)); + + t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t), + queue_max_phys_segments(b)); + + t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t), + queue_max_hw_segments(b)); + + t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t), + queue_max_segment_size(b)); + + t->limits.logical_block_size = max(queue_logical_block_size(t), + queue_logical_block_size(b)); + if (!t->queue_lock) WARN_ON_ONCE(1); else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { @@ -430,7 +443,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) __func__, mask); } - q->seg_boundary_mask = mask; + q->limits.seg_boundary_mask = mask; } EXPORT_SYMBOL(blk_queue_segment_boundary); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 29b48f7b4ba..b7bb6fdba12 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -307,6 +307,21 @@ struct blk_cmd_filter { struct kobject kobj; }; +struct queue_limits { + unsigned long bounce_pfn; + unsigned long seg_boundary_mask; + + unsigned int max_hw_sectors; + unsigned int max_sectors; + unsigned int max_segment_size; + + unsigned short logical_block_size; + unsigned short max_hw_segments; + unsigned short max_phys_segments; + + unsigned char no_cluster; +}; + struct request_queue { /* @@ -358,7 +373,6 @@ struct request_queue /* * queue needs bounce pages for pages above this limit */ - unsigned long bounce_pfn; gfp_t bounce_gfp; /* @@ -387,14 +401,6 @@ struct request_queue unsigned int nr_congestion_off; unsigned int nr_batching; - unsigned int max_sectors; - unsigned int max_hw_sectors; - unsigned short max_phys_segments; - unsigned short max_hw_segments; - unsigned short logical_block_size; - unsigned int max_segment_size; - - unsigned long seg_boundary_mask; void *dma_drain_buffer; unsigned int dma_drain_size; unsigned int dma_pad_mask; @@ -410,6 +416,8 @@ struct request_queue struct timer_list timeout; struct list_head timeout_list; + struct queue_limits limits; + /* * sg stuff */ @@ -991,45 +999,45 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); static inline unsigned long queue_bounce_pfn(struct request_queue *q) { - return q->bounce_pfn; + return q->limits.bounce_pfn; } static inline unsigned long queue_segment_boundary(struct request_queue *q) { - return q->seg_boundary_mask; 
+ return q->limits.seg_boundary_mask; } static inline unsigned int queue_max_sectors(struct request_queue *q) { - return q->max_sectors; + return q->limits.max_sectors; } static inline unsigned int queue_max_hw_sectors(struct request_queue *q) { - return q->max_hw_sectors; + return q->limits.max_hw_sectors; } static inline unsigned short queue_max_hw_segments(struct request_queue *q) { - return q->max_hw_segments; + return q->limits.max_hw_segments; } static inline unsigned short queue_max_phys_segments(struct request_queue *q) { - return q->max_phys_segments; + return q->limits.max_phys_segments; } static inline unsigned int queue_max_segment_size(struct request_queue *q) { - return q->max_segment_size; + return q->limits.max_segment_size; } static inline unsigned short queue_logical_block_size(struct request_queue *q) { int retval = 512; - if (q && q->logical_block_size) - retval = q->logical_block_size; + if (q && q->limits.logical_block_size) + retval = q->limits.logical_block_size; return retval; } -- cgit v1.2.3-70-g09d2 From c72758f33784e5e2a1a4bb9421ef3e6de8f9fcf3 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 22 May 2009 17:17:53 -0400 Subject: block: Export I/O topology for block devices and partitions To support devices with physical block sizes bigger than 512 bytes we need to ensure proper alignment. This patch adds support for exposing I/O topology characteristics as devices are stacked. logical_block_size is the smallest unit the device can address. physical_block_size indicates the smallest I/O the device can write without incurring a read-modify-write penalty. The io_min parameter is the smallest preferred I/O size reported by the device. In many cases this is the same as the physical block size. However, the io_min parameter can be scaled up when stacking (RAID5 chunk size > physical block size). The io_opt characteristic indicates the optimal I/O size reported by the device. This is usually the stripe width for arrays. The alignment_offset parameter indicates the number of bytes the start of the device/partition is offset from the device's natural alignment. Partition tools and MD/DM utilities can use this to pad their offsets so filesystems start on proper boundaries. Signed-off-by: Martin K. Petersen Signed-off-by: Jens Axboe --- Documentation/ABI/testing/sysfs-block | 59 +++++++++++ block/blk-settings.c | 186 ++++++++++++++++++++++++++++++++++ block/blk-sysfs.c | 33 ++++++ block/genhd.c | 11 ++ fs/partitions/check.c | 10 ++ include/linux/blkdev.h | 47 +++++++++ include/linux/genhd.h | 1 + 7 files changed, 347 insertions(+) (limited to 'include/linux/blkdev.h') diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block index 44f52a4f590..cbbd3e06994 100644 --- a/Documentation/ABI/testing/sysfs-block +++ b/Documentation/ABI/testing/sysfs-block @@ -60,3 +60,62 @@ Description: Indicates whether the block layer should automatically generate checksums for write requests bound for devices that support receiving integrity metadata. + +What: /sys/block/<disk>/alignment_offset +Date: April 2009 +Contact: Martin K. Petersen +Description: + Storage devices may report a physical block size that is + bigger than the logical block size (for instance a drive + with 4KB physical sectors exposing 512-byte logical + blocks to the operating system). This parameter + indicates how many bytes the beginning of the device is + offset from the disk's natural alignment. + +What: /sys/block/<disk>/<partition>/alignment_offset +Date: April 2009 +Contact: Martin K.
Petersen +Description: + Storage devices may report a physical block size that is + bigger than the logical block size (for instance a drive + with 4KB physical sectors exposing 512-byte logical + blocks to the operating system). This parameter + indicates how many bytes the beginning of the partition + is offset from the disk's natural alignment. + +What: /sys/block/<disk>/queue/logical_block_size +Date: May 2009 +Contact: Martin K. Petersen +Description: + This is the smallest unit the storage device can + address. It is typically 512 bytes. + +What: /sys/block/<disk>/queue/physical_block_size +Date: May 2009 +Contact: Martin K. Petersen +Description: + This is the smallest unit the storage device can write + without resorting to read-modify-write operation. It is + usually the same as the logical block size but may be + bigger. One example is SATA drives with 4KB sectors + that expose a 512-byte logical block size to the + operating system. + +What: /sys/block/<disk>/queue/minimum_io_size +Date: April 2009 +Contact: Martin K. Petersen +Description: + Storage devices may report a preferred minimum I/O size, + which is the smallest request the device can perform + without incurring a read-modify-write penalty. For disk + drives this is often the physical block size. For RAID + arrays it is often the stripe chunk size. + +What: /sys/block/<disk>/queue/optimal_io_size +Date: April 2009 +Contact: Martin K. Petersen +Description: + Storage devices may report an optimal I/O size, which is + the device's preferred unit of receiving I/O. This is + rarely reported for disk drives. For RAID devices it is + usually the stripe width or the internal block size. diff --git a/block/blk-settings.c b/block/blk-settings.c index b0f547cecfb..5649f34adb4 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -309,9 +309,94 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) { q->limits.logical_block_size = size; + + if (q->limits.physical_block_size < size) + q->limits.physical_block_size = size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; } EXPORT_SYMBOL(blk_queue_logical_block_size); +/** + * blk_queue_physical_block_size - set physical block size for the queue + * @q: the request queue for the device + * @size: the physical block size, in bytes + * + * Description: + * This should be set to the lowest possible sector size that the + * hardware can operate on without reverting to read-modify-write + * operations. + */ +void blk_queue_physical_block_size(struct request_queue *q, unsigned short size) +{ + q->limits.physical_block_size = size; + + if (q->limits.physical_block_size < q->limits.logical_block_size) + q->limits.physical_block_size = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_physical_block_size); + +/** + * blk_queue_alignment_offset - set physical block alignment offset + * @q: the request queue for the device + * @offset: alignment offset in bytes + * + * Description: + * Some devices are naturally misaligned to compensate for things like + * the legacy DOS partition table 63-sector offset. Low-level drivers + * should call this function for devices whose first sector is not + * naturally aligned.
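+ *
+ * A minimal sketch (hypothetical values): a driver for a drive with
+ * 4KB physical sectors that shifts its LBAs by one 512-byte sector,
+ * so that partitions created at the legacy sector 63 come out
+ * aligned, would register
+ *
+ *	blk_queue_physical_block_size(q, 4096);
+ *	blk_queue_alignment_offset(q, 3584);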
+ */ +void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) +{ + q->limits.alignment_offset = + offset & (q->limits.physical_block_size - 1); + q->limits.misaligned = 0; +} +EXPORT_SYMBOL(blk_queue_alignment_offset); + +/** + * blk_queue_io_min - set minimum request size for the queue + * @q: the request queue for the device + * @min: smallest I/O size in bytes + * + * Description: + * Some devices have an internal block size bigger than the reported + * hardware sector size. This function can be used to signal the + * smallest I/O the device can perform without incurring a performance + * penalty. + */ +void blk_queue_io_min(struct request_queue *q, unsigned int min) +{ + q->limits.io_min = min; + + if (q->limits.io_min < q->limits.logical_block_size) + q->limits.io_min = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_io_min); + +/** + * blk_queue_io_opt - set optimal request size for the queue + * @q: the request queue for the device + * @opt: optimal request size in bytes + * + * Description: + * Drivers can call this function to set the preferred I/O request + * size for devices that report such a value. + */ +void blk_queue_io_opt(struct request_queue *q, unsigned int opt) +{ + q->limits.io_opt = opt; +} +EXPORT_SYMBOL(blk_queue_io_opt); + /* * Returns the minimum that is _not_ zero, unless both are zero. */ @@ -357,6 +442,107 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) } EXPORT_SYMBOL(blk_queue_stack_limits); +/** + * blk_stack_limits - adjust queue_limits for stacked devices + * @t: the stacking driver limits (top) + * @b: the underlying queue limits (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges two queue_limit structs. Returns 0 if alignment didn't + * change. Returns -1 if adding the bottom device caused + * misalignment. + */ +int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset) +{ + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); + + t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, + b->seg_boundary_mask); + + t->max_phys_segments = min_not_zero(t->max_phys_segments, + b->max_phys_segments); + + t->max_hw_segments = min_not_zero(t->max_hw_segments, + b->max_hw_segments); + + t->max_segment_size = min_not_zero(t->max_segment_size, + b->max_segment_size); + + t->logical_block_size = max(t->logical_block_size, + b->logical_block_size); + + t->physical_block_size = max(t->physical_block_size, + b->physical_block_size); + + t->io_min = max(t->io_min, b->io_min); + t->no_cluster |= b->no_cluster; + + /* Bottom device offset aligned? */ + if (offset && + (offset & (b->physical_block_size - 1)) != b->alignment_offset) { + t->misaligned = 1; + return -1; + } + + /* If top has no alignment offset, inherit from bottom */ + if (!t->alignment_offset) + t->alignment_offset = + b->alignment_offset & (b->physical_block_size - 1); + + /* Top device aligned on logical block boundary?
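+ * e.g. a bottom device reporting alignment_offset 3584 with 512-byte
+ * logical blocks passes the following check, since 3584 & 511 == 0
+ * (hypothetical values)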
*/ + if (t->alignment_offset & (t->logical_block_size - 1)) { + t->misaligned = 1; + return -1; + } + + return 0; +} + +/** + * disk_stack_limits - adjust queue limits for stacked drivers + * @disk: MD/DM gendisk (top) + * @bdev: the underlying block device (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges the limits of @bdev's queue into @disk's queue, and logs + * a warning if the bottom device turns out to be misaligned. + */ +void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset) +{ + struct request_queue *t = disk->queue; + struct request_queue *b = bdev_get_queue(bdev); + + offset += get_start_sect(bdev) << 9; + + if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) { + char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; + + disk_name(disk, 0, top); + bdevname(bdev, bottom); + + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } + + if (!t->queue_lock) + WARN_ON_ONCE(1); + else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { + unsigned long flags; + + spin_lock_irqsave(t->queue_lock, flags); + if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) + queue_flag_clear(QUEUE_FLAG_CLUSTER, t); + spin_unlock_irqrestore(t->queue_lock, flags); + } +} +EXPORT_SYMBOL(disk_stack_limits); + /** * blk_queue_dma_pad - set pad mask * @q: the request queue for the device diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3ccdadb8e20..9337e17f911 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -105,6 +105,21 @@ static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) return queue_var_show(queue_logical_block_size(q), page); } +static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_physical_block_size(q), page); +} + +static ssize_t queue_io_min_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_min(q), page); +} + +static ssize_t queue_io_opt_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_opt(q), page); +} + static ssize_t queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { @@ -257,6 +272,21 @@ static struct queue_sysfs_entry queue_logical_block_size_entry = { .show = queue_logical_block_size_show, }; +static struct queue_sysfs_entry queue_physical_block_size_entry = { + .attr = {.name = "physical_block_size", .mode = S_IRUGO }, + .show = queue_physical_block_size_show, +}; + +static struct queue_sysfs_entry queue_io_min_entry = { + .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, + .show = queue_io_min_show, +}; + +static struct queue_sysfs_entry queue_io_opt_entry = { + .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, + .show = queue_io_opt_show, +}; + static struct queue_sysfs_entry queue_nonrot_entry = { .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, .show = queue_nonrot_show, @@ -289,6 +319,9 @@ static struct attribute *default_attrs[] = { &queue_iosched_entry.attr, &queue_hw_sector_size_entry.attr, &queue_logical_block_size_entry.attr, + &queue_physical_block_size_entry.attr, + &queue_io_min_entry.attr, + &queue_io_opt_entry.attr, &queue_nonrot_entry.attr, &queue_nomerges_entry.attr, &queue_rq_affinity_entry.attr, diff --git a/block/genhd.c b/block/genhd.c index 1a4916e0173..fe7ccc0a618 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -852,11 +852,21 @@ static ssize_t disk_capability_show(struct device *dev, return sprintf(buf, "%x\n",
disk->flags); } +static ssize_t disk_alignment_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); +} + static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST @@ -875,6 +885,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_removable.attr, &dev_attr_ro.attr, &dev_attr_size.attr, + &dev_attr_alignment_offset.attr, &dev_attr_capability.attr, &dev_attr_stat.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 99e33ef40be..0af36085eb2 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -219,6 +219,13 @@ ssize_t part_size_show(struct device *dev, return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); } +ssize_t part_alignment_offset_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset); +} + ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -272,6 +279,7 @@ ssize_t part_fail_store(struct device *dev, static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = @@ -282,6 +290,7 @@ static struct attribute *part_attrs[] = { &dev_attr_partition.attr, &dev_attr_start.attr, &dev_attr_size.attr, + &dev_attr_alignment_offset.attr, &dev_attr_stat.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, @@ -383,6 +392,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, pdev = part_to_dev(p); p->start_sect = start; + p->alignment_offset = queue_sector_alignment_offset(disk->queue, start); p->nr_sects = len; p->partno = partno; p->policy = get_disk_ro(disk); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b7bb6fdba12..5e740a135e7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -314,11 +314,16 @@ struct queue_limits { unsigned int max_hw_sectors; unsigned int max_sectors; unsigned int max_segment_size; + unsigned int physical_block_size; + unsigned int alignment_offset; + unsigned int io_min; + unsigned int io_opt; unsigned short logical_block_size; unsigned short max_hw_segments; unsigned short max_phys_segments; + unsigned char misaligned; unsigned char no_cluster; }; @@ -911,6 +916,15 @@ extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); +extern void 
blk_queue_physical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_alignment_offset(struct request_queue *q, + unsigned int alignment); +extern void blk_queue_io_min(struct request_queue *q, unsigned int min); +extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); +extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t offset); +extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_dma_pad(struct request_queue *, unsigned int); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); @@ -1047,6 +1061,39 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev) return queue_logical_block_size(bdev_get_queue(bdev)); } +static inline unsigned int queue_physical_block_size(struct request_queue *q) +{ + return q->limits.physical_block_size; +} + +static inline unsigned int queue_io_min(struct request_queue *q) +{ + return q->limits.io_min; +} + +static inline unsigned int queue_io_opt(struct request_queue *q) +{ + return q->limits.io_opt; +} + +static inline int queue_alignment_offset(struct request_queue *q) +{ + if (q && q->limits.misaligned) + return -1; + + if (q && q->limits.alignment_offset) + return q->limits.alignment_offset; + + return 0; +} + +static inline int queue_sector_alignment_offset(struct request_queue *q, + sector_t sector) +{ + return ((sector << 9) - q->limits.alignment_offset) + & (q->limits.io_min - 1); +} + static inline int queue_dma_alignment(struct request_queue *q) { return q ? q->dma_alignment : 511; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a1a28caed23..149fda264c8 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -90,6 +90,7 @@ struct disk_stats { struct hd_struct { sector_t start_sect; sector_t nr_sects; + sector_t alignment_offset; struct device __dev; struct kobject *holder_dir; int policy, partno; -- cgit v1.2.3-70-g09d2 From a05c0205ba031c01bba33a21bf0a35920eb64833 Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Wed, 3 Jun 2009 09:33:18 +0200 Subject: block: Fix bounce limit setting in DM blk_queue_bounce_limit() is more than a wrapper around the request queue limits.bounce_pfn variable. Introduce blk_queue_bounce_pfn() which can be called by stacking drivers that wish to set the bounce limit explicitly. Signed-off-by: Martin K. Petersen Signed-off-by: Jens Axboe --- block/blk-settings.c | 17 +++++++++++++++++ drivers/md/dm-table.c | 2 +- include/linux/blkdev.h | 1 + 3 files changed, 19 insertions(+), 1 deletion(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-settings.c b/block/blk-settings.c index 8d339349289..9acd0b7e802 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -193,6 +193,23 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) } EXPORT_SYMBOL(blk_queue_bounce_limit); +/** + * blk_queue_bounce_pfn - set the bounce buffer limit for queue + * @q: the request queue for the device + * @pfn: max address + * + * Description: + * This function is similar to blk_queue_bounce_limit except it + * neither changes allocation flags, nor does it set up the ISA DMA + * pool. This function should only be used by stacking drivers. + * Hardware drivers should use blk_queue_bounce_limit instead.
+ */ +void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn) +{ + q->limits.bounce_pfn = pfn; +} +EXPORT_SYMBOL(blk_queue_bounce_pfn); + /** * blk_queue_max_sectors - set max sectors for a request for this queue * @q: the request queue for the device diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e9a73bb242b..3ca1604ddd5 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -920,7 +920,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) blk_queue_max_segment_size(q, t->limits.max_segment_size); blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors); blk_queue_segment_boundary(q, t->limits.seg_boundary_mask); - blk_queue_bounce_limit(q, t->limits.bounce_pfn); + blk_queue_bounce_pfn(q, t->limits.bounce_pfn); if (t->limits.no_cluster) queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 5e740a135e7..989aa1790f4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -910,6 +910,7 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); extern void blk_cleanup_queue(struct request_queue *); extern void blk_queue_make_request(struct request_queue *, make_request_fn *); extern void blk_queue_bounce_limit(struct request_queue *, u64); +extern void blk_queue_bounce_pfn(struct request_queue *, u64); extern void blk_queue_max_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); -- cgit v1.2.3-70-g09d2 From 9df1bb9b516daeece159ab7fb262d01a0359247c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 9 Jun 2009 06:22:57 +0200 Subject: Revert "block: Fix bounce limit setting in DM" This reverts commit a05c0205ba031c01bba33a21bf0a35920eb64833. DM doesn't need to access the bounce_pfn directly. Signed-off-by: Jens Axboe --- block/blk-settings.c | 17 ----------------- drivers/md/dm-table.c | 2 +- include/linux/blkdev.h | 1 - 3 files changed, 1 insertion(+), 19 deletions(-) (limited to 'include/linux/blkdev.h') diff --git a/block/blk-settings.c b/block/blk-settings.c index 9acd0b7e802..8d339349289 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -193,23 +193,6 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) } EXPORT_SYMBOL(blk_queue_bounce_limit); -/** - * blk_queue_bounce_pfn - set the bounce buffer limit for queue - * @q: the request queue for the device - * @pfn: max address - * - * Description: - * This function is similar to blk_queue_bounce_limit except it - * neither changes allocation flags, nor does it set up the ISA DMA - * pool. This function should only be used by stacking drivers. - * Hardware drivers should use blk_queue_bounce_limit instead. 
- */
-void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn)
-{
-        q->limits.bounce_pfn = pfn;
-}
-EXPORT_SYMBOL(blk_queue_bounce_pfn);
-
 /**
  * blk_queue_max_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ca1604ddd5..e9a73bb242b 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -920,7 +920,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
         blk_queue_max_segment_size(q, t->limits.max_segment_size);
         blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
         blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
-        blk_queue_bounce_pfn(q, t->limits.bounce_pfn);
+        blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
         if (t->limits.no_cluster)
                 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 989aa1790f4..5e740a135e7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -910,7 +910,6 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_bounce_pfn(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
--
cgit v1.2.3-70-g09d2


From b0fd271d5fba0b2d00888363f3869e3f9b26caa9 Mon Sep 17 00:00:00 2001
From: Kiyoshi Ueda
Date: Thu, 11 Jun 2009 13:10:16 +0200
Subject: block: add request clone interface (v2)

This patch adds the following 2 interfaces for request-stacking drivers:

  - blk_rq_prep_clone(struct request *clone, struct request *orig,
                      struct bio_set *bs, gfp_t gfp_mask,
                      int (*bio_ctr)(struct bio *, struct bio *, void *),
                      void *data)
      * Clones bios in the original request to the clone request
        (bio_ctr is called for each cloned bio).
      * Copies attributes of the original request to the clone request.
        The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not
        copied.

  - blk_rq_unprep_clone(struct request *clone)
      * Frees cloned bios from the clone request.

Request stacking drivers (e.g. request-based dm) need to make a clone
request for a submitted request and dispatch it to other devices.

To allocate a request for the clone, request stacking drivers may not
be able to use blk_get_request() because the allocation may be done in
an irq-disabled context.  So blk_rq_prep_clone() takes a request
allocated by the caller as an argument.

For each clone bio in the clone request, request stacking drivers
should be able to set up their own completion handler.  So
blk_rq_prep_clone() takes a callback function which is called for each
clone bio, and a pointer for private data which is passed to the
callback.

NOTE:
blk_rq_prep_clone() doesn't copy any actual data of the original
request.  Pages are shared between original bios and cloned bios.
So the caller must not complete the original request before the clone
request.
Signed-off-by: Kiyoshi Ueda
Signed-off-by: Jun'ichi Nomura
Cc: Boaz Harrosh
Signed-off-by: Jens Axboe
---
 block/blk-core.c       | 100 ++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/blkdev.h |   5 +++
 2 files changed, 105 insertions(+)

(limited to 'include/linux/blkdev.h')

diff --git a/block/blk-core.c b/block/blk-core.c
index 03c5a64b6cc..02a9252107a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2295,6 +2295,106 @@ int blk_lld_busy(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_lld_busy);
 
+/**
+ * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
+ * @rq: the clone request to be cleaned up
+ *
+ * Description:
+ *     Free all bios in @rq for a cloned request.
+ */
+void blk_rq_unprep_clone(struct request *rq)
+{
+        struct bio *bio;
+
+        while ((bio = rq->bio) != NULL) {
+                rq->bio = bio->bi_next;
+
+                bio_put(bio);
+        }
+}
+EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
+
+/*
+ * Copy attributes of the original request to the clone request.
+ * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
+ */
+static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+{
+        dst->cpu = src->cpu;
+        dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+        dst->cmd_type = src->cmd_type;
+        dst->__sector = blk_rq_pos(src);
+        dst->__data_len = blk_rq_bytes(src);
+        dst->nr_phys_segments = src->nr_phys_segments;
+        dst->ioprio = src->ioprio;
+        dst->extra_len = src->extra_len;
+}
+
+/**
+ * blk_rq_prep_clone - Helper function to setup clone request
+ * @rq: the request to be setup
+ * @rq_src: original request to be cloned
+ * @bs: bio_set that bios for clone are allocated from
+ * @gfp_mask: memory allocation mask for bio
+ * @bio_ctr: setup function to be called for each clone bio.
+ *           Returns %0 for success, non %0 for failure.
+ * @data: private data to be passed to @bio_ctr
+ *
+ * Description:
+ *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
+ *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
+ *     are not copied, and copying such parts is the caller's responsibility.
+ *     Also, pages which the original bios are pointing to are not copied
+ *     and the cloned bios just point to the same pages.
+ *     So cloned bios must be completed before original bios, which means
+ *     the caller must complete @rq before @rq_src.
+ */
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+                      struct bio_set *bs, gfp_t gfp_mask,
+                      int (*bio_ctr)(struct bio *, struct bio *, void *),
+                      void *data)
+{
+        struct bio *bio, *bio_src;
+
+        if (!bs)
+                bs = fs_bio_set;
+
+        blk_rq_init(NULL, rq);
+
+        __rq_for_each_bio(bio_src, rq_src) {
+                bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+                if (!bio)
+                        goto free_and_out;
+
+                __bio_clone(bio, bio_src);
+
+                if (bio_integrity(bio_src) &&
+                    bio_integrity_clone(bio, bio_src, gfp_mask))
+                        goto free_and_out;
+
+                if (bio_ctr && bio_ctr(bio, bio_src, data))
+                        goto free_and_out;
+
+                if (rq->bio) {
+                        rq->biotail->bi_next = bio;
+                        rq->biotail = bio;
+                } else
+                        rq->bio = rq->biotail = bio;
+        }
+
+        __blk_rq_prep_clone(rq, rq_src);
+
+        return 0;
+
+free_and_out:
+        if (bio)
+                bio_free(bio, bs);
+        blk_rq_unprep_clone(rq);
+
+        return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
+
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 {
         return queue_work(kblockd_workqueue, work);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5e740a135e7..ebdfde8fe55 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -765,6 +765,11 @@ extern void blk_insert_request(struct request_queue *, struct request *, int, vo
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+                             struct bio_set *bs, gfp_t gfp_mask,
+                             int (*bio_ctr)(struct bio *, struct bio *, void *),
+                             void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
                                      struct request *rq);
 extern void blk_plug_device(struct request_queue *);
--
cgit v1.2.3-70-g09d2
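
As a usage illustration of the clone interface added above: a request-based
stacking driver might drive blk_rq_prep_clone(), blk_insert_cloned_request()
and blk_rq_unprep_clone() roughly as in the sketch below.  This is a minimal
sketch, not part of any commit: struct my_io, my_io_complete() and the
pre-allocated @clone request (e.g. from a driver-private mempool, since
blk_get_request() may not be usable with interrupts disabled) are hypothetical
driver context; only the block-layer entry points come from the patches above.

/*
 * Hypothetical usage sketch -- not from the commits above.  Clone @orig
 * into the caller-allocated @clone and dispatch it to an underlying
 * device's queue.  Bios and pages are shared with @orig, so @orig must
 * not be completed until @clone has completed.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

struct my_io;                                           /* hypothetical per-I/O state */
extern void my_io_complete(struct my_io *io, int error);/* hypothetical */

/* Completion handler installed on every cloned bio. */
static void my_clone_bio_end_io(struct bio *clone, int error)
{
        struct my_io *io = clone->bi_private;

        bio_put(clone);
        my_io_complete(io, error);
}

/* bio_ctr callback: blk_rq_prep_clone() invokes this for each cloned bio. */
static int my_bio_ctr(struct bio *clone, struct bio *orig, void *data)
{
        clone->bi_end_io = my_clone_bio_end_io;
        clone->bi_private = data;
        return 0;
}

static int my_dispatch_clone(struct request_queue *under_q,
                             struct request *clone, struct request *orig,
                             struct my_io *io)
{
        int ret;

        /* NULL bio_set falls back to fs_bio_set inside blk_rq_prep_clone() */
        ret = blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC,
                                my_bio_ctr, io);
        if (ret)
                return ret;

        ret = blk_insert_cloned_request(under_q, clone);
        if (ret)
                blk_rq_unprep_clone(clone);     /* drop cloned bios on failure */

        return ret;
}

The bio_ctr callback is where the driver hooks in its per-bio completion, so
it can end the original request only after every cloned bio has finished,
mirroring the NOTE in the commit message above.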