From 2d507a01dac338831266b44ccbb01c69e84606ed Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Sat, 29 Dec 2007 10:59:53 -0600
Subject: [SCSI] libsas, bsg: pass errors through correctly

Currently in BSG, errors returned in req->errors aren't passed back to
the calling programme (either via SG_IO or via read/write).  Fix this,
while preserving the SCSI convention of returning status in
req->errors.  Also update libsas to return errors correctly instead of
ignoring them.

Acked-by: FUJITA Tomonori
Signed-off-by: James Bottomley
---
 block/bsg.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

(limited to 'block')

diff --git a/block/bsg.c b/block/bsg.c
index 8e181ab3afb..69b0a9d3330 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -445,6 +445,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	else
 		hdr->dout_resid = rq->data_len;
 
+	/*
+	 * If the request generated a negative error number, return it
+	 * (providing we aren't already returning an error); if it's
+	 * just a protocol response (i.e. non-negative), that gets
+	 * processed above.
+	 */
+	if (!ret && rq->errors < 0)
+		ret = rq->errors;
+
 	blk_rq_unmap_user(bio);
 	blk_put_request(rq);
 
@@ -837,6 +846,7 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct bsg_device *bd = file->private_data;
 	int __user *uarg = (int __user *) arg;
+	int ret;
 
 	switch (cmd) {
 		/*
@@ -889,12 +899,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (rq->next_rq)
 			bidi_bio = rq->next_rq->bio;
 		blk_execute_rq(bd->queue, NULL, rq, 0);
-		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
+		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
 
 		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
 			return -EFAULT;
 
-		return 0;
+		return ret;
 	}
 	/*
 	 * block device ioctls
--
cgit v1.2.3-70-g09d2
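For context on what the bsg change means for user space: after this commit, a
transport-level failure (a negative req->errors) surfaces as a failed SG_IO
ioctl with errno set, while SCSI status still arrives in the sg_io_v4 response
fields. A minimal sketch of a caller that distinguishes the two cases — the
bsg device path is illustrative and error handling is abbreviated:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>            /* SG_IO */
#include <linux/bsg.h>          /* struct sg_io_v4 */

int main(void)
{
	unsigned char cdb[6] = { 0 };   /* TEST UNIT READY */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd;

	fd = open("/dev/bsg/0:0:0:0", O_RDWR);  /* illustrative path */
	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.response = (unsigned long)sense;
	hdr.max_response_len = sizeof(sense);

	/*
	 * Pre-patch, a negative req->errors was dropped and the ioctl
	 * reported success; post-patch it fails with errno set.
	 */
	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");
	else if (hdr.device_status)     /* SCSI status byte */
		fprintf(stderr, "SCSI status 0x%x\n", hdr.device_status);

	close(fd);
	return 0;
}

Before the fix, the perror() branch was effectively unreachable for SG_IO on a
bsg node: blk_complete_sgv4_hdr_rq()'s return value was simply discarded.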
From 11c3e689f1c3a73e3af7b0ea767b1b0626da8033 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Mon, 31 Dec 2007 16:37:00 -0600
Subject: [SCSI] block: Introduce new blk_queue_update_dma_alignment interface

The purpose of this is to allow stacked alignment settings, with the
ultimate queue alignment being set to the largest alignment requirement
in the stack.  The reason for this is so that the SCSI mid-layer can
relax the default alignment requirements (which are basically causing a
lot of superfluous copying to go on in the SG_IO interface) while
allowing transports, devices or HBAs to add stricter limits if they
need them.

Acked-by: Jens Axboe
Signed-off-by: James Bottomley
---
 block/ll_rw_blk.c      | 24 ++++++++++++++++++++++++
 include/linux/blkdev.h |  1 +
 2 files changed, 25 insertions(+)

(limited to 'block')

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8b919940b2a..14af36c5cdb 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -759,6 +759,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
+/**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * description:
+ *    update required memory and length alignment for direct dma transactions.
+ *    If the requested alignment is larger than the current alignment, then
+ *    the current queue alignment is updated to the new value, otherwise it
+ *    is left alone.  The design of this is to allow multiple objects
+ *    (driver, device, transport etc) to set their respective
+ *    alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+	BUG_ON(mask > PAGE_SIZE);
+
+	if (mask > q->dma_alignment)
+		q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q:  The request queue for the device
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d18ee67b40f..81e99e51630 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -766,6 +766,7 @@ extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
--
cgit v1.2.3-70-g09d2
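A usage note on the new helper: because blk_queue_update_dma_alignment() only
ever raises q->dma_alignment, layered callers can run in any order and the
strictest mask wins. A hypothetical sketch — the call sites and mask values
below are invented for illustration; only the two queue functions are real:

#include <linux/blkdev.h>

/*
 * Hypothetical stacking example: none of these call sites appear in
 * the patch; they only show how independent layers combine.
 */
static void example_set_alignments(struct request_queue *q)
{
	/* the owning layer sets a relaxed 4-byte baseline (mask 0x3) */
	blk_queue_dma_alignment(q, 0x3);

	/* a transport needing 512-byte alignment raises the mask */
	blk_queue_update_dma_alignment(q, 0x1ff);

	/* a weaker 8-byte request leaves the stricter mask in place */
	blk_queue_update_dma_alignment(q, 0x7);
}

The unconditional blk_queue_dma_alignment() setter remains for whichever layer
owns the baseline; layers above it should prefer the update variant, which can
tighten but never loosen the constraint.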