author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>          2009-02-04 11:36:27 +0900
committer  James Bottomley <James.Bottomley@HansenPartnership.com>  2009-03-12 12:58:12 -0500
commit     c96952ed7031e7c576ecf90cf95b8ec099d5295a (patch)
tree       4217498cf82131f6c870b0f92fea7039596fa0e7 /drivers/scsi/sg.c
parent     a3b7aeaba29e3dd995ece05ba50db9e0650c16b6 (diff)
[SCSI] sg: avoid blk_put_request/blk_rq_unmap_user in interrupt
This fixes the following oops:

  http://marc.info/?l=linux-kernel&m=123316111415677&w=2

You can reproduce this bug by interrupting a program before a sg
response completes. This leads to the special sg state (the orphan
state), and sg then calls blk_put_request in interrupt context
(rq->end_io). The above bug report shows the recursive lock problem
caused by calling blk_put_request in interrupt.

We could call __blk_put_request here instead, but we also need to
handle blk_rq_unmap_user here, which can't be called in interrupt
either. In the orphan state we don't need to care about the data
transfer (the program has revoked the command), so adding a "just free
the resources" mode to blk_rq_unmap_user would be one option, but I
prefer to avoid complicating the blk mapping API where possible.

Instead, change the orphan state to call sg_finish_rem_req via
execute_in_process_context. We hold sg_fd->kref so sg_fd doesn't go
away until keventd_wq finishes our work. copy_from_user/copy_to_user
fails in that path, so blk_rq_unmap_user just frees the resources
without performing the data transfer.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
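For context, the sketch below is not part of the patch; my_req, my_req_cleanup,
my_req_release and my_end_io are hypothetical names. It only illustrates the
deferral pattern the fix relies on: a completion handler that may run in
interrupt context hands cleanup work that can sleep (such as blk_rq_unmap_user
and blk_put_request) to execute_in_process_context(), while a kref keeps the
owning object alive until that work has run.

/*
 * Illustrative sketch only -- not part of the patch.  Hypothetical names
 * (my_req, my_req_cleanup, my_req_release, my_end_io) are used to show the
 * pattern: defer cleanup that may sleep out of interrupt context via
 * execute_in_process_context().
 */
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_req {
	struct kref ref;	/* pins the object until the deferred work runs */
	struct execute_work ew;	/* embedded work item, like Sg_request's ew */
};

static void my_req_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_req, ref));
}

/* Runs in process context (keventd), so sleeping cleanup calls are safe here. */
static void my_req_cleanup(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, ew.work);

	/* ... release request/mapping resources here ... */
	kref_put(&req->ref, my_req_release);
}

/* Completion callback that may run in interrupt context (cf. rq->end_io). */
static void my_end_io(struct my_req *req)
{
	/*
	 * If we are already in process context the callback runs immediately;
	 * otherwise it is queued to the shared kernel workqueue.
	 */
	execute_in_process_context(my_req_cleanup, &req->ew);
}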
Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--  drivers/scsi/sg.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 18d079e3990..cdd83cf9710 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -138,6 +138,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
 	volatile char done;	/* 0->before bh, 1->before read, 2->read */
 	struct request *rq;
 	struct bio *bio;
+	struct execute_work ew;
 } Sg_request;
 
 typedef struct sg_fd {	/* holds the state of a file descriptor */
@@ -1234,6 +1235,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
+static void sg_rq_end_io_usercontext(struct work_struct *work)
+{
+	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
+	struct sg_fd *sfp = srp->parentfp;
+
+	sg_finish_rem_req(srp);
+	kref_put(&sfp->f_ref, sg_remove_sfp);
+}
+
 /*
  * This function is a "bottom half" handler that is called by the mid
  * level when a command is completed (or has failed).
@@ -1312,10 +1322,9 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
 		 */
 		wake_up_interruptible(&sfp->read_wait);
 		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+		kref_put(&sfp->f_ref, sg_remove_sfp);
 	} else
-		sg_finish_rem_req(srp); /* call with srp->done == 0 */
-
-	kref_put(&sfp->f_ref, sg_remove_sfp);
+		execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
 }
 
 static struct file_operations sg_fops = {