author    NeilBrown <neilb@suse.de>    2010-12-09 16:17:51 +1100
committer NeilBrown <neilb@suse.de>    2010-12-09 16:17:51 +1100
commit    a035fc3e2531703b539f23bec4ca7943cfc69349 (patch)
tree      f10e160ce65745fd7ac56838c491a45ae180cb1e /drivers/md/md.c
parent    a7a07e69653acf8540daa1da053cd84bf86e8e66 (diff)
md: fix possible deadlock in handling flush requests.
As recorded in https://bugzilla.kernel.org/show_bug.cgi?id=24012 it is possible for a flush request through md to hang. This is due to an interaction between the recursion avoidance in generic_make_request, the insistence in md of only having one flush active at a time, and the possibility of dm (or md) submitting two flush requests to a device from the one generic_make_request.

If a generic_make_request call into dm causes two flush requests to be queued (as happens if the dm table has two targets - they get one each), these two will be queued inside generic_make_request. Assume they are for the same md device. The first is processed and causes 1 or more flush requests to be sent to lower devices. These get queued within generic_make_request too. Then the second flush to the md device gets handled and it blocks waiting for the first flush to complete. But it won't complete until the two lower-device requests complete, and they haven't even been submitted yet as they are on the generic_make_request queue.

The deadlock can be broken by using a separate thread to submit the requests to lower devices. md has such a thread readily available: md_wq. So use it to submit these requests.

Reported-by: Giacomo Catenazzi <cate@cateee.net>
Tested-by: Giacomo Catenazzi <cate@cateee.net>
Signed-off-by: NeilBrown <neilb@suse.de>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--  drivers/md/md.c | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 31f8e151d89..d66aaeddf95 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -373,8 +373,9 @@ static void md_end_flush(struct bio *bio, int err)
 
 static void md_submit_flush_data(struct work_struct *ws);
 
-static void submit_flushes(mddev_t *mddev)
+static void submit_flushes(struct work_struct *ws)
 {
+	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
 	mdk_rdev_t *rdev;
 
 	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
@@ -432,7 +433,8 @@ void md_flush_request(mddev_t *mddev, struct bio *bio)
 	mddev->flush_bio = bio;
 	spin_unlock_irq(&mddev->write_lock);
 
-	submit_flushes(mddev);
+	INIT_WORK(&mddev->flush_work, submit_flushes);
+	queue_work(md_wq, &mddev->flush_work);
 }
 EXPORT_SYMBOL(md_flush_request);
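The whole fix is the deferral pattern visible in the second hunk: instead of calling submit_flushes() directly from the request path (and so from inside the caller's generic_make_request), md packages it as a work item and hands it to md_wq, so the lower-device flush bios are submitted from a workqueue thread whose generic_make_request is not already active. Below is a minimal sketch of that pattern; the names flush_ctx, flush_wq, flush_worker and submit_to_lower_devices are hypothetical illustrations, not the md identifiers (only the work_struct/container_of/queue_work kernel APIs are the real ones).

/*
 * Sketch of the workqueue-deferral pattern used by the patch.
 * All non-kernel names here are hypothetical, for illustration only.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct flush_ctx {
	struct work_struct flush_work;	/* deferred flush submission */
	/* ... per-device flush state would live here ... */
};

static struct workqueue_struct *flush_wq;	/* plays the role of md_wq */

/* Hypothetical helper: would issue one flush bio per lower device.
 * Running in workqueue context, those bios are dispatched immediately
 * instead of piling up on the blocked caller's generic_make_request
 * queue. */
static void submit_to_lower_devices(struct flush_ctx *ctx)
{
	/* submit_bio(...) calls for each component device go here */
}

/* Work function: recover the context the same way the patch does,
 * via container_of() on the embedded work_struct. */
static void flush_worker(struct work_struct *ws)
{
	struct flush_ctx *ctx = container_of(ws, struct flush_ctx, flush_work);

	submit_to_lower_devices(ctx);
}

/* Request-path side: never submit lower bios directly, only queue work. */
static void start_flush(struct flush_ctx *ctx)
{
	INIT_WORK(&ctx->flush_work, flush_worker);
	queue_work(flush_wq, &ctx->flush_work);
}

Because the work item runs on the workqueue's own thread, the bios it submits start a fresh generic_make_request rather than being appended to the queue of the task that is blocked in md_flush_request waiting for them, which is what breaks the cycle described in the commit message.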