author     Chris Mason <chris.mason@oracle.com>  2008-08-28 06:15:24 -0400
committer  Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:07 -0400
commit     9473f16c75606fe6b2e5000525fe9766114505f3 (patch)
tree       93080f8a9bcf59170ce9b56b7e8b03fb4bfed8d9 /fs/btrfs
parent     f3f9931e3d0836509cfccdf473b34e34543a3272 (diff)
Btrfs: Throttle for async bio submits higher up the chain
The current code waits for the count of async bio submits to get below
a given threshold if it is too high, right after adding the latest bio
to the work queue.  This isn't optimal because the caller may have
sequential adjacent bios pending that it is waiting to send down the pipe.

This changeset requires the caller to wait on the async bio count, and
changes the async checksumming submits to wait for async bios any time
they self throttle.

The end result is much higher sequential throughput.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
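Stripped of the kernel specifics, the pattern the patch lands on is: bump an
in-flight counter, queue the work, and only then block (with a short timeout)
when the counter is over the limit, so a caller with a run of adjacent bios
can keep feeding the queue instead of stalling after every submit. Below is a
minimal userspace sketch of that pattern, assuming pthreads in place of the
kernel's atomics and wait queues; the counter and wait names mirror the patch,
but LIMIT, wait_below_limit(), worker() and submit_one() are illustrative
stand-ins, not btrfs code, and only the nr_async_submits side of the throttle
is modeled.

/*
 * Userspace model of the patch's throttle: pthread_cond_timedwait()
 * stands in for wait_event_timeout(), a mutex-protected int for the
 * atomic counter.  Illustrative only -- not btrfs code.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define LIMIT 4				/* stand-in for btrfs_async_submit_limit() */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t async_submit_wait = PTHREAD_COND_INITIALIZER;
static int nr_async_submits;		/* work queued but not yet completed */

/* Analogue of wait_event_timeout(..., HZ/10): wait up to ~100ms for the count to drop. */
static void wait_below_limit(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 100 * 1000 * 1000;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (nr_async_submits >= LIMIT) {
		/* non-zero return means the timeout fired; give up, like HZ/10 does */
		if (pthread_cond_timedwait(&async_submit_wait, &lock, &deadline))
			break;
	}
	pthread_mutex_unlock(&lock);
}

/* Worker: retire one unit of "work" at a time and wake throttled submitters. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		usleep(10 * 1000);	/* pretend to do the I/O */
		pthread_mutex_lock(&lock);
		if (nr_async_submits > 0)
			nr_async_submits--;
		pthread_cond_broadcast(&async_submit_wait);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Submit side, shaped like the patched btrfs_wq_submit_bio(): queue first, throttle after. */
static void submit_one(int i)
{
	pthread_mutex_lock(&lock);
	nr_async_submits++;		/* atomic_inc() in the patch */
	pthread_mutex_unlock(&lock);

	/* btrfs_queue_worker() would hand the work off here */

	if (nr_async_submits > LIMIT)	/* unlocked peek, like atomic_read() */
		wait_below_limit();
	printf("submitted %d, in flight %d\n", i, nr_async_submits);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	for (int i = 0; i < 32; i++)
		submit_one(i);
	return 0;
}

Build with cc -pthread. The effect of the ordering shows even in this toy:
because the throttle check comes after the queueing and only fires when the
count is actually over the limit, a burst of sequential submits goes straight
through until the worker falls behind, which is the higher sequential
throughput the commit message is after.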
Diffstat (limited to 'fs/btrfs')
-rw-r--r--	fs/btrfs/disk-io.c	8
-rw-r--r--	fs/btrfs/volumes.c	6
2 files changed, 7 insertions, 7 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index bbba14b629d..6a218f792e5 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -487,9 +487,15 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	atomic_inc(&fs_info->nr_async_submits);
 	btrfs_queue_worker(&fs_info->workers, &async->work);
 
-	wait_event_timeout(fs_info->async_submit_wait,
+	if (atomic_read(&fs_info->nr_async_submits) > limit) {
+		wait_event_timeout(fs_info->async_submit_wait,
 			   (atomic_read(&fs_info->nr_async_submits) < limit),
 			   HZ/10);
+
+		wait_event_timeout(fs_info->async_submit_wait,
+			   (atomic_read(&fs_info->nr_async_bios) < limit),
+			   HZ/10);
+	}
 	return 0;
 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2652660e607..5b1b60839d2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2145,7 +2145,6 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 		 int rw, struct bio *bio)
 {
 	int should_queue = 1;
-	unsigned long limit;
 
 	/* don't bother with additional async steps for reads, right now */
 	if (!(rw & (1 << BIO_RW))) {
@@ -2182,11 +2181,6 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 
 	if (should_queue)
 		btrfs_queue_worker(&root->fs_info->submit_workers,
 				   &device->work);
-
-	limit = btrfs_async_submit_limit(root->fs_info);
-	wait_event_timeout(root->fs_info->async_submit_wait,
-			   (atomic_read(&root->fs_info->nr_async_bios) < limit),
-			   HZ/10);
 	return 0;
 }