-rw-r--r--  fs/btrfs/disk-io.c     | 16
-rw-r--r--  fs/btrfs/disk-io.h     |  1
-rw-r--r--  fs/btrfs/transaction.c |  2
-rw-r--r--  fs/btrfs/volumes.c     | 18
4 files changed, 27 insertions, 10 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 92e14dd9bdd..bbba14b629d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -429,7 +429,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 	return 0;
 }
 
-static unsigned long async_submit_limit(struct btrfs_fs_info *info)
+unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 {
 	unsigned long limit = min_t(unsigned long,
 				    info->workers.max_workers,
@@ -439,7 +439,8 @@ static unsigned long async_submit_limit(struct btrfs_fs_info *info)
 
 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
 {
-	return atomic_read(&info->nr_async_bios) > async_submit_limit(info);
+	return atomic_read(&info->nr_async_bios) >
+		btrfs_async_submit_limit(info);
 }
 
 static void run_one_async_submit(struct btrfs_work *work)
@@ -451,12 +452,13 @@ static void run_one_async_submit(struct btrfs_work *work)
 
 	async = container_of(work, struct async_submit_bio, work);
 	fs_info = BTRFS_I(async->inode)->root->fs_info;
-	limit = async_submit_limit(fs_info);
+	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
 	atomic_dec(&fs_info->nr_async_submits);
 
-	if (atomic_read(&fs_info->nr_async_submits) < limit)
+	if (atomic_read(&fs_info->nr_async_submits) < limit &&
+	    waitqueue_active(&fs_info->async_submit_wait))
 		wake_up(&fs_info->async_submit_wait);
 
 	async->submit_bio_hook(async->inode, async->rw, async->bio,
@@ -469,7 +471,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			extent_submit_bio_hook_t *submit_bio_hook)
 {
 	struct async_submit_bio *async;
-	int limit = async_submit_limit(fs_info);
+	int limit = btrfs_async_submit_limit(fs_info);
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
@@ -1863,10 +1865,10 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 	struct extent_io_tree *tree;
 	u64 num_dirty;
 	u64 start = 0;
-	unsigned long thresh = 12 * 1024 * 1024;
+	unsigned long thresh = 96 * 1024 * 1024;
 	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 
-	if (current_is_pdflush())
+	if (current_is_pdflush() || current->flags & PF_MEMALLOC)
 		return;
 
 	num_dirty = count_range_bits(tree, &start, (u64)-1,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e904a69347a..2562a273ae1 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -73,4 +73,5 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			int rw, struct bio *bio, int mirror_num,
 			extent_submit_bio_hook_t *submit_bio_hook);
 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
+unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 #endif
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6bcb0876f9b..eff3ad72991 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -322,8 +322,6 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 		if (ret)
 			break;
 		while(start <= end) {
-			if (btrfs_congested_async(root->fs_info, 0))
-				congestion_wait(WRITE, HZ/10);
 			cond_resched();
 
 			index = start >> PAGE_CACHE_SHIFT;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 23a5b0aba00..2652660e607 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -138,12 +138,18 @@ int run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
+	struct btrfs_fs_info *fs_info;
 	struct bio *tail;
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run = 0;
+	unsigned long limit;
 
 	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+	fs_info = device->dev_root->fs_info;
+	limit = btrfs_async_submit_limit(fs_info);
+	limit = limit * 2 / 3;
+
 loop:
 	spin_lock(&device->io_lock);
 
@@ -179,7 +185,11 @@ loop:
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
-		atomic_dec(&device->dev_root->fs_info->nr_async_bios);
+		atomic_dec(&fs_info->nr_async_bios);
+
+		if (atomic_read(&fs_info->nr_async_bios) < limit &&
+		    waitqueue_active(&fs_info->async_submit_wait))
+			wake_up(&fs_info->async_submit_wait);
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 		bio_get(cur);
@@ -2135,6 +2145,7 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 		 int rw, struct bio *bio)
 {
 	int should_queue = 1;
+	unsigned long limit;
 
 	/* don't bother with additional async steps for reads, right now */
 	if (!(rw & (1 << BIO_RW))) {
@@ -2171,6 +2182,11 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 
 	if (should_queue)
 		btrfs_queue_worker(&root->fs_info->submit_workers,
 				   &device->work);
+
+	limit = btrfs_async_submit_limit(root->fs_info);
+	wait_event_timeout(root->fs_info->async_submit_wait,
+			   (atomic_read(&root->fs_info->nr_async_bios) < limit),
+			   HZ/10);
 	return 0;
 }
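Taken together, the hunks above implement a simple back-pressure scheme: schedule_bio() counts each queued async bio in fs_info->nr_async_bios and sleeps with a HZ/10 timeout once the count reaches btrfs_async_submit_limit(), while run_scheduled_bios() decrements the count and wakes async_submit_wait only after it falls below 2/3 of that limit, and only if someone is actually sleeping there (waitqueue_active()). The userspace sketch below illustrates that pattern with a pthread condition variable; it is not kernel code, and ASYNC_LIMIT, schedule_bio_sketch() and run_scheduled_bios_sketch() are made-up names used only for this example.

/*
 * Illustrative userspace sketch of the throttling scheme in this patch.
 * A condition variable stands in for the kernel wait queue, so the
 * waitqueue_active() optimization has no direct equivalent here; the
 * wakeup is simply gated on the 2/3 threshold.
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define ASYNC_LIMIT 256	/* stand-in for btrfs_async_submit_limit() */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t async_submit_wait = PTHREAD_COND_INITIALIZER;
static long nr_async_bios;	/* bios handed to the async worker */

/* submit side: count the bio, then block briefly if we are over the limit */
static void schedule_bio_sketch(void)
{
	struct timespec deadline;

	pthread_mutex_lock(&lock);
	nr_async_bios++;
	if (nr_async_bios >= ASYNC_LIMIT) {
		/*
		 * Bounded sleep, mirroring wait_event_timeout(..., HZ/10).
		 * The kernel version re-checks the condition; this sketch
		 * does a single timed wait for simplicity.
		 */
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_nsec += 100 * 1000 * 1000;	/* ~HZ/10 */
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_sec++;
			deadline.tv_nsec -= 1000000000L;
		}
		pthread_cond_timedwait(&async_submit_wait, &lock, &deadline);
	}
	pthread_mutex_unlock(&lock);
}

/* worker side: retire one bio, wake submitters only below 2/3 of the limit */
static void run_scheduled_bios_sketch(void)
{
	pthread_mutex_lock(&lock);
	nr_async_bios--;
	if (nr_async_bios < ASYNC_LIMIT * 2 / 3)
		pthread_cond_broadcast(&async_submit_wait);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	schedule_bio_sketch();
	run_scheduled_bios_sketch();
	printf("bios still in flight: %ld\n", nr_async_bios);
	return 0;
}

In practice the two halves run in different threads (submitters vs. the per-device worker); waking only below 2/3 of the limit releases waiters in batches instead of signalling on every completed bio, which is the point of the hysteresis in the patch.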