From 581bb050941b4f220f84d3e5ed6dace3d42dd382 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:06:11 +0800 Subject: Btrfs: Cache free inode numbers in memory Currently btrfs stores the highest objectid of the fs tree and always returns (highest+1) as the inode number when we create a file, so inode numbers are never reclaimed when we delete files, and we will run out of inode numbers as we keep creating and deleting files on 32-bit machines. This fixes it, and it works similarly to how we cache free space in block groups. We start a kernel thread to read the file tree. By scanning inode items, we know which chunks of inode numbers are free, and we cache them in an rb-tree. Because we are searching the commit root, we have to handle the cross-transaction case carefully. The rb-tree is a hybrid extent+bitmap tree, so if we have too many small chunks of inode numbers, we'll use bitmaps. Initially we allow 16K of RAM for extents, and a bitmap will be used if we exceed this threshold. The extents threshold is adjusted at runtime. Signed-off-by: Li Zefan --- fs/btrfs/disk-io.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ef6865c17cd..d02683b1ee1 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -41,6 +41,7 @@ #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" +#include "inode-map.h" static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); @@ -1327,6 +1328,19 @@ again: if (IS_ERR(root)) return root; + root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); + if (!root->free_ino_ctl) + goto fail; + root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), + GFP_NOFS); + if (!root->free_ino_pinned) + goto fail; + + btrfs_init_free_ino_ctl(root); + mutex_init(&root->fs_commit_mutex); + spin_lock_init(&root->cache_lock); + init_waitqueue_head(&root->cache_wait); + set_anon_super(&root->anon_super, NULL); if (btrfs_root_refs(&root->root_item) == 0) { @@ -2483,6 +2497,8 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) if (btrfs_root_refs(&root->root_item) == 0) synchronize_srcu(&fs_info->subvol_srcu); + __btrfs_remove_free_space_cache(root->free_ino_pinned); + __btrfs_remove_free_space_cache(root->free_ino_ctl); free_fs_root(root); return 0; } @@ -2496,6 +2512,8 @@ static void free_fs_root(struct btrfs_root *root) { } free_extent_buffer(root->node); free_extent_buffer(root->commit_root); + kfree(root->free_ino_ctl); + kfree(root->free_ino_pinned); kfree(root->name); kfree(root); } -- cgit v1.2.3-70-g09d2 From 82d5902d9c681be37ffa9d70482907f9f0b7ec1f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 20 Apr 2011 10:33:24 +0800 Subject: Btrfs: Support reading/writing on disk free ino cache This is similar to block group caching. We dedicate a special inode in the fs tree to save the free ino cache. The first time we create or delete a file after mount, the free ino cache will be loaded from disk into memory. When the fs tree is committed, the cache will be written back to disk. To keep compatibility, we check the root generation against the generation of the special inode when loading the cache, so the load will fail if the btrfs filesystem was previously mounted by an older kernel.
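The compatibility check boils down to a generation comparison. A condensed sketch of the load path, simplified from load_free_ino_cache() in this patch (error handling, locking and the unmount race omitted):

	u64 root_gen = btrfs_root_generation(&root->root_item);

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;	/* no cache inode yet: the scanning thread rebuilds it */

	/*
	 * Writing the cache back at commit time stamps the special inode
	 * with the transaction generation, so a mismatch means the tree
	 * was modified without the cache being maintained (e.g. by an
	 * older kernel) and the on-disk data must not be trusted.
	 */
	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);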
Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 ++++ fs/btrfs/disk-io.c | 1 + fs/btrfs/extent-tree.c | 3 +- fs/btrfs/free-space-cache.c | 97 ++++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/free-space-cache.h | 11 +++++ fs/btrfs/inode-map.c | 87 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/inode-map.h | 2 + fs/btrfs/inode.c | 45 +++++++++++++-------- fs/btrfs/transaction.c | 2 + 9 files changed, 236 insertions(+), 19 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index c96a4e4c556..b20082e27a9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -105,6 +105,12 @@ struct btrfs_ordered_sum; /* For storing free space cache */ #define BTRFS_FREE_SPACE_OBJECTID -11ULL +/* + * The inode number assigned to the special inode for sotring + * free ino cache + */ +#define BTRFS_FREE_INO_OBJECTID -12ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -1110,6 +1116,7 @@ struct btrfs_root { wait_queue_head_t cache_wait; struct btrfs_free_space_ctl *free_ino_pinned; u64 cache_progress; + struct inode *cache_inode; struct mutex log_mutex; wait_queue_head_t log_writer_wait; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d02683b1ee1..4f12c30a547 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2505,6 +2505,7 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) static void free_fs_root(struct btrfs_root *root) { + iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); if (root->anon_super.s_dev) { down_write(&root->anon_super.s_umount); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a0e818cb040..95ce8da63b2 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3145,7 +3145,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) /* make sure bytes are sectorsize aligned */ bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - if (root == root->fs_info->tree_root) { + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) { alloc_chunk = 0; committed = 1; } diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index fcbdcef6ca2..7d8b6b64340 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -209,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, return ret; } - return btrfs_update_inode(trans, root, inode); + ret = btrfs_update_inode(trans, root, inode); + return ret; } static int readahead_cache(struct inode *inode) @@ -525,6 +526,7 @@ out: spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_CLEAR; spin_unlock(&block_group->lock); + ret = 0; printk(KERN_ERR "btrfs: failed to load free space cache " "for block group %llu\n", block_group->key.objectid); @@ -893,6 +895,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, spin_lock(&block_group->lock); block_group->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&block_group->lock); + ret = 0; printk(KERN_ERR "btrfs: failed to write free space cace " "for block group %llu\n", block_group->key.objectid); @@ -2458,3 +2461,95 @@ out: return ino; } + +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path) +{ + struct inode *inode = NULL; + + spin_lock(&root->cache_lock); + if (root->cache_inode) + inode = igrab(root->cache_inode); + spin_unlock(&root->cache_lock); + if (inode) + return inode; + + inode = __lookup_free_space_inode(root, path, 0); + if 
(IS_ERR(inode)) + return inode; + + spin_lock(&root->cache_lock); + if (!root->fs_info->closing) + root->cache_inode = igrab(inode); + spin_unlock(&root->cache_lock); + + return inode; +} + +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + return __create_free_space_inode(root, trans, path, + BTRFS_FREE_INO_OBJECTID, 0); +} + +int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + int ret = 0; + u64 root_gen = btrfs_root_generation(&root->root_item); + + /* + * If we're unmounting then just return, since this does a search on the + * normal root and not the commit root and we could deadlock. + */ + smp_mb(); + if (fs_info->closing) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return 0; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + goto out; + + if (root_gen != BTRFS_I(inode)->generation) + goto out_put; + + ret = __load_free_space_cache(root, inode, ctl, path, 0); + + if (ret < 0) + printk(KERN_ERR "btrfs: failed to load free ino cache for " + "root %llu\n", root->root_key.objectid); +out_put: + iput(inode); +out: + btrfs_free_path(path); + return ret; +} + +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct inode *inode; + int ret; + + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode)) + return 0; + + ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); + if (ret < 0) + printk(KERN_ERR "btrfs: failed to write free ino cache " + "for root %llu\n", root->root_key.objectid); + + iput(inode); + return ret; +} diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index af06e6b6cea..8f2613f779e 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -65,6 +65,17 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_block_group_cache *block_group, struct btrfs_path *path); +struct inode *lookup_free_ino_inode(struct btrfs_root *root, + struct btrfs_path *path); +int create_free_ino_inode(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); +int load_free_ino_cache(struct btrfs_fs_info *fs_info, + struct btrfs_root *root); +int btrfs_write_out_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans, + struct btrfs_path *path); + void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, u64 bytenr, u64 size); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 5be62df90c4..7967e85c72f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -137,6 +137,7 @@ out: static void start_caching(struct btrfs_root *root) { struct task_struct *tsk; + int ret; spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { @@ -147,6 +148,14 @@ static void start_caching(struct btrfs_root *root) root->cached = BTRFS_CACHE_STARTED; spin_unlock(&root->cache_lock); + ret = load_free_ino_cache(root->fs_info, root); + if (ret == 1) { + spin_lock(&root->cache_lock); + root->cached = BTRFS_CACHE_FINISHED; + spin_unlock(&root->cache_lock); + return; + } + tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", root->root_key.objectid); BUG_ON(IS_ERR(tsk)); @@ -352,6 +361,84 @@ void 
btrfs_init_free_ino_ctl(struct btrfs_root *root) pinned->op = &pinned_free_ino_op; } +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans) +{ + struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; + struct btrfs_path *path; + struct inode *inode; + u64 alloc_hint = 0; + int ret; + int prealloc; + bool retry = false; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; +again: + inode = lookup_free_ino_inode(root, path); + if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { + ret = PTR_ERR(inode); + goto out; + } + + if (IS_ERR(inode)) { + BUG_ON(retry); + retry = true; + + ret = create_free_ino_inode(root, trans, path); + if (ret) + goto out; + goto again; + } + + BTRFS_I(inode)->generation = 0; + ret = btrfs_update_inode(trans, root, inode); + WARN_ON(ret); + + if (i_size_read(inode) > 0) { + ret = btrfs_truncate_free_space_cache(root, trans, path, inode); + if (ret) + goto out_put; + } + + spin_lock(&root->cache_lock); + if (root->cached != BTRFS_CACHE_FINISHED) { + ret = -1; + spin_unlock(&root->cache_lock); + goto out_put; + } + spin_unlock(&root->cache_lock); + + spin_lock(&ctl->tree_lock); + prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; + prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); + prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; + spin_unlock(&ctl->tree_lock); + + /* Just to make sure we have enough space */ + prealloc += 8 * PAGE_CACHE_SIZE; + + ret = btrfs_check_data_free_space(inode, prealloc); + if (ret) + goto out_put; + + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, + prealloc, prealloc, &alloc_hint); + if (ret) + goto out_put; + btrfs_free_reserved_data_space(inode, prealloc); + +out_put: + iput(inode); +out: + if (ret == 0) + ret = btrfs_write_out_ino_cache(root, trans, path); + + btrfs_free_path(path); + return ret; +} + static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) { struct btrfs_path *path; diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h index eb918451b49..ddb347bfee2 100644 --- a/fs/btrfs/inode-map.h +++ b/fs/btrfs/inode-map.h @@ -5,6 +5,8 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root); void btrfs_unpin_free_ino(struct btrfs_root *root); void btrfs_return_ino(struct btrfs_root *root, u64 objectid); int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); +int btrfs_save_ino_cache(struct btrfs_root *root, + struct btrfs_trans_handle *trans); int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index adec22884a3..b78d3ab789c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -745,6 +745,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, return alloc_hint; } +static inline bool is_free_space_inode(struct btrfs_root *root, + struct inode *inode) +{ + if (root == root->fs_info->tree_root || + BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + return true; + return false; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. 
The basic idea is to @@ -777,7 +786,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; - BUG_ON(root == root->fs_info->tree_root); + BUG_ON(is_free_space_inode(root, inode)); trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -1048,17 +1057,18 @@ static noinline int run_delalloc_nocow(struct inode *inode, int type; int nocow; int check_prev = 1; - bool nolock = false; + bool nolock; u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); BUG_ON(!path); - if (root == root->fs_info->tree_root) { - nolock = true; + + nolock = is_free_space_inode(root, inode); + + if (nolock) trans = btrfs_join_transaction_nolock(root, 1); - } else { + else trans = btrfs_join_transaction(root, 1); - } BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; @@ -1316,8 +1326,7 @@ static int btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1350,8 +1359,7 @@ static int btrfs_clear_bit_hook(struct inode *inode, if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - int do_list = (root->root_key.objectid != - BTRFS_ROOT_TREE_OBJECTID); + bool do_list = !is_free_space_inode(root, inode); if (*bits & EXTENT_FIRST_DELALLOC) *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1458,7 +1466,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (root == root->fs_info->tree_root) + if (is_free_space_inode(root, inode)) ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); else ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); @@ -1701,7 +1709,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_state *cached_state = NULL; int compress_type = 0; int ret; - bool nolock = false; + bool nolock; ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1); @@ -1709,7 +1717,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) return 0; BUG_ON(!ordered_extent); - nolock = (root == root->fs_info->tree_root); + nolock = is_free_space_inode(root, inode); if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { BUG_ON(!list_empty(&ordered_extent->list)); @@ -3473,7 +3481,9 @@ delete: if (path->slots[0] == 0 || path->slots[0] != pending_del_slot) { - if (root->ref_cows) { + if (root->ref_cows && + BTRFS_I(inode)->location.objectid != + BTRFS_FREE_INO_OBJECTID) { err = -EAGAIN; goto out; } @@ -3765,7 +3775,7 @@ void btrfs_evict_inode(struct inode *inode) truncate_inode_pages(&inode->i_data, 0); if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || - root == root->fs_info->tree_root)) + is_free_space_inode(root, inode))) goto no_delete; if (is_bad_inode(inode)) { @@ -4382,7 +4392,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) return 0; smp_mb(); - nolock = (root->fs_info->closing && root == root->fs_info->tree_root); + if (root->fs_info->closing && is_free_space_inode(root, inode)) + nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) @@ 
-6900,7 +6911,7 @@ int btrfs_drop_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; if (btrfs_root_refs(&root->root_item) == 0 && - root != root->fs_info->tree_root) + !is_free_space_inode(root, inode)) return 1; else return generic_drop_inode(inode); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f4c1184b7f1..4d1dbcbbaf4 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -761,6 +761,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, btrfs_update_reloc_root(trans, root); btrfs_orphan_commit_root(trans, root); + btrfs_save_ino_cache(root, trans); + if (root->commit_root != root->node) { mutex_lock(&root->fs_commit_mutex); switch_commit_root(root); -- cgit v1.2.3-70-g09d2 From 306e16ce13c0f3d4fc071b45803b5b83c2606011 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 14:29:38 +0200 Subject: btrfs: rename variables clashing with global function names reported by gcc -Wshadow: page_index, page_offset, new_inode, dev_name Signed-off-by: David Sterba --- fs/btrfs/compression.c | 42 +++++++++++++++++++++--------------------- fs/btrfs/compression.h | 2 +- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent_io.c | 28 ++++++++++++++-------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 24 ++++++++++++------------ fs/btrfs/super.c | 4 ++-- 8 files changed, 53 insertions(+), 53 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 41d1d7c70e2..d4cd0f0cd69 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -332,7 +332,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, struct compressed_bio *cb; unsigned long bytes_left; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - int page_index = 0; + int pg_index = 0; struct page *page; u64 first_byte = disk_start; struct block_device *bdev; @@ -366,8 +366,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, /* create and submit bios for the compressed pages */ bytes_left = compressed_len; - for (page_index = 0; page_index < cb->nr_pages; page_index++) { - page = compressed_pages[page_index]; + for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { + page = compressed_pages[pg_index]; page->mapping = inode->i_mapping; if (bio->bi_size) ret = io_tree->ops->merge_bio_hook(page, 0, @@ -432,7 +432,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, struct compressed_bio *cb) { unsigned long end_index; - unsigned long page_index; + unsigned long pg_index; u64 last_offset; u64 isize = i_size_read(inode); int ret; @@ -456,13 +456,13 @@ static noinline int add_ra_bio_pages(struct inode *inode, end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; while (last_offset < compressed_end) { - page_index = last_offset >> PAGE_CACHE_SHIFT; + pg_index = last_offset >> PAGE_CACHE_SHIFT; - if (page_index > end_index) + if (pg_index > end_index) break; rcu_read_lock(); - page = radix_tree_lookup(&mapping->page_tree, page_index); + page = radix_tree_lookup(&mapping->page_tree, pg_index); rcu_read_unlock(); if (page) { misses++; @@ -476,7 +476,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, if (!page) break; - if (add_to_page_cache_lru(page, mapping, page_index, + if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { page_cache_release(page); goto next; @@ -560,7 +560,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, unsigned long uncompressed_len = bio->bi_vcnt * 
PAGE_CACHE_SIZE; unsigned long compressed_len; unsigned long nr_pages; - unsigned long page_index; + unsigned long pg_index; struct page *page; struct block_device *bdev; struct bio *comp_bio; @@ -613,10 +613,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; - for (page_index = 0; page_index < nr_pages; page_index++) { - cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | + for (pg_index = 0; pg_index < nr_pages; pg_index++) { + cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!cb->compressed_pages[page_index]) + if (!cb->compressed_pages[pg_index]) goto fail2; } cb->nr_pages = nr_pages; @@ -634,8 +634,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); - for (page_index = 0; page_index < nr_pages; page_index++) { - page = cb->compressed_pages[page_index]; + for (pg_index = 0; pg_index < nr_pages; pg_index++) { + page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; page->index = em_start >> PAGE_CACHE_SHIFT; @@ -702,8 +702,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, return 0; fail2: - for (page_index = 0; page_index < nr_pages; page_index++) - free_page((unsigned long)cb->compressed_pages[page_index]); + for (pg_index = 0; pg_index < nr_pages; pg_index++) + free_page((unsigned long)cb->compressed_pages[pg_index]); kfree(cb->compressed_pages); fail1: @@ -945,7 +945,7 @@ void btrfs_exit_compress(void) int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, - unsigned long *page_index, + unsigned long *pg_index, unsigned long *pg_offset) { unsigned long buf_offset; @@ -954,7 +954,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long working_bytes = total_out - buf_start; unsigned long bytes; char *kaddr; - struct page *page_out = bvec[*page_index].bv_page; + struct page *page_out = bvec[*pg_index].bv_page; /* * start byte is the first byte of the page we're currently @@ -995,11 +995,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, /* check if we need to pick another page */ if (*pg_offset == PAGE_CACHE_SIZE) { - (*page_index)++; - if (*page_index >= vcnt) + (*pg_index)++; + if (*pg_index >= vcnt) return 0; - page_out = bvec[*page_index].bv_page; + page_out = bvec[*pg_index].bv_page; *pg_offset = 0; start_byte = page_offset(page_out) - disk_start; diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 51000174b9d..a12059f4f0f 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio_vec *bvec, int vcnt, - unsigned long *page_index, + unsigned long *pg_index, unsigned long *pg_offset); int btrfs_submit_compressed_write(struct inode *inode, u64 start, diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae..b5433bbe751 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2534,7 +2534,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, int btrfs_commit_write(struct file *file, struct page *page, unsigned from, unsigned to); struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t 
page_offset, u64 start, u64 end, + size_t pg_offset, u64 start, u64 end, int create); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece8..990afa8656a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -137,7 +137,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { * that covers the entire device */ static struct extent_map *btree_get_extent(struct inode *inode, - struct page *page, size_t page_offset, u64 start, u64 len, + struct page *page, size_t pg_offset, u64 start, u64 len, int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31..b730c12fa95 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2007,7 +2007,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct btrfs_ordered_extent *ordered; int ret; int nr = 0; - size_t page_offset = 0; + size_t pg_offset = 0; size_t iosize; size_t disk_io_size; size_t blocksize = inode->i_sb->s_blocksize; @@ -2043,9 +2043,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree, char *userpage; struct extent_state *cached = NULL; - iosize = PAGE_CACHE_SIZE - page_offset; + iosize = PAGE_CACHE_SIZE - pg_offset; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, iosize); + memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, @@ -2054,7 +2054,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, &cached, GFP_NOFS); break; } - em = get_extent(inode, page, page_offset, cur, + em = get_extent(inode, page, pg_offset, cur, end - cur + 1, 0); if (IS_ERR(em) || !em) { SetPageError(page); @@ -2094,7 +2094,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct extent_state *cached = NULL; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, iosize); + memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); @@ -2103,7 +2103,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unlock_extent_cached(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } /* the get_extent function already copied into the page */ @@ -2112,7 +2112,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, check_page_uptodate(tree, page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } /* we have an inline extent but it didn't get marked up @@ -2122,7 +2122,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, SetPageError(page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } @@ -2135,7 +2135,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; pnr -= page->index; ret = submit_extent_page(READ, tree, page, - sector, disk_io_size, page_offset, + sector, disk_io_size, pg_offset, bdev, bio, pnr, end_bio_extent_readpage, mirror_num, *bio_flags, @@ -2146,7 +2146,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, if (ret) SetPageError(page); cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; } if (!nr) { if 
(!PageError(page)) @@ -2751,7 +2751,7 @@ int extent_prepare_write(struct extent_io_tree *tree, u64 cur_end; struct extent_map *em; unsigned blocksize = 1 << inode->i_blkbits; - size_t page_offset = 0; + size_t pg_offset = 0; size_t block_off_start; size_t block_off_end; int err = 0; @@ -2767,7 +2767,7 @@ int extent_prepare_write(struct extent_io_tree *tree, lock_extent(tree, page_start, page_end, GFP_NOFS); while (block_start <= block_end) { - em = get_extent(inode, page, page_offset, block_start, + em = get_extent(inode, page, pg_offset, block_start, block_end - block_start + 1, 1); if (IS_ERR(em) || !em) goto err; @@ -2811,7 +2811,7 @@ int extent_prepare_write(struct extent_io_tree *tree, block_start + iosize - 1, EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); ret = submit_extent_page(READ, tree, page, - sector, iosize, page_offset, em->bdev, + sector, iosize, pg_offset, em->bdev, NULL, 1, end_bio_extent_preparewrite, 0, 0, 0); @@ -2828,7 +2828,7 @@ int extent_prepare_write(struct extent_io_tree *tree, &cached, GFP_NOFS); block_start = cur_end + 1; } - page_offset = block_start & (PAGE_CACHE_SIZE - 1); + pg_offset = block_start & (PAGE_CACHE_SIZE - 1); free_extent_map(em); } if (iocount) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index af2d7179c37..b9ce2f72074 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -164,7 +164,7 @@ static inline struct extent_state *extent_state_next(struct extent_state *state) typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, - size_t page_offset, + size_t pg_offset, u64 start, u64 len, int create); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04..fc966472e3a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6985,7 +6985,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(old_dir)->root; struct btrfs_root *dest = BTRFS_I(new_dir)->root; - struct inode *new_inode = new_dentry->d_inode; + struct inode *newinode = new_dentry->d_inode; struct inode *old_inode = old_dentry->d_inode; struct timespec ctime = CURRENT_TIME; u64 index = 0; @@ -7000,18 +7000,18 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, return -EXDEV; if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || - (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) + (newinode && newinode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) return -ENOTEMPTY; - if (S_ISDIR(old_inode->i_mode) && new_inode && - new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) + if (S_ISDIR(old_inode->i_mode) && newinode && + newinode->i_size > BTRFS_EMPTY_DIR_SIZE) return -ENOTEMPTY; /* * we're using rename to replace one file with another. * and the replacement file is large. Start IO on it now so * we don't add too much work to the end of the transaction */ - if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && + if (newinode && S_ISREG(old_inode->i_mode) && newinode->i_size && old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) filemap_flush(old_inode->i_mapping); @@ -7065,7 +7065,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * make sure the inode gets flushed if it is replacing * something. 
*/ - if (new_inode && new_inode->i_size && + if (newinode && newinode->i_size && old_inode && S_ISREG(old_inode->i_mode)) { btrfs_add_ordered_operation(trans, root, old_inode); } @@ -7092,16 +7092,16 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, } BUG_ON(ret); - if (new_inode) { - new_inode->i_ctime = CURRENT_TIME; - if (unlikely(new_inode->i_ino == + if (newinode) { + newinode->i_ctime = CURRENT_TIME; + if (unlikely(newinode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { - root_objectid = BTRFS_I(new_inode)->location.objectid; + root_objectid = BTRFS_I(newinode)->location.objectid; ret = btrfs_unlink_subvol(trans, dest, new_dir, root_objectid, new_dentry->d_name.name, new_dentry->d_name.len); - BUG_ON(new_inode->i_nlink == 0); + BUG_ON(newinode->i_nlink == 0); } else { ret = btrfs_unlink_inode(trans, dest, new_dir, new_dentry->d_inode, @@ -7109,7 +7109,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); } BUG_ON(ret); - if (new_inode->i_nlink == 0) { + if (newinode->i_nlink == 0) { ret = btrfs_orphan_add(trans, new_dentry->d_inode); BUG_ON(ret); } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf..3e28521643f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -739,7 +739,7 @@ static int btrfs_set_super(struct super_block *s, void *data) * for multiple device setup. Make sure to keep it in sync. */ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) + const char *device_name, void *data) { struct block_device *bdev = NULL; struct super_block *s; @@ -762,7 +762,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, if (error) return ERR_PTR(error); - error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices); + error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices); if (error) goto error_free_subvol_name; -- cgit v1.2.3-70-g09d2 From 4891aca2dac612a2f21a3278d9906ade13b55788 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 19 Apr 2011 16:45:00 +0200 Subject: btrfs: fix dereference before check The superblock's ->s_fs_info is properly set in btrfs_fill_super, after a call to open_ctree, which dereferences it before the check. Although tree_root is set via btrfs_set_super, let's be defensive and leave the check in place.
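Condensed from the open_ctree() hunks below, the before/after pattern is (a sketch; the surrounding allocations are elided):

	/* before: tree_root is dereferenced while initializing fs_info,
	 * ahead of the NULL check that fs_info itself is part of */
	struct btrfs_fs_info *fs_info = tree_root->fs_info;

	if (!extent_root || !tree_root || !fs_info ||
	    !chunk_root || !dev_root || !csum_root) {
		err = -ENOMEM;
		goto fail;
	}

	/* after: check first, then dereference the known-good pointer */
	struct btrfs_fs_info *fs_info = NULL;

	if (!extent_root || !tree_root || !tree_root->fs_info ||
	    !chunk_root || !dev_root || !csum_root) {
		err = -ENOMEM;
		goto fail;
	}
	fs_info = tree_root->fs_info;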
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 990afa8656a..25e4b8f1d0e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1611,7 +1611,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *tree_root = btrfs_sb(sb); - struct btrfs_fs_info *fs_info = tree_root->fs_info; + struct btrfs_fs_info *fs_info = NULL; struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), @@ -1623,11 +1623,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_super_block *disk_super; - if (!extent_root || !tree_root || !fs_info || + if (!extent_root || !tree_root || !tree_root->fs_info || !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } + fs_info = tree_root->fs_info; ret = init_srcu_struct(&fs_info->subvol_srcu); if (ret) { -- cgit v1.2.3-70-g09d2 From f993c883ad8e111fb9e9ae603540acbe94f7246c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 20 Apr 2011 23:35:57 +0200 Subject: btrfs: drop unused argument from extent_io_tree_init all callers pass GFP_NOFS, but the GFP mask argument is not used in the function; GFP_ATOMIC is passed to the radix tree initialization, and it's the only correct one since we're using the radix tree's preload/insert mechanism. Let's drop the gfp mask from the btrfs function; this will not change behaviour. Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 9 ++++----- fs/btrfs/extent_io.c | 2 +- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 4 ++-- fs/btrfs/relocation.c | 2 +- fs/btrfs/transaction.c | 3 +-- 6 files changed, 10 insertions(+), 12 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 25e4b8f1d0e..3ce80f71e98 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1080,7 +1080,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->log_transid = 0; root->last_log_commit = 0; extent_io_tree_init(&root->dirty_log_pages, - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); @@ -1712,8 +1712,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, - fs_info->btree_inode->i_mapping, - GFP_NOFS); + fs_info->btree_inode->i_mapping); extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, GFP_NOFS); @@ -1729,9 +1728,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->block_group_cache_tree = RB_ROOT; extent_io_tree_init(&fs_info->freed_extents[0], - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); extent_io_tree_init(&fs_info->freed_extents[1], - fs_info->btree_inode->i_mapping, GFP_NOFS); + fs_info->btree_inode->i_mapping); fs_info->pinned_extents = &fs_info->freed_extents[0]; fs_info->do_barriers = 1; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3c92712e976..e67ed76668e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -101,7 +101,7 @@ void extent_io_exit(void) } void extent_io_tree_init(struct extent_io_tree *tree, - struct address_space
*mapping) { tree->state = RB_ROOT; INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index b9ce2f72074..e9cfe8d1661 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -169,7 +169,7 @@ typedef struct extent_map *(get_extent_t)(struct inode *inode, int create); void extent_io_tree_init(struct extent_io_tree *tree, - struct address_space *mapping, gfp_t mask); + struct address_space *mapping); int try_release_extent_mapping(struct extent_map_tree *map, struct extent_io_tree *tree, struct page *page, gfp_t mask); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ba760c3ced2..3c98164f8b2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6786,8 +6786,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); - extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); - extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); + extent_io_tree_init(&ei->io_tree, &inode->i_data); + extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); mutex_init(&ei->log_mutex); btrfs_ordered_inode_tree_init(&ei->ordered_tree); INIT_LIST_HEAD(&ei->i_orphan); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fed0aaec075..f3edf45317b 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3935,7 +3935,7 @@ static struct reloc_control *alloc_reloc_control(void) INIT_LIST_HEAD(&rc->reloc_roots); backref_cache_init(&rc->backref_cache); mapping_tree_init(&rc->reloc_root_tree); - extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); + extent_io_tree_init(&rc->processed_blocks, NULL); return rc; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5..955f76eb0fa 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -80,8 +80,7 @@ static noinline int join_transaction(struct btrfs_root *root) INIT_LIST_HEAD(&cur_trans->pending_snapshots); list_add_tail(&cur_trans->list, &root->fs_info->trans_list); extent_io_tree_init(&cur_trans->dirty_pages, - root->fs_info->btree_inode->i_mapping, - GFP_NOFS); + root->fs_info->btree_inode->i_mapping); spin_lock(&root->fs_info->new_trans_lock); root->fs_info->running_transaction = cur_trans; spin_unlock(&root->fs_info->new_trans_lock); -- cgit v1.2.3-70-g09d2 From a8067e022ab54fde8953880a64572c3acca644dc Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 00:34:43 +0200 Subject: btrfs: drop unused parameter from extent_map_tree_init the GFP flags are not stored anywhere and all allocations are done via alloc_extent_map(GFP_NOFS). 
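The resulting change at call sites is mechanical; as a sketch, the btrfs_mapping_init() hunk from this patch reduces to:

	void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
	{
		/* was: extent_map_tree_init(&tree->map_tree, GFP_NOFS);
		 * the mask was never stored, so dropping it changes nothing;
		 * allocations keep using alloc_extent_map(GFP_NOFS) */
		extent_map_tree_init(&tree->map_tree);
	}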
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 3 +-- fs/btrfs/extent_map.c | 3 +-- fs/btrfs/extent_map.h | 2 +- fs/btrfs/inode.c | 2 +- fs/btrfs/volumes.c | 2 +- 5 files changed, 5 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3ce80f71e98..f2ee584b8ef 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1713,8 +1713,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, fs_info->btree_inode->i_mapping); - extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, - GFP_NOFS); + extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index a24a3f2fa13..3c8f374a8e2 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -28,12 +28,11 @@ void extent_map_exit(void) /** * extent_map_tree_init - initialize extent map tree * @tree: tree to initialize - * @mask: flags for memory allocations during tree operations * * Initialize the extent tree @tree. Should be called for each new inode * or other user of the extent_map interface. */ -void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) +void extent_map_tree_init(struct extent_map_tree *tree) { tree->map = RB_ROOT; rwlock_init(&tree->lock); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 28b44dbd1e3..255813c51b9 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -49,7 +49,7 @@ static inline u64 extent_map_block_end(struct extent_map *em) return em->block_start + em->block_len; } -void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); +void extent_map_tree_init(struct extent_map_tree *tree); struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len); int add_extent_mapping(struct extent_map_tree *tree, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3c98164f8b2..f54c015cc29 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6785,7 +6785,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->force_compress = BTRFS_COMPRESS_NONE; inode = &ei->vfs_inode; - extent_map_tree_init(&ei->extent_tree, GFP_NOFS); + extent_map_tree_init(&ei->extent_tree); extent_io_tree_init(&ei->io_tree, &inode->i_data); extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); mutex_init(&ei->log_mutex); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7367ae5a3e..15d7dc943c9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2849,7 +2849,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) void btrfs_mapping_init(struct btrfs_mapping_tree *tree) { - extent_map_tree_init(&tree->map_tree, GFP_NOFS); + extent_map_tree_init(&tree->map_tree); } void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) -- cgit v1.2.3-70-g09d2 From 172ddd60a662c4d8bf2809462866ddddd6431ea5 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 00:48:27 +0200 Subject: btrfs: drop gfp parameter from alloc_extent_map pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_map.c | 5 ++--- fs/btrfs/extent_map.h | 2 +- fs/btrfs/file.c | 4 ++-- fs/btrfs/inode.c | 12 ++++++------ fs/btrfs/relocation.c | 2 +- fs/btrfs/volumes.c | 4 ++-- 8 files changed, 16 insertions(+), 17 deletions(-) 
(limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f2ee584b8ef..e1e55679d06 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -154,7 +154,7 @@ static struct extent_map *btree_get_extent(struct inode *inode, } read_unlock(&em_tree->lock); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7cdce82e03e..6a3d53783d5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6694,7 +6694,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, u64 start = extent_key->objectid - offset; u64 end = start + extent_key->offset - 1; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = start; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 3c8f374a8e2..2d0410344ea 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -40,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree) /** * alloc_extent_map - allocate new extent map structure - * @mask: memory allocation flags * * Allocate a new extent_map structure. The new structure is * returned with a reference count of one and needs to be * freed using free_extent_map() */ -struct extent_map *alloc_extent_map(gfp_t mask) +struct extent_map *alloc_extent_map(void) { struct extent_map *em; - em = kmem_cache_alloc(extent_map_cache, mask); + em = kmem_cache_alloc(extent_map_cache, GFP_NOFS); if (!em) return NULL; em->in_tree = 0; diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 255813c51b9..33a7890b1f4 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -56,7 +56,7 @@ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); -struct extent_map *alloc_extent_map(gfp_t mask); +struct extent_map *alloc_extent_map(void); void free_extent_map(struct extent_map *em); int __init extent_map_init(void); void extent_map_exit(void); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 83abd274370..80eabe85409 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -191,9 +191,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, } while (1) { if (!split) - split = alloc_extent_map(GFP_NOFS); + split = alloc_extent_map(); if (!split2) - split2 = alloc_extent_map(GFP_NOFS); + split2 = alloc_extent_map(); BUG_ON(!split || !split2); write_lock(&em_tree->lock); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f54c015cc29..26f4d56cf04 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -649,7 +649,7 @@ retry: async_extent->start + async_extent->ram_size - 1, 0); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = async_extent->start; em->len = async_extent->ram_size; @@ -826,7 +826,7 @@ static noinline int cow_file_range(struct inode *inode, (u64)-1, &ins, 1); BUG_ON(ret); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = start; em->orig_start = em->start; @@ -1177,7 +1177,7 @@ out_check: struct extent_map *em; struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); BUG_ON(!em); em->start = cur_offset; em->orig_start = em->start; @@ -5069,7 +5069,7 @@ again: else goto out; } - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { err = -ENOMEM; goto out; @@ -5382,7 +5382,7 @@ struct 
extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag u64 hole_start = start; u64 hole_len = len; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { err = -ENOMEM; goto out; @@ -5483,7 +5483,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, } if (!em) { - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { em = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f3edf45317b..2097a88f60a 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2870,7 +2870,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, struct extent_map *em; int ret = 0; - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) return -ENOMEM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 15d7dc943c9..76acd1d235e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2609,7 +2609,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) { ret = -ENOMEM; goto error; @@ -3499,7 +3499,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, free_extent_map(em); } - em = alloc_extent_map(GFP_NOFS); + em = alloc_extent_map(); if (!em) return -ENOMEM; num_stripes = btrfs_chunk_num_stripes(leaf, chunk); -- cgit v1.2.3-70-g09d2 From f09d1f60e6aa82fb4cfaa525e21f6287fc1516f4 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:08:01 +0200 Subject: btrfs: drop gfp parameter from find_extent_buffer pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/extent_io.c | 3 +-- fs/btrfs/extent_io.h | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e1e55679d06..1c0752e9906 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -963,7 +963,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, struct inode *btree_inode = root->fs_info->btree_inode; struct extent_buffer *eb; eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, - bytenr, blocksize, GFP_NOFS); + bytenr, blocksize); return eb; } @@ -2696,7 +2696,7 @@ int btree_lock_page_hook(struct page *page) goto out; len = page->private >> 2; - eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS); + eb = find_extent_buffer(io_tree, bytenr, len); if (!eb) goto out; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e67ed76668e..ad0f0a95ad3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3377,8 +3377,7 @@ free_eb: } struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, - u64 start, unsigned long len, - gfp_t mask) + u64 start, unsigned long len) { struct extent_buffer *eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e9cfe8d1661..ff220c3c01b 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -263,8 +263,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, struct page *page0, gfp_t mask); struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, - u64 start, unsigned long len, - gfp_t mask); + u64 start, unsigned long len); void free_extent_buffer(struct extent_buffer *eb); int read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, -- cgit v1.2.3-70-g09d2 From 
ba14419264684b290f0d0b7f48d26eafb11fc0c6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 21 Apr 2011 01:12:06 +0200 Subject: btrfs: drop gfp parameter from alloc_extent_buffer pass GFP_NOFS directly to kmem_cache_alloc Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/extent_io.c | 7 +++---- fs/btrfs/extent_io.h | 3 +-- 3 files changed, 7 insertions(+), 9 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1c0752e9906..4084959b36f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -380,7 +380,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) len = page->private >> 2; WARN_ON(len == 0); - eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { WARN_ON(1); goto out; @@ -525,7 +525,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, len = page->private >> 2; WARN_ON(len == 0); - eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + eb = alloc_extent_buffer(tree, start, len, page); if (eb == NULL) { ret = -EIO; goto out; @@ -974,7 +974,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, struct extent_buffer *eb; eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, - bytenr, blocksize, NULL, GFP_NOFS); + bytenr, blocksize, NULL); return eb; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ad0f0a95ad3..9369289ce77 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3266,8 +3266,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, - struct page *page0, - gfp_t mask) + struct page *page0) { unsigned long num_pages = num_extent_pages(start, len); unsigned long i; @@ -3288,7 +3287,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, } rcu_read_unlock(); - eb = __alloc_extent_buffer(tree, start, len, mask); + eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS); if (!eb) return NULL; @@ -3305,7 +3304,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, i = 0; } for (; i < num_pages; i++, index++) { - p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); + p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM); if (!p) { WARN_ON(1); goto free_eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index ff220c3c01b..3c3be74c934 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -260,8 +260,7 @@ void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, - struct page *page0, - gfp_t mask); + struct page *page0); struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len); void free_extent_buffer(struct extent_buffer *eb); -- cgit v1.2.3-70-g09d2 From f2a97a9dbd86eb1ef956bdf20e05c507b32beb96 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 5 May 2011 12:44:41 +0200 Subject: btrfs: remove all unused functions Remove static and global declarations and/or definitions. Reduces size of btrfs.ko by ~3.4kB. 
text data bss dec hex filename 402081 7464 200 409745 64091 btrfs.ko.base 398620 7144 200 405964 631cc btrfs.ko.remove-all Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 78 --------------- fs/btrfs/delayed-ref.c | 38 -------- fs/btrfs/delayed-ref.h | 1 - fs/btrfs/disk-io.c | 27 ------ fs/btrfs/disk-io.h | 7 -- fs/btrfs/extent_io.c | 227 -------------------------------------------- fs/btrfs/extent_io.h | 21 ---- fs/btrfs/free-space-cache.c | 15 --- fs/btrfs/free-space-cache.h | 1 - fs/btrfs/inode.c | 52 ---------- fs/btrfs/locking.c | 25 ----- fs/btrfs/locking.h | 2 - fs/btrfs/ref-cache.c | 164 -------------------------------- fs/btrfs/ref-cache.h | 24 ----- fs/btrfs/relocation.c | 2 +- fs/btrfs/root-tree.c | 47 --------- fs/btrfs/sysfs.c | 65 ------------- fs/btrfs/volumes.c | 19 ---- fs/btrfs/volumes.h | 3 - 19 files changed, 1 insertion(+), 817 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b66216e636c..e37d441617d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1440,26 +1440,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); } -static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb, - struct btrfs_chunk *c, int nr, - u64 val) -{ - btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val); -} - static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, struct btrfs_chunk *c, int nr) { return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); } -static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb, - struct btrfs_chunk *c, int nr, - u64 val) -{ - btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val); -} - /* struct btrfs_block_group_item */ BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64); @@ -1517,14 +1503,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item) return (struct btrfs_timespec *)ptr; } -static inline struct btrfs_timespec * -btrfs_inode_otime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, otime); - return (struct btrfs_timespec *)ptr; -} - BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); @@ -1875,33 +1853,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) return (u8 *)ptr; } -static inline u8 *btrfs_super_fsid(struct extent_buffer *eb) -{ - unsigned long ptr = offsetof(struct btrfs_super_block, fsid); - return (u8 *)ptr; -} - -static inline u8 *btrfs_header_csum(struct extent_buffer *eb) -{ - unsigned long ptr = offsetof(struct btrfs_header, csum); - return (u8 *)ptr; -} - -static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb) -{ - return NULL; -} - -static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb) -{ - return NULL; -} - -static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb) -{ - return NULL; -} - static inline int btrfs_is_leaf(struct extent_buffer *eb) { return btrfs_header_level(eb) == 0; @@ -2055,22 +2006,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb) return sb->s_fs_info; } -static inline int btrfs_set_root_name(struct btrfs_root *root, - const char *name, int len) -{ - /* if we already have a name just free it */ - kfree(root->name); - - root->name = kmalloc(len+1, GFP_KERNEL); - if (!root->name) - return -ENOMEM; - - memcpy(root->name, name, len); - 
root->name[len] = '\0'; - - return 0; -} - static inline u32 btrfs_level_size(struct btrfs_root *root, int level) { if (level == 0) @@ -2304,11 +2239,6 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); -int btrfs_insert_some_items(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - int nr); int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -2354,8 +2284,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *item); int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct btrfs_root_item *item, struct btrfs_key *key); -int btrfs_search_root(struct btrfs_root *root, u64 search_start, - u64 *found_objectid); int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_set_root_node(struct btrfs_root_item *item, @@ -2494,8 +2422,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u32 min_type); int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, - int sync); int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state); int btrfs_writepages(struct address_space *mapping, @@ -2579,10 +2505,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, /* sysfs.c */ int btrfs_init_sysfs(void); void btrfs_exit_sysfs(void); -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs); -int btrfs_sysfs_add_root(struct btrfs_root *root); -void btrfs_sysfs_del_root(struct btrfs_root *root); -void btrfs_sysfs_del_super(struct btrfs_fs_info *root); /* xattr.c */ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index bce28f65389..cb9b9a431fc 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -280,44 +280,6 @@ again: return 1; } -/* - * This checks to see if there are any delayed refs in the - * btree for a given bytenr. It returns one if it finds any - * and zero otherwise. - * - * If it only finds a head node, it returns 0. - * - * The idea is to use this when deciding if you can safely delete an - * extent from the extent allocation tree. There may be a pending - * ref in the rbtree that adds or removes references, so as long as this - * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent - * allocation tree. - */ -int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr) -{ - struct btrfs_delayed_ref_node *ref; - struct btrfs_delayed_ref_root *delayed_refs; - struct rb_node *prev_node; - int ret = 0; - - delayed_refs = &trans->transaction->delayed_refs; - spin_lock(&delayed_refs->lock); - - ref = find_ref_head(&delayed_refs->root, bytenr, NULL); - if (ref) { - prev_node = rb_prev(&ref->rb_node); - if (!prev_node) - goto out; - ref = rb_entry(prev_node, struct btrfs_delayed_ref_node, - rb_node); - if (ref->bytenr == bytenr) - ret = 1; - } -out: - spin_unlock(&delayed_refs->lock); - return ret; -} - /* * helper function to update an extent delayed ref in the * rbtree. 
existing and update must both have the same diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 946ed71ab84..e287e3b0eab 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -166,7 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); -int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head); int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4084959b36f..fa287c551ff 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -650,12 +650,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) return 256 * limit; } -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) -{ - return atomic_read(&info->nr_async_bios) > - btrfs_async_submit_limit(info); -} - static void run_one_async_start(struct btrfs_work *work) { struct async_submit_bio *async; @@ -1283,21 +1277,6 @@ out: return root; } -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid) -{ - struct btrfs_root *root; - - if (root_objectid == BTRFS_ROOT_TREE_OBJECTID) - return fs_info->tree_root; - if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) - return fs_info->extent_root; - - root = radix_tree_lookup(&fs_info->fs_roots_radix, - (unsigned long)root_objectid); - return root; -} - struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, struct btrfs_key *location) { @@ -1369,11 +1348,6 @@ fail: return ERR_PTR(ret); } -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - const char *name, int namelen) -{ - return btrfs_read_fs_root_no_name(fs_info, location); #if 0 struct btrfs_root *root; int ret; @@ -1402,7 +1376,6 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, root->in_sysfs = 1; return root; #endif -} static int btrfs_congested_fn(void *congested_data, int bdi_bits) { diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 758f3ca614e..2d75f9e896f 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -55,11 +55,6 @@ int btrfs_commit_super(struct btrfs_root *root); int btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); -struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, - u64 root_objectid); -struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, - struct btrfs_key *location, - const char *name, int namelen); struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, struct btrfs_key *location); struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, @@ -80,8 +75,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, unsigned long bio_flags, u64 bio_offset, extent_submit_bio_hook_t *submit_bio_start, extent_submit_bio_hook_t *submit_bio_done); - -int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); int btrfs_write_tree_block(struct extent_buffer *buf); int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9369289ce77..91208296ff2 100644 --- a/fs/btrfs/extent_io.c +++ 
b/fs/btrfs/extent_io.c @@ -941,13 +941,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, NULL, mask); } -static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) -{ - return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, - NULL, mask); -} - int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask) { @@ -963,11 +956,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, cached_state, mask); } -int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) -{ - return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK); -} - /* * either insert or lock state struct between start and end use mask to tell * us if waiting is desired. @@ -1027,25 +1015,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) mask); } -/* - * helper function to set pages and extents in the tree dirty - */ -int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end) -{ - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; - struct page *page; - - while (index <= end_index) { - page = find_get_page(tree->mapping, index); - BUG_ON(!page); - __set_page_dirty_nobuffers(page); - page_cache_release(page); - index++; - } - return 0; -} - /* * helper function to set both pages and extents in the tree writeback */ @@ -1819,46 +1788,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err) bio_put(bio); } -/* - * IO done from prepare_write is pretty simple, we just unlock - * the structs in the extent tree when done, and set the uptodate bits - * as appropriate. - */ -static void end_bio_extent_preparewrite(struct bio *bio, int err) -{ - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_io_tree *tree; - u64 start; - u64 end; - - do { - struct page *page = bvec->bv_page; - struct extent_state *cached = NULL; - tree = &BTRFS_I(page->mapping->host)->io_tree; - - start = ((u64)page->index << PAGE_CACHE_SHIFT) + - bvec->bv_offset; - end = start + bvec->bv_len - 1; - - if (--bvec >= bio->bi_io_vec) - prefetchw(&bvec->bv_page->flags); - - if (uptodate) { - set_extent_uptodate(tree, start, end, &cached, - GFP_ATOMIC); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - - unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); - - } while (bvec >= bio->bi_io_vec); - - bio_put(bio); -} - struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, gfp_t gfp_flags) @@ -2719,128 +2648,6 @@ int extent_invalidatepage(struct extent_io_tree *tree, return 0; } -/* - * simple commit_write call, set_range_dirty is used to mark both - * the pages and the extent records as dirty - */ -int extent_commit_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to) -{ - loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; - - set_page_extent_mapped(page); - set_page_dirty(page); - - if (pos > inode->i_size) { - i_size_write(inode, pos); - mark_inode_dirty(inode); - } - return 0; -} - -int extent_prepare_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to, get_extent_t *get_extent) -{ - u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - u64 block_start; - u64 orig_block_start; - u64 block_end; - u64 
cur_end; - struct extent_map *em; - unsigned blocksize = 1 << inode->i_blkbits; - size_t pg_offset = 0; - size_t block_off_start; - size_t block_off_end; - int err = 0; - int iocount = 0; - int ret = 0; - int isnew; - - set_page_extent_mapped(page); - - block_start = (page_start + from) & ~((u64)blocksize - 1); - block_end = (page_start + to - 1) | (blocksize - 1); - orig_block_start = block_start; - - lock_extent(tree, page_start, page_end, GFP_NOFS); - while (block_start <= block_end) { - em = get_extent(inode, page, pg_offset, block_start, - block_end - block_start + 1, 1); - if (IS_ERR_OR_NULL(em)) - goto err; - - cur_end = min(block_end, extent_map_end(em) - 1); - block_off_start = block_start & (PAGE_CACHE_SIZE - 1); - block_off_end = block_off_start + blocksize; - isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); - - if (!PageUptodate(page) && isnew && - (block_off_end > to || block_off_start < from)) { - void *kaddr; - - kaddr = kmap_atomic(page, KM_USER0); - if (block_off_end > to) - memset(kaddr + to, 0, block_off_end - to); - if (block_off_start < from) - memset(kaddr + block_off_start, 0, - from - block_off_start); - flush_dcache_page(page); - kunmap_atomic(kaddr, KM_USER0); - } - if ((em->block_start != EXTENT_MAP_HOLE && - em->block_start != EXTENT_MAP_INLINE) && - !isnew && !PageUptodate(page) && - (block_off_end > to || block_off_start < from) && - !test_range_bit(tree, block_start, cur_end, - EXTENT_UPTODATE, 1, NULL)) { - u64 sector; - u64 extent_offset = block_start - em->start; - size_t iosize; - sector = (em->block_start + extent_offset) >> 9; - iosize = (cur_end - block_start + blocksize) & - ~((u64)blocksize - 1); - /* - * we've already got the extent locked, but we - * need to split the state such that our end_bio - * handler can clear the lock. 
- */ - set_extent_bit(tree, block_start, - block_start + iosize - 1, - EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); - ret = submit_extent_page(READ, tree, page, - sector, iosize, pg_offset, em->bdev, - NULL, 1, - end_bio_extent_preparewrite, 0, - 0, 0); - if (ret && !err) - err = ret; - iocount++; - block_start = block_start + iosize; - } else { - struct extent_state *cached = NULL; - - set_extent_uptodate(tree, block_start, cur_end, &cached, - GFP_NOFS); - unlock_extent_cached(tree, block_start, cur_end, - &cached, GFP_NOFS); - block_start = cur_end + 1; - } - pg_offset = block_start & (PAGE_CACHE_SIZE - 1); - free_extent_map(em); - } - if (iocount) { - wait_extent_bit(tree, orig_block_start, - block_end, EXTENT_LOCKED); - } - check_page_uptodate(tree, page); -err: - /* FIXME, zero out newly allocated blocks on error */ - return err; -} - /* * a helper for releasepage, this tests for areas of the page that * are locked or under IO and drops the related state bits if it is safe @@ -2927,33 +2734,6 @@ int try_release_extent_mapping(struct extent_map_tree *map, return try_release_extent_state(map, tree, page, mask); } -sector_t extent_bmap(struct address_space *mapping, sector_t iblock, - get_extent_t *get_extent) -{ - struct inode *inode = mapping->host; - struct extent_state *cached_state = NULL; - u64 start = iblock << inode->i_blkbits; - sector_t sector = 0; - size_t blksize = (1 << inode->i_blkbits); - struct extent_map *em; - - lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, - 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, start, blksize, 0); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, - start + blksize - 1, &cached_state, GFP_NOFS); - if (IS_ERR_OR_NULL(em)) - return 0; - - if (em->block_start > EXTENT_MAP_LAST_BYTE) - goto out; - - sector = (em->block_start + start - em->start) >> inode->i_blkbits; -out: - free_extent_map(em); - return sector; -} - /* * helper function for fiemap, which doesn't want to see any holes. 
* This maps until we find something past 'last' @@ -3437,13 +3217,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, return 0; } -int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, - struct extent_buffer *eb) -{ - return wait_on_extent_writeback(tree, eb->start, - eb->start + eb->len - 1); -} - int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index d1c5a57c998..4e8445a4757 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -153,15 +153,6 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; -static inline struct extent_state *extent_state_next(struct extent_state *state) -{ - struct rb_node *node; - node = rb_next(&state->rb_node); - if (!node) - return NULL; - return rb_entry(node, struct extent_state, rb_node); -} - typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, size_t pg_offset, @@ -237,17 +228,8 @@ int extent_readpages(struct extent_io_tree *tree, struct address_space *mapping, struct list_head *pages, unsigned nr_pages, get_extent_t get_extent); -int extent_prepare_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to, get_extent_t *get_extent); -int extent_commit_write(struct extent_io_tree *tree, - struct inode *inode, struct page *page, - unsigned from, unsigned to); -sector_t extent_bmap(struct address_space *mapping, sector_t iblock, - get_extent_t *get_extent); int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent); -int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end); int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); void set_page_extent_mapped(struct page *page); @@ -284,9 +266,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_offset, unsigned long len); void memset_extent_buffer(struct extent_buffer *eb, char c, unsigned long start, unsigned long len); -int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, - struct extent_buffer *eb); -int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end); int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 48fafcb85b0..0290b0c7b00 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1685,21 +1685,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) -{ - struct btrfs_free_space *info; - struct rb_node *n; - u64 ret = 0; - - for (n = rb_first(&block_group->free_space_offset); n; - n = rb_next(n)) { - info = rb_entry(n, struct btrfs_free_space, offset_index); - ret += info->bytes; - } - - return ret; -} - /* * for a given cluster, put all of its extents back into the free * space cache. 
If the block group passed doesn't match the block group diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 65c3b935289..12b2b5165f8 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h @@ -55,7 +55,6 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes, u64 empty_size); void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, u64 bytes); -u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_block_group_cache *block_group, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2840989737b..57122a5e847 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7185,58 +7185,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) return 0; } -int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, - int sync) -{ - struct btrfs_inode *binode; - struct inode *inode = NULL; - - spin_lock(&root->fs_info->delalloc_lock); - while (!list_empty(&root->fs_info->delalloc_inodes)) { - binode = list_entry(root->fs_info->delalloc_inodes.next, - struct btrfs_inode, delalloc_inodes); - inode = igrab(&binode->vfs_inode); - if (inode) { - list_move_tail(&binode->delalloc_inodes, - &root->fs_info->delalloc_inodes); - break; - } - - list_del_init(&binode->delalloc_inodes); - cond_resched_lock(&root->fs_info->delalloc_lock); - } - spin_unlock(&root->fs_info->delalloc_lock); - - if (inode) { - if (sync) { - filemap_write_and_wait(inode->i_mapping); - /* - * We have to do this because compression doesn't - * actually set PG_writeback until it submits the pages - * for IO, which happens in an async thread, so we could - * race and not actually wait for any writeback pages - * because they've not been submitted yet. Technically - * this could still be the case for the ordered stuff - * since the async thread may not have started to do its - * work yet. If this becomes the case then we need to - * figure out a way to make sure that in writepage we - * wait for any async pages to be submitted before - * returning so that fdatawait does what its supposed to - * do. - */ - btrfs_wait_ordered_range(inode, 0, (u64)-1); - } else { - filemap_flush(inode->i_mapping); - } - if (delay_iput) - btrfs_add_delayed_iput(inode); - else - iput(inode); - return 1; - } - return 0; -} - static int btrfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 6151f2ea38b..66fa43dc3f0 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -185,31 +185,6 @@ sleep: return 0; } -/* - * Very quick trylock, this does not spin or schedule. It returns - * 1 with the spinlock held if it was able to take the lock, or it - * returns zero if it was unable to take the lock. - * - * After this call, scheduling is not safe without first calling - * btrfs_set_lock_blocking() - */ -int btrfs_try_tree_lock(struct extent_buffer *eb) -{ - if (spin_trylock(&eb->lock)) { - if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { - /* - * we've got the spinlock, but the real owner is - * blocking. 
Drop the spinlock and return failure - */ - spin_unlock(&eb->lock); - return 0; - } - return 1; - } - /* someone else has the spinlock giveup */ - return 0; -} - int btrfs_tree_unlock(struct extent_buffer *eb) { /* diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 6c4ce457168..5c33a560a2f 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -21,8 +21,6 @@ int btrfs_tree_lock(struct extent_buffer *eb); int btrfs_tree_unlock(struct extent_buffer *eb); - -int btrfs_try_tree_lock(struct extent_buffer *eb); int btrfs_try_spin_lock(struct extent_buffer *eb); void btrfs_set_lock_blocking(struct extent_buffer *eb); diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index a97314cf6bd..82d569cb626 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -23,56 +23,6 @@ #include "ref-cache.h" #include "transaction.h" -/* - * leaf refs are used to cache the information about which extents - * a given leaf has references on. This allows us to process that leaf - * in btrfs_drop_snapshot without needing to read it back from disk. - */ - -/* - * kmalloc a leaf reference struct and update the counters for the - * total ref cache size - */ -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, - int nr_extents) -{ - struct btrfs_leaf_ref *ref; - size_t size = btrfs_leaf_ref_size(nr_extents); - - ref = kmalloc(size, GFP_NOFS); - if (ref) { - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size += size; - spin_unlock(&root->fs_info->ref_cache_lock); - - memset(ref, 0, sizeof(*ref)); - atomic_set(&ref->usage, 1); - INIT_LIST_HEAD(&ref->list); - } - return ref; -} - -/* - * free a leaf reference struct and update the counters for the - * total ref cache size - */ -void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) -{ - if (!ref) - return; - WARN_ON(atomic_read(&ref->usage) == 0); - if (atomic_dec_and_test(&ref->usage)) { - size_t size = btrfs_leaf_ref_size(ref->nritems); - - BUG_ON(ref->in_tree); - kfree(ref); - - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size -= size; - spin_unlock(&root->fs_info->ref_cache_lock); - } -} - static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, struct rb_node *node) { @@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) } return NULL; } - -int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, - int shared) -{ - struct btrfs_leaf_ref *ref = NULL; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; - - if (shared) - tree = &root->fs_info->shared_ref_tree; - if (!tree) - return 0; - - spin_lock(&tree->lock); - while (!list_empty(&tree->list)) { - ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); - BUG_ON(ref->tree != tree); - if (ref->root_gen > max_root_gen) - break; - if (!xchg(&ref->in_tree, 0)) { - cond_resched_lock(&tree->lock); - continue; - } - - rb_erase(&ref->rb_node, &tree->root); - list_del_init(&ref->list); - - spin_unlock(&tree->lock); - btrfs_free_leaf_ref(root, ref); - cond_resched(); - spin_lock(&tree->lock); - } - spin_unlock(&tree->lock); - return 0; -} - -/* - * find the leaf ref for a given extent. 
This returns the ref struct with - * a usage reference incremented - */ -struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, - u64 bytenr) -{ - struct rb_node *rb; - struct btrfs_leaf_ref *ref = NULL; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; -again: - if (tree) { - spin_lock(&tree->lock); - rb = tree_search(&tree->root, bytenr); - if (rb) - ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node); - if (ref) - atomic_inc(&ref->usage); - spin_unlock(&tree->lock); - if (ref) - return ref; - } - if (tree != &root->fs_info->shared_ref_tree) { - tree = &root->fs_info->shared_ref_tree; - goto again; - } - return NULL; -} - -/* - * add a fully filled in leaf ref struct - * remove all the refs older than a given root generation - */ -int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, - int shared) -{ - int ret = 0; - struct rb_node *rb; - struct btrfs_leaf_ref_tree *tree = root->ref_tree; - - if (shared) - tree = &root->fs_info->shared_ref_tree; - - spin_lock(&tree->lock); - rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node); - if (rb) { - ret = -EEXIST; - } else { - atomic_inc(&ref->usage); - ref->tree = tree; - ref->in_tree = 1; - list_add_tail(&ref->list, &tree->list); - } - spin_unlock(&tree->lock); - return ret; -} - -/* - * remove a single leaf ref from the tree. This drops the ref held by the tree - * only - */ -int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) -{ - struct btrfs_leaf_ref_tree *tree; - - if (!xchg(&ref->in_tree, 0)) - return 0; - - tree = ref->tree; - spin_lock(&tree->lock); - - rb_erase(&ref->rb_node, &tree->root); - list_del_init(&ref->list); - - spin_unlock(&tree->lock); - - btrfs_free_leaf_ref(root, ref); - return 0; -} diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index e2a55cb2072..24f7001f638 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h @@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents) return sizeof(struct btrfs_leaf_ref) + sizeof(struct btrfs_extent_info) * nr_extents; } - -static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree) -{ - tree->root = RB_ROOT; - INIT_LIST_HEAD(&tree->list); - spin_lock_init(&tree->lock); -} - -static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree) -{ - return RB_EMPTY_ROOT(&tree->root); -} - -void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree); -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, - int nr_extents); -void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); -struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, - u64 bytenr); -int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, - int shared); -int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, - int shared); -int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); #endif diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f7b799b151a..f726e72dd36 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -507,6 +507,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, return 1; } + static int should_ignore_root(struct btrfs_root *root) { struct btrfs_root *reloc_root; @@ -529,7 +530,6 @@ static int should_ignore_root(struct btrfs_root *root) */ return 1; } - /* * find reloc tree by address of tree root */ diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 59a94c1d981..3bcfe5a7c33 100644 --- a/fs/btrfs/root-tree.c 
+++ b/fs/btrfs/root-tree.c @@ -21,53 +21,6 @@ #include "disk-io.h" #include "print-tree.h" -/* - * search forward for a root, starting with objectid 'search_start' - * if a root key is found, the objectid we find is filled into 'found_objectid' - * and 0 is returned. < 0 is returned on error, 1 if there is nothing - * left in the tree. - */ -int btrfs_search_root(struct btrfs_root *root, u64 search_start, - u64 *found_objectid) -{ - struct btrfs_path *path; - struct btrfs_key search_key; - int ret; - - root = root->fs_info->tree_root; - search_key.objectid = search_start; - search_key.type = (u8)-1; - search_key.offset = (u64)-1; - - path = btrfs_alloc_path(); - BUG_ON(!path); -again: - ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); - if (ret < 0) - goto out; - if (ret == 0) { - ret = 1; - goto out; - } - if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { - ret = btrfs_next_leaf(root, path); - if (ret) - goto out; - } - btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]); - if (search_key.type != BTRFS_ROOT_ITEM_KEY) { - search_key.offset++; - btrfs_release_path(path); - goto again; - } - ret = 0; - *found_objectid = search_key.objectid; - -out: - btrfs_free_path(path); - return ret; -} - /* * lookup the root with the highest offset for a given objectid. The key we do * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4ce16ef702a..ab9633fd72a 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -189,71 +189,6 @@ static struct kobj_type btrfs_super_ktype = { /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs) -{ - int error; - char *name; - char c; - int len = strlen(fs->sb->s_id) + 1; - int i; - - name = kmalloc(len, GFP_NOFS); - if (!name) { - error = -ENOMEM; - goto fail; - } - - for (i = 0; i < len; i++) { - c = fs->sb->s_id[i]; - if (c == '/' || c == '\\') - c = '!'; - name[i] = c; - } - name[len] = '\0'; - - fs->super_kobj.kset = btrfs_kset; - error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype, - NULL, "%s", name); - kfree(name); - if (error) - goto fail; - - return 0; - -fail: - printk(KERN_ERR "btrfs: sysfs creation for super failed\n"); - return error; -} - -int btrfs_sysfs_add_root(struct btrfs_root *root) -{ - int error; - - error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype, - &root->fs_info->super_kobj, - "%s", root->name); - if (error) - goto fail; - - return 0; - -fail: - printk(KERN_ERR "btrfs: sysfs creation for root failed\n"); - return error; -} - -void btrfs_sysfs_del_root(struct btrfs_root *root) -{ - kobject_put(&root->root_kobj); - wait_for_completion(&root->kobj_unregister); -} - -void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) -{ - kobject_put(&fs->super_kobj); - wait_for_completion(&fs->kobj_unregister); -} - int btrfs_init_sysfs(void) { btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e21130d3f98..cd0b31a9ba3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -44,16 +44,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root); static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); -void btrfs_lock_volumes(void) -{ - mutex_lock(&uuid_mutex); -} - -void btrfs_unlock_volumes(void) -{ - mutex_unlock(&uuid_mutex); -} - static void lock_chunks(struct btrfs_root *root) { mutex_lock(&root->fs_info->chunk_mutex); @@ -3688,15 +3678,6 @@ static int read_one_dev(struct 
btrfs_root *root, return ret; } -int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) -{ - struct btrfs_dev_item *dev_item; - - dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, - dev_item); - return read_one_dev(root, buf, dev_item); -} - int btrfs_read_sys_array(struct btrfs_root *root) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 036b276b486..5669ae8ea1c 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -196,7 +196,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree); void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, int mirror_num, int async_submit); -int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder); int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, @@ -216,8 +215,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); int btrfs_init_new_device(struct btrfs_root *root, char *path); int btrfs_balance(struct btrfs_root *dev_root); -void btrfs_unlock_volumes(void); -void btrfs_lock_volumes(void); int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, -- cgit v1.2.3-70-g09d2 From 182608c8294b5fe90d7bbd4b026c82bf0a24b736 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 5 May 2011 13:13:16 +0200 Subject: btrfs: remove old unused commented out code Remove code which has been #if0-ed out for a very long time and does not seem to be related to current codebase anymore. Signed-off-by: David Sterba --- fs/btrfs/delayed-ref.c | 76 --- fs/btrfs/disk-io.c | 29 - fs/btrfs/extent-tree.c | 1661 +----------------------------------------------- fs/btrfs/inode.c | 172 ----- fs/btrfs/transaction.c | 134 ---- 5 files changed, 1 insertion(+), 2071 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index cb9b9a431fc..125cf76fcd0 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -709,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr) return btrfs_delayed_node_to_head(ref); return NULL; } - -/* - * add a delayed ref to the tree. This does all of the accounting required - * to make sure the delayed ref is eventually processed before this - * transaction commits. - * - * The main point of this call is to add and remove a backreference in a single - * shot, taking the lock only once, and only searching for the head node once. - * - * It is the same as doing a ref add and delete in two separate calls. 
- */ -#if 0 -int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, - u64 bytenr, u64 num_bytes, u64 orig_parent, - u64 parent, u64 orig_ref_root, u64 ref_root, - u64 orig_ref_generation, u64 ref_generation, - u64 owner_objectid, int pin) -{ - struct btrfs_delayed_ref *ref; - struct btrfs_delayed_ref *old_ref; - struct btrfs_delayed_ref_head *head_ref; - struct btrfs_delayed_ref_root *delayed_refs; - int ret; - - ref = kmalloc(sizeof(*ref), GFP_NOFS); - if (!ref) - return -ENOMEM; - - old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS); - if (!old_ref) { - kfree(ref); - return -ENOMEM; - } - - /* - * the parent = 0 case comes from cases where we don't actually - * know the parent yet. It will get updated later via a add/drop - * pair. - */ - if (parent == 0) - parent = bytenr; - if (orig_parent == 0) - orig_parent = bytenr; - - head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS); - if (!head_ref) { - kfree(ref); - kfree(old_ref); - return -ENOMEM; - } - delayed_refs = &trans->transaction->delayed_refs; - spin_lock(&delayed_refs->lock); - - /* - * insert both the head node and the new ref without dropping - * the spin lock - */ - ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes, - (u64)-1, 0, 0, 0, - BTRFS_UPDATE_DELAYED_HEAD, 0); - BUG_ON(ret); - - ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes, - parent, ref_root, ref_generation, - owner_objectid, BTRFS_ADD_DELAYED_REF, 0); - BUG_ON(ret); - - ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes, - orig_parent, orig_ref_root, - orig_ref_generation, owner_objectid, - BTRFS_DROP_DELAYED_REF, pin); - BUG_ON(ret); - spin_unlock(&delayed_refs->lock); - return 0; -} -#endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fa287c551ff..de7b4770ab1 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1348,35 +1348,6 @@ fail: return ERR_PTR(ret); } -#if 0 - struct btrfs_root *root; - int ret; - - root = btrfs_read_fs_root_no_name(fs_info, location); - if (!root) - return NULL; - - if (root->in_sysfs) - return root; - - ret = btrfs_set_root_name(root, name, namelen); - if (ret) { - free_extent_buffer(root->node); - kfree(root); - return ERR_PTR(ret); - } - - ret = btrfs_sysfs_add_root(root); - if (ret) { - free_extent_buffer(root->node); - kfree(root->name); - kfree(root); - return ERR_PTR(ret); - } - root->in_sysfs = 1; - return root; -#endif - static int btrfs_congested_fn(void *congested_data, int bdi_bits) { struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index fba1348cb2a..b457f195636 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2522,126 +2522,6 @@ out: return ret; } -#if 0 -int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct extent_buffer *buf, u32 nr_extents) -{ - struct btrfs_key key; - struct btrfs_file_extent_item *fi; - u64 root_gen; - u32 nritems; - int i; - int level; - int ret = 0; - int shared = 0; - - if (!root->ref_cows) - return 0; - - if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { - shared = 0; - root_gen = root->root_key.offset; - } else { - shared = 1; - root_gen = trans->transid - 1; - } - - level = btrfs_header_level(buf); - nritems = btrfs_header_nritems(buf); - - if (level == 0) { - struct btrfs_leaf_ref *ref; - struct btrfs_extent_info *info; - - ref = btrfs_alloc_leaf_ref(root, nr_extents); - if (!ref) { - ret = -ENOMEM; - goto out; - } - - ref->root_gen = root_gen; - ref->bytenr = buf->start; - 
ref->owner = btrfs_header_owner(buf); - ref->generation = btrfs_header_generation(buf); - ref->nritems = nr_extents; - info = ref->extents; - - for (i = 0; nr_extents > 0 && i < nritems; i++) { - u64 disk_bytenr; - btrfs_item_key_to_cpu(buf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(buf, i, - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(buf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); - if (disk_bytenr == 0) - continue; - - info->bytenr = disk_bytenr; - info->num_bytes = - btrfs_file_extent_disk_num_bytes(buf, fi); - info->objectid = key.objectid; - info->offset = key.offset; - info++; - } - - ret = btrfs_add_leaf_ref(root, ref, shared); - if (ret == -EEXIST && shared) { - struct btrfs_leaf_ref *old; - old = btrfs_lookup_leaf_ref(root, ref->bytenr); - BUG_ON(!old); - btrfs_remove_leaf_ref(root, old); - btrfs_free_leaf_ref(root, old); - ret = btrfs_add_leaf_ref(root, ref, shared); - } - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } -out: - return ret; -} - -/* when a block goes through cow, we update the reference counts of - * everything that block points to. The internal pointers of the block - * can be in just about any order, and it is likely to have clusters of - * things that are close together and clusters of things that are not. - * - * To help reduce the seeks that come with updating all of these reference - * counts, sort them by byte number before actual updates are done. - * - * struct refsort is used to match byte number to slot in the btree block. - * we sort based on the byte number and then use the slot to actually - * find the item. - * - * struct refsort is smaller than strcut btrfs_item and smaller than - * struct btrfs_key_ptr. Since we're currently limited to the page size - * for a btree block, there's no way for a kmalloc of refsorts for a - * single node to be bigger than a page. - */ -struct refsort { - u64 bytenr; - u32 slot; -}; - -/* - * for passing into sort() - */ -static int refsort_cmp(const void *a_void, const void *b_void) -{ - const struct refsort *a = a_void; - const struct refsort *b = b_void; - - if (a->bytenr < b->bytenr) - return -1; - if (a->bytenr > b->bytenr) - return 1; - return 0; -} -#endif - static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -3223,18 +3103,6 @@ commit_trans: goto again; } -#if 0 /* I hope we never need this code again, just in case */ - printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " - "%llu bytes_reserved, " "%llu bytes_pinned, " - "%llu bytes_readonly, %llu may use %llu total\n", - (unsigned long long)bytes, - (unsigned long long)data_sinfo->bytes_used, - (unsigned long long)data_sinfo->bytes_reserved, - (unsigned long long)data_sinfo->bytes_pinned, - (unsigned long long)data_sinfo->bytes_readonly, - (unsigned long long)data_sinfo->bytes_may_use, - (unsigned long long)data_sinfo->total_bytes); -#endif return -ENOSPC; } data_sinfo->bytes_may_use += bytes; @@ -3867,23 +3735,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) u64 meta_used; u64 data_used; int csum_size = btrfs_super_csum_size(&fs_info->super_copy); -#if 0 - /* - * per tree used space accounting can be inaccuracy, so we - * can't rely on it. 
- */ - spin_lock(&fs_info->extent_root->accounting_lock); - num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); - spin_unlock(&fs_info->extent_root->accounting_lock); - spin_lock(&fs_info->csum_root->accounting_lock); - num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); - spin_unlock(&fs_info->csum_root->accounting_lock); - - spin_lock(&fs_info->tree_root->accounting_lock); - num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); - spin_unlock(&fs_info->tree_root->accounting_lock); -#endif sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); spin_lock(&sinfo->lock); data_used = sinfo->bytes_used; @@ -3936,10 +3788,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->reserved = block_rsv->size; block_rsv->full = 1; } -#if 0 - printk(KERN_INFO"global block rsv size %llu reserved %llu\n", - block_rsv->size, block_rsv->reserved); -#endif + spin_unlock(&sinfo->lock); spin_unlock(&block_rsv->lock); } @@ -6596,1514 +6445,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, return ret; } -#if 0 -static unsigned long calc_ra(unsigned long start, unsigned long last, - unsigned long nr) -{ - return min(last, start + nr - 1); -} - -static noinline int relocate_inode_pages(struct inode *inode, u64 start, - u64 len) -{ - u64 page_start; - u64 page_end; - unsigned long first_index; - unsigned long last_index; - unsigned long i; - struct page *page; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct file_ra_state *ra; - struct btrfs_ordered_extent *ordered; - unsigned int total_read = 0; - unsigned int total_dirty = 0; - int ret = 0; - - ra = kzalloc(sizeof(*ra), GFP_NOFS); - if (!ra) - return -ENOMEM; - - mutex_lock(&inode->i_mutex); - first_index = start >> PAGE_CACHE_SHIFT; - last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; - - /* make sure the dirty trick played by the caller work */ - ret = invalidate_inode_pages2_range(inode->i_mapping, - first_index, last_index); - if (ret) - goto out_unlock; - - file_ra_state_init(ra, inode->i_mapping); - - for (i = first_index ; i <= last_index; i++) { - if (total_read % ra->ra_pages == 0) { - btrfs_force_ra(inode->i_mapping, ra, NULL, i, - calc_ra(i, last_index, ra->ra_pages)); - } - total_read++; -again: - if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) - BUG_ON(1); - page = grab_cache_page(inode->i_mapping, i); - if (!page) { - ret = -ENOMEM; - goto out_unlock; - } - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - ret = -EIO; - goto out_unlock; - } - } - wait_on_page_writeback(page); - - page_start = (u64)page->index << PAGE_CACHE_SHIFT; - page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); - - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - goto again; - } - set_page_extent_mapped(page); - - if (i == first_index) - set_extent_bits(io_tree, page_start, page_end, - EXTENT_BOUNDARY, GFP_NOFS); - btrfs_set_extent_delalloc(inode, page_start, page_end); - - set_page_dirty(page); - total_dirty++; - - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - } - -out_unlock: - kfree(ra); - mutex_unlock(&inode->i_mutex); - 
balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty); - return ret; -} - -static noinline int relocate_data_extent(struct inode *reloc_inode, - struct btrfs_key *extent_key, - u64 offset) -{ - struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; - struct extent_map *em; - u64 start = extent_key->objectid - offset; - u64 end = start + extent_key->offset - 1; - - em = alloc_extent_map(); - BUG_ON(!em); - - em->start = start; - em->len = extent_key->offset; - em->block_len = extent_key->offset; - em->block_start = extent_key->objectid; - em->bdev = root->fs_info->fs_devices->latest_bdev; - set_bit(EXTENT_FLAG_PINNED, &em->flags); - - /* setup extent map to cheat btrfs_readpage */ - lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - while (1) { - int ret; - write_lock(&em_tree->lock); - ret = add_extent_mapping(em_tree, em); - write_unlock(&em_tree->lock); - if (ret != -EEXIST) { - free_extent_map(em); - break; - } - btrfs_drop_extent_cache(reloc_inode, start, end, 0); - } - unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - - return relocate_inode_pages(reloc_inode, start, extent_key->offset); -} - -struct btrfs_ref_path { - u64 extent_start; - u64 nodes[BTRFS_MAX_LEVEL]; - u64 root_objectid; - u64 root_generation; - u64 owner_objectid; - u32 num_refs; - int lowest_level; - int current_level; - int shared_level; - - struct btrfs_key node_keys[BTRFS_MAX_LEVEL]; - u64 new_nodes[BTRFS_MAX_LEVEL]; -}; - -struct disk_extent { - u64 ram_bytes; - u64 disk_bytenr; - u64 disk_num_bytes; - u64 offset; - u64 num_bytes; - u8 compression; - u8 encryption; - u16 other_encoding; -}; - -static int is_cowonly_root(u64 root_objectid) -{ - if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || - root_objectid == BTRFS_EXTENT_TREE_OBJECTID || - root_objectid == BTRFS_CHUNK_TREE_OBJECTID || - root_objectid == BTRFS_DEV_TREE_OBJECTID || - root_objectid == BTRFS_TREE_LOG_OBJECTID || - root_objectid == BTRFS_CSUM_TREE_OBJECTID) - return 1; - return 0; -} - -static noinline int __next_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path, - int first_time) -{ - struct extent_buffer *leaf; - struct btrfs_path *path; - struct btrfs_extent_ref *ref; - struct btrfs_key key; - struct btrfs_key found_key; - u64 bytenr; - u32 nritems; - int level; - int ret = 1; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - if (first_time) { - ref_path->lowest_level = -1; - ref_path->current_level = -1; - ref_path->shared_level = -1; - goto walk_up; - } -walk_down: - level = ref_path->current_level - 1; - while (level >= -1) { - u64 parent; - if (level < ref_path->lowest_level) - break; - - if (level >= 0) - bytenr = ref_path->nodes[level]; - else - bytenr = ref_path->extent_start; - BUG_ON(bytenr == 0); - - parent = ref_path->nodes[level + 1]; - ref_path->nodes[level + 1] = 0; - ref_path->current_level = level; - BUG_ON(parent == 0); - - key.objectid = bytenr; - key.offset = parent + 1; - key.type = BTRFS_EXTENT_REF_KEY; - - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - BUG_ON(ret == 0); - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret > 0) - goto next; - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid == 
bytenr && - found_key.type == BTRFS_EXTENT_REF_KEY) { - if (level < ref_path->shared_level) - ref_path->shared_level = level; - goto found; - } -next: - level--; - btrfs_release_path(extent_root, path); - cond_resched(); - } - /* reached lowest level */ - ret = 1; - goto out; -walk_up: - level = ref_path->current_level; - while (level < BTRFS_MAX_LEVEL - 1) { - u64 ref_objectid; - - if (level >= 0) - bytenr = ref_path->nodes[level]; - else - bytenr = ref_path->extent_start; - - BUG_ON(bytenr == 0); - - key.objectid = bytenr; - key.offset = 0; - key.type = BTRFS_EXTENT_REF_KEY; - - ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(extent_root, path); - if (ret < 0) - goto out; - if (ret > 0) { - /* the extent was freed by someone */ - if (ref_path->lowest_level == level) - goto out; - btrfs_release_path(extent_root, path); - goto walk_down; - } - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != bytenr || - found_key.type != BTRFS_EXTENT_REF_KEY) { - /* the extent was freed by someone */ - if (ref_path->lowest_level == level) { - ret = 1; - goto out; - } - btrfs_release_path(extent_root, path); - goto walk_down; - } -found: - ref = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_extent_ref); - ref_objectid = btrfs_ref_objectid(leaf, ref); - if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) { - if (first_time) { - level = (int)ref_objectid; - BUG_ON(level >= BTRFS_MAX_LEVEL); - ref_path->lowest_level = level; - ref_path->current_level = level; - ref_path->nodes[level] = bytenr; - } else { - WARN_ON(ref_objectid != level); - } - } else { - WARN_ON(level != -1); - } - first_time = 0; - - if (ref_path->lowest_level == level) { - ref_path->owner_objectid = ref_objectid; - ref_path->num_refs = btrfs_ref_num_refs(leaf, ref); - } - - /* - * the block is tree root or the block isn't in reference - * counted tree. - */ - if (found_key.objectid == found_key.offset || - is_cowonly_root(btrfs_ref_root(leaf, ref))) { - ref_path->root_objectid = btrfs_ref_root(leaf, ref); - ref_path->root_generation = - btrfs_ref_generation(leaf, ref); - if (level < 0) { - /* special reference from the tree log */ - ref_path->nodes[0] = found_key.offset; - ref_path->current_level = 0; - } - ret = 0; - goto out; - } - - level++; - BUG_ON(ref_path->nodes[level] != 0); - ref_path->nodes[level] = found_key.offset; - ref_path->current_level = level; - - /* - * the reference was created in the running transaction, - * no need to continue walking up. - */ - if (btrfs_ref_generation(leaf, ref) == trans->transid) { - ref_path->root_objectid = btrfs_ref_root(leaf, ref); - ref_path->root_generation = - btrfs_ref_generation(leaf, ref); - ret = 0; - goto out; - } - - btrfs_release_path(extent_root, path); - cond_resched(); - } - /* reached max tree level, but no tree root found. 
*/ - BUG(); -out: - btrfs_free_path(path); - return ret; -} - -static int btrfs_first_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path, - u64 extent_start) -{ - memset(ref_path, 0, sizeof(*ref_path)); - ref_path->extent_start = extent_start; - - return __next_ref_path(trans, extent_root, ref_path, 1); -} - -static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_ref_path *ref_path) -{ - return __next_ref_path(trans, extent_root, ref_path, 0); -} - -static noinline int get_new_locations(struct inode *reloc_inode, - struct btrfs_key *extent_key, - u64 offset, int no_fragment, - struct disk_extent **extents, - int *nr_extents) -{ - struct btrfs_root *root = BTRFS_I(reloc_inode)->root; - struct btrfs_path *path; - struct btrfs_file_extent_item *fi; - struct extent_buffer *leaf; - struct disk_extent *exts = *extents; - struct btrfs_key found_key; - u64 cur_pos; - u64 last_byte; - u32 nritems; - int nr = 0; - int max = *nr_extents; - int ret; - - WARN_ON(!no_fragment && *extents); - if (!exts) { - max = 1; - exts = kmalloc(sizeof(*exts) * max, GFP_NOFS); - if (!exts) - return -ENOMEM; - } - - path = btrfs_alloc_path(); - if (!path) { - if (exts != *extents) - kfree(exts); - return -ENOMEM; - } - - cur_pos = extent_key->objectid - offset; - last_byte = extent_key->objectid + extent_key->offset; - ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, - cur_pos, 0); - if (ret < 0) - goto out; - if (ret > 0) { - ret = -ENOENT; - goto out; - } - - while (1) { - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - leaf = path->nodes[0]; - } - - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.offset != cur_pos || - found_key.type != BTRFS_EXTENT_DATA_KEY || - found_key.objectid != reloc_inode->i_ino) - break; - - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) != - BTRFS_FILE_EXTENT_REG || - btrfs_file_extent_disk_bytenr(leaf, fi) == 0) - break; - - if (nr == max) { - struct disk_extent *old = exts; - max *= 2; - exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); - if (!exts) { - ret = -ENOMEM; - goto out; - } - memcpy(exts, old, sizeof(*exts) * nr); - if (old != *extents) - kfree(old); - } - - exts[nr].disk_bytenr = - btrfs_file_extent_disk_bytenr(leaf, fi); - exts[nr].disk_num_bytes = - btrfs_file_extent_disk_num_bytes(leaf, fi); - exts[nr].offset = btrfs_file_extent_offset(leaf, fi); - exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); - exts[nr].compression = btrfs_file_extent_compression(leaf, fi); - exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); - exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, - fi); - BUG_ON(exts[nr].offset > 0); - BUG_ON(exts[nr].compression || exts[nr].encryption); - BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); - - cur_pos += exts[nr].num_bytes; - nr++; - - if (cur_pos + offset >= last_byte) - break; - - if (no_fragment) { - ret = 1; - goto out; - } - path->slots[0]++; - } - - BUG_ON(cur_pos + offset > last_byte); - if (cur_pos + offset < last_byte) { - ret = -ENOENT; - goto out; - } - ret = 0; -out: - btrfs_free_path(path); - if (ret) { - if (exts != *extents) - kfree(exts); - } else { - *extents = exts; - 
*nr_extents = nr; - } - return ret; -} - -static noinline int replace_one_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *extent_key, - struct btrfs_key *leaf_key, - struct btrfs_ref_path *ref_path, - struct disk_extent *new_extents, - int nr_extents) -{ - struct extent_buffer *leaf; - struct btrfs_file_extent_item *fi; - struct inode *inode = NULL; - struct btrfs_key key; - u64 lock_start = 0; - u64 lock_end = 0; - u64 num_bytes; - u64 ext_offset; - u64 search_end = (u64)-1; - u32 nritems; - int nr_scaned = 0; - int extent_locked = 0; - int extent_type; - int ret; - - memcpy(&key, leaf_key, sizeof(key)); - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { - if (key.objectid < ref_path->owner_objectid || - (key.objectid == ref_path->owner_objectid && - key.type < BTRFS_EXTENT_DATA_KEY)) { - key.objectid = ref_path->owner_objectid; - key.type = BTRFS_EXTENT_DATA_KEY; - key.offset = 0; - } - } - - while (1) { - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) - goto out; - - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); -next: - if (extent_locked && ret > 0) { - /* - * the file extent item was modified by someone - * before the extent got locked. - */ - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - extent_locked = 0; - } - - if (path->slots[0] >= nritems) { - if (++nr_scaned > 2) - break; - - BUG_ON(extent_locked); - ret = btrfs_next_leaf(root, path); - if (ret < 0) - goto out; - if (ret > 0) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - } - - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { - if ((key.objectid > ref_path->owner_objectid) || - (key.objectid == ref_path->owner_objectid && - key.type > BTRFS_EXTENT_DATA_KEY) || - key.offset >= search_end) - break; - } - - if (inode && key.objectid != inode->i_ino) { - BUG_ON(extent_locked); - btrfs_release_path(root, path); - mutex_unlock(&inode->i_mutex); - iput(inode); - inode = NULL; - continue; - } - - if (key.type != BTRFS_EXTENT_DATA_KEY) { - path->slots[0]++; - ret = 1; - goto next; - } - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - extent_type = btrfs_file_extent_type(leaf, fi); - if ((extent_type != BTRFS_FILE_EXTENT_REG && - extent_type != BTRFS_FILE_EXTENT_PREALLOC) || - (btrfs_file_extent_disk_bytenr(leaf, fi) != - extent_key->objectid)) { - path->slots[0]++; - ret = 1; - goto next; - } - - num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - ext_offset = btrfs_file_extent_offset(leaf, fi); - - if (search_end == (u64)-1) { - search_end = key.offset - ext_offset + - btrfs_file_extent_ram_bytes(leaf, fi); - } - - if (!extent_locked) { - lock_start = key.offset; - lock_end = lock_start + num_bytes - 1; - } else { - if (lock_start > key.offset || - lock_end + 1 < key.offset + num_bytes) { - unlock_extent(&BTRFS_I(inode)->io_tree, - lock_start, lock_end, GFP_NOFS); - extent_locked = 0; - } - } - - if (!inode) { - btrfs_release_path(root, path); - - inode = btrfs_iget_locked(root->fs_info->sb, - key.objectid, root); - if (inode->i_state & I_NEW) { - BTRFS_I(inode)->root = root; - BTRFS_I(inode)->location.objectid = - key.objectid; - BTRFS_I(inode)->location.type = - BTRFS_INODE_ITEM_KEY; - BTRFS_I(inode)->location.offset = 0; - btrfs_read_locked_inode(inode); - unlock_new_inode(inode); - } - /* - * some code call btrfs_commit_transaction while - * holding the 
i_mutex, so we can't use mutex_lock - * here. - */ - if (is_bad_inode(inode) || - !mutex_trylock(&inode->i_mutex)) { - iput(inode); - inode = NULL; - key.offset = (u64)-1; - goto skip; - } - } - - if (!extent_locked) { - struct btrfs_ordered_extent *ordered; - - btrfs_release_path(root, path); - - lock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, - lock_end); - if (ordered && - ordered->file_offset <= lock_end && - ordered->file_offset + ordered->len > lock_start) { - unlock_extent(&BTRFS_I(inode)->io_tree, - lock_start, lock_end, GFP_NOFS); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - key.offset += num_bytes; - goto skip; - } - if (ordered) - btrfs_put_ordered_extent(ordered); - - extent_locked = 1; - continue; - } - - if (nr_extents == 1) { - /* update extent pointer in place */ - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extents[0].disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extents[0].disk_num_bytes); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, key.offset, - key.offset + num_bytes - 1, 0); - - ret = btrfs_inc_extent_ref(trans, root, - new_extents[0].disk_bytenr, - new_extents[0].disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, - key.objectid); - BUG_ON(ret); - - ret = btrfs_free_extent(trans, root, - extent_key->objectid, - extent_key->offset, - leaf->start, - btrfs_header_owner(leaf), - btrfs_header_generation(leaf), - key.objectid, 0); - BUG_ON(ret); - - btrfs_release_path(root, path); - key.offset += num_bytes; - } else { - BUG_ON(1); -#if 0 - u64 alloc_hint; - u64 extent_len; - int i; - /* - * drop old extent pointer at first, then insert the - * new pointers one bye one - */ - btrfs_release_path(root, path); - ret = btrfs_drop_extents(trans, root, inode, key.offset, - key.offset + num_bytes, - key.offset, &alloc_hint); - BUG_ON(ret); - - for (i = 0; i < nr_extents; i++) { - if (ext_offset >= new_extents[i].num_bytes) { - ext_offset -= new_extents[i].num_bytes; - continue; - } - extent_len = min(new_extents[i].num_bytes - - ext_offset, num_bytes); - - ret = btrfs_insert_empty_item(trans, root, - path, &key, - sizeof(*fi)); - BUG_ON(ret); - - leaf = path->nodes[0]; - fi = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, fi, - trans->transid); - btrfs_set_file_extent_type(leaf, fi, - BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extents[i].disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extents[i].disk_num_bytes); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extents[i].ram_bytes); - - btrfs_set_file_extent_compression(leaf, fi, - new_extents[i].compression); - btrfs_set_file_extent_encryption(leaf, fi, - new_extents[i].encryption); - btrfs_set_file_extent_other_encoding(leaf, fi, - new_extents[i].other_encoding); - - btrfs_set_file_extent_num_bytes(leaf, fi, - extent_len); - ext_offset += new_extents[i].offset; - btrfs_set_file_extent_offset(leaf, fi, - ext_offset); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, key.offset, - key.offset + extent_len - 1, 0); - - ret = btrfs_inc_extent_ref(trans, root, - new_extents[i].disk_bytenr, - new_extents[i].disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, key.objectid); - BUG_ON(ret); - btrfs_release_path(root, path); - - inode_add_bytes(inode, extent_len); - - ext_offset = 0; - 
num_bytes -= extent_len; - key.offset += extent_len; - - if (num_bytes == 0) - break; - } - BUG_ON(i >= nr_extents); -#endif - } - - if (extent_locked) { - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - extent_locked = 0; - } -skip: - if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && - key.offset >= search_end) - break; - - cond_resched(); - } - ret = 0; -out: - btrfs_release_path(root, path); - if (inode) { - mutex_unlock(&inode->i_mutex); - if (extent_locked) { - unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, - lock_end, GFP_NOFS); - } - iput(inode); - } - return ret; -} - -int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf, u64 orig_start) -{ - int level; - int ret; - - BUG_ON(btrfs_header_generation(buf) != trans->transid); - BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); - - level = btrfs_header_level(buf); - if (level == 0) { - struct btrfs_leaf_ref *ref; - struct btrfs_leaf_ref *orig_ref; - - orig_ref = btrfs_lookup_leaf_ref(root, orig_start); - if (!orig_ref) - return -ENOENT; - - ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems); - if (!ref) { - btrfs_free_leaf_ref(root, orig_ref); - return -ENOMEM; - } - - ref->nritems = orig_ref->nritems; - memcpy(ref->extents, orig_ref->extents, - sizeof(ref->extents[0]) * ref->nritems); - - btrfs_free_leaf_ref(root, orig_ref); - - ref->root_gen = trans->transid; - ref->bytenr = buf->start; - ref->owner = btrfs_header_owner(buf); - ref->generation = btrfs_header_generation(buf); - - ret = btrfs_add_leaf_ref(root, ref, 0); - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } - return 0; -} - -static noinline int invalidate_extent_cache(struct btrfs_root *root, - struct extent_buffer *leaf, - struct btrfs_block_group_cache *group, - struct btrfs_root *target_root) -{ - struct btrfs_key key; - struct inode *inode = NULL; - struct btrfs_file_extent_item *fi; - struct extent_state *cached_state = NULL; - u64 num_bytes; - u64 skip_objectid = 0; - u32 nritems; - u32 i; - - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(leaf, &key, i); - if (key.objectid == skip_objectid || - key.type != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) - continue; - if (!inode || inode->i_ino != key.objectid) { - iput(inode); - inode = btrfs_ilookup(target_root->fs_info->sb, - key.objectid, target_root, 1); - } - if (!inode) { - skip_objectid = key.objectid; - continue; - } - num_bytes = btrfs_file_extent_num_bytes(leaf, fi); - - lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, 0, &cached_state, - GFP_NOFS); - btrfs_drop_extent_cache(inode, key.offset, - key.offset + num_bytes - 1, 1); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, - key.offset + num_bytes - 1, &cached_state, - GFP_NOFS); - cond_resched(); - } - iput(inode); - return 0; -} - -static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *leaf, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode) -{ - struct btrfs_key key; - struct btrfs_key extent_key; - struct btrfs_file_extent_item *fi; - struct btrfs_leaf_ref *ref; - struct disk_extent *new_extent; - u64 bytenr; - u64 num_bytes; - u32 nritems; - u32 i; - int 
ext_index; - int nr_extent; - int ret; - - new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); - if (!new_extent) - return -ENOMEM; - - ref = btrfs_lookup_leaf_ref(root, leaf->start); - BUG_ON(!ref); - - ext_index = -1; - nritems = btrfs_header_nritems(leaf); - for (i = 0; i < nritems; i++) { - btrfs_item_key_to_cpu(leaf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); - if (btrfs_file_extent_type(leaf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); - num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); - if (bytenr == 0) - continue; - - ext_index++; - if (bytenr >= group->key.objectid + group->key.offset || - bytenr + num_bytes <= group->key.objectid) - continue; - - extent_key.objectid = bytenr; - extent_key.offset = num_bytes; - extent_key.type = BTRFS_EXTENT_ITEM_KEY; - nr_extent = 1; - ret = get_new_locations(reloc_inode, &extent_key, - group->key.objectid, 1, - &new_extent, &nr_extent); - if (ret > 0) - continue; - BUG_ON(ret < 0); - - BUG_ON(ref->extents[ext_index].bytenr != bytenr); - BUG_ON(ref->extents[ext_index].num_bytes != num_bytes); - ref->extents[ext_index].bytenr = new_extent->disk_bytenr; - ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; - - btrfs_set_file_extent_disk_bytenr(leaf, fi, - new_extent->disk_bytenr); - btrfs_set_file_extent_disk_num_bytes(leaf, fi, - new_extent->disk_num_bytes); - btrfs_mark_buffer_dirty(leaf); - - ret = btrfs_inc_extent_ref(trans, root, - new_extent->disk_bytenr, - new_extent->disk_num_bytes, - leaf->start, - root->root_key.objectid, - trans->transid, key.objectid); - BUG_ON(ret); - - ret = btrfs_free_extent(trans, root, - bytenr, num_bytes, leaf->start, - btrfs_header_owner(leaf), - btrfs_header_generation(leaf), - key.objectid, 0); - BUG_ON(ret); - cond_resched(); - } - kfree(new_extent); - BUG_ON(ext_index + 1 != ref->nritems); - btrfs_free_leaf_ref(root, ref); - return 0; -} - -int btrfs_free_reloc_root(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - int ret; - - if (root->reloc_root) { - reloc_root = root->reloc_root; - root->reloc_root = NULL; - list_add(&reloc_root->dead_list, - &root->fs_info->dead_reloc_roots); - - btrfs_set_root_bytenr(&reloc_root->root_item, - reloc_root->node->start); - btrfs_set_root_level(&root->root_item, - btrfs_header_level(reloc_root->node)); - memset(&reloc_root->root_item.drop_progress, 0, - sizeof(struct btrfs_disk_key)); - reloc_root->root_item.drop_level = 0; - - ret = btrfs_update_root(trans, root->fs_info->tree_root, - &reloc_root->root_key, - &reloc_root->root_item); - BUG_ON(ret); - } - return 0; -} - -int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *reloc_root; - struct btrfs_root *prev_root = NULL; - struct list_head dead_roots; - int ret; - unsigned long nr; - - INIT_LIST_HEAD(&dead_roots); - list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots); - - while (!list_empty(&dead_roots)) { - reloc_root = list_entry(dead_roots.prev, - struct btrfs_root, dead_list); - list_del_init(&reloc_root->dead_list); - - BUG_ON(reloc_root->commit_root != NULL); - while (1) { - trans = btrfs_join_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - - mutex_lock(&root->fs_info->drop_mutex); - ret = btrfs_drop_snapshot(trans, reloc_root); - if (ret != -EAGAIN) - break; - mutex_unlock(&root->fs_info->drop_mutex); - - nr = 
trans->blocks_used; - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); - btrfs_btree_balance_dirty(root, nr); - } - - free_extent_buffer(reloc_root->node); - - ret = btrfs_del_root(trans, root->fs_info->tree_root, - &reloc_root->root_key); - BUG_ON(ret); - mutex_unlock(&root->fs_info->drop_mutex); - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, root); - BUG_ON(ret); - btrfs_btree_balance_dirty(root, nr); - - kfree(prev_root); - prev_root = reloc_root; - } - if (prev_root) { - btrfs_remove_leaf_refs(prev_root, (u64)-1, 0); - kfree(prev_root); - } - return 0; -} - -int btrfs_add_dead_reloc_root(struct btrfs_root *root) -{ - list_add(&root->dead_list, &root->fs_info->dead_reloc_roots); - return 0; -} - -int btrfs_cleanup_reloc_trees(struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - struct btrfs_trans_handle *trans; - struct btrfs_key location; - int found; - int ret; - - mutex_lock(&root->fs_info->tree_reloc_mutex); - ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL); - BUG_ON(ret); - found = !list_empty(&root->fs_info->dead_reloc_roots); - mutex_unlock(&root->fs_info->tree_reloc_mutex); - - if (found) { - trans = btrfs_start_transaction(root, 1); - BUG_ON(IS_ERR(trans)); - ret = btrfs_commit_transaction(trans, root); - BUG_ON(ret); - } - - location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID; - location.offset = (u64)-1; - location.type = BTRFS_ROOT_ITEM_KEY; - - reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); - BUG_ON(!reloc_root); - ret = btrfs_orphan_cleanup(reloc_root); - BUG_ON(ret); - return 0; -} - -static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_root *reloc_root; - struct extent_buffer *eb; - struct btrfs_root_item *root_item; - struct btrfs_key root_key; - int ret; - - BUG_ON(!root->ref_cows); - if (root->reloc_root) - return 0; - - root_item = kmalloc(sizeof(*root_item), GFP_NOFS); - if (!root_item) - return -ENOMEM; - - ret = btrfs_copy_root(trans, root, root->commit_root, - &eb, BTRFS_TREE_RELOC_OBJECTID); - BUG_ON(ret); - - root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; - root_key.offset = root->root_key.objectid; - root_key.type = BTRFS_ROOT_ITEM_KEY; - - memcpy(root_item, &root->root_item, sizeof(root_item)); - btrfs_set_root_refs(root_item, 0); - btrfs_set_root_bytenr(root_item, eb->start); - btrfs_set_root_level(root_item, btrfs_header_level(eb)); - btrfs_set_root_generation(root_item, trans->transid); - - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - - ret = btrfs_insert_root(trans, root->fs_info->tree_root, - &root_key, root_item); - BUG_ON(ret); - kfree(root_item); - - reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, - &root_key); - BUG_ON(IS_ERR(reloc_root)); - reloc_root->last_trans = trans->transid; - reloc_root->commit_root = NULL; - reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; - - root->reloc_root = reloc_root; - return 0; -} - -/* - * Core function of space balance. - * - * The idea is using reloc trees to relocate tree blocks in reference - * counted roots. There is one reloc tree for each subvol, and all - * reloc trees share same root key objectid. Reloc trees are snapshots - * of the latest committed roots of subvols (root->commit_root). - * - * To relocate a tree block referenced by a subvol, there are two steps. - * COW the block through subvol's reloc tree, then update block pointer - * in the subvol to point to the new block. 
Since all reloc trees share - * same root key objectid, doing special handing for tree blocks owned - * by them is easy. Once a tree block has been COWed in one reloc tree, - * we can use the resulting new block directly when the same block is - * required to COW again through other reloc trees. By this way, relocated - * tree blocks are shared between reloc trees, so they are also shared - * between subvols. - */ -static noinline int relocate_one_path(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *first_key, - struct btrfs_ref_path *ref_path, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode) -{ - struct btrfs_root *reloc_root; - struct extent_buffer *eb = NULL; - struct btrfs_key *keys; - u64 *nodes; - int level; - int shared_level; - int lowest_level = 0; - int ret; - - if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) - lowest_level = ref_path->owner_objectid; - - if (!root->ref_cows) { - path->lowest_level = lowest_level; - ret = btrfs_search_slot(trans, root, first_key, path, 0, 1); - BUG_ON(ret < 0); - path->lowest_level = 0; - btrfs_release_path(root, path); - return 0; - } - - mutex_lock(&root->fs_info->tree_reloc_mutex); - ret = init_reloc_tree(trans, root); - BUG_ON(ret); - reloc_root = root->reloc_root; - - shared_level = ref_path->shared_level; - ref_path->shared_level = BTRFS_MAX_LEVEL - 1; - - keys = ref_path->node_keys; - nodes = ref_path->new_nodes; - memset(&keys[shared_level + 1], 0, - sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1)); - memset(&nodes[shared_level + 1], 0, - sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1)); - - if (nodes[lowest_level] == 0) { - path->lowest_level = lowest_level; - ret = btrfs_search_slot(trans, reloc_root, first_key, path, - 0, 1); - BUG_ON(ret); - for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) { - eb = path->nodes[level]; - if (!eb || eb == reloc_root->node) - break; - nodes[level] = eb->start; - if (level == 0) - btrfs_item_key_to_cpu(eb, &keys[level], 0); - else - btrfs_node_key_to_cpu(eb, &keys[level], 0); - } - if (nodes[0] && - ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - eb = path->nodes[0]; - ret = replace_extents_in_leaf(trans, reloc_root, eb, - group, reloc_inode); - BUG_ON(ret); - } - btrfs_release_path(reloc_root, path); - } else { - ret = btrfs_merge_path(trans, reloc_root, keys, nodes, - lowest_level); - BUG_ON(ret); - } - - /* - * replace tree blocks in the fs tree with tree blocks in - * the reloc tree. 
- */ - ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level); - BUG_ON(ret < 0); - - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - ret = btrfs_search_slot(trans, reloc_root, first_key, path, - 0, 0); - BUG_ON(ret); - extent_buffer_get(path->nodes[0]); - eb = path->nodes[0]; - btrfs_release_path(reloc_root, path); - ret = invalidate_extent_cache(reloc_root, eb, group, root); - BUG_ON(ret); - free_extent_buffer(eb); - } - - mutex_unlock(&root->fs_info->tree_reloc_mutex); - path->lowest_level = 0; - return 0; -} - -static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct btrfs_key *first_key, - struct btrfs_ref_path *ref_path) -{ - int ret; - - ret = relocate_one_path(trans, root, path, first_key, - ref_path, NULL, NULL); - BUG_ON(ret); - - return 0; -} - -static noinline int del_extent_zero(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct btrfs_path *path, - struct btrfs_key *extent_key) -{ - int ret; - - ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); - if (ret) - goto out; - ret = btrfs_del_item(trans, extent_root, path); -out: - btrfs_release_path(extent_root, path); - return ret; -} - -static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, - struct btrfs_ref_path *ref_path) -{ - struct btrfs_key root_key; - - root_key.objectid = ref_path->root_objectid; - root_key.type = BTRFS_ROOT_ITEM_KEY; - if (is_cowonly_root(ref_path->root_objectid)) - root_key.offset = 0; - else - root_key.offset = (u64)-1; - - return btrfs_read_fs_root_no_name(fs_info, &root_key); -} - -static noinline int relocate_one_extent(struct btrfs_root *extent_root, - struct btrfs_path *path, - struct btrfs_key *extent_key, - struct btrfs_block_group_cache *group, - struct inode *reloc_inode, int pass) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *found_root; - struct btrfs_ref_path *ref_path = NULL; - struct disk_extent *new_extents = NULL; - int nr_extents = 0; - int loops; - int ret; - int level; - struct btrfs_key first_key; - u64 prev_block = 0; - - - trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(IS_ERR(trans)); - - if (extent_key->objectid == 0) { - ret = del_extent_zero(trans, extent_root, path, extent_key); - goto out; - } - - ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); - if (!ref_path) { - ret = -ENOMEM; - goto out; - } - - for (loops = 0; ; loops++) { - if (loops == 0) { - ret = btrfs_first_ref_path(trans, extent_root, ref_path, - extent_key->objectid); - } else { - ret = btrfs_next_ref_path(trans, extent_root, ref_path); - } - if (ret < 0) - goto out; - if (ret > 0) - break; - - if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID || - ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID) - continue; - - found_root = read_ref_root(extent_root->fs_info, ref_path); - BUG_ON(!found_root); - /* - * for reference counted tree, only process reference paths - * rooted at the latest committed root. 
- */ - if (found_root->ref_cows && - ref_path->root_generation != found_root->root_key.offset) - continue; - - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - if (pass == 0) { - /* - * copy data extents to new locations - */ - u64 group_start = group->key.objectid; - ret = relocate_data_extent(reloc_inode, - extent_key, - group_start); - if (ret < 0) - goto out; - break; - } - level = 0; - } else { - level = ref_path->owner_objectid; - } - - if (prev_block != ref_path->nodes[level]) { - struct extent_buffer *eb; - u64 block_start = ref_path->nodes[level]; - u64 block_size = btrfs_level_size(found_root, level); - - eb = read_tree_block(found_root, block_start, - block_size, 0); - if (!eb) { - ret = -EIO; - goto out; - } - btrfs_tree_lock(eb); - BUG_ON(level != btrfs_header_level(eb)); - - if (level == 0) - btrfs_item_key_to_cpu(eb, &first_key, 0); - else - btrfs_node_key_to_cpu(eb, &first_key, 0); - - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - prev_block = block_start; - } - - mutex_lock(&extent_root->fs_info->trans_mutex); - btrfs_record_root_in_trans(found_root); - mutex_unlock(&extent_root->fs_info->trans_mutex); - if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { - /* - * try to update data extent references while - * keeping metadata shared between snapshots. - */ - if (pass == 1) { - ret = relocate_one_path(trans, found_root, - path, &first_key, ref_path, - group, reloc_inode); - if (ret < 0) - goto out; - continue; - } - /* - * use fallback method to process the remaining - * references. - */ - if (!new_extents) { - u64 group_start = group->key.objectid; - new_extents = kmalloc(sizeof(*new_extents), - GFP_NOFS); - if (!new_extents) { - ret = -ENOMEM; - goto out; - } - nr_extents = 1; - ret = get_new_locations(reloc_inode, - extent_key, - group_start, 1, - &new_extents, - &nr_extents); - if (ret) - goto out; - } - ret = replace_one_extent(trans, found_root, - path, extent_key, - &first_key, ref_path, - new_extents, nr_extents); - } else { - ret = relocate_tree_block(trans, found_root, path, - &first_key, ref_path); - } - if (ret < 0) - goto out; - } - ret = 0; -out: - btrfs_end_transaction(trans, extent_root); - kfree(new_extents); - kfree(ref_path); - return ret; -} -#endif - static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) { u64 num_devices; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 57122a5e847..5ff52b644a6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3093,178 +3093,6 @@ out: return err; } -#if 0 -/* - * when truncating bytes in a file, it is possible to avoid reading - * the leaves that contain only checksum items. This can be the - * majority of the IO required to delete a large file, but it must - * be done carefully. - * - * The keys in the level just above the leaves are checked to make sure - * the lowest key in a given leaf is a csum key, and starts at an offset - * after the new size. - * - * Then the key for the next leaf is checked to make sure it also has - * a checksum item for the same file. If it does, we know our target leaf - * contains only checksum items, and it can be safely freed without reading - * it. - * - * This is just an optimization targeted at large files. It may do - * nothing. It will return 0 unless things went badly. 
- */ -static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, - struct inode *inode, u64 new_size) -{ - struct btrfs_key key; - int ret; - int nritems; - struct btrfs_key found_key; - struct btrfs_key other_key; - struct btrfs_leaf_ref *ref; - u64 leaf_gen; - u64 leaf_start; - - path->lowest_level = 1; - key.objectid = inode->i_ino; - key.type = BTRFS_CSUM_ITEM_KEY; - key.offset = new_size; -again: - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) - goto out; - - if (path->nodes[1] == NULL) { - ret = 0; - goto out; - } - ret = 0; - btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]); - nritems = btrfs_header_nritems(path->nodes[1]); - - if (!nritems) - goto out; - - if (path->slots[1] >= nritems) - goto next_node; - - /* did we find a key greater than anything we want to delete? */ - if (found_key.objectid > inode->i_ino || - (found_key.objectid == inode->i_ino && found_key.type > key.type)) - goto out; - - /* we check the next key in the node to make sure the leave contains - * only checksum items. This comparison doesn't work if our - * leaf is the last one in the node - */ - if (path->slots[1] + 1 >= nritems) { -next_node: - /* search forward from the last key in the node, this - * will bring us into the next node in the tree - */ - btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1); - - /* unlikely, but we inc below, so check to be safe */ - if (found_key.offset == (u64)-1) - goto out; - - /* search_forward needs a path with locks held, do the - * search again for the original key. It is possible - * this will race with a balance and return a path that - * we could modify, but this drop is just an optimization - * and is allowed to miss some leaves. - */ - btrfs_release_path(root, path); - found_key.offset++; - - /* setup a max key for search_forward */ - other_key.offset = (u64)-1; - other_key.type = key.type; - other_key.objectid = key.objectid; - - path->keep_locks = 1; - ret = btrfs_search_forward(root, &found_key, &other_key, - path, 0, 0); - path->keep_locks = 0; - if (ret || found_key.objectid != key.objectid || - found_key.type != key.type) { - ret = 0; - goto out; - } - - key.offset = found_key.offset; - btrfs_release_path(root, path); - cond_resched(); - goto again; - } - - /* we know there's one more slot after us in the tree, - * read that key so we can verify it is also a checksum item - */ - btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1); - - if (found_key.objectid < inode->i_ino) - goto next_key; - - if (found_key.type != key.type || found_key.offset < new_size) - goto next_key; - - /* - * if the key for the next leaf isn't a csum key from this objectid, - * we can't be sure there aren't good items inside this leaf. 
- * Bail out - */ - if (other_key.objectid != inode->i_ino || other_key.type != key.type) - goto out; - - leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]); - leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]); - /* - * it is safe to delete this leaf, it contains only - * csum items from this inode at an offset >= new_size - */ - ret = btrfs_del_leaf(trans, root, path, leaf_start); - BUG_ON(ret); - - if (root->ref_cows && leaf_gen < trans->transid) { - ref = btrfs_alloc_leaf_ref(root, 0); - if (ref) { - ref->root_gen = root->root_key.offset; - ref->bytenr = leaf_start; - ref->owner = 0; - ref->generation = leaf_gen; - ref->nritems = 0; - - btrfs_sort_leaf_ref(ref); - - ret = btrfs_add_leaf_ref(root, ref, 0); - WARN_ON(ret); - btrfs_free_leaf_ref(root, ref); - } else { - WARN_ON(1); - } - } -next_key: - btrfs_release_path(root, path); - - if (other_key.objectid == inode->i_ino && - other_key.type == key.type && other_key.offset > key.offset) { - key.offset = other_key.offset; - cond_resched(); - goto again; - } - ret = 0; -out: - /* fixup any changes we've made to the path */ - path->lowest_level = 0; - path->keep_locks = 0; - btrfs_release_path(root, path); - return ret; -} - -#endif - /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 955f76eb0fa..211aceeb9ea 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -346,49 +346,6 @@ out_unlock: return ret; } -#if 0 -/* - * rate limit against the drop_snapshot code. This helps to slow down new - * operations if the drop_snapshot code isn't able to keep up. - */ -static void throttle_on_drops(struct btrfs_root *root) -{ - struct btrfs_fs_info *info = root->fs_info; - int harder_count = 0; - -harder: - if (atomic_read(&info->throttles)) { - DEFINE_WAIT(wait); - int thr; - thr = atomic_read(&info->throttle_gen); - - do { - prepare_to_wait(&info->transaction_throttle, - &wait, TASK_UNINTERRUPTIBLE); - if (!atomic_read(&info->throttles)) { - finish_wait(&info->transaction_throttle, &wait); - break; - } - schedule(); - finish_wait(&info->transaction_throttle, &wait); - } while (thr == atomic_read(&info->throttle_gen)); - harder_count++; - - if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 && - harder_count < 2) - goto harder; - - if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 && - harder_count < 10) - goto harder; - - if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 && - harder_count < 20) - goto harder; - } -} -#endif - void btrfs_throttle(struct btrfs_root *root) { mutex_lock(&root->fs_info->trans_mutex); @@ -808,97 +765,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) return ret; } -#if 0 -/* - * when dropping snapshots, we generate a ton of delayed refs, and it makes - * sense not to join the transaction while it is trying to flush the current - * queue of delayed refs out. 
- * - * This is used by the drop snapshot code only - */ -static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info) -{ - DEFINE_WAIT(wait); - - mutex_lock(&info->trans_mutex); - while (info->running_transaction && - info->running_transaction->delayed_refs.flushing) { - prepare_to_wait(&info->transaction_wait, &wait, - TASK_UNINTERRUPTIBLE); - mutex_unlock(&info->trans_mutex); - - schedule(); - - mutex_lock(&info->trans_mutex); - finish_wait(&info->transaction_wait, &wait); - } - mutex_unlock(&info->trans_mutex); - return 0; -} - -/* - * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on - * all of them - */ -int btrfs_drop_dead_root(struct btrfs_root *root) -{ - struct btrfs_trans_handle *trans; - struct btrfs_root *tree_root = root->fs_info->tree_root; - unsigned long nr; - int ret; - - while (1) { - /* - * we don't want to jump in and create a bunch of - * delayed refs if the transaction is starting to close - */ - wait_transaction_pre_flush(tree_root->fs_info); - trans = btrfs_start_transaction(tree_root, 1); - - /* - * we've joined a transaction, make sure it isn't - * closing right now - */ - if (trans->transaction->delayed_refs.flushing) { - btrfs_end_transaction(trans, tree_root); - continue; - } - - ret = btrfs_drop_snapshot(trans, root); - if (ret != -EAGAIN) - break; - - ret = btrfs_update_root(trans, tree_root, - &root->root_key, - &root->root_item); - if (ret) - break; - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, tree_root); - BUG_ON(ret); - - btrfs_btree_balance_dirty(tree_root, nr); - cond_resched(); - } - BUG_ON(ret); - - ret = btrfs_del_root(trans, tree_root, &root->root_key); - BUG_ON(ret); - - nr = trans->blocks_used; - ret = btrfs_end_transaction(trans, tree_root); - BUG_ON(ret); - - free_extent_buffer(root->node); - free_extent_buffer(root->commit_root); - kfree(root); - - btrfs_btree_balance_dirty(tree_root, nr); - return ret; -} -#endif - /* * new snapshots need to be created at a very specific time in the * transaction commit. This does the actual creation -- cgit v1.2.3-70-g09d2 From a2de733c78fa7af51ba9670482fa7d392aa67c57 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Tue, 8 Mar 2011 14:14:00 +0100 Subject: btrfs: scrub This adds an initial implementation for scrub. It works quite straightforwardly: userspace issues an ioctl for each device in the fs. For each device, it enumerates the allocated device chunks. For each chunk, the contained extents are enumerated and the data checksums fetched. The extents are read sequentially and the checksums verified. If an error occurs (checksum or EIO), a good copy is searched for. If one is found, the bad copy will be rewritten. All enumerations happen from the commit roots. During a transaction commit, the scrubs get paused and afterwards continue from the new roots. This commit is based on the series originally posted to linux-btrfs, with some improvements that resulted from comments by David Sterba, Ilya Dryomov and Jan Schmidt. 
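As a rough illustration of the call flow (not part of this patch), a minimal sketch of a caller driving a whole-device scrub through the btrfs_scrub_dev() entry point declared in ctree.h below; example_scrub_one_device() is hypothetical, and the start/end arguments are assumed to be device byte offsets with (u64)-1 meaning "to the end of the device":

static int example_scrub_one_device(struct btrfs_root *root, u64 devid)
{
	struct btrfs_scrub_progress progress;
	int ret;

	/* scrub the whole device, pausing across transaction commits */
	ret = btrfs_scrub_dev(root, devid, 0, (u64)-1, &progress);
	if (ret)
		return ret;

	/* the progress struct accumulates per-device statistics */
	printk(KERN_INFO "scrub: %llu csum errors, %llu corrected, "
	       "%llu uncorrectable\n",
	       (unsigned long long)progress.csum_errors,
	       (unsigned long long)progress.corrected_errors,
	       (unsigned long long)progress.uncorrectable_errors);
	return 0;
}

A per-device loop like this is what the ioctl described above would boil down to on the kernel side.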
Signed-off-by: Arne Jansen --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.h | 37 +- fs/btrfs/disk-io.c | 12 + fs/btrfs/file-item.c | 8 +- fs/btrfs/inode.c | 2 +- fs/btrfs/ioctl.h | 37 ++ fs/btrfs/relocation.c | 2 +- fs/btrfs/scrub.c | 1492 ++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/transaction.c | 3 + fs/btrfs/tree-log.c | 6 +- fs/btrfs/volumes.c | 4 +- fs/btrfs/volumes.h | 6 + 12 files changed, 1600 insertions(+), 11 deletions(-) create mode 100644 fs/btrfs/scrub.c (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73ae..8fda3133c1b 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o scrub.o diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2e61fe1b6b8..31141ba6072 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -33,6 +34,7 @@ #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" +#include "ioctl.h" struct btrfs_trans_handle; struct btrfs_transaction; @@ -510,6 +512,12 @@ struct btrfs_extent_item_v0 { /* use full backrefs for extent pointers in the block */ #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) +/* + * this flag is only used internally by scrub and may be changed at any time + * it is only declared here to avoid collisions + */ +#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) + struct btrfs_tree_block_info { struct btrfs_disk_key key; u8 level; @@ -1077,6 +1085,17 @@ struct btrfs_fs_info { void *bdev_holder; + /* private scrub information */ + struct mutex scrub_lock; + atomic_t scrubs_running; + atomic_t scrub_pause_req; + atomic_t scrubs_paused; + atomic_t scrub_cancel_req; + wait_queue_head_t scrub_pause_wait; + struct rw_semaphore scrub_super_lock; + int scrub_workers_refcnt; + struct btrfs_workers scrub_workers; + /* filesystem state */ u64 fs_state; }; @@ -2472,8 +2491,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); -int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, - u64 end, struct list_head *list); +int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, + struct list_head *list, int search_commit); /* inode.c */ /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ @@ -2637,4 +2656,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, u64 *bytes_to_reserve); void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending); + +/* scrub.c */ +int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, + struct btrfs_scrub_progress *progress); +int btrfs_scrub_pause(struct btrfs_root *root); +int btrfs_scrub_pause_super(struct btrfs_root *root); +int btrfs_scrub_continue(struct btrfs_root *root); +int btrfs_scrub_continue_super(struct btrfs_root *root); +int btrfs_scrub_cancel(struct btrfs_root *root); +int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev); +int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); +int 
btrfs_scrub_progress(struct btrfs_root *root, u64 devid, + struct btrfs_scrub_progress *progress); + #endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fe5aec9b392..e48e8095c61 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1773,6 +1773,17 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + mutex_init(&fs_info->scrub_lock); + atomic_set(&fs_info->scrubs_running, 0); + atomic_set(&fs_info->scrub_pause_req, 0); + atomic_set(&fs_info->scrubs_paused, 0); + atomic_set(&fs_info->scrub_cancel_req, 0); + init_waitqueue_head(&fs_info->scrub_pause_wait); + init_rwsem(&fs_info->scrub_super_lock); + fs_info->scrub_workers_refcnt = 0; + btrfs_init_workers(&fs_info->scrub_workers, "scrub", + fs_info->thread_pool_size, &fs_info->generic_worker); + sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); sb->s_bdi = &fs_info->bdi; @@ -2599,6 +2610,7 @@ int close_ctree(struct btrfs_root *root) fs_info->closing = 1; smp_mb(); + btrfs_scrub_cancel(root); btrfs_put_block_group_cache(fs_info); /* diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b49..39ca7c1250e 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -266,7 +266,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, - struct list_head *list) + struct list_head *list, int search_commit) { struct btrfs_key key; struct btrfs_path *path; @@ -283,6 +283,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, path = btrfs_alloc_path(); BUG_ON(!path); + if (search_commit) { + path->skip_locking = 1; + path->reada = 2; + path->search_commit_root = 1; + } + key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key.offset = start; key.type = BTRFS_EXTENT_CSUM_KEY; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 870869aab0b..27142446b30 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1007,7 +1007,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root, LIST_HEAD(list); ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, - bytenr + num_bytes - 1, &list); + bytenr + num_bytes - 1, &list, 0); if (ret == 0 && list_empty(&list)) return 0; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 8fb382167b1..37ac030d64b 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -42,6 +42,43 @@ struct btrfs_ioctl_vol_args_v2 { char name[BTRFS_SUBVOL_NAME_MAX + 1]; }; +/* + * structure to report errors and progress to userspace, either as a + * result of a finished scrub, a canceled scrub or a progress inquiry + */ +struct btrfs_scrub_progress { + __u64 data_extents_scrubbed; /* # of data extents scrubbed */ + __u64 tree_extents_scrubbed; /* # of tree extents scrubbed */ + __u64 data_bytes_scrubbed; /* # of data bytes scrubbed */ + __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */ + __u64 read_errors; /* # of read errors encountered (EIO) */ + __u64 csum_errors; /* # of failed csum checks */ + __u64 verify_errors; /* # of occurrences where the metadata + * of a tree block did not match the + * expected values, like generation or + * logical */ + __u64 no_csum; /* # of 4k data blocks for which no csum + * is present, probably the result of + * data written with nodatasum */ + __u64 csum_discards; /* # of csum for which no data was found + * in the extent tree. 
*/ + __u64 super_errors; /* # of bad super blocks encountered */ + __u64 malloc_errors; /* # of internal kmalloc errors. These + * will likely cause an incomplete + * scrub */ + __u64 uncorrectable_errors; /* # of errors where either no intact + * copy was found or the writeback + * failed */ + __u64 corrected_errors; /* # of errors corrected */ + __u64 last_physical; /* last physical address scrubbed. In + * case a scrub was aborted, this can + * be used to restart the scrub */ + __u64 unverified_errors; /* # of occurrences where a read for a + * full (64k) bio failed, but the re- + * check succeeded for each 4k piece. + * Intermittent error. */ +}; + #define BTRFS_INO_LOOKUP_PATH_MAX 4080 struct btrfs_ioctl_ino_lookup_args { __u64 treeid; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 58250e09eb0..db1dffa9952 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -4242,7 +4242,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, - disk_bytenr + len - 1, &list); + disk_bytenr + len - 1, &list, 0); while (!list_empty(&list)) { sums = list_entry(list.next, struct btrfs_ordered_sum, list); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c new file mode 100644 index 00000000000..70f9fa772ee --- /dev/null +++ b/fs/btrfs/scrub.c @@ -0,0 +1,1492 @@ +/* + * Copyright (C) 2011 STRATO. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "ctree.h" +#include "volumes.h" +#include "disk-io.h" +#include "ordered-data.h" + +/* + * This is only the first step towards a full-featured scrub. It reads all + * extents and super blocks and verifies the checksums. In case a bad checksum + * is found or the extent cannot be read, good data will be written back if + * any can be found. + * + * Future enhancements: + * - To enhance the performance, better read-ahead strategies for the + * extent-tree can be employed. 
+ * - In case an unrepairable extent is encountered, track which files are + * affected and report them + * - In case of a read error on files with nodatasum, map the file and read + * the extent to trigger a writeback of the good copy + * - track and record media errors, throw out bad devices + * - add a readonly mode + * - add a mode to also read unallocated space + * - make the prefetch cancellable + */ + +struct scrub_bio; +struct scrub_page; +struct scrub_dev; +struct scrub_fixup; +static void scrub_bio_end_io(struct bio *bio, int err); +static void scrub_checksum(struct btrfs_work *work); +static int scrub_checksum_data(struct scrub_dev *sdev, + struct scrub_page *spag, void *buffer); +static int scrub_checksum_tree_block(struct scrub_dev *sdev, + struct scrub_page *spag, u64 logical, + void *buffer); +static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer); +static void scrub_recheck_end_io(struct bio *bio, int err); +static void scrub_fixup_worker(struct btrfs_work *work); +static void scrub_fixup(struct scrub_fixup *fixup); + +#define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ +#define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ + +struct scrub_page { + u64 flags; /* extent flags */ + u64 generation; + u64 mirror_num; + int have_csum; + u8 csum[BTRFS_CSUM_SIZE]; +}; + +struct scrub_bio { + int index; + struct scrub_dev *sdev; + struct bio *bio; + int err; + u64 logical; + u64 physical; + struct scrub_page spag[SCRUB_PAGES_PER_BIO]; + u64 count; + int next_free; + struct btrfs_work work; +}; + +struct scrub_dev { + struct scrub_bio *bios[SCRUB_BIOS_PER_DEV]; + struct btrfs_device *dev; + int first_free; + int curr; + atomic_t in_flight; + spinlock_t list_lock; + wait_queue_head_t list_wait; + u16 csum_size; + struct list_head csum_list; + atomic_t cancel_req; + /* + * statistics + */ + struct btrfs_scrub_progress stat; + spinlock_t stat_lock; +}; + +struct scrub_fixup { + struct scrub_dev *sdev; + struct bio *bio; + u64 logical; + u64 physical; + struct scrub_page spag; + struct btrfs_work work; + int err; + int recheck; +}; + +static void scrub_free_csums(struct scrub_dev *sdev) +{ + while (!list_empty(&sdev->csum_list)) { + struct btrfs_ordered_sum *sum; + sum = list_first_entry(&sdev->csum_list, + struct btrfs_ordered_sum, list); + list_del(&sum->list); + kfree(sum); + } +} + +static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) +{ + int i; + int j; + struct page *last_page; + + if (!sdev) + return; + + for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct scrub_bio *sbio = sdev->bios[i]; + struct bio *bio; + + if (!sbio) + break; + + bio = sbio->bio; + if (bio) { + last_page = NULL; + for (j = 0; j < bio->bi_vcnt; ++j) { + if (bio->bi_io_vec[j].bv_page == last_page) + continue; + last_page = bio->bi_io_vec[j].bv_page; + __free_page(last_page); + } + bio_put(bio); + } + kfree(sbio); + } + + scrub_free_csums(sdev); + kfree(sdev); +} + +static noinline_for_stack +struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) +{ + struct scrub_dev *sdev; + int i; + int j; + int ret; + struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; + + sdev = kzalloc(sizeof(*sdev), GFP_NOFS); + if (!sdev) + goto nomem; + sdev->dev = dev; + for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { + struct bio *bio; + struct scrub_bio *sbio; + + sbio = kzalloc(sizeof(*sbio), GFP_NOFS); + if (!sbio) + goto nomem; + sdev->bios[i] = sbio; + + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) + goto nomem; + + sbio->index = i; + sbio->sdev = sdev; + sbio->bio = 
bio; + sbio->count = 0; + sbio->work.func = scrub_checksum; + bio->bi_private = sdev->bios[i]; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = dev->bdev; + bio->bi_size = 0; + + for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { + struct page *page; + page = alloc_page(GFP_NOFS); + if (!page) + goto nomem; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto nomem; + } + WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); + + if (i != SCRUB_BIOS_PER_DEV-1) + sdev->bios[i]->next_free = i + 1; + else + sdev->bios[i]->next_free = -1; + } + sdev->first_free = 0; + sdev->curr = -1; + atomic_set(&sdev->in_flight, 0); + atomic_set(&sdev->cancel_req, 0); + sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy); + INIT_LIST_HEAD(&sdev->csum_list); + + spin_lock_init(&sdev->list_lock); + spin_lock_init(&sdev->stat_lock); + init_waitqueue_head(&sdev->list_wait); + return sdev; + +nomem: + scrub_free_dev(sdev); + return ERR_PTR(-ENOMEM); +} + +/* + * scrub_recheck_error gets called when either verification of the page + * failed or the bio failed to read, e.g. with EIO. In the latter case, + * recheck_error gets called for every page in the bio, even though only + * one may be bad + */ +static void scrub_recheck_error(struct scrub_bio *sbio, int ix) +{ + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct bio *bio = NULL; + struct page *page = NULL; + struct scrub_fixup *fixup = NULL; + int ret; + + /* + * while we're in here we do not want the transaction to commit. + * To prevent it, we increment scrubs_running. scrub_pause will + * have to wait until we're finished + * we can safely increment scrubs_running here, because we're + * in the context of the original bio which is still marked in_flight + */ + atomic_inc(&fs_info->scrubs_running); + + fixup = kzalloc(sizeof(*fixup), GFP_NOFS); + if (!fixup) + goto malloc_error; + + fixup->logical = sbio->logical + ix * PAGE_SIZE; + fixup->physical = sbio->physical + ix * PAGE_SIZE; + fixup->spag = sbio->spag[ix]; + fixup->sdev = sdev; + + bio = bio_alloc(GFP_NOFS, 1); + if (!bio) + goto malloc_error; + bio->bi_private = fixup; + bio->bi_size = 0; + bio->bi_bdev = sdev->dev->bdev; + fixup->bio = bio; + fixup->recheck = 0; + + page = alloc_page(GFP_NOFS); + if (!page) + goto malloc_error; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) + goto malloc_error; + + if (!sbio->err) { + /* + * shorter path: just a checksum error, go ahead and correct it + */ + scrub_fixup_worker(&fixup->work); + return; + } + + /* + * an I/O-error occurred for one of the blocks in the bio, not + * necessarily for this one, so first try to read it separately + */ + fixup->work.func = scrub_fixup_worker; + fixup->recheck = 1; + bio->bi_end_io = scrub_recheck_end_io; + bio->bi_sector = fixup->physical >> 9; + bio->bi_bdev = sdev->dev->bdev; + submit_bio(0, bio); + + return; + +malloc_error: + if (bio) + bio_put(bio); + if (page) + __free_page(page); + kfree(fixup); + spin_lock(&sdev->stat_lock); + ++sdev->stat.malloc_errors; + spin_unlock(&sdev->stat_lock); + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); +} + +static void scrub_recheck_end_io(struct bio *bio, int err) +{ + struct scrub_fixup *fixup = bio->bi_private; + struct btrfs_fs_info *fs_info = fixup->sdev->dev->dev_root->fs_info; + + fixup->err = err; + btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work); +} + +static int scrub_fixup_check(struct scrub_fixup *fixup) +{ + int ret = 1; + 
struct page *page; + void *buffer; + u64 flags = fixup->spag.flags; + + page = fixup->bio->bi_io_vec[0].bv_page; + buffer = kmap_atomic(page, KM_USER0); + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = scrub_checksum_data(fixup->sdev, + &fixup->spag, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + ret = scrub_checksum_tree_block(fixup->sdev, + &fixup->spag, + fixup->logical, + buffer); + } else { + WARN_ON(1); + } + kunmap_atomic(buffer, KM_USER0); + + return ret; +} + +static void scrub_fixup_worker(struct btrfs_work *work) +{ + struct scrub_fixup *fixup; + struct btrfs_fs_info *fs_info; + u64 flags; + int ret = 1; + + fixup = container_of(work, struct scrub_fixup, work); + fs_info = fixup->sdev->dev->dev_root->fs_info; + flags = fixup->spag.flags; + + if (fixup->recheck && fixup->err == 0) + ret = scrub_fixup_check(fixup); + + if (ret || fixup->err) + scrub_fixup(fixup); + + __free_page(fixup->bio->bi_io_vec[0].bv_page); + bio_put(fixup->bio); + + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); + + kfree(fixup); +} + +static void scrub_fixup_end_io(struct bio *bio, int err) +{ + complete((struct completion *)bio->bi_private); +} + +static void scrub_fixup(struct scrub_fixup *fixup) +{ + struct scrub_dev *sdev = fixup->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; + struct btrfs_multi_bio *multi = NULL; + struct bio *bio = fixup->bio; + u64 length; + int i; + int ret; + DECLARE_COMPLETION_ONSTACK(complete); + + if ((fixup->spag.flags & BTRFS_EXTENT_FLAG_DATA) && + (fixup->spag.have_csum == 0)) { + /* + * nodatasum, don't try to fix anything + * FIXME: we can do better, open the inode and trigger a + * writeback + */ + goto uncorrectable; + } + + length = PAGE_SIZE; + ret = btrfs_map_block(map_tree, REQ_WRITE, fixup->logical, &length, + &multi, 0); + if (ret || !multi || length < PAGE_SIZE) { + printk(KERN_ERR + "scrub_fixup: btrfs_map_block failed us for %llu\n", + (unsigned long long)fixup->logical); + WARN_ON(1); + return; + } + + if (multi->num_stripes == 1) { + /* there aren't any replicas */ + goto uncorrectable; + } + + /* + * first find a good copy + */ + for (i = 0; i < multi->num_stripes; ++i) { + if (i == fixup->spag.mirror_num) + continue; + + bio->bi_sector = multi->stripes[i].physical >> 9; + bio->bi_bdev = multi->stripes[i].dev->bdev; + bio->bi_size = PAGE_SIZE; + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + + submit_bio(0, bio); + + wait_for_completion(&complete); + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + /* I/O-error, this is not a good copy */ + continue; + + ret = scrub_fixup_check(fixup); + if (ret == 0) + break; + } + if (i == multi->num_stripes) + goto uncorrectable; + + /* + * the bio now contains good data, write it back + */ + bio->bi_sector = fixup->physical >> 9; + bio->bi_bdev = sdev->dev->bdev; + bio->bi_size = PAGE_SIZE; + bio->bi_next = NULL; + bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; + bio->bi_end_io = scrub_fixup_end_io; + bio->bi_private = &complete; + + submit_bio(REQ_WRITE, bio); + + wait_for_completion(&complete); + + if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + /* I/O-error, writeback failed, give up */ + goto uncorrectable; + + kfree(multi); + spin_lock(&sdev->stat_lock); + ++sdev->stat.corrected_errors; + spin_unlock(&sdev->stat_lock); + + if (printk_ratelimit()) + printk(KERN_ERR 
"btrfs: fixed up at %llu\n", + (unsigned long long)fixup->logical); + return; + +uncorrectable: + kfree(multi); + spin_lock(&sdev->stat_lock); + ++sdev->stat.uncorrectable_errors; + spin_unlock(&sdev->stat_lock); + + if (printk_ratelimit()) + printk(KERN_ERR "btrfs: unable to fixup at %llu\n", + (unsigned long long)fixup->logical); +} + +static void scrub_bio_end_io(struct bio *bio, int err) +{ + struct scrub_bio *sbio = bio->bi_private; + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + + sbio->err = err; + + btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); +} + +static void scrub_checksum(struct btrfs_work *work) +{ + struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); + struct scrub_dev *sdev = sbio->sdev; + struct page *page; + void *buffer; + int i; + u64 flags; + u64 logical; + int ret; + + if (sbio->err) { + struct bio *bio; + struct bio *old_bio; + + for (i = 0; i < sbio->count; ++i) + scrub_recheck_error(sbio, i); + spin_lock(&sdev->stat_lock); + ++sdev->stat.read_errors; + spin_unlock(&sdev->stat_lock); + + /* + * FIXME: allocate a new bio after a media error. I haven't + * figured out how to reuse this one + */ + old_bio = sbio->bio; + bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); + if (!bio) { + /* + * alloc failed. cancel the scrub and don't requeue + * this sbio + */ + printk(KERN_ERR "btrfs scrub: allocation failure, " + "cancelling scrub\n"); + atomic_inc(&sdev->dev->dev_root->fs_info-> + scrub_cancel_req); + goto out_no_enqueue; + } + sbio->bio = bio; + bio->bi_private = sbio; + bio->bi_end_io = scrub_bio_end_io; + bio->bi_sector = 0; + bio->bi_bdev = sbio->sdev->dev->bdev; + bio->bi_size = 0; + for (i = 0; i < SCRUB_PAGES_PER_BIO; ++i) { + struct page *page; + page = old_bio->bi_io_vec[i].bv_page; + bio_add_page(bio, page, PAGE_SIZE, 0); + } + bio_put(old_bio); + goto out; + } + for (i = 0; i < sbio->count; ++i) { + page = sbio->bio->bi_io_vec[i].bv_page; + buffer = kmap_atomic(page, KM_USER0); + flags = sbio->spag[i].flags; + logical = sbio->logical + i * PAGE_SIZE; + ret = 0; + if (flags & BTRFS_EXTENT_FLAG_DATA) { + ret = scrub_checksum_data(sdev, sbio->spag + i, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + ret = scrub_checksum_tree_block(sdev, sbio->spag + i, + logical, buffer); + } else if (flags & BTRFS_EXTENT_FLAG_SUPER) { + BUG_ON(i); + (void)scrub_checksum_super(sbio, buffer); + } else { + WARN_ON(1); + } + kunmap_atomic(buffer, KM_USER0); + if (ret) + scrub_recheck_error(sbio, i); + } + +out: + spin_lock(&sdev->list_lock); + sbio->next_free = sdev->first_free; + sdev->first_free = sbio->index; + spin_unlock(&sdev->list_lock); +out_no_enqueue: + atomic_dec(&sdev->in_flight); + wake_up(&sdev->list_wait); +} + +static int scrub_checksum_data(struct scrub_dev *sdev, + struct scrub_page *spag, void *buffer) +{ + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + struct btrfs_root *root = sdev->dev->dev_root; + + if (!spag->have_csum) + return 0; + + crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, spag->csum, sdev->csum_size)) + fail = 1; + + spin_lock(&sdev->stat_lock); + ++sdev->stat.data_extents_scrubbed; + sdev->stat.data_bytes_scrubbed += PAGE_SIZE; + if (fail) + ++sdev->stat.csum_errors; + spin_unlock(&sdev->stat_lock); + + return fail; +} + +static int scrub_checksum_tree_block(struct scrub_dev *sdev, + struct scrub_page *spag, u64 logical, + void *buffer) +{ + struct btrfs_header *h; + 
struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + int crc_fail = 0; + + /* + * we don't use the getter functions here, as we + * a) don't have an extent buffer and + * b) the page is already kmapped + */ + h = (struct btrfs_header *)buffer; + + if (logical != le64_to_cpu(h->bytenr)) + ++fail; + + if (spag->generation != le64_to_cpu(h->generation)) + ++fail; + + if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) + ++fail; + + if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, + BTRFS_UUID_SIZE)) + ++fail; + + crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, + PAGE_SIZE - BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, h->csum, sdev->csum_size)) + ++crc_fail; + + spin_lock(&sdev->stat_lock); + ++sdev->stat.tree_extents_scrubbed; + sdev->stat.tree_bytes_scrubbed += PAGE_SIZE; + if (crc_fail) + ++sdev->stat.csum_errors; + if (fail) + ++sdev->stat.verify_errors; + spin_unlock(&sdev->stat_lock); + + return fail || crc_fail; +} + +static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) +{ + struct btrfs_super_block *s; + u64 logical; + struct scrub_dev *sdev = sbio->sdev; + struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u8 csum[BTRFS_CSUM_SIZE]; + u32 crc = ~(u32)0; + int fail = 0; + + s = (struct btrfs_super_block *)buffer; + logical = sbio->logical; + + if (logical != le64_to_cpu(s->bytenr)) + ++fail; + + if (sbio->spag[0].generation != le64_to_cpu(s->generation)) + ++fail; + + if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) + ++fail; + + crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, + PAGE_SIZE - BTRFS_CSUM_SIZE); + btrfs_csum_final(crc, csum); + if (memcmp(csum, s->csum, sbio->sdev->csum_size)) + ++fail; + + if (fail) { + /* + * if we find an error in a super block, we just report it. 
+ * They will get written with the next transaction commit + * anyway + */ + spin_lock(&sdev->stat_lock); + ++sdev->stat.super_errors; + spin_unlock(&sdev->stat_lock); + } + + return fail; +} + +static int scrub_submit(struct scrub_dev *sdev) +{ + struct scrub_bio *sbio; + + if (sdev->curr == -1) + return 0; + + sbio = sdev->bios[sdev->curr]; + + sbio->bio->bi_sector = sbio->physical >> 9; + sbio->bio->bi_size = sbio->count * PAGE_SIZE; + sbio->bio->bi_next = NULL; + sbio->bio->bi_flags |= 1 << BIO_UPTODATE; + sbio->bio->bi_comp_cpu = -1; + sbio->bio->bi_bdev = sdev->dev->bdev; + sbio->err = 0; + sdev->curr = -1; + atomic_inc(&sdev->in_flight); + + submit_bio(0, sbio->bio); + + return 0; +} + +static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, + u64 physical, u64 flags, u64 gen, u64 mirror_num, + u8 *csum, int force) +{ + struct scrub_bio *sbio; + +again: + /* + * grab a fresh bio or wait for one to become available + */ + while (sdev->curr == -1) { + spin_lock(&sdev->list_lock); + sdev->curr = sdev->first_free; + if (sdev->curr != -1) { + sdev->first_free = sdev->bios[sdev->curr]->next_free; + sdev->bios[sdev->curr]->next_free = -1; + sdev->bios[sdev->curr]->count = 0; + spin_unlock(&sdev->list_lock); + } else { + spin_unlock(&sdev->list_lock); + wait_event(sdev->list_wait, sdev->first_free != -1); + } + } + sbio = sdev->bios[sdev->curr]; + if (sbio->count == 0) { + sbio->physical = physical; + sbio->logical = logical; + } else if (sbio->physical + sbio->count * PAGE_SIZE != physical) { + scrub_submit(sdev); + goto again; + } + sbio->spag[sbio->count].flags = flags; + sbio->spag[sbio->count].generation = gen; + sbio->spag[sbio->count].have_csum = 0; + sbio->spag[sbio->count].mirror_num = mirror_num; + if (csum) { + sbio->spag[sbio->count].have_csum = 1; + memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); + } + ++sbio->count; + if (sbio->count == SCRUB_PAGES_PER_BIO || force) + scrub_submit(sdev); + + return 0; +} + +static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len, + u8 *csum) +{ + struct btrfs_ordered_sum *sum = NULL; + int ret = 0; + unsigned long i; + unsigned long num_sectors; + u32 sectorsize = sdev->dev->dev_root->sectorsize; + + while (!list_empty(&sdev->csum_list)) { + sum = list_first_entry(&sdev->csum_list, + struct btrfs_ordered_sum, list); + if (sum->bytenr > logical) + return 0; + if (sum->bytenr + sum->len > logical) + break; + + ++sdev->stat.csum_discards; + list_del(&sum->list); + kfree(sum); + sum = NULL; + } + if (!sum) + return 0; + + num_sectors = sum->len / sectorsize; + for (i = 0; i < num_sectors; ++i) { + if (sum->sums[i].bytenr == logical) { + memcpy(csum, &sum->sums[i].sum, sdev->csum_size); + ret = 1; + break; + } + } + if (ret && i == num_sectors - 1) { + list_del(&sum->list); + kfree(sum); + } + return ret; +} + +/* scrub extent tries to collect up to 64 kB for each bio */ +static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len, + u64 physical, u64 flags, u64 gen, u64 mirror_num) +{ + int ret; + u8 csum[BTRFS_CSUM_SIZE]; + + while (len) { + u64 l = min_t(u64, len, PAGE_SIZE); + int have_csum = 0; + + if (flags & BTRFS_EXTENT_FLAG_DATA) { + /* push csums to sbio */ + have_csum = scrub_find_csum(sdev, logical, l, csum); + if (have_csum == 0) + ++sdev->stat.no_csum; + } + ret = scrub_page(sdev, logical, l, physical, flags, gen, + mirror_num, have_csum ? 
csum : NULL, 0); + if (ret) + return ret; + len -= l; + logical += l; + physical += l; + } + return 0; +} + +static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, + struct map_lookup *map, int num, u64 base, u64 length) +{ + struct btrfs_path *path; + struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; + struct btrfs_root *root = fs_info->extent_root; + struct btrfs_root *csum_root = fs_info->csum_root; + struct btrfs_extent_item *extent; + u64 flags; + int ret; + int slot; + int i; + u64 nstripes; + int start_stripe; + struct extent_buffer *l; + struct btrfs_key key; + u64 physical; + u64 logical; + u64 generation; + u64 mirror_num; + + u64 increment = map->stripe_len; + u64 offset; + + nstripes = length; + offset = 0; + do_div(nstripes, map->stripe_len); + if (map->type & BTRFS_BLOCK_GROUP_RAID0) { + offset = map->stripe_len * num; + increment = map->stripe_len * map->num_stripes; + mirror_num = 0; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { + int factor = map->num_stripes / map->sub_stripes; + offset = map->stripe_len * (num / map->sub_stripes); + increment = map->stripe_len * factor; + mirror_num = num % map->sub_stripes; + } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { + increment = map->stripe_len; + mirror_num = num % map->num_stripes; + } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { + increment = map->stripe_len; + mirror_num = num % map->num_stripes; + } else { + increment = map->stripe_len; + mirror_num = 0; + } + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + path->reada = 2; + path->search_commit_root = 1; + path->skip_locking = 1; + + /* + * find all extents for each stripe and just read them to get + * them into the page cache + * FIXME: we can do better; build more intelligent prefetching + */ + logical = base + offset; + physical = map->stripes[num].physical; + ret = 0; + for (i = 0; i < nstripes; ++i) { + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + if (key.objectid != logical) { + ret = btrfs_previous_item(root, path, 0, + BTRFS_EXTENT_ITEM_KEY); + if (ret < 0) + goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid >= logical + map->stripe_len) + break; + + path->slots[0]++; + } + btrfs_release_path(root, path); + logical += increment; + physical += map->stripe_len; + cond_resched(); + } + + /* + * collect all data csums for the stripe to avoid seeking during + * the scrub. With crc32 this currently ends up being about 1MB + */ + start_stripe = 0; +again: + logical = base + offset + start_stripe * increment; + for (i = start_stripe; i < nstripes; ++i) { + ret = btrfs_lookup_csums_range(csum_root, logical, + logical + map->stripe_len - 1, + &sdev->csum_list, 1); + if (ret) + goto out; + + logical += increment; + cond_resched(); + } + /* + * now find all extents for each stripe and scrub them + */ + logical = base + offset + start_stripe * increment; + physical = map->stripes[num].physical + start_stripe * map->stripe_len; + ret = 0; + for (i = start_stripe; i < nstripes; ++i) { + /* + * canceled?
+ */ + if (atomic_read(&fs_info->scrub_cancel_req) || + atomic_read(&sdev->cancel_req)) { + ret = -ECANCELED; + goto out; + } + /* + * check to see if we have to pause + */ + if (atomic_read(&fs_info->scrub_pause_req)) { + /* push queued extents */ + scrub_submit(sdev); + wait_event(sdev->list_wait, + atomic_read(&sdev->in_flight) == 0); + atomic_inc(&fs_info->scrubs_paused); + wake_up(&fs_info->scrub_pause_wait); + mutex_lock(&fs_info->scrub_lock); + while (atomic_read(&fs_info->scrub_pause_req)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrub_pause_req) == 0); + mutex_lock(&fs_info->scrub_lock); + } + atomic_dec(&fs_info->scrubs_paused); + mutex_unlock(&fs_info->scrub_lock); + wake_up(&fs_info->scrub_pause_wait); + scrub_free_csums(sdev); + start_stripe = i; + goto again; + } + + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + l = path->nodes[0]; + slot = path->slots[0]; + btrfs_item_key_to_cpu(l, &key, slot); + if (key.objectid != logical) { + ret = btrfs_previous_item(root, path, 0, + BTRFS_EXTENT_ITEM_KEY); + if (ret < 0) + goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid + key.offset <= logical) + goto next; + + if (key.objectid >= logical + map->stripe_len) + break; + + if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) + goto next; + + extent = btrfs_item_ptr(l, slot, + struct btrfs_extent_item); + flags = btrfs_extent_flags(l, extent); + generation = btrfs_extent_generation(l, extent); + + if (key.objectid < logical && + (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) { + printk(KERN_ERR + "btrfs scrub: tree block %llu spanning " + "stripes, ignored. logical=%llu\n", + (unsigned long long)key.objectid, + (unsigned long long)logical); + goto next; + } + + /* + * trim extent to this stripe + */ + if (key.objectid < logical) { + key.offset -= logical - key.objectid; + key.objectid = logical; + } + if (key.objectid + key.offset > + logical + map->stripe_len) { + key.offset = logical + map->stripe_len - + key.objectid; + } + + ret = scrub_extent(sdev, key.objectid, key.offset, + key.objectid - logical + physical, + flags, generation, mirror_num); + if (ret) + goto out; + +next: + path->slots[0]++; + } + btrfs_release_path(root, path); + logical += increment; + physical += map->stripe_len; + spin_lock(&sdev->stat_lock); + sdev->stat.last_physical = physical; + spin_unlock(&sdev->stat_lock); + } + /* push queued extents */ + scrub_submit(sdev); + +out: + btrfs_free_path(path); + return ret < 0 ? 
ret : 0; +} + +static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, + u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) +{ + struct btrfs_mapping_tree *map_tree = + &sdev->dev->dev_root->fs_info->mapping_tree; + struct map_lookup *map; + struct extent_map *em; + int i; + int ret = -EINVAL; + + read_lock(&map_tree->map_tree.lock); + em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); + read_unlock(&map_tree->map_tree.lock); + + if (!em) + return -EINVAL; + + map = (struct map_lookup *)em->bdev; + if (em->start != chunk_offset) + goto out; + + if (em->len < length) + goto out; + + for (i = 0; i < map->num_stripes; ++i) { + if (map->stripes[i].dev == sdev->dev) { + ret = scrub_stripe(sdev, map, i, chunk_offset, length); + if (ret) + goto out; + } + } +out: + free_extent_map(em); + + return ret; +} + +static noinline_for_stack +int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) +{ + struct btrfs_dev_extent *dev_extent = NULL; + struct btrfs_path *path; + struct btrfs_root *root = sdev->dev->dev_root; + struct btrfs_fs_info *fs_info = root->fs_info; + u64 length; + u64 chunk_tree; + u64 chunk_objectid; + u64 chunk_offset; + int ret; + int slot; + struct extent_buffer *l; + struct btrfs_key key; + struct btrfs_key found_key; + struct btrfs_block_group_cache *cache; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + path->reada = 2; + path->search_commit_root = 1; + path->skip_locking = 1; + + key.objectid = sdev->dev->devid; + key.offset = 0ull; + key.type = BTRFS_DEV_EXTENT_KEY; + + + while (1) { + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + ret = 0; + + l = path->nodes[0]; + slot = path->slots[0]; + + btrfs_item_key_to_cpu(l, &found_key, slot); + + if (found_key.objectid != sdev->dev->devid) + break; + + if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY) + break; + + if (found_key.offset >= end) + break; + + if (found_key.offset < key.offset) + break; + + dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); + length = btrfs_dev_extent_length(l, dev_extent); + + if (found_key.offset + length <= start) { + key.offset = found_key.offset + length; + btrfs_release_path(root, path); + continue; + } + + chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); + chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); + chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); + + /* + * get a reference on the corresponding block group to prevent + * the chunk from going away while we scrub it + */ + cache = btrfs_lookup_block_group(fs_info, chunk_offset); + if (!cache) { + ret = -ENOENT; + goto out; + } + ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, + chunk_offset, length); + btrfs_put_block_group(cache); + if (ret) + break; + + key.offset = found_key.offset + length; + btrfs_release_path(root, path); + } + +out: + btrfs_free_path(path); + return ret; +} + +static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) +{ + int i; + u64 bytenr; + u64 gen; + int ret; + struct btrfs_device *device = sdev->dev; + struct btrfs_root *root = device->dev_root; + + gen = root->fs_info->last_trans_committed; + + for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { + bytenr = btrfs_sb_offset(i); + if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) + break; + + ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr, + BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); + if (ret) + return ret; + } + wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); + + return 0;
+} + +/* + * get a reference count on fs_info->scrub_workers. start worker if necessary + */ +static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (fs_info->scrub_workers_refcnt == 0) + btrfs_start_workers(&fs_info->scrub_workers, 1); + ++fs_info->scrub_workers_refcnt; + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (--fs_info->scrub_workers_refcnt == 0) + btrfs_stop_workers(&fs_info->scrub_workers); + WARN_ON(fs_info->scrub_workers_refcnt < 0); + mutex_unlock(&fs_info->scrub_lock); +} + + +int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, + struct btrfs_scrub_progress *progress) +{ + struct scrub_dev *sdev; + struct btrfs_fs_info *fs_info = root->fs_info; + int ret; + struct btrfs_device *dev; + + if (root->fs_info->closing) + return -EINVAL; + + /* + * check some assumptions + */ + if (root->sectorsize != PAGE_SIZE || + root->sectorsize != root->leafsize || + root->sectorsize != root->nodesize) { + printk(KERN_ERR "btrfs_scrub: size assumptions fail\n"); + return -EINVAL; + } + + ret = scrub_workers_get(root); + if (ret) + return ret; + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (!dev || dev->missing) { + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -ENODEV; + } + mutex_lock(&fs_info->scrub_lock); + + if (!dev->in_fs_metadata) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -ENODEV; + } + + if (dev->scrub_device) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return -EINPROGRESS; + } + sdev = scrub_setup_dev(dev); + if (IS_ERR(sdev)) { + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + scrub_workers_put(root); + return PTR_ERR(sdev); + } + dev->scrub_device = sdev; + + atomic_inc(&fs_info->scrubs_running); + mutex_unlock(&fs_info->scrub_lock); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + down_read(&fs_info->scrub_super_lock); + ret = scrub_supers(sdev); + up_read(&fs_info->scrub_super_lock); + + if (!ret) + ret = scrub_enumerate_chunks(sdev, start, end); + + wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); + + atomic_dec(&fs_info->scrubs_running); + wake_up(&fs_info->scrub_pause_wait); + + if (progress) + memcpy(progress, &sdev->stat, sizeof(*progress)); + + mutex_lock(&fs_info->scrub_lock); + dev->scrub_device = NULL; + mutex_unlock(&fs_info->scrub_lock); + + scrub_free_dev(sdev); + scrub_workers_put(root); + + return ret; +} + +int btrfs_scrub_pause(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + atomic_inc(&fs_info->scrub_pause_req); + while (atomic_read(&fs_info->scrubs_paused) != + atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrubs_paused) == + atomic_read(&fs_info->scrubs_running)); + mutex_lock(&fs_info->scrub_lock); + } + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +int btrfs_scrub_continue(struct 
btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + atomic_dec(&fs_info->scrub_pause_req); + wake_up(&fs_info->scrub_pause_wait); + return 0; +} + +int btrfs_scrub_pause_super(struct btrfs_root *root) +{ + down_write(&root->fs_info->scrub_super_lock); + return 0; +} + +int btrfs_scrub_continue_super(struct btrfs_root *root) +{ + up_write(&root->fs_info->scrub_super_lock); + return 0; +} + +int btrfs_scrub_cancel(struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + + mutex_lock(&fs_info->scrub_lock); + if (!atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + return -ENOTCONN; + } + + atomic_inc(&fs_info->scrub_cancel_req); + while (atomic_read(&fs_info->scrubs_running)) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + atomic_read(&fs_info->scrubs_running) == 0); + mutex_lock(&fs_info->scrub_lock); + } + atomic_dec(&fs_info->scrub_cancel_req); + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} + +int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct scrub_dev *sdev; + + mutex_lock(&fs_info->scrub_lock); + sdev = dev->scrub_device; + if (!sdev) { + mutex_unlock(&fs_info->scrub_lock); + return -ENOTCONN; + } + atomic_inc(&sdev->cancel_req); + while (dev->scrub_device) { + mutex_unlock(&fs_info->scrub_lock); + wait_event(fs_info->scrub_pause_wait, + dev->scrub_device == NULL); + mutex_lock(&fs_info->scrub_lock); + } + mutex_unlock(&fs_info->scrub_lock); + + return 0; +} +int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_device *dev; + int ret; + + /* + * we have to hold the device_list_mutex here so the device + * does not go away in cancel_dev. FIXME: find a better solution + */ + mutex_lock(&fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (!dev) { + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + return -ENODEV; + } + ret = btrfs_scrub_cancel_dev(root, dev); + mutex_unlock(&fs_info->fs_devices->device_list_mutex); + + return ret; +} + +int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, + struct btrfs_scrub_progress *progress) +{ + struct btrfs_device *dev; + struct scrub_dev *sdev = NULL; + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + dev = btrfs_find_device(root, devid, NULL, NULL); + if (dev) + sdev = dev->scrub_device; + if (sdev) + memcpy(progress, &sdev->stat, sizeof(*progress)); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + + return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV; +} diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5..37c2302a08d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1321,6 +1321,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, WARN_ON(cur_trans != trans->transaction); + btrfs_scrub_pause(root); /* btrfs_commit_tree_roots is responsible for getting the * various roots consistent with each other. 
Every pointer * in the tree of tree roots has to point to the most up to date @@ -1405,6 +1406,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_unlock(&root->fs_info->trans_mutex); + btrfs_scrub_continue(root); + if (current->journal_info == trans) current->journal_info = NULL; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba..f1a0726da5f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ret = btrfs_lookup_csums_range(root->log_root, csum_start, csum_end - 1, - &ordered_sums); + &ordered_sums, 0); BUG_ON(ret); while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums; @@ -2093,7 +2093,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, * the running transaction open, so a full commit can't hop * in and cause problems either. */ + btrfs_scrub_pause_super(root); write_ctree_super(trans, root->fs_info->tree_root, 1); + btrfs_scrub_continue_super(root); ret = 0; mutex_lock(&root->log_mutex); @@ -2689,7 +2691,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ret = btrfs_lookup_csums_range( log->fs_info->csum_root, ds + cs, ds + cs + cl - 1, - &ordered_sums); + &ordered_sums, 0); BUG_ON(ret); } } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8b9fb8c7683..89ca8f110b6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -38,9 +38,6 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_device *device); static int btrfs_relocate_sys_chunks(struct btrfs_root *root); -#define map_lookup_size(n) (sizeof(struct map_lookup) + \ - (sizeof(struct btrfs_bio_stripe) * (n))) - static DEFINE_MUTEX(uuid_mutex); static LIST_HEAD(fs_uuids); @@ -1334,6 +1331,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto error_undo; device->in_fs_metadata = 0; + btrfs_scrub_cancel_dev(root, device); /* * the device list mutex makes sure that we don't change diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a2..f7c20123a1f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -85,6 +85,9 @@ struct btrfs_device { /* physical drive uuid (or lvm uuid) */ u8 uuid[BTRFS_UUID_SIZE]; + /* per-device scrub information */ + struct scrub_dev *scrub_device; + struct btrfs_work work; }; @@ -157,6 +160,9 @@ struct map_lookup { struct btrfs_bio_stripe stripes[]; }; +#define map_lookup_size(n) (sizeof(struct map_lookup) + \ + (sizeof(struct btrfs_bio_stripe) * (n))) + /* Used to sort the devices by max_avail(descending sort) */ int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); -- cgit v1.2.3-70-g09d2 From 7a36ddec1003a4e84e79f28ee714a142ed6bc529 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 6 May 2011 15:33:15 +0200 Subject: btrfs: use printk_ratelimited instead of printk_ratelimit As per printk_ratelimit comment, it should not be used. 
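For reference, a minimal sketch of the conversion pattern applied throughout this patch (the message text here is illustrative; printk_ratelimited() comes from <linux/ratelimit.h> and keeps a per-callsite ratelimit state, so the open-coded guard goes away):

	/* before: every call site open-codes the ratelimit guard */
	if (printk_ratelimit())
		printk(KERN_INFO "btrfs: something happened at %llu\n",
		       (unsigned long long)start);

	/* after: the ratelimiting is embedded in the printk variant itself */
	printk_ratelimited(KERN_INFO "btrfs: something happened at %llu\n",
			   (unsigned long long)start);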
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 21 ++++++--------------- fs/btrfs/inode.c | 13 ++++--------- 2 files changed, 10 insertions(+), 24 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index de7b4770ab1..cb9d1b8bfe7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include "compat.h" #include "ctree.h" @@ -254,14 +255,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs: %s checksum verify " + printk_ratelimited(KERN_INFO "btrfs: %s checksum verify " "failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, (unsigned long long)buf->start, val, found, btrfs_header_level(buf)); - } if (result != (char *)&inline_result) kfree(result); return 1; @@ -296,13 +295,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, ret = 0; goto out; } - if (printk_ratelimit()) { - printk("parent transid verify failed on %llu wanted %llu " + printk_ratelimited("parent transid verify failed on %llu wanted %llu " "found %llu\n", (unsigned long long)eb->start, (unsigned long long)parent_transid, (unsigned long long)btrfs_header_generation(eb)); - } ret = 1; clear_extent_buffer_uptodate(io_tree, eb, &cached_state); out: @@ -533,12 +530,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, found_start = btrfs_header_bytenr(eb); if (found_start != start) { - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs bad tree block start " + printk_ratelimited(KERN_INFO "btrfs bad tree block start " "%llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); - } ret = -EIO; goto err; } @@ -550,10 +545,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, goto err; } if (check_tree_block_fsid(root, eb)) { - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs bad fsid on block %llu\n", + printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", (unsigned long long)eb->start); - } ret = -EIO; goto err; } @@ -2108,11 +2101,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (printk_ratelimit()) { - printk(KERN_WARNING "lost page write due to " + printk_ratelimited(KERN_WARNING "lost page write due to " "I/O error on %s\n", bdevname(bh->b_bdev, b)); - } /* note, we dont' set_buffer_write_io_error because we have * our own ways of dealing with the IO errors */ diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5ff52b644a6..1d1017f9155 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "compat.h" #include "ctree.h" #include "disk-io.h" @@ -2004,12 +2005,10 @@ good: return 0; zeroit: - if (printk_ratelimit()) { - printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " + printk_ratelimited(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " "private %llu\n", page->mapping->host->i_ino, (unsigned long long)start, csum, (unsigned long long)private); - } memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); @@ -4243,22 +4242,18 @@ void btrfs_dirty_inode(struct inode *inode) btrfs_end_transaction(trans, root); trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { - if (printk_ratelimit()) { - 
printk(KERN_ERR "btrfs: fail to " + printk_ratelimited(KERN_ERR "btrfs: fail to " "dirty inode %lu error %ld\n", inode->i_ino, PTR_ERR(trans)); - } return; } btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret) { - if (printk_ratelimit()) { - printk(KERN_ERR "btrfs: fail to " + printk_ratelimited(KERN_ERR "btrfs: fail to " "dirty inode %lu error %d\n", inode->i_ino, ret); - } } } btrfs_end_transaction(trans, root); -- cgit v1.2.3-70-g09d2 From 16cdcec736cd214350cdb591bf1091f8beedefa0 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 22 Apr 2011 18:12:22 +0800 Subject: btrfs: implement delayed inode items operation Changelog V5 -> V6: - Fix oom when the memory load is high, by storing the delayed nodes into the root's radix tree, and letting btrfs inodes go. Changelog V4 -> V5: - Fix the race on adding the delayed node to the inode, which was spotted by Chris Mason. - Merge Chris Mason's incremental patch into this patch. - Fix the deadlock between readdir() and memory fault, which was reported by Itaru Kitayama. Changelog V3 -> V4: - Fix the nested lock, which was reported by Itaru Kitayama, by updating the space cache inode in time. Changelog V2 -> V3: - Fix the race between the delayed worker and the task that does delayed items balance, which was reported by Tsutomu Itoh. - Modify the patch to address David Sterba's comments. - Fix the cpu recursion spinlock bug, reported by Chris Mason. Changelog V1 -> V2: - Break up the global rb-tree; use a list to manage the delayed nodes, which are created for every directory and file and are used to manage the delayed directory name index items and the delayed inode item. - Introduce a worker to deal with the delayed nodes. Compared with ext3/4, the performance of file creation and deletion on btrfs is very poor, because btrfs must do a lot of b+ tree insertions, such as inode item, directory name item, directory name index and so on. If we can delay some of these b+ tree insertions and deletions, we can improve the performance, so we made this patch, which implements delayed directory name index insertion/deletion and delayed inode updates. Implementation: - Introduce a delayed root object into the filesystem, which uses two lists to manage the delayed nodes that are created for every file/directory. One list manages all the delayed nodes that have delayed items; the other manages the delayed nodes that are waiting to be dealt with by the work thread. - Every delayed node has two rb-trees: one manages the directory name index items that are going to be inserted into the b+ tree, and the other manages the directory name index items that are going to be deleted from the b+ tree. - Introduce a worker to deal with the delayed operations: the delayed directory name index insertions and deletions and the delayed inode updates. When the number of delayed items exceeds the lower limit, we create work items for some delayed nodes, insert them into the worker's queue, and return. When it exceeds the upper bound, we create work items for all the delayed nodes that haven't been dealt with yet, insert them into the worker's queue, and then wait until the number of untreated items drops below a threshold value. - When we want to insert a directory name index into the b+ tree, we just add it to the delayed insertion rb-tree, then check the number of delayed items and do delayed items balance. (The balance policy is described above; a sketch of this path follows.)
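A minimal sketch of that insertion path, composed from the helpers this patch adds below (the helper names are taken from the patch; the wrapper function itself, its error handling, and the trailing balance step are illustrative only):

	/* sketch: queue one delayed dir-index insertion on the dir's delayed node */
	static int delayed_insertion_sketch(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct inode *dir,
					    struct btrfs_delayed_item *item)
	{
		struct btrfs_delayed_node *node;
		int ret;

		node = btrfs_get_or_create_delayed_node(dir);
		if (IS_ERR(node))
			return PTR_ERR(node);

		/* reserve metadata space now so the deferred b+ tree
		 * insertion cannot fail for lack of space later */
		ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
		if (!ret) {
			mutex_lock(&node->mutex);
			ret = __btrfs_add_delayed_insertion_item(node, item);
			mutex_unlock(&node->mutex);
		}
		btrfs_release_delayed_node(node);
		/* finally, compare delayed_root->items against the two
		 * limits and kick or wait for the worker as described above */
		return ret;
	}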
- When we want to delete a directory name index from the b+ tree, we first search for it in the insertion rb-tree. If we find it, we just drop it. If not, we add its key to the delayed deletion rb-tree. As with the insertion rb-tree, we then check the number of delayed items and do delayed items balance (the same policy as for insertion). - When we want to update the metadata of some inode, we cache the inode's data in the delayed node. The worker will flush it into the b+ tree after dealing with the delayed insertions and deletions. - We move the delayed node to the tail of the list after we access it; this way, we can cache more delayed items and merge more inode updates. - When we commit the transaction, we deal with all the delayed nodes. - The delayed node is freed when we free the btrfs inode. - Before we log the inode items, we commit all the directory name index items and the delayed inode update. I did a quick test with the benchmark tool [1] and found that we can improve the performance of file creation by ~15% and file deletion by ~20%. Before applying this patch: Create files: Total files: 50000 Total time: 1.096108 Average time: 0.000022 Delete files: Total files: 50000 Total time: 1.510403 Average time: 0.000030 After applying this patch: Create files: Total files: 50000 Total time: 0.932899 Average time: 0.000019 Delete files: Total files: 50000 Total time: 1.215732 Average time: 0.000024 [1] http://marc.info/?l=linux-btrfs&m=128212635122920&q=p3 Many thanks to Kitayama-san for his help! Signed-off-by: Miao Xie Reviewed-by: David Sterba Tested-by: Tsutomu Itoh Tested-by: Itaru Kitayama Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/btrfs_inode.h | 5 + fs/btrfs/ctree.c | 14 +- fs/btrfs/ctree.h | 29 +- fs/btrfs/delayed-inode.c | 1694 ++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/delayed-inode.h | 141 ++++ fs/btrfs/dir-item.c | 34 +- fs/btrfs/disk-io.c | 50 +- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 18 +- fs/btrfs/inode.c | 111 ++- fs/btrfs/ioctl.c | 2 +- fs/btrfs/super.c | 10 +- fs/btrfs/transaction.c | 45 +- fs/btrfs/transaction.h | 2 + fs/btrfs/tree-log.c | 7 + 16 files changed, 2074 insertions(+), 91 deletions(-) create mode 100644 fs/btrfs/delayed-inode.c create mode 100644 fs/btrfs/delayed-inode.h (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73ae..a8411c22313 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ - compression.o delayed-ref.o relocation.o + compression.o delayed-ref.o relocation.o delayed-inode.o diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884c..beefafd91f2 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -22,6 +22,7 @@ #include "extent_map.h" #include "extent_io.h" #include "ordered-data.h" +#include "delayed-inode.h" /* in memory btrfs inode */ struct btrfs_inode { @@ -158,9 +159,13 @@ struct btrfs_inode { */ unsigned force_compress:4; + struct btrfs_delayed_node *delayed_node; + struct inode vfs_inode; }; +extern unsigned char btrfs_filetype_table[]; + static inline struct btrfs_inode *BTRFS_I(struct inode *inode) { return container_of(inode, struct btrfs_inode, vfs_inode); } diff --git
a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0b..2736b6b2ff5 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans, struct extent_buffer *src_buf); static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot); -static int setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr); - struct btrfs_path *btrfs_alloc_path(void) { @@ -3559,11 +3554,10 @@ out: * to save stack depth by doing the bulk of the work in a function * that doesn't call btrfs_search_slot */ -static noinline_for_stack int -setup_items_for_insert(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *cpu_key, u32 *data_size, - u32 total_data, u32 total_size, int nr) +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr) { struct btrfs_item *item; int i; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae..5d25129d011 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -869,6 +869,7 @@ struct btrfs_block_group_cache { struct reloc_control; struct btrfs_device; struct btrfs_fs_devices; +struct btrfs_delayed_root; struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; @@ -895,7 +896,10 @@ struct btrfs_fs_info { /* logical->physical extent mapping */ struct btrfs_mapping_tree mapping_tree; - /* block reservation for extent, checksum and root tree */ + /* + * block reservation for extent, checksum, root tree and + * delayed dir index item + */ struct btrfs_block_rsv global_block_rsv; /* block reservation for delay allocation */ struct btrfs_block_rsv delalloc_block_rsv; @@ -1022,6 +1026,7 @@ struct btrfs_fs_info { * for the sys_munmap function call path */ struct btrfs_workers fixup_workers; + struct btrfs_workers delayed_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; int thread_pool_size; @@ -1079,6 +1084,8 @@ struct btrfs_fs_info { /* filesystem state */ u64 fs_state; + + struct btrfs_delayed_root *delayed_root; }; /* @@ -1161,6 +1168,11 @@ struct btrfs_root { /* red-black tree that keeps track of in-memory inodes */ struct rb_root inode_tree; + /* + * radix tree that keeps track of delayed nodes of every inode, + * protected by inode_lock + */ + struct radix_tree_root delayed_nodes_tree; /* * right now this just gets used so that a root has its own devid * for stat. 
It may be used for more later @@ -2099,6 +2111,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) } /* extent-tree.c */ +static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, + int num_items) +{ + return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * + 3 * num_items; +} + void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); @@ -2294,6 +2313,8 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_set_path_blocking(struct btrfs_path *p); +void btrfs_clear_path_blocking(struct btrfs_path *p, + struct extent_buffer *held); void btrfs_unlock_up_safe(struct btrfs_path *p, int level); int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -2305,6 +2326,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, return btrfs_del_items(trans, root, path, path->slots[0], 1); } +int setup_items_for_insert(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *cpu_key, u32 *data_size, + u32 total_data, u32 total_size, int nr); int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); int btrfs_insert_some_items(struct btrfs_trans_handle *trans, @@ -2368,7 +2393,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item); /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, u64 dir, + int name_len, struct inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c new file mode 100644 index 00000000000..95485318f00 --- /dev/null +++ b/fs/btrfs/delayed-inode.c @@ -0,0 +1,1694 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include <linux/slab.h> +#include "delayed-inode.h" +#include "disk-io.h" +#include "transaction.h" + +#define BTRFS_DELAYED_WRITEBACK 400 +#define BTRFS_DELAYED_BACKGROUND 100 + +static struct kmem_cache *delayed_node_cache; + +int __init btrfs_delayed_inode_init(void) +{ + delayed_node_cache = kmem_cache_create("delayed_node", + sizeof(struct btrfs_delayed_node), + 0, + SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, + NULL); + if (!delayed_node_cache) + return -ENOMEM; + return 0; +} + +void btrfs_delayed_inode_exit(void) +{ + if (delayed_node_cache) + kmem_cache_destroy(delayed_node_cache); +} + +static inline void btrfs_init_delayed_node( + struct btrfs_delayed_node *delayed_node, + struct btrfs_root *root, u64 inode_id) +{ + delayed_node->root = root; + delayed_node->inode_id = inode_id; + atomic_set(&delayed_node->refs, 0); + delayed_node->count = 0; + delayed_node->in_list = 0; + delayed_node->inode_dirty = 0; + delayed_node->ins_root = RB_ROOT; + delayed_node->del_root = RB_ROOT; + mutex_init(&delayed_node->mutex); + delayed_node->index_cnt = 0; + INIT_LIST_HEAD(&delayed_node->n_list); + INIT_LIST_HEAD(&delayed_node->p_list); + delayed_node->bytes_reserved = 0; +} + +static inline int btrfs_is_continuous_delayed_item( + struct btrfs_delayed_item *item1, + struct btrfs_delayed_item *item2) +{ + if (item1->key.type == BTRFS_DIR_INDEX_KEY && + item1->key.objectid == item2->key.objectid && + item1->key.type == item2->key.type && + item1->key.offset + 1 == item2->key.offset) + return 1; + return 0; +} + +static inline struct btrfs_delayed_root *btrfs_get_delayed_root( + struct btrfs_root *root) +{ + return root->fs_info->delayed_root; +} + +static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( + struct inode *inode) +{ + struct btrfs_delayed_node *node; + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_root *root = btrfs_inode->root; + int ret; + +again: + node = ACCESS_ONCE(btrfs_inode->delayed_node); + if (node) { + atomic_inc(&node->refs); /* can be accessed */ + return node; + } + + spin_lock(&root->inode_lock); + node = radix_tree_lookup(&root->delayed_nodes_tree, inode->i_ino); + if (node) { + if (btrfs_inode->delayed_node) { + spin_unlock(&root->inode_lock); + goto again; + } + btrfs_inode->delayed_node = node; + atomic_inc(&node->refs); /* can be accessed */ + atomic_inc(&node->refs); /* cached in the inode */ + spin_unlock(&root->inode_lock); + return node; + } + spin_unlock(&root->inode_lock); + + node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); + if (!node) + return ERR_PTR(-ENOMEM); + btrfs_init_delayed_node(node, root, inode->i_ino); + + atomic_inc(&node->refs); /* cached in the btrfs inode */ + atomic_inc(&node->refs); /* can be accessed */ + + ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + if (ret) { + kmem_cache_free(delayed_node_cache, node); + return ERR_PTR(ret); + } + + spin_lock(&root->inode_lock); + ret = radix_tree_insert(&root->delayed_nodes_tree, inode->i_ino, node); + if (ret == -EEXIST) { + kmem_cache_free(delayed_node_cache, node); + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + goto again; + } + btrfs_inode->delayed_node = node; + spin_unlock(&root->inode_lock); + radix_tree_preload_end(); + + return node; +} + +/* + * Call it when holding delayed_node->mutex + * + * If mod = 1, add this node into the prepared list.
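+ * + * (n_list links the node into delayed_root->node_list; p_list links it into + * delayed_root->prepare_list, which btrfs_first_prepared_delayed_node() + * below pops from.)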
+ */ +static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node, + int mod) +{ + spin_lock(&root->lock); + if (node->in_list) { + if (!list_empty(&node->p_list)) + list_move_tail(&node->p_list, &root->prepare_list); + else if (mod) + list_add_tail(&node->p_list, &root->prepare_list); + } else { + list_add_tail(&node->n_list, &root->node_list); + list_add_tail(&node->p_list, &root->prepare_list); + atomic_inc(&node->refs); /* inserted into list */ + root->nodes++; + node->in_list = 1; + } + spin_unlock(&root->lock); +} + +/* Call it when holding delayed_node->mutex */ +static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, + struct btrfs_delayed_node *node) +{ + spin_lock(&root->lock); + if (node->in_list) { + root->nodes--; + atomic_dec(&node->refs); /* not in the list */ + list_del_init(&node->n_list); + if (!list_empty(&node->p_list)) + list_del_init(&node->p_list); + node->in_list = 0; + } + spin_unlock(&root->lock); +} + +struct btrfs_delayed_node *btrfs_first_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->node_list)) + goto out; + + p = delayed_root->node_list.next; + node = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return node; +} + +struct btrfs_delayed_node *btrfs_next_delayed_node( + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_root *delayed_root; + struct list_head *p; + struct btrfs_delayed_node *next = NULL; + + delayed_root = node->root->fs_info->delayed_root; + spin_lock(&delayed_root->lock); + if (!node->in_list) { /* not in the list */ + if (list_empty(&delayed_root->node_list)) + goto out; + p = delayed_root->node_list.next; + } else if (list_is_last(&node->n_list, &delayed_root->node_list)) + goto out; + else + p = node->n_list.next; + + next = list_entry(p, struct btrfs_delayed_node, n_list); + atomic_inc(&next->refs); +out: + spin_unlock(&delayed_root->lock); + + return next; +} + +static void __btrfs_release_delayed_node( + struct btrfs_delayed_node *delayed_node, + int mod) +{ + struct btrfs_delayed_root *delayed_root; + + if (!delayed_node) + return; + + delayed_root = delayed_node->root->fs_info->delayed_root; + + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + btrfs_queue_delayed_node(delayed_root, delayed_node, mod); + else + btrfs_dequeue_delayed_node(delayed_root, delayed_node); + mutex_unlock(&delayed_node->mutex); + + if (atomic_dec_and_test(&delayed_node->refs)) { + struct btrfs_root *root = delayed_node->root; + spin_lock(&root->inode_lock); + if (atomic_read(&delayed_node->refs) == 0) { + radix_tree_delete(&root->delayed_nodes_tree, + delayed_node->inode_id); + kmem_cache_free(delayed_node_cache, delayed_node); + } + spin_unlock(&root->inode_lock); + } +} + +static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 0); +} + +struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( + struct btrfs_delayed_root *delayed_root) +{ + struct list_head *p; + struct btrfs_delayed_node *node = NULL; + + spin_lock(&delayed_root->lock); + if (list_empty(&delayed_root->prepare_list)) + goto out; + + p = delayed_root->prepare_list.next; + list_del_init(p); + node = list_entry(p, struct btrfs_delayed_node, p_list); + atomic_inc(&node->refs); +out: + spin_unlock(&delayed_root->lock); + + return 
node; +} + +static inline void btrfs_release_prepared_delayed_node( + struct btrfs_delayed_node *node) +{ + __btrfs_release_delayed_node(node, 1); +} + +struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) +{ + struct btrfs_delayed_item *item; + item = kmalloc(sizeof(*item) + data_len, GFP_NOFS); + if (item) { + item->data_len = data_len; + item->ins_or_del = 0; + item->bytes_reserved = 0; + item->block_rsv = NULL; + item->delayed_node = NULL; + atomic_set(&item->refs, 1); + } + return item; +} + +/* + * __btrfs_lookup_delayed_item - look up the delayed item by key + * @delayed_node: pointer to the delayed node + * @key: the key to look up + * @prev: used to store the prev item if the right item isn't found + * @next: used to store the next item if the right item isn't found + * + * Note: if we don't find the right item, we will return the prev item and + * the next item. + */ +static struct btrfs_delayed_item *__btrfs_lookup_delayed_item( + struct rb_root *root, + struct btrfs_key *key, + struct btrfs_delayed_item **prev, + struct btrfs_delayed_item **next) +{ + struct rb_node *node, *prev_node = NULL; + struct btrfs_delayed_item *delayed_item = NULL; + int ret = 0; + + node = root->rb_node; + + while (node) { + delayed_item = rb_entry(node, struct btrfs_delayed_item, + rb_node); + prev_node = node; + ret = btrfs_comp_cpu_keys(&delayed_item->key, key); + if (ret < 0) + node = node->rb_right; + else if (ret > 0) + node = node->rb_left; + else + return delayed_item; + } + + if (prev) { + if (!prev_node) + *prev = NULL; + else if (ret < 0) + *prev = delayed_item; + else if ((node = rb_prev(prev_node)) != NULL) { + *prev = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *prev = NULL; + } + + if (next) { + if (!prev_node) + *next = NULL; + else if (ret > 0) + *next = delayed_item; + else if ((node = rb_next(prev_node)) != NULL) { + *next = rb_entry(node, struct btrfs_delayed_item, + rb_node); + } else + *next = NULL; + } + return NULL; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, NULL); + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item, *next; + + item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, + NULL, &next); + if (!item) + item = next; + + return item; +} + +static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node, + struct btrfs_delayed_item *ins, + int action) +{ + struct rb_node **p, *node; + struct rb_node *parent_node = NULL; + struct rb_root *root; + struct btrfs_delayed_item *item; + int cmp; + + if (action == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_node->ins_root; + else if (action == 
BTRFS_DELAYED_DELETION_ITEM) + root = &delayed_node->del_root; + else + BUG(); + p = &root->rb_node; + node = &ins->rb_node; + + while (*p) { + parent_node = *p; + item = rb_entry(parent_node, struct btrfs_delayed_item, + rb_node); + + cmp = btrfs_comp_cpu_keys(&item->key, &ins->key); + if (cmp < 0) + p = &(*p)->rb_right; + else if (cmp > 0) + p = &(*p)->rb_left; + else + return -EEXIST; + } + + rb_link_node(node, parent_node, p); + rb_insert_color(node, root); + ins->delayed_node = delayed_node; + ins->ins_or_del = action; + + if (ins->key.type == BTRFS_DIR_INDEX_KEY && + action == BTRFS_DELAYED_INSERTION_ITEM && + ins->key.offset >= delayed_node->index_cnt) + delayed_node->index_cnt = ins->key.offset + 1; + + delayed_node->count++; + atomic_inc(&delayed_node->root->fs_info->delayed_root->items); + return 0; +} + +static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_INSERTION_ITEM); +} + +static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, + struct btrfs_delayed_item *item) +{ + return __btrfs_add_delayed_item(node, item, + BTRFS_DELAYED_DELETION_ITEM); +} + +static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) +{ + struct rb_root *root; + struct btrfs_delayed_root *delayed_root; + + delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; + + BUG_ON(!delayed_root); + BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM && + delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM); + + if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM) + root = &delayed_item->delayed_node->ins_root; + else + root = &delayed_item->delayed_node->del_root; + + rb_erase(&delayed_item->rb_node, root); + delayed_item->delayed_node->count--; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); +} + +static void btrfs_release_delayed_item(struct btrfs_delayed_item *item) +{ + if (item) { + __btrfs_remove_delayed_item(item); + if (atomic_dec_and_test(&item->refs)) + kfree(item); + } +} + +struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->ins_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( + struct btrfs_delayed_node *delayed_node) +{ + struct rb_node *p; + struct btrfs_delayed_item *item = NULL; + + p = rb_first(&delayed_node->del_root); + if (p) + item = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return item; +} + +struct btrfs_delayed_item *__btrfs_next_delayed_item( + struct btrfs_delayed_item *item) +{ + struct rb_node *p; + struct btrfs_delayed_item *next = NULL; + + p = rb_next(&item->rb_node); + if (p) + next = rb_entry(p, struct btrfs_delayed_item, rb_node); + + return next; +} + +static inline struct btrfs_delayed_node *btrfs_get_delayed_node( + struct inode *inode) +{ + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_inode->delayed_node; + if (delayed_node) + atomic_inc(&delayed_node->refs); + + return delayed_node; +} + +static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, + u64 root_id) +{ + struct 
btrfs_key root_key; + + if (root->objectid == root_id) + return root; + + root_key.objectid = root_id; + root_key.type = BTRFS_ROOT_ITEM_KEY; + root_key.offset = (u64)-1; + return btrfs_read_fs_root_no_name(root->fs_info, &root_key); +} + +static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) { + item->bytes_reserved = num_bytes; + item->block_rsv = dst_rsv; + } + + return ret; +} + +static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_item *item) +{ + if (!item->bytes_reserved) + return; + + btrfs_block_rsv_release(root, item->block_rsv, + item->bytes_reserved); +} + +static int btrfs_delayed_inode_reserve_metadata( + struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *src_rsv; + struct btrfs_block_rsv *dst_rsv; + u64 num_bytes; + int ret; + + if (!trans->bytes_reserved) + return 0; + + src_rsv = trans->block_rsv; + dst_rsv = &root->fs_info->global_block_rsv; + + num_bytes = btrfs_calc_trans_metadata_size(root, 1); + ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); + if (!ret) + node->bytes_reserved = num_bytes; + + return ret; +} + +static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_block_rsv *rsv; + + if (!node->bytes_reserved) + return; + + rsv = &root->fs_info->global_block_rsv; + btrfs_block_rsv_release(root, rsv, + node->bytes_reserved); + node->bytes_reserved = 0; +} + +/* + * This helper will insert some continuous items into the same leaf according + * to the free space of the leaf. + */ +static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + int free_space; + int total_data_size = 0, total_size = 0; + struct extent_buffer *leaf; + char *data_ptr; + struct btrfs_key *keys; + u32 *data_size; + struct list_head head; + int slot; + int nitems; + int i; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + free_space = btrfs_leaf_free_space(root, leaf); + INIT_LIST_HEAD(&head); + + next = item; + nitems = 0; + + /* + * count the number of the continuous items that we can insert in batch + */ + while (total_size + next->data_len + sizeof(struct btrfs_item) <= + free_space) { + total_data_size += next->data_len; + total_size += next->data_len + sizeof(struct btrfs_item); + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + } + + if (!nitems) { + ret = 0; + goto out; + } + + /* + * we need to allocate some memory, and that might cause the task + * to sleep, so we set all locked nodes in the path to blocking locks + * first.
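+ * (The earlier tree search left spinning locks held on this path, and + * kmalloc(GFP_NOFS) may sleep, which is not allowed while holding a + * spinning lock; hence the switch to blocking locks here.)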
+ */ + btrfs_set_path_blocking(path); + + keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS); + if (!keys) { + ret = -ENOMEM; + goto out; + } + + data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS); + if (!data_size) { + ret = -ENOMEM; + goto error; + } + + /* get keys of all the delayed items */ + i = 0; + list_for_each_entry(next, &head, tree_list) { + keys[i] = next->key; + data_size[i] = next->data_len; + i++; + } + + /* reset all the locked nodes in the path to spinning locks. */ + btrfs_clear_path_blocking(path, NULL); + + /* insert the keys of the items */ + ret = setup_items_for_insert(trans, root, path, keys, data_size, + total_data_size, total_size, nitems); + if (ret) + goto error; + + /* insert the dir index items */ + slot = path->slots[0]; + list_for_each_entry_safe(curr, next, &head, tree_list) { + data_ptr = btrfs_item_ptr(leaf, slot, char); + write_extent_buffer(leaf, &curr->data, + (unsigned long)data_ptr, + curr->data_len); + slot++; + + btrfs_delayed_item_release_metadata(root, curr); + + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +error: + kfree(data_size); + kfree(keys); +out: + return ret; +} + +/* + * This helper handles only simple insertions that do not need to extend an + * item for new data, such as directory name index insertion and inode + * insertion. + */ +static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *delayed_item) +{ + struct extent_buffer *leaf; + struct btrfs_item *item; + char *ptr; + int ret; + + ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, + delayed_item->data_len); + if (ret < 0 && ret != -EEXIST) + return ret; + + leaf = path->nodes[0]; + + item = btrfs_item_nr(leaf, path->slots[0]); + ptr = btrfs_item_ptr(leaf, path->slots[0], char); + + write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr, + delayed_item->data_len); + btrfs_mark_buffer_dirty(leaf); + + btrfs_delayed_item_release_metadata(root, delayed_item); + return 0; +} + +/* + * we insert an item first, then if there are some continuous items, we try + * to insert those items into the same leaf.
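+ * + * (For example, files created back to back in one directory get dir index + * keys (dir_ino, DIR_INDEX, n), (dir_ino, DIR_INDEX, n+1), ..., so the run + * satisfies btrfs_is_continuous_delayed_item() and is written out with a + * single setup_items_for_insert() call by btrfs_batch_insert_items() above; + * the offsets shown are illustrative.)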
+ */ +static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_insertion_item(node); + if (!curr) + goto insert_end; + + ret = btrfs_insert_delayed_item(trans, root, path, curr); + if (ret < 0) { + btrfs_release_path(root, path); + goto insert_end; + } + + prev = curr; + curr = __btrfs_next_delayed_item(prev); + if (curr && btrfs_is_continuous_delayed_item(prev, curr)) { + /* insert the continuous items into the same leaf */ + path->slots[0]++; + btrfs_batch_insert_items(trans, root, path, curr); + } + btrfs_release_delayed_item(prev); + btrfs_mark_buffer_dirty(path->nodes[0]); + + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + goto do_again; + +insert_end: + mutex_unlock(&node->mutex); + return ret; +} + +static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_item *item) +{ + struct btrfs_delayed_item *curr, *next; + struct extent_buffer *leaf; + struct btrfs_key key; + struct list_head head; + int nitems, i, last_item; + int ret = 0; + + BUG_ON(!path->nodes[0]); + + leaf = path->nodes[0]; + + i = path->slots[0]; + last_item = btrfs_header_nritems(leaf) - 1; + if (i > last_item) + return -ENOENT; /* FIXME: Is errno suitable? */ + + next = item; + INIT_LIST_HEAD(&head); + btrfs_item_key_to_cpu(leaf, &key, i); + nitems = 0; + /* + * count the number of the dir index items that we can delete in batch + */ + while (btrfs_comp_cpu_keys(&next->key, &key) == 0) { + list_add_tail(&next->tree_list, &head); + nitems++; + + curr = next; + next = __btrfs_next_delayed_item(curr); + if (!next) + break; + + if (!btrfs_is_continuous_delayed_item(curr, next)) + break; + + i++; + if (i > last_item) + break; + btrfs_item_key_to_cpu(leaf, &key, i); + } + + if (!nitems) + return 0; + + ret = btrfs_del_items(trans, root, path, path->slots[0], nitems); + if (ret) + goto out; + + list_for_each_entry_safe(curr, next, &head, tree_list) { + btrfs_delayed_item_release_metadata(root, curr); + list_del(&curr->tree_list); + btrfs_release_delayed_item(curr); + } + +out: + return ret; +} + +static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_path *path, + struct btrfs_root *root, + struct btrfs_delayed_node *node) +{ + struct btrfs_delayed_item *curr, *prev; + int ret = 0; + +do_again: + mutex_lock(&node->mutex); + curr = __btrfs_first_delayed_deletion_item(node); + if (!curr) + goto delete_fail; + + ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); + if (ret < 0) + goto delete_fail; + else if (ret > 0) { + /* + * can't find the item which the node points to, so this node + * is invalid, just drop it. 
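Both batch paths above stop at the first pair of items that fails btrfs_is_continuous_delayed_item(). That helper is not part of this hunk, so the sketch below only illustrates the assumed meaning of continuity: adjacent key offsets under the same objectid and key type, which is what lets the items occupy neighbouring slots in one leaf.

#include <stdbool.h>
#include <stdio.h>

/* a pared-down key: (objectid, type, offset), ordered lexicographically */
struct key {
	unsigned long long objectid;
	unsigned char type;
	unsigned long long offset;
};

/*
 * Assumed notion of continuity for batching: same directory (objectid),
 * same item type, and index offsets that are exactly adjacent.
 */
static bool is_continuous(const struct key *a, const struct key *b)
{
	return a->objectid == b->objectid && a->type == b->type &&
	       a->offset + 1 == b->offset;
}

int main(void)
{
	struct key a = { 256, 96, 10 }, b = { 256, 96, 11 }, c = { 256, 96, 13 };

	printf("%d %d\n", is_continuous(&a, &b), is_continuous(&b, &c)); /* 1 0 */
	return 0;
}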
+ */ + prev = curr; + curr = __btrfs_next_delayed_item(prev); + btrfs_release_delayed_item(prev); + ret = 0; + btrfs_release_path(root, path); + if (curr) + goto do_again; + else + goto delete_fail; + } + + btrfs_batch_delete_items(trans, root, path, curr); + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + goto do_again; + +delete_fail: + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + return ret; +} + +static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_delayed_root *delayed_root; + + if (delayed_node && delayed_node->inode_dirty) { + BUG_ON(!delayed_node->root); + delayed_node->inode_dirty = 0; + delayed_node->count--; + + delayed_root = delayed_node->root->fs_info->delayed_root; + atomic_dec(&delayed_root->items); + if (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND && + waitqueue_active(&delayed_root->wait)) + wake_up(&delayed_root->wait); + } +} + +static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + struct btrfs_delayed_node *node) +{ + struct btrfs_key key; + struct btrfs_inode_item *inode_item; + struct extent_buffer *leaf; + int ret; + + mutex_lock(&node->mutex); + if (!node->inode_dirty) { + mutex_unlock(&node->mutex); + return 0; + } + + key.objectid = node->inode_id; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + ret = btrfs_lookup_inode(trans, root, path, &key, 1); + if (ret > 0) { + btrfs_release_path(root, path); + mutex_unlock(&node->mutex); + return -ENOENT; + } else if (ret < 0) { + mutex_unlock(&node->mutex); + return ret; + } + + btrfs_unlock_up_safe(path, 1); + leaf = path->nodes[0]; + inode_item = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_inode_item); + write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item, + sizeof(struct btrfs_inode_item)); + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + btrfs_delayed_inode_release_metadata(root, node); + btrfs_release_delayed_inode(node); + mutex_unlock(&node->mutex); + + return 0; +} + +/* Called when committing the transaction. 
*/ +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + struct btrfs_delayed_node *curr_node, *prev_node; + struct btrfs_path *path; + int ret = 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + delayed_root = btrfs_get_delayed_root(root); + + curr_node = btrfs_first_delayed_node(delayed_root); + while (curr_node) { + root = curr_node->root; + ret = btrfs_insert_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + curr_node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, root, path, + curr_node); + if (ret) { + btrfs_release_delayed_node(curr_node); + break; + } + + prev_node = curr_node; + curr_node = btrfs_next_delayed_node(curr_node); + btrfs_release_delayed_node(prev_node); + } + + btrfs_free_path(path); + return ret; +} + +static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *node) +{ + struct btrfs_path *path; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; + + ret = btrfs_insert_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, node->root, node); + if (!ret) + ret = btrfs_update_delayed_inode(trans, node->root, path, node); + btrfs_free_path(path); + + return ret; +} + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); + int ret; + + if (!delayed_node) + return 0; + + mutex_lock(&delayed_node->mutex); + if (!delayed_node->count) { + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return 0; + } + mutex_unlock(&delayed_node->mutex); + + ret = __btrfs_commit_inode_delayed_items(trans, delayed_node); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +void btrfs_remove_delayed_node(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node); + if (!delayed_node) + return; + + BTRFS_I(inode)->delayed_node = NULL; + btrfs_release_delayed_node(delayed_node); +} + +struct btrfs_async_delayed_node { + struct btrfs_root *root; + struct btrfs_delayed_node *delayed_node; + struct btrfs_work work; +}; + +static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_trans_handle *trans; + struct btrfs_path *path; + struct btrfs_delayed_node *delayed_node = NULL; + struct btrfs_root *root; + unsigned long nr = 0; + int need_requeue = 0; + int ret; + + async_node = container_of(work, struct btrfs_async_delayed_node, work); + + path = btrfs_alloc_path(); + if (!path) + goto out; + path->leave_spinning = 1; + + delayed_node = async_node->delayed_node; + root = delayed_node->root; + + trans = btrfs_join_transaction(root, 0); + if (IS_ERR(trans)) + goto free_path; + + ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); + if (!ret) + ret = btrfs_delete_delayed_items(trans, path, root, + delayed_node); + + if (!ret) + btrfs_update_delayed_inode(trans, root, path, delayed_node); + + /* + * Maybe new delayed items have been inserted, so we need to requeue + * the work. Besides that, we must dequeue the empty delayed nodes + * to avoid the race between delayed items balance and the worker. + * The race looks like this: + * Task1 Worker thread + * count == 0, needn't requeue + * also needn't insert the + * delayed node into prepare + * list again. + * add lots of delayed items + * queue the delayed node + * already in the list, + * and not in the prepare + * list, it means the delayed + * node is being dealt with + * by the worker. + * do delayed items balance + * the delayed node is being + * dealt with by the worker + * now, just wait. + * the worker goto idle. + * Task1 will sleep until the transaction is committed. + */ + mutex_lock(&delayed_node->mutex); + if (delayed_node->count) + need_requeue = 1; + else + btrfs_dequeue_delayed_node(root->fs_info->delayed_root, + delayed_node); + mutex_unlock(&delayed_node->mutex); + + nr = trans->blocks_used; + + btrfs_end_transaction_dmeta(trans, root); + __btrfs_btree_balance_dirty(root, nr); +free_path: + btrfs_free_path(path); +out: + if (need_requeue) + btrfs_requeue_work(&async_node->work); + else { + btrfs_release_prepared_delayed_node(delayed_node); + kfree(async_node); + } +} + +static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, + struct btrfs_root *root, int all) +{ + struct btrfs_async_delayed_node *async_node; + struct btrfs_delayed_node *curr; + int count = 0; + +again: + curr = btrfs_first_prepared_delayed_node(delayed_root); + if (!curr) + return 0; + + async_node = kmalloc(sizeof(*async_node), GFP_NOFS); + if (!async_node) { + btrfs_release_prepared_delayed_node(curr); + return -ENOMEM; + } + + async_node->root = root; + async_node->delayed_node = curr; + + async_node->work.func = btrfs_async_run_delayed_node_done; + async_node->work.flags = 0; + + btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work); + count++; + + if (all || count < 4) + goto again; + + return 0; +} + +void btrfs_balance_delayed_items(struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + + delayed_root = btrfs_get_delayed_root(root); + + if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) + return; + + if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) { + int ret; + ret = btrfs_wq_run_delayed_node(delayed_root, root, 1); + if (ret) + return; + + wait_event_interruptible_timeout( + delayed_root->wait, + (atomic_read(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND), + HZ); + return; + } + + btrfs_wq_run_delayed_node(delayed_root, root, 0); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *delayed_item; + struct btrfs_dir_item *dir_item; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); + + delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len); + if (!delayed_item) { + ret = -ENOMEM; + goto release_node; + } + + ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + delayed_item->key.objectid = dir->i_ino; + btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); + delayed_item->key.offset = index; + + dir_item = (struct btrfs_dir_item *)delayed_item->data; + dir_item->location = *disk_key; + dir_item->transid = cpu_to_le64(trans->transid); + dir_item->data_len = 0; +
dir_item->name_len = cpu_to_le16(name_len); + dir_item->type = type; + memcpy((char *)(dir_item + 1), name, name_len); + + mutex_lock(&delayed_node->mutex); + ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(name: %s) into " + "the insertion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + name, + (unsigned long long)delayed_node->root->objectid, + (unsigned long long)delayed_node->inode_id, + ret); + BUG(); + } + mutex_unlock(&delayed_node->mutex); + +release_node: + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root, + struct btrfs_delayed_node *node, + struct btrfs_key *key) +{ + struct btrfs_delayed_item *item; + + mutex_lock(&node->mutex); + item = __btrfs_lookup_delayed_insertion_item(node, key); + if (!item) { + mutex_unlock(&node->mutex); + return 1; + } + + btrfs_delayed_item_release_metadata(root, item); + btrfs_release_delayed_item(item); + mutex_unlock(&node->mutex); + return 0; +} + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index) +{ + struct btrfs_delayed_node *node; + struct btrfs_delayed_item *item; + struct btrfs_key item_key; + int ret; + + node = btrfs_get_or_create_delayed_node(dir); + if (IS_ERR(node)) + return PTR_ERR(node); + + item_key.objectid = dir->i_ino; + btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); + item_key.offset = index; + + ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); + if (!ret) + goto end; + + item = btrfs_alloc_delayed_item(0); + if (!item) { + ret = -ENOMEM; + goto end; + } + + item->key = item_key; + + ret = btrfs_delayed_item_reserve_metadata(trans, root, item); + /* + * we have reserved enough space when we start a new transaction, + * so reserving metadata failure is impossible. + */ + BUG_ON(ret); + + mutex_lock(&node->mutex); + ret = __btrfs_add_delayed_deletion_item(node, item); + if (unlikely(ret)) { + printk(KERN_ERR "err add delayed dir index item(index: %llu) " + "into the deletion tree of the delayed node" + "(root id: %llu, inode id: %llu, errno: %d)\n", + (unsigned long long)index, + (unsigned long long)node->root->objectid, + (unsigned long long)node->inode_id, + ret); + BUG(); + } + mutex_unlock(&node->mutex); +end: + btrfs_release_delayed_node(node); + return ret; +} + +int btrfs_inode_delayed_dir_index_count(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; + int ret = 0; + + if (!delayed_node) + return -ENOENT; + + /* + * Since we have held i_mutex of this directory, it is impossible that + * a new directory index is added into the delayed node and index_cnt + * is updated now. So we needn't lock the delayed node. 
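btrfs_delete_delayed_dir_index() above first tries to cancel a still-pending insertion via btrfs_delete_delayed_insertion_item(); only when the index has already reached disk does it queue a real deletion item. A toy model of that cancellation, with plain arrays standing in for the rb-trees (not btrfs code):

#include <stdio.h>

#define MAX 8
static unsigned long long pending_ins[MAX], pending_del[MAX];
static int n_ins, n_del;

static void queue_insert(unsigned long long index)
{
	pending_ins[n_ins++] = index;
}

static void queue_delete(unsigned long long index)
{
	for (int i = 0; i < n_ins; i++) {
		if (pending_ins[i] == index) {
			/* cancel: the item never reached disk, so the
			 * pending insert and the delete annihilate */
			pending_ins[i] = pending_ins[--n_ins];
			return;
		}
	}
	pending_del[n_del++] = index;	/* must be deleted from the leaf */
}

int main(void)
{
	queue_insert(7);
	queue_delete(7);	/* cancels the pending insert */
	queue_delete(3);	/* 3 is on disk: record a deletion item */
	printf("ins=%d del=%d\n", n_ins, n_del);	/* ins=0 del=1 */
	return 0;
}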
+ */ + if (!delayed_node->index_cnt) + return -EINVAL; + + BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; + return ret; +} + +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_delayed_item *item; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + mutex_lock(&delayed_node->mutex); + item = __btrfs_first_delayed_insertion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, ins_list); + item = __btrfs_next_delayed_item(item); + } + + item = __btrfs_first_delayed_deletion_item(delayed_node); + while (item) { + atomic_inc(&item->refs); + list_add_tail(&item->readdir_list, del_list); + item = __btrfs_next_delayed_item(item); + } + mutex_unlock(&delayed_node->mutex); + /* + * This delayed node is still cached in the btrfs inode, so refs + * must be > 1 now, and we needn't check it is going to be freed + * or not. + * + * Besides that, this function is used to read dir, we do not + * insert/delete delayed items in this period. So we also needn't + * requeue or dequeue this delayed node. + */ + atomic_dec(&delayed_node->refs); +} + +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list) +{ + struct btrfs_delayed_item *curr, *next; + + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + list_del(&curr->readdir_list); + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + } +} + +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index) +{ + struct btrfs_delayed_item *curr, *next; + int ret; + + if (list_empty(del_list)) + return 0; + + list_for_each_entry_safe(curr, next, del_list, readdir_list) { + if (curr->key.offset > index) + break; + + list_del(&curr->readdir_list); + ret = (curr->key.offset == index); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (ret) + return 1; + else + continue; + } + return 0; +} + +/* + * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree + * + */ +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list) +{ + struct btrfs_dir_item *di; + struct btrfs_delayed_item *curr, *next; + struct btrfs_key location; + char *name; + int name_len; + int over = 0; + unsigned char d_type; + + if (list_empty(ins_list)) + return 0; + + /* + * Changing the data of the delayed item is impossible. So + * we needn't lock them. And we have held i_mutex of the + * directory, nobody can delete any directory indexes now. 
+ */ + list_for_each_entry_safe(curr, next, ins_list, readdir_list) { + list_del(&curr->readdir_list); + + if (curr->key.offset < filp->f_pos) { + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + continue; + } + + filp->f_pos = curr->key.offset; + + di = (struct btrfs_dir_item *)curr->data; + name = (char *)(di + 1); + name_len = le16_to_cpu(di->name_len); + + d_type = btrfs_filetype_table[di->type]; + btrfs_disk_key_to_cpu(&location, &di->location); + + over = filldir(dirent, name, name_len, curr->key.offset, + location.objectid, d_type); + + if (atomic_dec_and_test(&curr->refs)) + kfree(curr); + + if (over) + return 1; + } + return 0; +} + +BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, + generation, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, + sequence, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, + transid, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, + nbytes, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, + block_group, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); +BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); +BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); + +BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); +BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); + +static void fill_stack_inode_item(struct btrfs_trans_handle *trans, + struct btrfs_inode_item *inode_item, + struct inode *inode) +{ + btrfs_set_stack_inode_uid(inode_item, inode->i_uid); + btrfs_set_stack_inode_gid(inode_item, inode->i_gid); + btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); + btrfs_set_stack_inode_mode(inode_item, inode->i_mode); + btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); + btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); + btrfs_set_stack_inode_generation(inode_item, + BTRFS_I(inode)->generation); + btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); + btrfs_set_stack_inode_transid(inode_item, trans->transid); + btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); + btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); + btrfs_set_stack_inode_block_group(inode_item, + BTRFS_I(inode)->block_group); + + btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), + inode->i_atime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), + inode->i_mtime.tv_nsec); + + btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_sec); + btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), + inode->i_ctime.tv_nsec); +} + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + int ret; + + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + 
return PTR_ERR(delayed_node); + + mutex_lock(&delayed_node->mutex); + if (delayed_node->inode_dirty) { + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + goto release_node; + } + + ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); + /* + * we must reserve enough space when we start a new transaction, + * so reserving metadata failure is impossible + */ + BUG_ON(ret); + + fill_stack_inode_item(trans, &delayed_node->inode_item, inode); + delayed_node->inode_dirty = 1; + delayed_node->count++; + atomic_inc(&root->fs_info->delayed_root->items); +release_node: + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return ret; +} + +static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) +{ + struct btrfs_root *root = delayed_node->root; + struct btrfs_delayed_item *curr_item, *prev_item; + + mutex_lock(&delayed_node->mutex); + curr_item = __btrfs_first_delayed_insertion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + curr_item = __btrfs_first_delayed_deletion_item(delayed_node); + while (curr_item) { + btrfs_delayed_item_release_metadata(root, curr_item); + prev_item = curr_item; + curr_item = __btrfs_next_delayed_item(prev_item); + btrfs_release_delayed_item(prev_item); + } + + if (delayed_node->inode_dirty) { + btrfs_delayed_inode_release_metadata(root, delayed_node); + btrfs_release_delayed_inode(delayed_node); + } + mutex_unlock(&delayed_node->mutex); +} + +void btrfs_kill_delayed_inode_items(struct inode *inode) +{ + struct btrfs_delayed_node *delayed_node; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return; + + __btrfs_kill_delayed_node(delayed_node); + btrfs_release_delayed_node(delayed_node); +} + +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) +{ + u64 inode_id = 0; + struct btrfs_delayed_node *delayed_nodes[8]; + int i, n; + + while (1) { + spin_lock(&root->inode_lock); + n = radix_tree_gang_lookup(&root->delayed_nodes_tree, + (void **)delayed_nodes, inode_id, + ARRAY_SIZE(delayed_nodes)); + if (!n) { + spin_unlock(&root->inode_lock); + break; + } + + inode_id = delayed_nodes[n - 1]->inode_id + 1; + + for (i = 0; i < n; i++) + atomic_inc(&delayed_nodes[i]->refs); + spin_unlock(&root->inode_lock); + + for (i = 0; i < n; i++) { + __btrfs_kill_delayed_node(delayed_nodes[i]); + btrfs_release_delayed_node(delayed_nodes[i]); + } + } +} diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h new file mode 100644 index 00000000000..eb7d240aa64 --- /dev/null +++ b/fs/btrfs/delayed-inode.h @@ -0,0 +1,141 @@ +/* + * Copyright (C) 2011 Fujitsu. All rights reserved. + * Written by Miao Xie + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef __DELAYED_TREE_OPERATION_H +#define __DELAYED_TREE_OPERATION_H + +#include <linux/rbtree.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/list.h> +#include <linux/wait.h> +#include <asm/atomic.h> + +#include "ctree.h" + +/* types of the delayed item */ +#define BTRFS_DELAYED_INSERTION_ITEM 1 +#define BTRFS_DELAYED_DELETION_ITEM 2 + +struct btrfs_delayed_root { + spinlock_t lock; + struct list_head node_list; + /* + * Used for delayed nodes which are waiting to be dealt with by the + * worker. If the delayed node is inserted into the work queue, we + * drop it from this list. + */ + struct list_head prepare_list; + atomic_t items; /* for delayed items */ + int nodes; /* for delayed nodes */ + wait_queue_head_t wait; +}; + +struct btrfs_delayed_node { + u64 inode_id; + u64 bytes_reserved; + struct btrfs_root *root; + /* Used to add the node into the delayed root's node list. */ + struct list_head n_list; + /* + * Used to add the node into the prepare list, the nodes in this list + * are waiting to be dealt with by the async worker. + */ + struct list_head p_list; + struct rb_root ins_root; + struct rb_root del_root; + struct mutex mutex; + struct btrfs_inode_item inode_item; + atomic_t refs; + u64 index_cnt; + bool in_list; + bool inode_dirty; + int count; +}; + +struct btrfs_delayed_item { + struct rb_node rb_node; + struct btrfs_key key; + struct list_head tree_list; /* used for batch insert/delete items */ + struct list_head readdir_list; /* used for readdir items */ + u64 bytes_reserved; + struct btrfs_block_rsv *block_rsv; + struct btrfs_delayed_node *delayed_node; + atomic_t refs; + int ins_or_del; + u32 data_len; + char data[0]; +}; + +static inline void btrfs_init_delayed_root( + struct btrfs_delayed_root *delayed_root) +{ + atomic_set(&delayed_root->items, 0); + delayed_root->nodes = 0; + spin_lock_init(&delayed_root->lock); + init_waitqueue_head(&delayed_root->wait); + INIT_LIST_HEAD(&delayed_root->node_list); + INIT_LIST_HEAD(&delayed_root->prepare_list); +} + +int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, struct inode *dir, + struct btrfs_disk_key *disk_key, u8 type, + u64 index); + +int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *dir, + u64 index); + +int btrfs_inode_delayed_dir_index_count(struct inode *inode); + +int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, + struct btrfs_root *root); + +void btrfs_balance_delayed_items(struct btrfs_root *root); + +int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, + struct inode *inode);
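The char data[0] member at the end of struct btrfs_delayed_item above is the flexible-array idiom: btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len) makes one allocation carry both the header and a variable-sized payload. A standalone illustration using the equivalent C99 data[] form (not btrfs code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct item {
	unsigned int data_len;
	char data[];		/* payload allocated right behind the header */
};

static struct item *alloc_item(const void *payload, unsigned int len)
{
	struct item *it = malloc(sizeof(*it) + len);	/* one allocation */

	if (!it)
		return NULL;
	it->data_len = len;
	memcpy(it->data, payload, len);
	return it;
}

int main(void)
{
	struct item *it = alloc_item("hello", 6);	/* 6 includes the NUL */

	if (it) {
		printf("%u bytes: %s\n", it->data_len, it->data);
		free(it);	/* one free() releases header and payload */
	}
	return 0;
}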
+/* Used for evicting the inode. */ +void btrfs_remove_delayed_node(struct inode *inode); +void btrfs_kill_delayed_inode_items(struct inode *inode); + + +int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode); + +/* Used when dropping a dead root */ +void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); + +/* Used for readdir() */ +void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, + struct list_head *del_list); +void btrfs_put_delayed_items(struct list_head *ins_list, + struct list_head *del_list); +int btrfs_should_delete_dir_index(struct list_head *del_list, + u64 index); +int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, + filldir_t filldir, + struct list_head *ins_list); + +/* for init */ +int __init btrfs_delayed_inode_init(void); +void btrfs_delayed_inode_exit(void); +#endif diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae6..f53fb3847c9 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -124,8 +124,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, * to use for the second index (if one is created). */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, const char *name, int name_len, u64 dir, - struct btrfs_key *location, u8 type, u64 index) + *root, const char *name, int name_len, + struct inode *dir, struct btrfs_key *location, + u8 type, u64 index) { int ret = 0; int ret2 = 0; @@ -137,13 +138,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = dir; + key.objectid = dir->i_ino; btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); key.offset = btrfs_name_hash(name, name_len); path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->leave_spinning = 1; + btrfs_cpu_key_to_disk(&disk_key, location); + data_size = sizeof(*dir_item) + name_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); @@ -155,7 +160,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root } leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_type(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); @@ -174,27 +178,9 @@ second_insert: } btrfs_release_path(root, path); - btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); - key.offset = index; - dir_item = insert_with_overflow(trans, root, path, &key, data_size, - name, name_len); - if (IS_ERR(dir_item)) { - ret2 = PTR_ERR(dir_item); - goto out_free; - } - leaf = path->nodes[0]; - btrfs_cpu_key_to_disk(&disk_key, location); - btrfs_set_dir_item_key(leaf, dir_item, &disk_key); - btrfs_set_dir_type(leaf, dir_item, type); - btrfs_set_dir_data_len(leaf, dir_item, 0); - btrfs_set_dir_name_len(leaf, dir_item, name_len); - btrfs_set_dir_transid(leaf, dir_item, trans->transid); - name_ptr = (unsigned long)(dir_item + 1); - write_extent_buffer(leaf, name, name_ptr, name_len); - btrfs_mark_buffer_dirty(leaf); - + ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, + &disk_key, type, index); out_free: - btrfs_free_path(path); if (ret) return ret; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece8..22c3c958604 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1058,6 +1058,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->name = NULL; root->in_sysfs = 0; root->inode_tree = RB_ROOT; +
INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; root->orphan_block_rsv = NULL; @@ -1693,6 +1694,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_extents); spin_lock_init(&fs_info->ordered_extent_lock); + fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), + GFP_NOFS); + if (!fs_info->delayed_root) { + err = -ENOMEM; + goto fail_iput; + } + btrfs_init_delayed_root(fs_info->delayed_root); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); @@ -1760,7 +1768,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, bh = btrfs_read_dev_super(fs_devices->latest_bdev); if (!bh) { err = -EINVAL; - goto fail_iput; + goto fail_alloc; } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); @@ -1772,7 +1780,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) - goto fail_iput; + goto fail_alloc; /* check FS state, whether FS is broken. */ fs_info->fs_state |= btrfs_super_flags(disk_super); @@ -1788,7 +1796,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super) & @@ -1798,7 +1806,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported optional features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } features = btrfs_super_incompat_flags(disk_super); @@ -1814,7 +1822,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, "unsupported option features (%Lx).\n", (unsigned long long)features); err = -EINVAL; - goto fail_iput; + goto fail_alloc; } btrfs_init_workers(&fs_info->generic_worker, @@ -1861,6 +1869,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, &fs_info->generic_worker); btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", 1, &fs_info->generic_worker); + btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", + fs_info->thread_pool_size, + &fs_info->generic_worker); /* * endios are largely parallel and should have a very @@ -1882,6 +1893,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); btrfs_start_workers(&fs_info->endio_write_workers, 1); btrfs_start_workers(&fs_info->endio_freespace_worker, 1); + btrfs_start_workers(&fs_info->delayed_workers, 1); fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, @@ -2138,6 +2150,9 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); +fail_alloc: + kfree(fs_info->delayed_root); fail_iput: invalidate_inode_pages2(fs_info->btree_inode->i_mapping); iput(fs_info->btree_inode); @@ -2578,6 +2593,7 @@ int close_ctree(struct btrfs_root *root) del_fs_roots(fs_info); iput(fs_info->btree_inode); + kfree(fs_info->delayed_root); btrfs_stop_workers(&fs_info->generic_worker); btrfs_stop_workers(&fs_info->fixup_workers); @@ -2589,6 +2605,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->endio_freespace_worker); btrfs_stop_workers(&fs_info->submit_workers); + btrfs_stop_workers(&fs_info->delayed_workers); btrfs_close_devices(fs_info->fs_devices); 
btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -2662,6 +2679,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) u64 num_dirty; unsigned long thresh = 32 * 1024 * 1024; + if (current->flags & PF_MEMALLOC) + return; + + btrfs_balance_delayed_items(root); + + num_dirty = root->fs_info->dirty_metadata_bytes; + + if (num_dirty > thresh) { + balance_dirty_pages_ratelimited_nr( + root->fs_info->btree_inode->i_mapping, 1); + } + return; +} + +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) +{ + /* + * looks as though older kernels can get into trouble with + * this code, they end up stuck in balance_dirty_pages forever + */ + u64 num_dirty; + unsigned long thresh = 32 * 1024 * 1024; + if (current->flags & PF_MEMALLOC) return; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 07b20dc2fd9..aca35af37db 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -71,6 +71,7 @@ int btrfs_insert_dev_radix(struct btrfs_root *root, u64 block_start, u64 num_blocks); void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); +void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16..7b0433866f3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3973,12 +3973,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info) WARN_ON(fs_info->chunk_block_rsv.reserved > 0); } -static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) -{ - return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * - 3 * num_items; -} - int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -3989,7 +3983,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, if (num_items == 0 || root->fs_info->chunk_root == root) return 0; - num_bytes = calc_trans_metadata_size(root, num_items); + num_bytes = btrfs_calc_trans_metadata_size(root, num_items); ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, num_bytes); if (!ret) { @@ -4028,14 +4022,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, * If all of the metadata space is used, we can commit * transaction and use space it freed. */ - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } @@ -4049,7 +4043,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, * two for root back/forward refs, two for directory entries * and one for root of the snapshot. 
*/ - u64 num_bytes = calc_trans_metadata_size(root, 5); + u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); dst_rsv->space_info = src_rsv->space_info; return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } @@ -4078,7 +4072,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (nr_extents > reserved_extents) { nr_extents -= reserved_extents; - to_reserve = calc_trans_metadata_size(root, nr_extents); + to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); } else { nr_extents = 0; to_reserve = 0; @@ -4132,7 +4126,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes); if (nr_extents > 0) - to_free += calc_trans_metadata_size(root, nr_extents); + to_free += btrfs_calc_trans_metadata_size(root, nr_extents); btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, to_free); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04..3470f67c625 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2647,11 +2647,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; int ret; + /* + * If root is the tree root, it means this inode is used to + * store free space information. These inodes are updated + * when committing the transaction, so their updates must not + * be delayed, or a deadlock will occur. + */ + if (likely(root != root->fs_info->tree_root)) { + ret = btrfs_delayed_update_inode(trans, root, inode); + if (!ret) + btrfs_set_inode_last_trans(trans, inode); + return ret; + } + path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; + path->leave_spinning = 1; - ret = btrfs_lookup_inode(trans, root, path, - &BTRFS_I(inode)->location, 1); + ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, + 1); if (ret) { if (ret > 0) ret = -ENOENT; @@ -2661,7 +2676,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, btrfs_unlock_up_safe(path, 1); leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_inode_item); + struct btrfs_inode_item); fill_inode_item(trans, leaf, inode_item, inode); btrfs_mark_buffer_dirty(leaf); @@ -2672,7 +2687,6 @@ failed: return ret; } - /* * unlink helper that gets used here in inode.c and in the tree logging * recovery code. It remove a link in a directory with a given name, and @@ -2724,18 +2738,9 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, goto err; } - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - if (IS_ERR(di)) { - ret = PTR_ERR(di); - goto err; - } - if (!di) { - ret = -ENOENT; + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); + if (ret) goto err; - } - ret = btrfs_delete_one_dir_name(trans, root, path, di); - btrfs_release_path(root, path); ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode, dir->i_ino); @@ -2924,6 +2929,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, index = btrfs_inode_ref_index(path->nodes[0], ref); btrfs_release_path(root, path); + /* + * This is a commit root search, so if we can look up the inode item + * and other related items in the commit root, it means the transaction + * of dir/file creation has been committed, and the dir index item that + * we delayed inserting has also been inserted into the commit root. So + * we needn't worry about the delayed insertion of the dir index item + * here.
+ */ di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, dentry->d_name.name, dentry->d_name.len, 0); if (IS_ERR(di)) { @@ -3029,24 +3042,16 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_release_path(root, path); index = key.offset; } + btrfs_release_path(root, path); - di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, - index, name, name_len, -1); - BUG_ON(!di || IS_ERR(di)); - - leaf = path->nodes[0]; - btrfs_dir_item_key_to_cpu(leaf, di, &key); - WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); - ret = btrfs_delete_one_dir_name(trans, root, path, di); + ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); BUG_ON(ret); - btrfs_release_path(root, path); btrfs_i_size_write(dir, dir->i_size - name_len * 2); dir->i_mtime = dir->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); - btrfs_free_path(path); return 0; } @@ -3306,6 +3311,15 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, if (root->ref_cows || root == root->fs_info->tree_root) btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); + /* + * This function is also used to drop the items in the log tree before + * we relog the inode, so if root != BTRFS_I(inode)->root, it means + * it is used to drop the logged items. So we shouldn't kill the delayed + * items. + */ + if (min_type == 0 && root == BTRFS_I(inode)->root) + btrfs_kill_delayed_inode_items(inode); + path = btrfs_alloc_path(); BUG_ON(!path); path->reada = -1; @@ -4208,7 +4222,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, return d_splice_alias(inode, dentry); } -static unsigned char btrfs_filetype_table[] = { +unsigned char btrfs_filetype_table[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK }; @@ -4222,6 +4236,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; + struct list_head ins_list; + struct list_head del_list; int ret; struct extent_buffer *leaf; int slot; @@ -4234,6 +4250,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, char tmp_name[32]; char *name_ptr; int name_len; + int is_curr = 0; /* filp->f_pos points to the current index? */ /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) @@ -4258,8 +4275,16 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, filp->f_pos = 2; } path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; path->reada = 2; + if (key_type == BTRFS_DIR_INDEX_KEY) { + INIT_LIST_HEAD(&ins_list); + INIT_LIST_HEAD(&del_list); + btrfs_get_delayed_items(inode, &ins_list, &del_list); + } + btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; key.objectid = inode->i_ino; @@ -4289,8 +4314,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, break; if (found_key.offset < filp->f_pos) goto next; + if (key_type == BTRFS_DIR_INDEX_KEY && + btrfs_should_delete_dir_index(&del_list, + found_key.offset)) + goto next; filp->f_pos = found_key.offset; + is_curr = 1; di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); di_cur = 0; @@ -4345,6 +4375,15 @@ next: path->slots[0]++; } + if (key_type == BTRFS_DIR_INDEX_KEY) { + if (is_curr) + filp->f_pos++; + ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, + &ins_list); + if (ret) + goto nopos; + } + /* Reached end of directory/root. Bump pos past the last item.
*/ if (key_type == BTRFS_DIR_INDEX_KEY) /* @@ -4357,6 +4396,8 @@ next: nopos: ret = 0; err: + if (key_type == BTRFS_DIR_INDEX_KEY) + btrfs_put_delayed_items(&ins_list, &del_list); btrfs_free_path(path); return ret; } @@ -4434,6 +4475,8 @@ void btrfs_dirty_inode(struct inode *inode) } } btrfs_end_transaction(trans, root); + if (BTRFS_I(inode)->delayed_node) + btrfs_balance_delayed_items(root); } /* @@ -4502,9 +4545,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) int ret = 0; if (BTRFS_I(dir)->index_cnt == (u64)-1) { - ret = btrfs_set_inode_index_count(dir); - if (ret) - return ret; + ret = btrfs_inode_delayed_dir_index_count(dir); + if (ret) { + ret = btrfs_set_inode_index_count(dir); + if (ret) + return ret; + } } *index = BTRFS_I(dir)->index_cnt; @@ -4671,7 +4717,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, if (ret == 0) { ret = btrfs_insert_dir_item(trans, root, name, name_len, - parent_inode->i_ino, &key, + parent_inode, &key, btrfs_inode_type(inode), index); BUG_ON(ret); @@ -6784,6 +6830,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->dummy_inode = 0; ei->force_compress = BTRFS_COMPRESS_NONE; + ei->delayed_node = NULL; + inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); @@ -6874,6 +6922,7 @@ void btrfs_destroy_inode(struct inode *inode) inode_tree_del(inode); btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); free: + btrfs_remove_delayed_node(inode); call_rcu(&inode->i_rcu, btrfs_i_callback); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed479..df59401af74 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -422,7 +422,7 @@ static noinline int create_subvol(struct btrfs_root *root, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir->i_ino, &key, + name, namelen, dir, &key, BTRFS_FT_DIR, index); if (ret) goto fail; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf..cc5a2a8a5ac 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -40,6 +40,7 @@ #include #include #include "compat.h" +#include "delayed-inode.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -1206,10 +1207,14 @@ static int __init init_btrfs_fs(void) if (err) goto free_extent_io; - err = btrfs_interface_init(); + err = btrfs_delayed_inode_init(); if (err) goto free_extent_map; + err = btrfs_interface_init(); + if (err) + goto free_delayed_inode; + err = register_filesystem(&btrfs_fs_type); if (err) goto unregister_ioctl; @@ -1219,6 +1224,8 @@ static int __init init_btrfs_fs(void) unregister_ioctl: btrfs_interface_exit(); +free_delayed_inode: + btrfs_delayed_inode_exit(); free_extent_map: extent_map_exit(); free_extent_io: @@ -1235,6 +1242,7 @@ free_sysfs: static void __exit exit_btrfs_fs(void) { btrfs_destroy_cachep(); + btrfs_delayed_inode_exit(); extent_map_exit(); extent_io_exit(); btrfs_interface_exit(); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5..b83ed5e64a3 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -487,19 +487,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, int btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return 
__btrfs_end_transaction(trans, root, 1, 1); + int ret; + + ret = __btrfs_end_transaction(trans, root, 1, 1); + if (ret) + return ret; + return 0; } int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - return __btrfs_end_transaction(trans, root, 0, 0); + int ret; + + ret = __btrfs_end_transaction(trans, root, 0, 0); + if (ret) + return ret; + return 0; +} + +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + return __btrfs_end_transaction(trans, root, 1, 1); } /* @@ -967,7 +988,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, BUG_ON(ret); ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode->i_ino, &key, + parent_inode, &key, BTRFS_FT_DIR, index); BUG_ON(ret); @@ -1037,6 +1058,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { + /* + * We must deal with the delayed items before creating + * snapshots, or we will create a snapshot with inconsistent + * information. + */ + ret = btrfs_run_delayed_items(trans, fs_info->fs_root); + BUG_ON(ret); + ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1290,6 +1319,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, BUG_ON(ret); } + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + /* * rename don't use btrfs_join_transaction, so, once we * set the transaction to blocked above, we aren't going @@ -1316,6 +1348,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); @@ -1432,6 +1467,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) root = list_entry(list.next, struct btrfs_root, root_list); list_del(&root->root_list); + btrfs_kill_all_delayed_nodes(root); + if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) btrfs_drop_snapshot(root, NULL, 0); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c58..cb928c6c42e 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -115,6 +115,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); +int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, + struct btrfs_root *root); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); void btrfs_throttle(struct btrfs_root *root); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba..ae0b72856bf 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2773,6 +2773,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.type = (u8)-1; max_key.offset = (u64)-1; + ret = btrfs_commit_inode_delayed_items(trans, inode); + if (ret) { + btrfs_free_path(path); + btrfs_free_path(dst_path); + return ret; + } + mutex_lock(&BTRFS_I(inode)->log_mutex); /* -- cgit v1.2.3-70-g09d2 From 0956c798ef8dbe0fc215870eb68bd2d8e789f86a Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 18 May 2011 00:11:22 +0000 Subject: BTRFS: Remove unused node_lock 240f62c8756 replaced the node_lock with rcu_read_lock, but forgot to remove the actual lock in the data structure. Remove it here.
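For context on the commit above: 240f62c8756 switched readers of root->node from taking node_lock to an RCU read-side critical section, which is what makes the spinlock dead weight. The sketch below shows the general kernel idiom, with invented node/root types; it is illustrative only, not the btrfs code.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct node { int value; };
struct root { struct node __rcu *node; };

/* reader: no lock, just an RCU critical section */
static int read_value(struct root *r)
{
	int v;

	rcu_read_lock();
	v = rcu_dereference(r->node)->value;
	rcu_read_unlock();
	return v;
}

/* updater: publish the new node, free the old one after a grace period */
static void replace_node(struct root *r, struct node *new_node)
{
	struct node *old;

	old = rcu_dereference_protected(r->node, 1); /* caller serializes updates */
	rcu_assign_pointer(r->node, new_node);
	synchronize_rcu();	/* wait until no reader can still see old */
	kfree(old);
}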
Signed-off-by: Andi Kleen Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 --- fs/btrfs/disk-io.c | 1 - 2 files changed, 4 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae..f290b98e2fe 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1088,9 +1088,6 @@ struct btrfs_fs_info { struct btrfs_root { struct extent_buffer *node; - /* the node lock is held while changing the node pointer */ - spinlock_t node_lock; - struct extent_buffer *commit_root; struct btrfs_root *log_root; struct btrfs_root *reloc_root; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece8..64b289690f9 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1064,7 +1064,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->orphan_list); INIT_LIST_HEAD(&root->root_list); - spin_lock_init(&root->node_lock); spin_lock_init(&root->orphan_lock); spin_lock_init(&root->inode_lock); spin_lock_init(&root->accounting_lock); -- cgit v1.2.3-70-g09d2 From c9513edb0079f97749c2ac00c887a22c4ba44792 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:07:30 +0000 Subject: Btrfs: fix the race between reading and updating devices On the btrfs_congested_fn and __unplug_io_fn paths, we should hold device_list_mutex so that the remove/add device paths cannot update fs_devices->devices underneath us. On the __btrfs_close_devices and btrfs_prepare_sprout paths, fs_devices->devices is updated, so we should hold the mutex there as well to keep the reader side from reaching entries that are being changed. Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 ++ fs/btrfs/volumes.c | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 64b289690f9..4e53a4fc467 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,6 +1410,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) struct btrfs_device *device; struct backing_dev_info *bdi; + mutex_lock(&info->fs_devices->device_list_mutex); list_for_each_entry(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; @@ -1419,6 +1420,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) break; } } + mutex_unlock(&info->fs_devices->device_list_mutex); return ret; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 290100fc47b..43c4f09e441 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -481,6 +481,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) if (--fs_devices->opened > 0) return 0; + mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { blkdev_put(device->bdev, device->mode); @@ -495,6 +496,8 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) device->writeable = 0; device->in_fs_metadata = 0; } + mutex_unlock(&fs_devices->device_list_mutex); + WARN_ON(fs_devices->open_devices); WARN_ON(fs_devices->rw_devices); fs_devices->opened = 0; @@ -1415,7 +1418,11 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&seed_devices->devices); INIT_LIST_HEAD(&seed_devices->alloc_list); mutex_init(&seed_devices->device_list_mutex); + + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->devices, &seed_devices->devices); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); +
list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); list_for_each_entry(device, &seed_devices->devices, dev_list) { device->fs_devices = seed_devices; -- cgit v1.2.3-70-g09d2 From 1f78160ce1b1b8e657e2248118c4d91f881763f0 Mon Sep 17 00:00:00 2001 From: Xiao Guangrong Date: Wed, 20 Apr 2011 10:09:16 +0000 Subject: Btrfs: using rcu lock in the reader side of devices list fs_devices->devices is only updated on remove and add device paths, so we can use rcu to protect it in the reader side Signed-off-by: Xiao Guangrong Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 14 ++++----- fs/btrfs/ioctl.c | 7 +++-- fs/btrfs/volumes.c | 85 +++++++++++++++++++++++++++++++++++++----------------- fs/btrfs/volumes.h | 2 ++ 4 files changed, 72 insertions(+), 36 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4e53a4fc467..deba3d9c885 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,8 +1410,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) struct btrfs_device *device; struct backing_dev_info *bdi; - mutex_lock(&info->fs_devices->device_list_mutex); - list_for_each_entry(device, &info->fs_devices->devices, dev_list) { + rcu_read_lock(); + list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { if (!device->bdev) continue; bdi = blk_get_backing_dev_info(device->bdev); @@ -1420,7 +1420,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) break; } } - mutex_unlock(&info->fs_devices->device_list_mutex); + rcu_read_unlock(); return ret; } @@ -2332,9 +2332,9 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - mutex_lock(&root->fs_info->fs_devices->device_list_mutex); + rcu_read_lock(); head = &root->fs_info->fs_devices->devices; - list_for_each_entry(dev, head, dev_list) { + list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { total_errors++; continue; @@ -2367,7 +2367,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) } total_errors = 0; - list_for_each_entry(dev, head, dev_list) { + list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) continue; if (!dev->in_fs_metadata || !dev->writeable) @@ -2377,7 +2377,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) total_errors++; } - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + rcu_read_unlock(); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ce773fb736a..0de71feb8e1 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -281,8 +281,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - mutex_lock(&fs_info->fs_devices->device_list_mutex); - list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { + rcu_read_lock(); + list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, + dev_list) { if (!device->bdev) continue; q = bdev_get_queue(device->bdev); @@ -292,7 +293,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) minlen); } } - mutex_unlock(&fs_info->fs_devices->device_list_mutex); + rcu_read_unlock(); if (!num_devices) return -EOPNOTSUPP; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0b5ca273726..e7844f8a347 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -363,7 +363,7 @@ static noinline int 
device_list_add(const char *path, INIT_LIST_HEAD(&device->dev_alloc_list); mutex_lock(&fs_devices->device_list_mutex); - list_add(&device->dev_list, &fs_devices->devices); + list_add_rcu(&device->dev_list, &fs_devices->devices); mutex_unlock(&fs_devices->device_list_mutex); device->fs_devices = fs_devices; @@ -471,6 +471,29 @@ again: return 0; } +static void __free_device(struct work_struct *work) +{ + struct btrfs_device *device; + + device = container_of(work, struct btrfs_device, rcu_work); + + if (device->bdev) + blkdev_put(device->bdev, device->mode); + + kfree(device->name); + kfree(device); +} + +static void free_device(struct rcu_head *head) +{ + struct btrfs_device *device; + + device = container_of(head, struct btrfs_device, rcu); + + INIT_WORK(&device->rcu_work, __free_device); + schedule_work(&device->rcu_work); +} + static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; @@ -480,18 +503,27 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { - if (device->bdev) { - blkdev_put(device->bdev, device->mode); + struct btrfs_device *new_device; + + if (device->bdev) fs_devices->open_devices--; - } + if (device->writeable) { list_del_init(&device->dev_alloc_list); fs_devices->rw_devices--; } - device->bdev = NULL; - device->writeable = 0; - device->in_fs_metadata = 0; + new_device = kmalloc(sizeof(*new_device), GFP_NOFS); + BUG_ON(!new_device); + memcpy(new_device, device, sizeof(*new_device)); + new_device->name = kstrdup(device->name, GFP_NOFS); + BUG_ON(!new_device->name); + new_device->bdev = NULL; + new_device->writeable = 0; + new_device->in_fs_metadata = 0; + list_replace_rcu(&device->dev_list, &new_device->dev_list); + + call_rcu(&device->rcu, free_device); } mutex_unlock(&fs_devices->device_list_mutex); @@ -1204,11 +1236,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) struct block_device *bdev; struct buffer_head *bh = NULL; struct btrfs_super_block *disk_super; + struct btrfs_fs_devices *cur_devices; u64 all_avail; u64 devid; u64 num_devices; u8 *dev_uuid; int ret = 0; + bool clear_super = false; mutex_lock(&uuid_mutex); mutex_lock(&root->fs_info->volume_mutex); @@ -1294,6 +1328,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) list_del_init(&device->dev_alloc_list); unlock_chunks(root); root->fs_info->fs_devices->rw_devices--; + clear_super = true; } ret = btrfs_shrink_device(device, 0); @@ -1304,16 +1339,15 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (ret) goto error_undo; - device->in_fs_metadata = 0; - /* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. 
*/ + + cur_devices = device->fs_devices; mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_del_init(&device->dev_list); - mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); + list_del_rcu(&device->dev_list); device->fs_devices->num_devices--; @@ -1327,36 +1361,36 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (device->bdev == root->fs_info->fs_devices->latest_bdev) root->fs_info->fs_devices->latest_bdev = next_device->bdev; - if (device->bdev) { - blkdev_put(device->bdev, device->mode); - device->bdev = NULL; + if (device->bdev) device->fs_devices->open_devices--; - } + + call_rcu(&device->rcu, free_device); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); - if (device->fs_devices->open_devices == 0) { + if (cur_devices->open_devices == 0) { struct btrfs_fs_devices *fs_devices; fs_devices = root->fs_info->fs_devices; while (fs_devices) { - if (fs_devices->seed == device->fs_devices) + if (fs_devices->seed == cur_devices) break; fs_devices = fs_devices->seed; } - fs_devices->seed = device->fs_devices->seed; - device->fs_devices->seed = NULL; + fs_devices->seed = cur_devices->seed; + cur_devices->seed = NULL; lock_chunks(root); - __btrfs_close_devices(device->fs_devices); + __btrfs_close_devices(cur_devices); unlock_chunks(root); - free_fs_devices(device->fs_devices); + free_fs_devices(cur_devices); } /* * at this point, the device is zero sized. We want to * remove it from the devices list and zero out the old super */ - if (device->writeable) { + if (clear_super) { /* make sure this device isn't detected as part of * the FS anymore */ @@ -1365,8 +1399,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) sync_dirty_buffer(bh); } - kfree(device->name); - kfree(device); ret = 0; error_brelse: @@ -1425,7 +1457,8 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, mutex_init(&seed_devices->device_list_mutex); mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_splice_init(&fs_devices->devices, &seed_devices->devices); + list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, + synchronize_rcu); mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); @@ -1624,7 +1657,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) * half setup */ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); - list_add(&device->dev_list, &root->fs_info->fs_devices->devices); + list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); list_add(&device->dev_alloc_list, &root->fs_info->fs_devices->alloc_list); root->fs_info->fs_devices->num_devices++; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a2..f1b2e4f53fc 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -86,6 +86,8 @@ struct btrfs_device { u8 uuid[BTRFS_UUID_SIZE]; struct btrfs_work work; + struct rcu_head rcu; + struct work_struct rcu_work; }; struct btrfs_fs_devices { -- cgit v1.2.3-70-g09d2 From 4cb5300bc839b8a943eb19c9f27f25470e22d0ca Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 24 May 2011 15:35:30 -0400 Subject: Btrfs: add mount -o auto_defrag This will detect small random writes into files and queue them up for an auto defrag process. It isn't well suited to database workloads yet, but works for smaller files such as rpm, sqlite or bdb databases. 
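[Editorial usage sketch, not part of the patch: the option string that btrfs_parse_options() actually accepts is "autodefrag" (see the super.c hunk at the end of this patch), even though the subject line spells it auto_defrag. A minimal userspace caller might look like the following; the device and mount-point paths are made-up examples.]

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* /dev/sdb and /mnt/btrfs are hypothetical; the option string
	 * "autodefrag" matches the tokens[] entry added in super.c */
	if (mount("/dev/sdb", "/mnt/btrfs", "btrfs", 0, "autodefrag")) {
		perror("mount");
		return 1;
	}
	return 0;
}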
Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 1 + fs/btrfs/ctree.h | 45 ++++- fs/btrfs/disk-io.c | 12 ++ fs/btrfs/file.c | 257 ++++++++++++++++++++++++++++ fs/btrfs/inode.c | 12 ++ fs/btrfs/ioctl.c | 448 ++++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ioctl.h | 31 ---- fs/btrfs/super.c | 7 +- 8 files changed, 678 insertions(+), 135 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index d0b0e43a6a8..93b1aa93201 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -153,6 +153,7 @@ struct btrfs_inode { unsigned ordered_data_close:1; unsigned orphan_meta_reserved:1; unsigned dummy_inode:1; + unsigned in_defrag:1; /* * always compress this one file diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 026fc47b42c..332323e19dd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1074,6 +1074,11 @@ struct btrfs_fs_info { /* all metadata allocations go through this cluster */ struct btrfs_free_cluster meta_alloc_cluster; + /* auto defrag inodes go here */ + spinlock_t defrag_inodes_lock; + struct rb_root defrag_inodes; + atomic_t defrag_running; + spinlock_t ref_cache_lock; u64 total_ref_cache_size; @@ -1205,6 +1210,38 @@ struct btrfs_root { struct super_block anon_super; }; +struct btrfs_ioctl_defrag_range_args { + /* start of the defrag operation */ + __u64 start; + + /* number of bytes to defrag, use (u64)-1 to say all */ + __u64 len; + + /* + * flags for the operation, which can include turning + * on compression for this one defrag + */ + __u64 flags; + + /* + * any extent bigger than this will be considered + * already defragged. Use 0 to take the kernel default + * Use 1 to say every single extent must be rewritten + */ + __u32 extent_thresh; + + /* + * which compression method to use if turning on compression + * for this defrag operation. If unspecified, zlib will + * be used + */ + __u32 compress_type; + + /* spare for later */ + __u32 unused[4]; +}; + + /* * inode items have the data typically returned from stat and store other * info about object characteristics. 
There is one for every file and dir in @@ -1302,6 +1339,7 @@ struct btrfs_root { #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) +#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -2528,8 +2566,13 @@ extern const struct dentry_operations btrfs_dentry_operations; long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); - +int btrfs_defrag_file(struct inode *inode, struct file *file, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long max_pages); /* file.c */ +int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + struct inode *inode); +int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 16d335b342a..b2588a55265 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1475,6 +1475,7 @@ static int cleaner_kthread(void *arg) btrfs_run_delayed_iputs(root); btrfs_clean_old_snapshots(root); mutex_unlock(&root->fs_info->cleaner_mutex); + btrfs_run_defrag_inodes(root->fs_info); } if (freezing(current)) { @@ -1616,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); + spin_lock_init(&fs_info->defrag_inodes_lock); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; @@ -1638,9 +1640,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); + atomic_set(&fs_info->defrag_running, 0); fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; + fs_info->defrag_inodes = RB_ROOT; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); @@ -2501,6 +2505,14 @@ int close_ctree(struct btrfs_root *root) smp_mb(); btrfs_scrub_cancel(root); + + /* wait for any defraggers to finish */ + wait_event(fs_info->transaction_wait, + (atomic_read(&fs_info->defrag_running) == 0)); + + /* clear out the rbtree of defraggable inodes */ + btrfs_run_defrag_inodes(root->fs_info); + btrfs_put_block_group_cache(fs_info); /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 58ddc444215..c6a22d783c3 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -40,6 +40,263 @@ #include "locking.h" #include "compat.h" +/* + * when auto defrag is enabled we + * queue up these defrag structs to remember which + * inodes need defragging passes + */ +struct inode_defrag { + struct rb_node rb_node; + /* objectid */ + u64 ino; + /* + * transid where the defrag was added, we search for + * extents newer than this + */ + u64 transid; + + /* root objectid */ + u64 root; + + /* last offset we were able to defrag */ + u64 last_offset; + + /* if we've wrapped around back to zero once already */ + int cycled; +}; + +/* pop a record for an inode into the defrag tree. 
The lock + * must be held already + * + * If you're inserting a record for an older transid than an + * existing record, the transid already in the tree is lowered + * + * If an existing record is found the defrag item you + * pass in is freed + */ +static int __btrfs_add_inode_defrag(struct inode *inode, + struct inode_defrag *defrag) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct inode_defrag *entry; + struct rb_node **p; + struct rb_node *parent = NULL; + + p = &root->fs_info->defrag_inodes.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct inode_defrag, rb_node); + + if (defrag->ino < entry->ino) + p = &parent->rb_left; + else if (defrag->ino > entry->ino) + p = &parent->rb_right; + else { + /* if we're reinserting an entry for + * an old defrag run, make sure to + * lower the transid of our existing record + */ + if (defrag->transid < entry->transid) + entry->transid = defrag->transid; + if (defrag->last_offset > entry->last_offset) + entry->last_offset = defrag->last_offset; + goto exists; + } + } + BTRFS_I(inode)->in_defrag = 1; + rb_link_node(&defrag->rb_node, parent, p); + rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); + return 0; + +exists: + kfree(defrag); + return 0; + +} + +/* + * insert a defrag record for this inode if auto defrag is + * enabled + */ +int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, + struct inode *inode) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct inode_defrag *defrag; + int ret = 0; + u64 transid; + + if (!btrfs_test_opt(root, AUTO_DEFRAG)) + return 0; + + if (root->fs_info->closing) + return 0; + + if (BTRFS_I(inode)->in_defrag) + return 0; + + if (trans) + transid = trans->transid; + else + transid = BTRFS_I(inode)->root->last_trans; + + defrag = kzalloc(sizeof(*defrag), GFP_NOFS); + if (!defrag) + return -ENOMEM; + + defrag->ino = inode->i_ino; + defrag->transid = transid; + defrag->root = root->root_key.objectid; + + spin_lock(&root->fs_info->defrag_inodes_lock); + if (!BTRFS_I(inode)->in_defrag) + ret = __btrfs_add_inode_defrag(inode, defrag); + spin_unlock(&root->fs_info->defrag_inodes_lock); + return ret; +} + +/* + * must be called with the defrag_inodes lock held + */ +struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino, + struct rb_node **next) +{ + struct inode_defrag *entry = NULL; + struct rb_node *p; + struct rb_node *parent = NULL; + + p = info->defrag_inodes.rb_node; + while (p) { + parent = p; + entry = rb_entry(parent, struct inode_defrag, rb_node); + + if (ino < entry->ino) + p = parent->rb_left; + else if (ino > entry->ino) + p = parent->rb_right; + else + return entry; + } + + if (next) { + while (parent && ino > entry->ino) { + parent = rb_next(parent); + entry = rb_entry(parent, struct inode_defrag, rb_node); + } + *next = parent; + } + return NULL; +} + +/* + * run through the list of inodes in the FS that need + * defragging + */ +int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) +{ + struct inode_defrag *defrag; + struct btrfs_root *inode_root; + struct inode *inode; + struct rb_node *n; + struct btrfs_key key; + struct btrfs_ioctl_defrag_range_args range; + u64 first_ino = 0; + int num_defrag; + int defrag_batch = 1024; + + memset(&range, 0, sizeof(range)); + range.len = (u64)-1; + + atomic_inc(&fs_info->defrag_running); + spin_lock(&fs_info->defrag_inodes_lock); + while(1) { + n = NULL; + + /* find an inode to defrag */ + defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n); + if (!defrag) { + if (n) + 
defrag = rb_entry(n, struct inode_defrag, rb_node); + else if (first_ino) { + first_ino = 0; + continue; + } else { + break; + } + } + + /* remove it from the rbtree */ + first_ino = defrag->ino + 1; + rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); + + if (fs_info->closing) + goto next_free; + + spin_unlock(&fs_info->defrag_inodes_lock); + + /* get the inode */ + key.objectid = defrag->root; + btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); + key.offset = (u64)-1; + inode_root = btrfs_read_fs_root_no_name(fs_info, &key); + if (IS_ERR(inode_root)) + goto next; + + key.objectid = defrag->ino; + btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); + key.offset = 0; + + inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); + if (IS_ERR(inode)) + goto next; + + /* do a chunk of defrag */ + BTRFS_I(inode)->in_defrag = 0; + range.start = defrag->last_offset; + num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, + defrag_batch); + /* + * if we filled the whole defrag batch, there + * must be more work to do. Queue this defrag + * again + */ + if (num_defrag == defrag_batch) { + defrag->last_offset = range.start; + __btrfs_add_inode_defrag(inode, defrag); + /* + * we don't want to kfree defrag, we added it back to + * the rbtree + */ + defrag = NULL; + } else if (defrag->last_offset && !defrag->cycled) { + /* + * we didn't fill our defrag batch, but + * we didn't start at zero. Make sure we loop + * around to the start of the file. + */ + defrag->last_offset = 0; + defrag->cycled = 1; + __btrfs_add_inode_defrag(inode, defrag); + defrag = NULL; + } + + iput(inode); +next: + spin_lock(&fs_info->defrag_inodes_lock); +next_free: + kfree(defrag); + } + spin_unlock(&fs_info->defrag_inodes_lock); + + atomic_dec(&fs_info->defrag_running); + + /* + * during unmount, we use the transaction_wait queue to + * wait for the defragger to stop + */ + wake_up(&fs_info->transaction_wait); + return 0; +} /* simple helper to fault in pages and copy. This should go away * and be replaced with calls into generic code. 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d378f8b70ef..bb51bb1fa44 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -342,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode, int will_compress; int compress_type = root->fs_info->compress_type; + /* if this is a small write inside eof, kick off a defrag */ + if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024) + btrfs_add_inode_defrag(NULL, inode); + actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; @@ -799,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode, disk_num_bytes = num_bytes; ret = 0; + /* if this is a small write inside eof, kick off defrag */ + if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024) + btrfs_add_inode_defrag(trans, inode); + if (start == 0) { /* lets try to make an inline extent */ ret = cow_file_range_inline(trans, root, inode, @@ -5371,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, if (IS_ERR(trans)) return ERR_CAST(trans); + if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024) + btrfs_add_inode_defrag(trans, inode); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; alloc_hint = get_extent_allocation_hint(inode, start, len); @@ -6682,6 +6693,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->ordered_data_close = 0; ei->orphan_meta_reserved = 0; ei->dummy_inode = 0; + ei->in_defrag = 0; ei->force_compress = BTRFS_COMPRESS_NONE; ei->delayed_node = NULL; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index c4f17e4e2c9..85e818ce00c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -656,6 +656,106 @@ out_unlock: return error; } +/* + * When we're defragging a range, we don't want to kick it off again + * if it is really just waiting for delalloc to send it down. + * If we find a nice big extent or delalloc range for the bytes in the + * file you want to defrag, we return 0 to let you know to skip this + * part of the file + */ +static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh) +{ + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map *em = NULL; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 end; + + read_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); + read_unlock(&em_tree->lock); + + if (em) { + end = extent_map_end(em); + free_extent_map(em); + if (end - offset > thresh) + return 0; + } + /* if we already have a nice delalloc here, just stop */ + thresh /= 2; + end = count_range_bits(io_tree, &offset, offset + thresh, + thresh, EXTENT_DELALLOC, 1); + if (end >= thresh) + return 0; + return 1; +} + +/* + * helper function to walk through a file and find extents + * newer than a specific transid, and smaller than thresh. 
+ * + * This is used by the defragging code to find new and small + * extents + */ +static int find_new_extents(struct btrfs_root *root, + struct inode *inode, u64 newer_than, + u64 *off, int thresh) +{ + struct btrfs_path *path; + struct btrfs_key min_key; + struct btrfs_key max_key; + struct extent_buffer *leaf; + struct btrfs_file_extent_item *extent; + int type; + int ret; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + min_key.objectid = inode->i_ino; + min_key.type = BTRFS_EXTENT_DATA_KEY; + min_key.offset = *off; + + max_key.objectid = inode->i_ino; + max_key.type = (u8)-1; + max_key.offset = (u64)-1; + + path->keep_locks = 1; + + while(1) { + ret = btrfs_search_forward(root, &min_key, &max_key, + path, 0, newer_than); + if (ret != 0) + goto none; + if (min_key.objectid != inode->i_ino) + goto none; + if (min_key.type != BTRFS_EXTENT_DATA_KEY) + goto none; + + leaf = path->nodes[0]; + extent = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + + type = btrfs_file_extent_type(leaf, extent); + if (type == BTRFS_FILE_EXTENT_REG && + btrfs_file_extent_num_bytes(leaf, extent) < thresh && + check_defrag_in_cache(inode, min_key.offset, thresh)) { + *off = min_key.offset; + btrfs_free_path(path); + return 0; + } + + if (min_key.offset == (u64)-1) + goto none; + + min_key.offset++; + btrfs_release_path(path); + } +none: + btrfs_free_path(path); + return -ENOENT; +} + static int should_defrag_range(struct inode *inode, u64 start, u64 len, int thresh, u64 *last_len, u64 *skip, u64 *defrag_end) @@ -665,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 1; - - if (thresh == 0) - thresh = 256 * 1024; - /* * make sure that once we start defragging and extent, we keep on * defragging it @@ -727,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, return ret; } -static int btrfs_defrag_file(struct file *file, - struct btrfs_ioctl_defrag_range_args *range) +/* + * it doesn't do much good to defrag one or two pages + * at a time. This pulls in a nice chunk of pages + * to COW and defrag. + * + * It also makes sure the delalloc code has enough + * dirty data to avoid making new small extents as part + * of the defrag + * + * It's a good idea to start RA on this range + * before calling this. 
+ */ +static int cluster_pages_for_defrag(struct inode *inode, + struct page **pages, + unsigned long start_index, + int num_pages) { - struct inode *inode = fdentry(file)->d_inode; - struct btrfs_root *root = BTRFS_I(inode)->root; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + unsigned long file_end; + u64 isize = i_size_read(inode); + u64 page_start; + u64 page_end; + int ret; + int i; + int i_done; struct btrfs_ordered_extent *ordered; - struct page *page; + struct extent_state *cached_state = NULL; + + if (isize == 0) + return 0; + file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + + ret = btrfs_delalloc_reserve_space(inode, + num_pages << PAGE_CACHE_SHIFT); + if (ret) + return ret; +again: + ret = 0; + i_done = 0; + + /* step one, lock all the pages */ + for (i = 0; i < num_pages; i++) { + struct page *page; + page = grab_cache_page(inode->i_mapping, + start_index + i); + if (!page) + break; + + if (!PageUptodate(page)) { + btrfs_readpage(NULL, page); + lock_page(page); + if (!PageUptodate(page)) { + unlock_page(page); + page_cache_release(page); + ret = -EIO; + break; + } + } + isize = i_size_read(inode); + file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + if (!isize || page->index > file_end || + page->mapping != inode->i_mapping) { + /* whoops, we blew past eof, skip this page */ + unlock_page(page); + page_cache_release(page); + break; + } + pages[i] = page; + i_done++; + } + if (!i_done || ret) + goto out; + + if (!(inode->i_sb->s_flags & MS_ACTIVE)) + goto out; + + /* + * so now we have a nice long stream of locked + * and up to date pages, lets wait on them + */ + for (i = 0; i < i_done; i++) + wait_on_page_writeback(pages[i]); + + page_start = page_offset(pages[0]); + page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; + + lock_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, 0, &cached_state, + GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); + if (ordered && + ordered->file_offset + ordered->len > page_start && + ordered->file_offset < page_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, + &cached_state, GFP_NOFS); + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_wait_ordered_range(inode, page_start, + page_end - page_start); + goto again; + } + if (ordered) + btrfs_put_ordered_extent(ordered); + + clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, + page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, + GFP_NOFS); + + if (i_done != num_pages) { + atomic_inc(&BTRFS_I(inode)->outstanding_extents); + btrfs_delalloc_release_space(inode, + (num_pages - i_done) << PAGE_CACHE_SHIFT); + } + + + btrfs_set_extent_delalloc(inode, page_start, page_end - 1, + &cached_state); + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + page_start, page_end - 1, &cached_state, + GFP_NOFS); + + for (i = 0; i < i_done; i++) { + clear_page_dirty_for_io(pages[i]); + ClearPageChecked(pages[i]); + set_page_extent_mapped(pages[i]); + set_page_dirty(pages[i]); + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + return i_done; +out: + for (i = 0; i < i_done; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); + return ret; + +} + +int btrfs_defrag_file(struct inode *inode, struct file *file, + struct btrfs_ioctl_defrag_range_args *range, + u64 newer_than, unsigned long 
max_to_defrag) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *disk_super; + struct file_ra_state *ra = NULL; unsigned long last_index; - unsigned long ra_pages = root->fs_info->bdi.ra_pages; - unsigned long total_read = 0; u64 features; - u64 page_start; - u64 page_end; u64 last_len = 0; u64 skip = 0; u64 defrag_end = 0; + u64 newer_off = range->start; + int newer_left = 0; unsigned long i; int ret; + int defrag_count = 0; int compress_type = BTRFS_COMPRESS_ZLIB; + int extent_thresh = range->extent_thresh; + int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT; + u64 new_align = ~((u64)128 * 1024 - 1); + struct page **pages = NULL; + + if (extent_thresh == 0) + extent_thresh = 256 * 1024; if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { if (range->compress_type > BTRFS_COMPRESS_TYPES) @@ -759,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file, if (inode->i_size == 0) return 0; + /* + * if we were not given a file, allocate a readahead + * context + */ + if (!file) { + ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; + file_ra_state_init(ra, inode->i_mapping); + } else { + ra = &file->f_ra; + } + + pages = kmalloc(sizeof(struct page *) * newer_cluster, + GFP_NOFS); + if (!pages) { + ret = -ENOMEM; + goto out_ra; + } + + /* find the last page to defrag */ if (range->start + range->len > range->start) { last_index = min_t(u64, inode->i_size - 1, range->start + range->len - 1) >> PAGE_CACHE_SHIFT; @@ -766,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file, last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; } - i = range->start >> PAGE_CACHE_SHIFT; - while (i <= last_index) { - if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, + if (newer_than) { + ret = find_new_extents(root, inode, newer_than, + &newer_off, 64 * 1024); + if (!ret) { + range->start = newer_off; + /* + * we always align our defrag to help keep + * the extents in the file evenly spaced + */ + i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + newer_left = newer_cluster; + } else + goto out_ra; + } else { + i = range->start >> PAGE_CACHE_SHIFT; + } + if (!max_to_defrag) + max_to_defrag = last_index - 1; + + while (i <= last_index && defrag_count < max_to_defrag) { + /* + * make sure we stop running if someone unmounts + * the FS + */ + if (!(inode->i_sb->s_flags & MS_ACTIVE)) + break; + + if (!newer_than && + !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, - range->extent_thresh, + extent_thresh, &last_len, &skip, &defrag_end)) { unsigned long next; @@ -782,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file, i = max(i + 1, next); continue; } - - if (total_read % ra_pages == 0) { - btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, - min(last_index, i + ra_pages - 1)); - } - total_read++; - mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) BTRFS_I(inode)->force_compress = compress_type; - ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); - if (ret) - goto err_unlock; -again: - if (inode->i_size == 0 || - i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { - ret = 0; - goto err_reservations; - } + btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster); - page = grab_cache_page(inode->i_mapping, i); - if (!page) { - ret = -ENOMEM; - goto err_reservations; - } - - if (!PageUptodate(page)) { - btrfs_readpage(NULL, page); - lock_page(page); - if (!PageUptodate(page)) { - unlock_page(page); - page_cache_release(page); - ret = -EIO; - goto err_reservations; - } - } - - if 
(page->mapping != inode->i_mapping) { - unlock_page(page); - page_cache_release(page); - goto again; - } - - wait_on_page_writeback(page); + ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster); + if (ret < 0) + goto out_ra; - if (PageDirty(page)) { - btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); - goto loop_unlock; - } + defrag_count += ret; + balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); + i += ret; - page_start = (u64)page->index << PAGE_CACHE_SHIFT; - page_end = page_start + PAGE_CACHE_SIZE - 1; - lock_extent(io_tree, page_start, page_end, GFP_NOFS); + if (newer_than) { + if (newer_off == (u64)-1) + break; - ordered = btrfs_lookup_ordered_extent(inode, page_start); - if (ordered) { - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - unlock_page(page); - page_cache_release(page); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - goto again; + newer_off = max(newer_off + 1, + (u64)i << PAGE_CACHE_SHIFT); + + ret = find_new_extents(root, inode, + newer_than, &newer_off, + 64 * 1024); + if (!ret) { + range->start = newer_off; + i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + newer_left = newer_cluster; + } else { + break; + } + } else { + i++; } - set_page_extent_mapped(page); - - /* - * this makes sure page_mkwrite is called on the - * page if it is dirtied again later - */ - clear_page_dirty_for_io(page); - clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, - page_end, EXTENT_DIRTY | EXTENT_DELALLOC | - EXTENT_DO_ACCOUNTING, GFP_NOFS); - - btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); - ClearPageChecked(page); - set_page_dirty(page); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - -loop_unlock: - unlock_page(page); - page_cache_release(page); - mutex_unlock(&inode->i_mutex); - - balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); - i++; } if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) @@ -899,12 +1138,14 @@ loop_unlock: btrfs_set_super_incompat_flags(disk_super, features); } - return 0; + if (!file) + kfree(ra); + return defrag_count; -err_reservations: - btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); -err_unlock: - mutex_unlock(&inode->i_mutex); +out_ra: + if (!file) + kfree(ra); + kfree(pages); return ret; } @@ -1756,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) /* the rest are all set to zero by kzalloc */ range->len = (u64)-1; } - ret = btrfs_defrag_file(file, range); + ret = btrfs_defrag_file(fdentry(file)->d_inode, file, + range, 0, 0); + if (ret > 0) + ret = 0; kfree(range); break; default: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index e5e0ee2cad4..ad1ea789fcb 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -181,37 +181,6 @@ struct btrfs_ioctl_clone_range_args { #define BTRFS_DEFRAG_RANGE_COMPRESS 1 #define BTRFS_DEFRAG_RANGE_START_IO 2 -struct btrfs_ioctl_defrag_range_args { - /* start of the defrag operation */ - __u64 start; - - /* number of bytes to defrag, use (u64)-1 to say all */ - __u64 len; - - /* - * flags for the operation, which can include turning - * on compression for this one defrag - */ - __u64 flags; - - /* - * any extent bigger than this will be considered - * already defragged. Use 0 to take the kernel default - * Use 1 to say every single extent must be rewritten - */ - __u32 extent_thresh; - - /* - * which compression method to use if turning on compression - * for this defrag operation. 
If unspecified, zlib will - be used - */ - __u32 compress_type; - - /* spare for later */ - __u32 unused[4]; -}; - struct btrfs_ioctl_space_info { __u64 flags; __u64 total_bytes; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index cd0c7cd2c8f..28e3cb2607f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -160,7 +160,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_subvolrootid, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, }; static match_table_t tokens = { @@ -191,6 +191,7 @@ static match_table_t tokens = { {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_enospc_debug, "enospc_debug"}, {Opt_subvolrootid, "subvolrootid=%d"}, + {Opt_defrag, "autodefrag"}, {Opt_err, NULL}, }; @@ -369,6 +370,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_enospc_debug: btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); break; + case Opt_defrag: + printk(KERN_INFO "btrfs: enabling auto defrag"); + btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); + break; case Opt_err: printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); -- cgit v1.2.3-70-g09d2 From 174ba50915b08dcfd07c8b5fb795b46a165fa09a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 27 May 2011 10:03:58 -0400 Subject: Btrfs: use the device_list_mutex during write_dev_supers write_dev_supers was changed to use RCU to protect the list of devices, but it was then sleeping while it actually wrote the supers, which is not allowed inside an RCU read-side critical section. This fixes it to just use the mutex, since we really don't need any concurrency in write_dev_supers anyway. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/disk-io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b2588a55265..98b6a71decb 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2310,7 +2310,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) sb = &root->fs_info->super_for_commit; dev_item = &sb->dev_item; - rcu_read_lock(); + mutex_lock(&root->fs_info->fs_devices->device_list_mutex); head = &root->fs_info->fs_devices->devices; list_for_each_entry_rcu(dev, head, dev_list) { if (!dev->bdev) { @@ -2355,7 +2355,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) total_errors++; } - rcu_read_unlock(); + mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { printk(KERN_ERR "btrfs: %d errors while writing supers\n", total_errors); -- cgit v1.2.3-70-g09d2
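[Editorial sketch of the locking rule behind the last two device-list commits, not taken from any patch above: rcu_read_lock() marks a read-side critical section in which the reader must not sleep, so it suits short, non-blocking walks such as btrfs_congested_fn(), while a reader that can block per element (like writing supers with sync_dirty_buffer()) has to take the mutex instead. The structures below are simplified stand-ins, not the real btrfs_device/btrfs_fs_devices.]

#include <linux/mutex.h>
#include <linux/rculist.h>

struct demo_dev {
	struct list_head dev_list;
	int busy;
};

static LIST_HEAD(demo_devices);
static DEFINE_MUTEX(demo_mutex);	/* guards updates to demo_devices */

/* fast, non-sleeping reader: RCU is sufficient */
static int demo_any_device_busy(void)
{
	struct demo_dev *dev;
	int busy = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &demo_devices, dev_list) {
		if (dev->busy) {
			busy = 1;
			break;
		}
	}
	rcu_read_unlock();
	return busy;
}

/* reader that may sleep on each device: must hold the mutex */
static void demo_visit_devices(void (*fn)(struct demo_dev *dev))
{
	struct demo_dev *dev;

	mutex_lock(&demo_mutex);
	list_for_each_entry(dev, &demo_devices, dev_list)
		fn(dev);	/* fn may block, e.g. submit and wait on I/O */
	mutex_unlock(&demo_mutex);
}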