Diffstat (limited to 'fs/btrfs/relocation.c')
-rw-r--r--   fs/btrfs/relocation.c   142
1 file changed, 78 insertions, 64 deletions
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 65245a07275..74257d6436a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -736,7 +736,8 @@ again:
 			err = ret;
 			goto out;
 		}
-		BUG_ON(!ret || !path1->slots[0]);
+		ASSERT(ret);
+		ASSERT(path1->slots[0]);
 
 		path1->slots[0]--;
 
@@ -746,10 +747,10 @@ again:
 			 * the backref was added previously when processing
 			 * backref of type BTRFS_TREE_BLOCK_REF_KEY
 			 */
-			BUG_ON(!list_is_singular(&cur->upper));
+			ASSERT(list_is_singular(&cur->upper));
 			edge = list_entry(cur->upper.next, struct backref_edge,
 					  list[LOWER]);
-			BUG_ON(!list_empty(&edge->list[UPPER]));
+			ASSERT(list_empty(&edge->list[UPPER]));
 			exist = edge->node[UPPER];
 			/*
 			 * add the upper level block to pending list if we need
@@ -831,7 +832,7 @@ again:
 				cur->cowonly = 1;
 			}
 #else
-		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
+		ASSERT(key.type != BTRFS_EXTENT_REF_V0_KEY);
 		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
 #endif
 			if (key.objectid == key.offset) {
@@ -840,7 +841,7 @@ again:
 				 * backref of this type.
 				 */
 				root = find_reloc_root(rc, cur->bytenr);
-				BUG_ON(!root);
+				ASSERT(root);
 				cur->root = root;
 				break;
 			}
@@ -868,7 +869,7 @@ again:
 			} else {
 				upper = rb_entry(rb_node, struct backref_node,
 						 rb_node);
-				BUG_ON(!upper->checked);
+				ASSERT(upper->checked);
 				INIT_LIST_HEAD(&edge->list[UPPER]);
 			}
 			list_add_tail(&edge->list[LOWER], &cur->upper);
@@ -892,7 +893,7 @@ again:
 
 		if (btrfs_root_level(&root->root_item) == cur->level) {
 			/* tree root */
-			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
+			ASSERT(btrfs_root_bytenr(&root->root_item) ==
 			       cur->bytenr);
 			if (should_ignore_root(root))
 				list_add(&cur->list, &useless);
@@ -927,7 +928,7 @@ again:
 		need_check = true;
 		for (; level < BTRFS_MAX_LEVEL; level++) {
 			if (!path2->nodes[level]) {
-				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
+				ASSERT(btrfs_root_bytenr(&root->root_item) ==
 				       lower->bytenr);
 				if (should_ignore_root(root))
 					list_add(&lower->list, &useless);
@@ -977,12 +978,15 @@ again:
 					need_check = false;
 					list_add_tail(&edge->list[UPPER],
 						      &list);
-				} else
+				} else {
+					if (upper->checked)
+						need_check = true;
 					INIT_LIST_HEAD(&edge->list[UPPER]);
+				}
 			} else {
 				upper = rb_entry(rb_node, struct backref_node,
 						 rb_node);
-				BUG_ON(!upper->checked);
+				ASSERT(upper->checked);
 				INIT_LIST_HEAD(&edge->list[UPPER]);
 				if (!upper->owner)
 					upper->owner = btrfs_header_owner(eb);
@@ -1026,7 +1030,7 @@ next:
 	 * everything goes well, connect backref nodes and insert backref nodes
 	 * into the cache.
 	 */
-	BUG_ON(!node->checked);
+	ASSERT(node->checked);
 	cowonly = node->cowonly;
 	if (!cowonly) {
 		rb_node = tree_insert(&cache->rb_root, node->bytenr,
@@ -1062,8 +1066,21 @@ next:
 			continue;
 		}
 
-		BUG_ON(!upper->checked);
-		BUG_ON(cowonly != upper->cowonly);
+		if (!upper->checked) {
+			/*
+			 * Still want to blow up for developers since this is a
+			 * logic bug.
+			 */
+			ASSERT(0);
+			err = -EINVAL;
+			goto out;
+		}
+		if (cowonly != upper->cowonly) {
+			ASSERT(0);
+			err = -EINVAL;
+			goto out;
+		}
+
 		if (!cowonly) {
 			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
 					      &upper->rb_node);
@@ -1086,7 +1103,7 @@ next:
 	while (!list_empty(&useless)) {
 		upper = list_entry(useless.next, struct backref_node, list);
 		list_del_init(&upper->list);
-		BUG_ON(!list_empty(&upper->upper));
+		ASSERT(list_empty(&upper->upper));
 		if (upper == node)
 			node = NULL;
 		if (upper->lowest) {
@@ -1119,29 +1136,45 @@ out:
 	if (err) {
 		while (!list_empty(&useless)) {
 			lower = list_entry(useless.next,
-					   struct backref_node, upper);
-			list_del_init(&lower->upper);
+					   struct backref_node, list);
+			list_del_init(&lower->list);
 		}
-		upper = node;
-		INIT_LIST_HEAD(&list);
-		while (upper) {
-			if (RB_EMPTY_NODE(&upper->rb_node)) {
-				list_splice_tail(&upper->upper, &list);
-				free_backref_node(cache, upper);
-			}
-
-			if (list_empty(&list))
-				break;
-
-			edge = list_entry(list.next, struct backref_edge,
-					  list[LOWER]);
+		while (!list_empty(&list)) {
+			edge = list_first_entry(&list, struct backref_edge,
+						list[UPPER]);
+			list_del(&edge->list[UPPER]);
 			list_del(&edge->list[LOWER]);
+			lower = edge->node[LOWER];
 			upper = edge->node[UPPER];
 			free_backref_edge(cache, edge);
+
+			/*
+			 * Lower is no longer linked to any upper backref nodes
+			 * and isn't in the cache, we can free it ourselves.
+			 */
+			if (list_empty(&lower->upper) &&
+			    RB_EMPTY_NODE(&lower->rb_node))
+				list_add(&lower->list, &useless);
+
+			if (!RB_EMPTY_NODE(&upper->rb_node))
+				continue;
+
+			/* Add this guy's upper edges to the list to proces */
+			list_for_each_entry(edge, &upper->upper, list[LOWER])
+				list_add_tail(&edge->list[UPPER], &list);
+			if (list_empty(&upper->upper))
+				list_add(&upper->list, &useless);
+		}
+
+		while (!list_empty(&useless)) {
+			lower = list_entry(useless.next,
+					   struct backref_node, list);
+			list_del_init(&lower->list);
+			free_backref_node(cache, lower);
 		}
 		return ERR_PTR(err);
 	}
-	BUG_ON(node && node->detached);
+	ASSERT(!node || !node->detached);
 	return node;
 }
 
@@ -1787,7 +1820,7 @@ again:
 			btrfs_node_key_to_cpu(parent, next_key, slot + 1);
 
 		old_bytenr = btrfs_node_blockptr(parent, slot);
-		blocksize = btrfs_level_size(dest, level - 1);
+		blocksize = dest->nodesize;
 		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
 
 		if (level <= max_level) {
@@ -1813,8 +1846,7 @@ again:
 				break;
 			}
 
-			eb = read_tree_block(dest, old_bytenr, blocksize,
-					     old_ptr_gen);
+			eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
 			if (!eb || !extent_buffer_uptodate(eb)) {
 				ret = (!eb) ? -ENOMEM : -EIO;
 				free_extent_buffer(eb);
@@ -1944,7 +1976,6 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
 	u64 bytenr;
 	u64 ptr_gen = 0;
 	u64 last_snapshot;
-	u32 blocksize;
 	u32 nritems;
 
 	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
@@ -1970,8 +2001,7 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
 		}
 
 		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
-		blocksize = btrfs_level_size(root, i - 1);
-		eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
+		eb = read_tree_block(root, bytenr, ptr_gen);
 		if (!eb || !extent_buffer_uptodate(eb)) {
 			free_extent_buffer(eb);
 			return -EIO;
@@ -2316,7 +2346,7 @@ void free_reloc_roots(struct list_head *list)
 }
 
 static noinline_for_stack
-int merge_reloc_roots(struct reloc_control *rc)
+void merge_reloc_roots(struct reloc_control *rc)
 {
 	struct btrfs_root *root;
 	struct btrfs_root *reloc_root;
@@ -2397,7 +2427,6 @@ out:
 	}
 
 	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
-	return ret;
 }
 
 static void free_block_list(struct rb_root *blocks)
@@ -2544,8 +2573,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
 		if (next->processed && (reserve || next != node))
 			break;
 
-		num_bytes += btrfs_level_size(rc->extent_root,
-					      next->level);
+		num_bytes += rc->extent_root->nodesize;
 
 		if (list_empty(&next->upper))
 			break;
@@ -2679,9 +2707,9 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 			goto next;
 		}
 
-		blocksize = btrfs_level_size(root, node->level);
+		blocksize = root->nodesize;
 		generation = btrfs_node_ptr_generation(upper->eb, slot);
-		eb = read_tree_block(root, bytenr, blocksize, generation);
+		eb = read_tree_block(root, bytenr, generation);
 		if (!eb || !extent_buffer_uptodate(eb)) {
 			free_extent_buffer(eb);
 			err = -EIO;
@@ -2789,7 +2817,7 @@ static void __mark_block_processed(struct reloc_control *rc,
 	u32 blocksize;
 	if (node->level == 0 ||
 	    in_block_group(node->bytenr, rc->block_group)) {
-		blocksize = btrfs_level_size(rc->extent_root, node->level);
+		blocksize = rc->extent_root->nodesize;
 		mark_block_processed(rc, node->bytenr, blocksize);
 	}
 	node->processed = 1;
@@ -2843,7 +2871,7 @@ static int get_tree_block_key(struct reloc_control *rc,
 
 	BUG_ON(block->key_ready);
 	eb = read_tree_block(rc->extent_root, block->bytenr,
-			     block->key.objectid, block->key.offset);
+			     block->key.offset);
 	if (!eb || !extent_buffer_uptodate(eb)) {
 		free_extent_buffer(eb);
 		return -EIO;
@@ -2858,20 +2886,6 @@ static int get_tree_block_key(struct reloc_control *rc,
 	return 0;
 }
 
-static int reada_tree_block(struct reloc_control *rc,
-			    struct tree_block *block)
-{
-	BUG_ON(block->key_ready);
-	if (block->key.type == BTRFS_METADATA_ITEM_KEY)
-		readahead_tree_block(rc->extent_root, block->bytenr,
-				     block->key.objectid,
-				     rc->extent_root->leafsize);
-	else
-		readahead_tree_block(rc->extent_root, block->bytenr,
-				     block->key.objectid, block->key.offset);
-	return 0;
-}
-
 /*
  * helper function to relocate a tree block
  */
@@ -2951,7 +2965,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
 	while (rb_node) {
 		block = rb_entry(rb_node, struct tree_block, rb_node);
 		if (!block->key_ready)
-			reada_tree_block(rc, block);
+			readahead_tree_block(rc->extent_root, block->bytenr,
+					     block->key.objectid);
 		rb_node = rb_next(rb_node);
 	}
 
@@ -3313,7 +3328,7 @@ static int add_tree_block(struct reloc_control *rc,
 		return -ENOMEM;
 
 	block->bytenr = extent_key->objectid;
-	block->key.objectid = rc->extent_root->leafsize;
+	block->key.objectid = rc->extent_root->nodesize;
 	block->key.offset = generation;
 	block->level = level;
 	block->key_ready = 0;
@@ -3640,7 +3655,7 @@ int add_data_references(struct reloc_control *rc,
 	struct btrfs_extent_inline_ref *iref;
 	unsigned long ptr;
 	unsigned long end;
-	u32 blocksize = btrfs_level_size(rc->extent_root, 0);
+	u32 blocksize = rc->extent_root->nodesize;
 	int ret = 0;
 	int err = 0;
 
@@ -3783,7 +3798,7 @@ next:
 		}
 
 		if (key.type == BTRFS_METADATA_ITEM_KEY &&
-		    key.objectid + rc->extent_root->leafsize <=
+		    key.objectid + rc->extent_root->nodesize <=
 		    rc->search_start) {
 			path->slots[0]++;
 			goto next;
@@ -3801,7 +3816,7 @@ next:
 			rc->search_start = key.objectid + key.offset;
 		else
 			rc->search_start = key.objectid +
-				rc->extent_root->leafsize;
+				rc->extent_root->nodesize;
 		memcpy(extent_key, &key, sizeof(key));
 		return 0;
 	}
@@ -4096,7 +4111,6 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
 	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
 					  BTRFS_INODE_PREALLOC);
 	btrfs_mark_buffer_dirty(leaf);
-	btrfs_release_path(path);
 out:
 	btrfs_free_path(path);
 	return ret;
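For context, the recurring change in the hunks above replaces unconditional BUG_ON() crashes with debug-only ASSERT() checks plus a recoverable error return. The stand-alone C sketch below illustrates that pattern in user space; it is only an illustration of the idea, not kernel code, and ASSERT(), check_node_old() and check_node_new() are hypothetical names invented for the example.

/*
 * Illustrative user-space sketch of the error-handling pattern applied in
 * build_backref_tree() above. ASSERT() and the check_*() helpers are
 * hypothetical stand-ins, not btrfs code.
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>

#ifdef DEBUG
#define ASSERT(expr)	assert(expr)	/* loud in developer builds */
#else
#define ASSERT(expr)	((void)0)	/* compiled out in production */
#endif

/* Old style: a violated invariant takes the whole process down. */
static int check_node_old(int checked)
{
	assert(checked);		/* stand-in for BUG_ON(!checked) */
	return 0;
}

/* New style: still blows up for developers, degrades to -EINVAL otherwise. */
static int check_node_new(int checked)
{
	if (!checked) {
		ASSERT(0);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	/* Valid invariant: both styles return 0. */
	printf("old: %d, new: %d\n", check_node_old(1), check_node_new(1));

	/* Broken invariant: aborts in a -DDEBUG build, otherwise returns -EINVAL. */
	printf("broken: %d\n", check_node_new(0));
	return 0;
}

Built with -DDEBUG the broken-invariant case still aborts immediately, preserving the developer-facing crash; without it the caller receives an error it can unwind from, which is what the new err = -EINVAL / goto out paths in the @@ -1062 hunk rely on.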