From d1310b2e0cd98eb1348553e69b73827b436dca7b Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Thu, 24 Jan 2008 16:13:08 -0500
Subject: Btrfs: Split the extent_map code into two parts

There is now extent_map for mapping offsets in the file to disk and
extent_io for state tracking, IO submission and extent_buffers.

The new extent_map code shifts from [start,end] pairs to [start,len], and
pushes the locking out into the caller.  This allows a few performance
optimizations and is easier to use.

A number of extent_map usage bugs were fixed, mostly with failing
to remove extent_map entries when changing the file.

Signed-off-by: Chris Mason
---
 fs/btrfs/extent_io.c | 3089 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 3089 insertions(+)
 create mode 100644 fs/btrfs/extent_io.c

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
new file mode 100644
index 00000000000..15cc158a049
--- /dev/null
+++ b/fs/btrfs/extent_io.c
@@ -0,0 +1,3089 @@
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/bio.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/page-flags.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/swap.h>
+#include <linux/version.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include "extent_io.h"
+#include "extent_map.h"
+
+/* temporary define until extent_map moves out of btrfs */
+struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
+				       unsigned long extra_flags,
+				       void (*ctor)(void *, struct kmem_cache *,
+						    unsigned long));
+
+static struct kmem_cache *extent_state_cache;
+static struct kmem_cache *extent_buffer_cache;
+
+static LIST_HEAD(buffers);
+static LIST_HEAD(states);
+
+static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
+#define BUFFER_LRU_MAX 64
+
+struct tree_entry {
+	u64 start;
+	u64 end;
+	int in_tree;
+	struct rb_node rb_node;
+};
+
+struct extent_page_data {
+	struct bio *bio;
+	struct extent_io_tree *tree;
+	get_extent_t *get_extent;
+};
+
+int __init extent_io_init(void)
+{
+	extent_state_cache = btrfs_cache_create("extent_state",
+					    sizeof(struct extent_state), 0,
+					    NULL);
+	if (!extent_state_cache)
+		return -ENOMEM;
+
+	extent_buffer_cache = btrfs_cache_create("extent_buffers",
+					    sizeof(struct extent_buffer), 0,
+					    NULL);
+	if (!extent_buffer_cache)
+		goto free_state_cache;
+	return 0;
+
+free_state_cache:
+	kmem_cache_destroy(extent_state_cache);
+	return -ENOMEM;
+}
+
+void extent_io_exit(void)
+{
+	struct extent_state *state;
+
+	while (!list_empty(&states)) {
+		state = list_entry(states.next, struct extent_state, list);
+		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
+		list_del(&state->list);
+		kmem_cache_free(extent_state_cache, state);
+
+	}
+
+	if (extent_state_cache)
+		kmem_cache_destroy(extent_state_cache);
+	if (extent_buffer_cache)
+		kmem_cache_destroy(extent_buffer_cache);
+}
+
+void extent_io_tree_init(struct extent_io_tree *tree,
+			  struct address_space *mapping, gfp_t mask)
+{
+	tree->state.rb_node = NULL;
+	tree->ops = NULL;
+	tree->dirty_bytes = 0;
+	rwlock_init(&tree->lock);
+	spin_lock_init(&tree->lru_lock);
+	tree->mapping = mapping;
+	INIT_LIST_HEAD(&tree->buffer_lru);
+	tree->lru_size = 0;
+}
+EXPORT_SYMBOL(extent_io_tree_init);
+
+void extent_io_tree_empty_lru(struct extent_io_tree *tree)
+{
+	struct extent_buffer *eb;
+	while(!list_empty(&tree->buffer_lru)) {
+		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
+				lru);
+		list_del_init(&eb->lru);
+		free_extent_buffer(eb);
+	}
+}
+EXPORT_SYMBOL(extent_io_tree_empty_lru);
+
+struct extent_state
*alloc_extent_state(gfp_t mask) +{ + struct extent_state *state; + unsigned long flags; + + state = kmem_cache_alloc(extent_state_cache, mask); + if (!state || IS_ERR(state)) + return state; + state->state = 0; + state->in_tree = 0; + state->private = 0; + + spin_lock_irqsave(&state_lock, flags); + list_add(&state->list, &states); + spin_unlock_irqrestore(&state_lock, flags); + + atomic_set(&state->refs, 1); + init_waitqueue_head(&state->wq); + return state; +} +EXPORT_SYMBOL(alloc_extent_state); + +void free_extent_state(struct extent_state *state) +{ + unsigned long flags; + if (!state) + return; + if (atomic_dec_and_test(&state->refs)) { + WARN_ON(state->in_tree); + spin_lock_irqsave(&state_lock, flags); + list_del(&state->list); + spin_unlock_irqrestore(&state_lock, flags); + kmem_cache_free(extent_state_cache, state); + } +} +EXPORT_SYMBOL(free_extent_state); + +static struct rb_node *tree_insert(struct rb_root *root, u64 offset, + struct rb_node *node) +{ + struct rb_node ** p = &root->rb_node; + struct rb_node * parent = NULL; + struct tree_entry *entry; + + while(*p) { + parent = *p; + entry = rb_entry(parent, struct tree_entry, rb_node); + + if (offset < entry->start) + p = &(*p)->rb_left; + else if (offset > entry->end) + p = &(*p)->rb_right; + else + return parent; + } + + entry = rb_entry(node, struct tree_entry, rb_node); + entry->in_tree = 1; + rb_link_node(node, parent, p); + rb_insert_color(node, root); + return NULL; +} + +static struct rb_node *__tree_search(struct rb_root *root, u64 offset, + struct rb_node **prev_ret, + struct rb_node **next_ret) +{ + struct rb_node * n = root->rb_node; + struct rb_node *prev = NULL; + struct rb_node *orig_prev = NULL; + struct tree_entry *entry; + struct tree_entry *prev_entry = NULL; + + while(n) { + entry = rb_entry(n, struct tree_entry, rb_node); + prev = n; + prev_entry = entry; + + if (offset < entry->start) + n = n->rb_left; + else if (offset > entry->end) + n = n->rb_right; + else + return n; + } + + if (prev_ret) { + orig_prev = prev; + while(prev && offset > prev_entry->end) { + prev = rb_next(prev); + prev_entry = rb_entry(prev, struct tree_entry, rb_node); + } + *prev_ret = prev; + prev = orig_prev; + } + + if (next_ret) { + prev_entry = rb_entry(prev, struct tree_entry, rb_node); + while(prev && offset < prev_entry->start) { + prev = rb_prev(prev); + prev_entry = rb_entry(prev, struct tree_entry, rb_node); + } + *next_ret = prev; + } + return NULL; +} + +static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) +{ + struct rb_node *prev; + struct rb_node *ret; + ret = __tree_search(root, offset, &prev, NULL); + if (!ret) + return prev; + return ret; +} + +/* + * utility function to look for merge candidates inside a given range. + * Any extents with matching state are merged together into a single + * extent in the tree. Extents with EXTENT_IO in their state field + * are not merged because the end_io handlers need to be able to do + * operations on them without sleeping (or doing allocations/splits). + * + * This should be called with the tree lock held. 
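
For illustration, here is a rough userspace model of the merge rule described above: two states combine only when their inclusive [start, end] ranges touch exactly and carry identical state bits, and neither has IO bits set. The IOBITS constant is a stand-in for the real EXTENT_IOBITS from extent_io.h.

```c
#include <stdio.h>
#include <stdint.h>

#define IOBITS (1 << 6)	/* stand-in for EXTENT_IOBITS */

struct range { uint64_t start, end; unsigned long state; };

static int can_merge(const struct range *prev, const struct range *next)
{
	if ((prev->state | next->state) & IOBITS)
		return 0;	/* end_io handlers must not see merges */
	return prev->end == next->start - 1 && prev->state == next->state;
}

int main(void)
{
	struct range a = { 0, 4095, 0x1 };
	struct range b = { 4096, 8191, 0x1 };
	struct range c = { 8193, 12287, 0x1 };	/* one-byte gap after b */

	printf("a+b: %d\n", can_merge(&a, &b));	/* 1: adjacent, same bits */
	printf("b+c: %d\n", can_merge(&b, &c));	/* 0: not adjacent */
	return 0;
}
```
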
+ */ +static int merge_state(struct extent_io_tree *tree, + struct extent_state *state) +{ + struct extent_state *other; + struct rb_node *other_node; + + if (state->state & EXTENT_IOBITS) + return 0; + + other_node = rb_prev(&state->rb_node); + if (other_node) { + other = rb_entry(other_node, struct extent_state, rb_node); + if (other->end == state->start - 1 && + other->state == state->state) { + state->start = other->start; + other->in_tree = 0; + rb_erase(&other->rb_node, &tree->state); + free_extent_state(other); + } + } + other_node = rb_next(&state->rb_node); + if (other_node) { + other = rb_entry(other_node, struct extent_state, rb_node); + if (other->start == state->end + 1 && + other->state == state->state) { + other->start = state->start; + state->in_tree = 0; + rb_erase(&state->rb_node, &tree->state); + free_extent_state(state); + } + } + return 0; +} + +/* + * insert an extent_state struct into the tree. 'bits' are set on the + * struct before it is inserted. + * + * This may return -EEXIST if the extent is already there, in which case the + * state struct is freed. + * + * The tree lock is not taken internally. This is a utility function and + * probably isn't what you want to call (see set/clear_extent_bit). + */ +static int insert_state(struct extent_io_tree *tree, + struct extent_state *state, u64 start, u64 end, + int bits) +{ + struct rb_node *node; + + if (end < start) { + printk("end < start %Lu %Lu\n", end, start); + WARN_ON(1); + } + if (bits & EXTENT_DIRTY) + tree->dirty_bytes += end - start + 1; + state->state |= bits; + state->start = start; + state->end = end; + node = tree_insert(&tree->state, end, &state->rb_node); + if (node) { + struct extent_state *found; + found = rb_entry(node, struct extent_state, rb_node); + printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end); + free_extent_state(state); + return -EEXIST; + } + merge_state(tree, state); + return 0; +} + +/* + * split a given extent state struct in two, inserting the preallocated + * struct 'prealloc' as the newly created second half. 'split' indicates an + * offset inside 'orig' where it should be split. + * + * Before calling, + * the tree has 'orig' at [orig->start, orig->end]. After calling, there + * are two extent state structs in the tree: + * prealloc: [orig->start, split - 1] + * orig: [ split, orig->end ] + * + * The tree locks are not taken by this function. They need to be held + * by the caller. + */ +static int split_state(struct extent_io_tree *tree, struct extent_state *orig, + struct extent_state *prealloc, u64 split) +{ + struct rb_node *node; + prealloc->start = orig->start; + prealloc->end = split - 1; + prealloc->state = orig->state; + orig->start = split; + + node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node); + if (node) { + struct extent_state *found; + found = rb_entry(node, struct extent_state, rb_node); + printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end); + free_extent_state(prealloc); + return -EEXIST; + } + return 0; +} + +/* + * utility function to clear some bits in an extent state struct. + * it will optionally wake up any one waiting on this state (wake == 1), or + * forcibly remove the state from the tree (delete == 1). 
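
The interval arithmetic in split_state() above is easy to check in isolation. A minimal runnable sketch, validating the split point the way the comment describes:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

/* splitting [start,end] at 'at' leaves [start,at-1] and [at,end] */
static void split(struct range *orig, struct range *prealloc, uint64_t at)
{
	assert(at > orig->start && at <= orig->end);
	prealloc->start = orig->start;
	prealloc->end = at - 1;
	orig->start = at;
}

int main(void)
{
	struct range orig = { 0, 8191 }, pre;

	split(&orig, &pre, 4096);
	printf("prealloc [%llu,%llu] orig [%llu,%llu]\n",
	       (unsigned long long)pre.start, (unsigned long long)pre.end,
	       (unsigned long long)orig.start, (unsigned long long)orig.end);
	/* prints: prealloc [0,4095] orig [4096,8191] */
	return 0;
}
```
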
+ * + * If no bits are set on the state struct after clearing things, the + * struct is freed and removed from the tree + */ +static int clear_state_bit(struct extent_io_tree *tree, + struct extent_state *state, int bits, int wake, + int delete) +{ + int ret = state->state & bits; + + if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { + u64 range = state->end - state->start + 1; + WARN_ON(range > tree->dirty_bytes); + tree->dirty_bytes -= range; + } + state->state &= ~bits; + if (wake) + wake_up(&state->wq); + if (delete || state->state == 0) { + if (state->in_tree) { + rb_erase(&state->rb_node, &tree->state); + state->in_tree = 0; + free_extent_state(state); + } else { + WARN_ON(1); + } + } else { + merge_state(tree, state); + } + return ret; +} + +/* + * clear some bits on a range in the tree. This may require splitting + * or inserting elements in the tree, so the gfp mask is used to + * indicate which allocations or sleeping are allowed. + * + * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove + * the given range from the tree regardless of state (ie for truncate). + * + * the range [start, end] is inclusive. + * + * This takes the tree lock, and returns < 0 on error, > 0 if any of the + * bits were already set, or zero if none of the bits were already set. + */ +int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int wake, int delete, gfp_t mask) +{ + struct extent_state *state; + struct extent_state *prealloc = NULL; + struct rb_node *node; + unsigned long flags; + int err; + int set = 0; + +again: + if (!prealloc && (mask & __GFP_WAIT)) { + prealloc = alloc_extent_state(mask); + if (!prealloc) + return -ENOMEM; + } + + write_lock_irqsave(&tree->lock, flags); + /* + * this search will find the extents that end after + * our range starts + */ + node = tree_search(&tree->state, start); + if (!node) + goto out; + state = rb_entry(node, struct extent_state, rb_node); + if (state->start > end) + goto out; + WARN_ON(state->end < start); + + /* + * | ---- desired range ---- | + * | state | or + * | ------------- state -------------- | + * + * We need to split the extent we found, and may flip + * bits on second half. + * + * If the extent we found extends past our range, we + * just split and search again. It'll get split again + * the next time though. + * + * If the extent we found is inside our range, we clear + * the desired bit on it. 
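
The diagrams above enumerate how a found state can overlap the desired range. A simplified userspace classifier of the same cases follows; note the real code has no explicit "hangs out both sides" branch, it splits once and searches again.

```c
#include <stdio.h>
#include <stdint.h>

enum overlap { COVERED, HANGS_LEFT, HANGS_RIGHT, HANGS_BOTH };

static enum overlap classify(uint64_t s_start, uint64_t s_end,
			     uint64_t start, uint64_t end)
{
	if (s_start < start && s_end > end)
		return HANGS_BOTH;	/* split, then split again later */
	if (s_start < start)
		return HANGS_LEFT;	/* split at 'start' */
	if (s_end > end)
		return HANGS_RIGHT;	/* split at 'end + 1' */
	return COVERED;			/* clear the bits directly */
}

int main(void)
{
	/* desired range is [100,199] in every case */
	printf("%d\n", classify(100, 150, 100, 199));	/* 0 COVERED */
	printf("%d\n", classify(50, 150, 100, 199));	/* 1 HANGS_LEFT */
	printf("%d\n", classify(150, 250, 100, 199));	/* 2 HANGS_RIGHT */
	printf("%d\n", classify(50, 250, 100, 199));	/* 3 HANGS_BOTH */
	return 0;
}
```
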
+ */ + + if (state->start < start) { + err = split_state(tree, state, prealloc, start); + BUG_ON(err == -EEXIST); + prealloc = NULL; + if (err) + goto out; + if (state->end <= end) { + start = state->end + 1; + set |= clear_state_bit(tree, state, bits, + wake, delete); + } else { + start = state->start; + } + goto search_again; + } + /* + * | ---- desired range ---- | + * | state | + * We need to split the extent, and clear the bit + * on the first half + */ + if (state->start <= end && state->end > end) { + err = split_state(tree, state, prealloc, end + 1); + BUG_ON(err == -EEXIST); + + if (wake) + wake_up(&state->wq); + set |= clear_state_bit(tree, prealloc, bits, + wake, delete); + prealloc = NULL; + goto out; + } + + start = state->end + 1; + set |= clear_state_bit(tree, state, bits, wake, delete); + goto search_again; + +out: + write_unlock_irqrestore(&tree->lock, flags); + if (prealloc) + free_extent_state(prealloc); + + return set; + +search_again: + if (start > end) + goto out; + write_unlock_irqrestore(&tree->lock, flags); + if (mask & __GFP_WAIT) + cond_resched(); + goto again; +} +EXPORT_SYMBOL(clear_extent_bit); + +static int wait_on_state(struct extent_io_tree *tree, + struct extent_state *state) +{ + DEFINE_WAIT(wait); + prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); + read_unlock_irq(&tree->lock); + schedule(); + read_lock_irq(&tree->lock); + finish_wait(&state->wq, &wait); + return 0; +} + +/* + * waits for one or more bits to clear on a range in the state tree. + * The range [start, end] is inclusive. + * The tree lock is taken by this function + */ +int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) +{ + struct extent_state *state; + struct rb_node *node; + + read_lock_irq(&tree->lock); +again: + while (1) { + /* + * this search will find all the extents that end after + * our range starts + */ + node = tree_search(&tree->state, start); + if (!node) + break; + + state = rb_entry(node, struct extent_state, rb_node); + + if (state->start > end) + goto out; + + if (state->state & bits) { + start = state->start; + atomic_inc(&state->refs); + wait_on_state(tree, state); + free_extent_state(state); + goto again; + } + start = state->end + 1; + + if (start > end) + break; + + if (need_resched()) { + read_unlock_irq(&tree->lock); + cond_resched(); + read_lock_irq(&tree->lock); + } + } +out: + read_unlock_irq(&tree->lock); + return 0; +} +EXPORT_SYMBOL(wait_extent_bit); + +static void set_state_bits(struct extent_io_tree *tree, + struct extent_state *state, + int bits) +{ + if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { + u64 range = state->end - state->start + 1; + tree->dirty_bytes += range; + } + state->state |= bits; +} + +/* + * set some bits on a range in the tree. This may require allocations + * or sleeping, so the gfp mask is used to indicate what is allowed. + * + * If 'exclusive' == 1, this will fail with -EEXIST if some part of the + * range already has the desired bits set. The start of the existing + * range is returned in failed_start in this case. + * + * [start, end] is inclusive + * This takes the tree lock. 
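
A hedged, non-compilable fragment showing the contract an exclusive caller relies on; the tree, range, and flags are assumed from the surrounding code, and lock_extent() later in this file wraps exactly this pattern in a retry loop:

```c
u64 failed_start;
int err;

err = set_extent_bit(tree, start, end, EXTENT_LOCKED,
		     1 /* exclusive */, &failed_start, GFP_NOFS);
if (err == -EEXIST) {
	/*
	 * part of the range already had the bit; failed_start says
	 * where, so the caller can wait there and try again
	 */
	wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
}
```
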
+ */ +int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, + int exclusive, u64 *failed_start, gfp_t mask) +{ + struct extent_state *state; + struct extent_state *prealloc = NULL; + struct rb_node *node; + unsigned long flags; + int err = 0; + int set; + u64 last_start; + u64 last_end; +again: + if (!prealloc && (mask & __GFP_WAIT)) { + prealloc = alloc_extent_state(mask); + if (!prealloc) + return -ENOMEM; + } + + write_lock_irqsave(&tree->lock, flags); + /* + * this search will find all the extents that end after + * our range starts. + */ + node = tree_search(&tree->state, start); + if (!node) { + err = insert_state(tree, prealloc, start, end, bits); + prealloc = NULL; + BUG_ON(err == -EEXIST); + goto out; + } + + state = rb_entry(node, struct extent_state, rb_node); + last_start = state->start; + last_end = state->end; + + /* + * | ---- desired range ---- | + * | state | + * + * Just lock what we found and keep going + */ + if (state->start == start && state->end <= end) { + set = state->state & bits; + if (set && exclusive) { + *failed_start = state->start; + err = -EEXIST; + goto out; + } + set_state_bits(tree, state, bits); + start = state->end + 1; + merge_state(tree, state); + goto search_again; + } + + /* + * | ---- desired range ---- | + * | state | + * or + * | ------------- state -------------- | + * + * We need to split the extent we found, and may flip bits on + * second half. + * + * If the extent we found extends past our + * range, we just split and search again. It'll get split + * again the next time though. + * + * If the extent we found is inside our range, we set the + * desired bit on it. + */ + if (state->start < start) { + set = state->state & bits; + if (exclusive && set) { + *failed_start = start; + err = -EEXIST; + goto out; + } + err = split_state(tree, state, prealloc, start); + BUG_ON(err == -EEXIST); + prealloc = NULL; + if (err) + goto out; + if (state->end <= end) { + set_state_bits(tree, state, bits); + start = state->end + 1; + merge_state(tree, state); + } else { + start = state->start; + } + goto search_again; + } + /* + * | ---- desired range ---- | + * | state | or | state | + * + * There's a hole, we need to insert something in it and + * ignore the extent we found. 
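
The hole case caps the inserted range so it cannot overlap the state found at last_start. A tiny runnable model of that bound:

```c
#include <stdio.h>
#include <stdint.h>

/* new state may extend at most to min(end, last_start - 1) */
static uint64_t hole_end(uint64_t end, uint64_t last_start)
{
	return end < last_start ? end : last_start - 1;
}

int main(void)
{
	/* desired [100,199], existing state starts at 150 */
	printf("%llu\n", (unsigned long long)hole_end(199, 150)); /* 149 */
	/* desired [100,199], existing state starts at 400 */
	printf("%llu\n", (unsigned long long)hole_end(199, 400)); /* 199 */
	return 0;
}
```
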
+ */ + if (state->start > start) { + u64 this_end; + if (end < last_start) + this_end = end; + else + this_end = last_start -1; + err = insert_state(tree, prealloc, start, this_end, + bits); + prealloc = NULL; + BUG_ON(err == -EEXIST); + if (err) + goto out; + start = this_end + 1; + goto search_again; + } + /* + * | ---- desired range ---- | + * | state | + * We need to split the extent, and set the bit + * on the first half + */ + if (state->start <= end && state->end > end) { + set = state->state & bits; + if (exclusive && set) { + *failed_start = start; + err = -EEXIST; + goto out; + } + err = split_state(tree, state, prealloc, end + 1); + BUG_ON(err == -EEXIST); + + set_state_bits(tree, prealloc, bits); + merge_state(tree, prealloc); + prealloc = NULL; + goto out; + } + + goto search_again; + +out: + write_unlock_irqrestore(&tree->lock, flags); + if (prealloc) + free_extent_state(prealloc); + + return err; + +search_again: + if (start > end) + goto out; + write_unlock_irqrestore(&tree->lock, flags); + if (mask & __GFP_WAIT) + cond_resched(); + goto again; +} +EXPORT_SYMBOL(set_extent_bit); + +/* wrappers around set/clear extent bit */ +int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL, + mask); +} +EXPORT_SYMBOL(set_extent_dirty); + +int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + int bits, gfp_t mask) +{ + return set_extent_bit(tree, start, end, bits, 0, NULL, + mask); +} +EXPORT_SYMBOL(set_extent_bits); + +int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, + int bits, gfp_t mask) +{ + return clear_extent_bit(tree, start, end, bits, 0, 0, mask); +} +EXPORT_SYMBOL(clear_extent_bits); + +int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, + EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL, + mask); +} +EXPORT_SYMBOL(set_extent_delalloc); + +int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, + EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask); +} +EXPORT_SYMBOL(clear_extent_dirty); + +int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL, + mask); +} +EXPORT_SYMBOL(set_extent_new); + +int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask); +} +EXPORT_SYMBOL(clear_extent_new); + +int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, + mask); +} +EXPORT_SYMBOL(set_extent_uptodate); + +int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask); +} +EXPORT_SYMBOL(clear_extent_uptodate); + +int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_WRITEBACK, + 0, NULL, mask); +} +EXPORT_SYMBOL(set_extent_writeback); + +int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask); +} +EXPORT_SYMBOL(clear_extent_writeback); + +int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) +{ + return 
wait_extent_bit(tree, start, end, EXTENT_WRITEBACK); +} +EXPORT_SYMBOL(wait_on_extent_writeback); + +/* + * locks a range in ascending order, waiting for any locked regions + * it hits on the way. [start,end] are inclusive, and this will sleep. + */ +int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) +{ + int err; + u64 failed_start; + while (1) { + err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1, + &failed_start, mask); + if (err == -EEXIST && (mask & __GFP_WAIT)) { + wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); + start = failed_start; + } else { + break; + } + WARN_ON(start > end); + } + return err; +} +EXPORT_SYMBOL(lock_extent); + +int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask); +} +EXPORT_SYMBOL(unlock_extent); + +/* + * helper function to set pages and extents in the tree dirty + */ +int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + struct page *page; + + while (index <= end_index) { + page = find_get_page(tree->mapping, index); + BUG_ON(!page); + __set_page_dirty_nobuffers(page); + page_cache_release(page); + index++; + } + set_extent_dirty(tree, start, end, GFP_NOFS); + return 0; +} +EXPORT_SYMBOL(set_range_dirty); + +/* + * helper function to set both pages and extents in the tree writeback + */ +int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + struct page *page; + + while (index <= end_index) { + page = find_get_page(tree->mapping, index); + BUG_ON(!page); + set_page_writeback(page); + page_cache_release(page); + index++; + } + set_extent_writeback(tree, start, end, GFP_NOFS); + return 0; +} +EXPORT_SYMBOL(set_range_writeback); + +int find_first_extent_bit(struct extent_io_tree *tree, u64 start, + u64 *start_ret, u64 *end_ret, int bits) +{ + struct rb_node *node; + struct extent_state *state; + int ret = 1; + + read_lock_irq(&tree->lock); + /* + * this search will find all the extents that end after + * our range starts. + */ + node = tree_search(&tree->state, start); + if (!node || IS_ERR(node)) { + goto out; + } + + while(1) { + state = rb_entry(node, struct extent_state, rb_node); + if (state->end >= start && (state->state & bits)) { + *start_ret = state->start; + *end_ret = state->end; + ret = 0; + break; + } + node = rb_next(node); + if (!node) + break; + } +out: + read_unlock_irq(&tree->lock); + return ret; +} +EXPORT_SYMBOL(find_first_extent_bit); + +u64 find_lock_delalloc_range(struct extent_io_tree *tree, + u64 *start, u64 *end, u64 max_bytes) +{ + struct rb_node *node; + struct extent_state *state; + u64 cur_start = *start; + u64 found = 0; + u64 total_bytes = 0; + + write_lock_irq(&tree->lock); + /* + * this search will find all the extents that end after + * our range starts. 
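
As a usage sketch (not part of the patch), a caller can walk every range carrying a given bit with find_first_extent_bit() from above, which returns 0 when it finds one; process_range() is a hypothetical helper invented for the example:

```c
u64 start = 0, found_start, found_end;

while (!find_first_extent_bit(tree, start, &found_start, &found_end,
			      EXTENT_DIRTY)) {
	process_range(found_start, found_end);	/* hypothetical helper */
	start = found_end + 1;			/* ranges are inclusive */
}
```
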
+ */ +search_again: + node = tree_search(&tree->state, cur_start); + if (!node || IS_ERR(node)) { + *end = (u64)-1; + goto out; + } + + while(1) { + state = rb_entry(node, struct extent_state, rb_node); + if (found && state->start != cur_start) { + goto out; + } + if (!(state->state & EXTENT_DELALLOC)) { + if (!found) + *end = state->end; + goto out; + } + if (!found) { + struct extent_state *prev_state; + struct rb_node *prev_node = node; + while(1) { + prev_node = rb_prev(prev_node); + if (!prev_node) + break; + prev_state = rb_entry(prev_node, + struct extent_state, + rb_node); + if (!(prev_state->state & EXTENT_DELALLOC)) + break; + state = prev_state; + node = prev_node; + } + } + if (state->state & EXTENT_LOCKED) { + DEFINE_WAIT(wait); + atomic_inc(&state->refs); + prepare_to_wait(&state->wq, &wait, + TASK_UNINTERRUPTIBLE); + write_unlock_irq(&tree->lock); + schedule(); + write_lock_irq(&tree->lock); + finish_wait(&state->wq, &wait); + free_extent_state(state); + goto search_again; + } + state->state |= EXTENT_LOCKED; + if (!found) + *start = state->start; + found++; + *end = state->end; + cur_start = state->end + 1; + node = rb_next(node); + if (!node) + break; + total_bytes += state->end - state->start + 1; + if (total_bytes >= max_bytes) + break; + } +out: + write_unlock_irq(&tree->lock); + return found; +} + +u64 count_range_bits(struct extent_io_tree *tree, + u64 *start, u64 search_end, u64 max_bytes, + unsigned long bits) +{ + struct rb_node *node; + struct extent_state *state; + u64 cur_start = *start; + u64 total_bytes = 0; + int found = 0; + + if (search_end <= cur_start) { + printk("search_end %Lu start %Lu\n", search_end, cur_start); + WARN_ON(1); + return 0; + } + + write_lock_irq(&tree->lock); + if (cur_start == 0 && bits == EXTENT_DIRTY) { + total_bytes = tree->dirty_bytes; + goto out; + } + /* + * this search will find all the extents that end after + * our range starts. + */ + node = tree_search(&tree->state, cur_start); + if (!node || IS_ERR(node)) { + goto out; + } + + while(1) { + state = rb_entry(node, struct extent_state, rb_node); + if (state->start > search_end) + break; + if (state->end >= cur_start && (state->state & bits)) { + total_bytes += min(search_end, state->end) + 1 - + max(cur_start, state->start); + if (total_bytes >= max_bytes) + break; + if (!found) { + *start = state->start; + found = 1; + } + } + node = rb_next(node); + if (!node) + break; + } +out: + write_unlock_irq(&tree->lock); + return total_bytes; +} +/* + * helper function to lock both pages and extents in the tree. + * pages must be locked first. + */ +int lock_range(struct extent_io_tree *tree, u64 start, u64 end) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + struct page *page; + int err; + + while (index <= end_index) { + page = grab_cache_page(tree->mapping, index); + if (!page) { + err = -ENOMEM; + goto failed; + } + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto failed; + } + index++; + } + lock_extent(tree, start, end, GFP_NOFS); + return 0; + +failed: + /* + * we failed above in getting the page at 'index', so we undo here + * up to but not including the page at 'index' + */ + end_index = index; + index = start >> PAGE_CACHE_SHIFT; + while (index < end_index) { + page = find_get_page(tree->mapping, index); + unlock_page(page); + page_cache_release(page); + index++; + } + return err; +} +EXPORT_SYMBOL(lock_range); + +/* + * helper function to unlock both pages and extents in the tree. 
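
The page walks in lock_range()/unlock_range() rely on inclusive page-index arithmetic. A runnable model, assuming 4K pages (PAGE_SHIFT_DEMO stands in for PAGE_CACHE_SHIFT):

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_DEMO 12	/* assume 4K pages for the demo */

int main(void)
{
	uint64_t start = 5000, end = 13000;
	unsigned long index = start >> PAGE_SHIFT_DEMO;
	unsigned long end_index = end >> PAGE_SHIFT_DEMO;

	while (index <= end_index) {
		printf("touch page %lu\n", index);	/* pages 1, 2, 3 */
		index++;
	}
	return 0;
}
```
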
+ */
+int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
+{
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	struct page *page;
+
+	while (index <= end_index) {
+		page = find_get_page(tree->mapping, index);
+		unlock_page(page);
+		page_cache_release(page);
+		index++;
+	}
+	unlock_extent(tree, start, end, GFP_NOFS);
+	return 0;
+}
+EXPORT_SYMBOL(unlock_range);
+
+int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 0;
+
+	write_lock_irq(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(&tree->state, start);
+	if (!node || IS_ERR(node)) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start != start) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state->private = private;
+out:
+	write_unlock_irq(&tree->lock);
+	return ret;
+}
+
+int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 0;
+
+	read_lock_irq(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(&tree->state, start);
+	if (!node || IS_ERR(node)) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start != start) {
+		ret = -ENOENT;
+		goto out;
+	}
+	*private = state->private;
+out:
+	read_unlock_irq(&tree->lock);
+	return ret;
+}
+
+/*
+ * searches a range in the state tree for a given mask.
+ * If 'filled' == 1, this returns 1 only if every extent in the tree
+ * has the bits set.  Otherwise, 1 is returned if any bit in the
+ * range is found set.
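
A runnable model of that filled/any distinction, using one state word per block in place of the rbtree:

```c
#include <stdio.h>

static int test_bits(const unsigned long *state, int n, unsigned long bits,
		     int filled)
{
	int i, any = 0;

	for (i = 0; i < n; i++) {
		if (state[i] & bits)
			any = 1;
		else if (filled)
			return 0;	/* a gap fails the 'every' test */
	}
	return filled ? 1 : any;
}

int main(void)
{
	unsigned long state[4] = { 0x1, 0x1, 0x0, 0x1 };

	printf("any: %d\n", test_bits(state, 4, 0x1, 0));	/* 1 */
	printf("filled: %d\n", test_bits(state, 4, 0x1, 1));	/* 0 */
	return 0;
}
```
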
+ */ +int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int filled) +{ + struct extent_state *state = NULL; + struct rb_node *node; + int bitset = 0; + unsigned long flags; + + read_lock_irqsave(&tree->lock, flags); + node = tree_search(&tree->state, start); + while (node && start <= end) { + state = rb_entry(node, struct extent_state, rb_node); + + if (filled && state->start > start) { + bitset = 0; + break; + } + + if (state->start > end) + break; + + if (state->state & bits) { + bitset = 1; + if (!filled) + break; + } else if (filled) { + bitset = 0; + break; + } + start = state->end + 1; + if (start > end) + break; + node = rb_next(node); + if (!node) { + if (filled) + bitset = 0; + break; + } + } + read_unlock_irqrestore(&tree->lock, flags); + return bitset; +} +EXPORT_SYMBOL(test_range_bit); + +/* + * helper function to set a given page up to date if all the + * extents in the tree for that page are up to date + */ +static int check_page_uptodate(struct extent_io_tree *tree, + struct page *page) +{ + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 end = start + PAGE_CACHE_SIZE - 1; + if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1)) + SetPageUptodate(page); + return 0; +} + +/* + * helper function to unlock a page if all the extents in the tree + * for that page are unlocked + */ +static int check_page_locked(struct extent_io_tree *tree, + struct page *page) +{ + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 end = start + PAGE_CACHE_SIZE - 1; + if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0)) + unlock_page(page); + return 0; +} + +/* + * helper function to end page writeback if all the extents + * in the tree for that page are done with writeback + */ +static int check_page_writeback(struct extent_io_tree *tree, + struct page *page) +{ + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 end = start + PAGE_CACHE_SIZE - 1; + if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0)) + end_page_writeback(page); + return 0; +} + +/* lots and lots of room for performance fixes in the end_bio funcs */ + +/* + * after a writepage IO is done, we need to: + * clear the uptodate bits on error + * clear the writeback bits in the extent tree for this IO + * end_page_writeback if the page has no more pending IO + * + * Scheduling is not allowed, so the extent state tree is expected + * to have one and only one object corresponding to this IO. 
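
The completion handlers below all walk the bio_vec array backwards with the same `--bvec >= bi_io_vec` idiom. A userspace model showing that each segment is visited exactly once; it deliberately mirrors the kernel idiom, including letting the cursor step just before the array at the end:

```c
#include <stdio.h>

struct vec { int page; unsigned int off, len; };

int main(void)
{
	struct vec io_vec[3] = { {0, 0, 4096}, {1, 0, 4096}, {2, 0, 2048} };
	struct vec *bvec = io_vec + 3 - 1;	/* bi_io_vec + bi_vcnt - 1 */

	do {
		struct vec *cur = bvec;	/* capture before the decrement */
		--bvec;			/* the real code prefetches here */
		printf("complete page %d len %u\n", cur->page, cur->len);
	} while (bvec >= io_vec);	/* prints pages 2, 1, 0 */
	return 0;
}
```
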
+ */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +static void end_bio_extent_writepage(struct bio *bio, int err) +#else +static int end_bio_extent_writepage(struct bio *bio, + unsigned int bytes_done, int err) +#endif +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct extent_io_tree *tree = bio->bi_private; + u64 start; + u64 end; + int whole_page; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + if (bio->bi_size) + return 1; +#endif + + do { + struct page *page = bvec->bv_page; + start = ((u64)page->index << PAGE_CACHE_SHIFT) + + bvec->bv_offset; + end = start + bvec->bv_len - 1; + + if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) + whole_page = 1; + else + whole_page = 0; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + + if (!uptodate) { + clear_extent_uptodate(tree, start, end, GFP_ATOMIC); + ClearPageUptodate(page); + SetPageError(page); + } + clear_extent_writeback(tree, start, end, GFP_ATOMIC); + + if (whole_page) + end_page_writeback(page); + else + check_page_writeback(tree, page); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, end); + } while (bvec >= bio->bi_io_vec); + + bio_put(bio); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + return 0; +#endif +} + +/* + * after a readpage IO is done, we need to: + * clear the uptodate bits on error + * set the uptodate bits if things worked + * set the page up to date if all extents in the tree are uptodate + * clear the lock bit in the extent tree + * unlock the page if there are no other extents locked for it + * + * Scheduling is not allowed, so the extent state tree is expected + * to have one and only one object corresponding to this IO. + */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +static void end_bio_extent_readpage(struct bio *bio, int err) +#else +static int end_bio_extent_readpage(struct bio *bio, + unsigned int bytes_done, int err) +#endif +{ + int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct extent_io_tree *tree = bio->bi_private; + u64 start; + u64 end; + int whole_page; + int ret; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + if (bio->bi_size) + return 1; +#endif + + do { + struct page *page = bvec->bv_page; + start = ((u64)page->index << PAGE_CACHE_SHIFT) + + bvec->bv_offset; + end = start + bvec->bv_len - 1; + + if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) + whole_page = 1; + else + whole_page = 0; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + + if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { + ret = tree->ops->readpage_end_io_hook(page, start, end); + if (ret) + uptodate = 0; + } + if (uptodate) { + set_extent_uptodate(tree, start, end, GFP_ATOMIC); + if (whole_page) + SetPageUptodate(page); + else + check_page_uptodate(tree, page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + + unlock_extent(tree, start, end, GFP_ATOMIC); + + if (whole_page) + unlock_page(page); + else + check_page_locked(tree, page); + } while (bvec >= bio->bi_io_vec); + + bio_put(bio); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + return 0; +#endif +} + +/* + * IO done from prepare_write is pretty simple, we just unlock + * the structs in the extent tree when done, and set the uptodate bits + * as appropriate. 
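
A hedged sketch, not from the patch, of the readpage_end_io_hook contract used by the read completion above: return 0 when the bytes in [start, end] check out, nonzero to make the handler treat the range as an IO error. my_verify_extent() is invented for the example; the real hook pointer lives in struct extent_io_ops in extent_io.h.

```c
static int my_readpage_end_io_hook(struct page *page, u64 start, u64 end)
{
	if (my_verify_extent(page, start, end))	/* hypothetical check */
		return -EIO;	/* nonzero: range is treated as an error */
	return 0;
}
```
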
+ */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +static void end_bio_extent_preparewrite(struct bio *bio, int err) +#else +static int end_bio_extent_preparewrite(struct bio *bio, + unsigned int bytes_done, int err) +#endif +{ + const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct extent_io_tree *tree = bio->bi_private; + u64 start; + u64 end; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + if (bio->bi_size) + return 1; +#endif + + do { + struct page *page = bvec->bv_page; + start = ((u64)page->index << PAGE_CACHE_SHIFT) + + bvec->bv_offset; + end = start + bvec->bv_len - 1; + + if (--bvec >= bio->bi_io_vec) + prefetchw(&bvec->bv_page->flags); + + if (uptodate) { + set_extent_uptodate(tree, start, end, GFP_ATOMIC); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + + unlock_extent(tree, start, end, GFP_ATOMIC); + + } while (bvec >= bio->bi_io_vec); + + bio_put(bio); +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + return 0; +#endif +} + +static struct bio * +extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, + gfp_t gfp_flags) +{ + struct bio *bio; + + bio = bio_alloc(gfp_flags, nr_vecs); + + if (bio == NULL && (current->flags & PF_MEMALLOC)) { + while (!bio && (nr_vecs /= 2)) + bio = bio_alloc(gfp_flags, nr_vecs); + } + + if (bio) { + bio->bi_bdev = bdev; + bio->bi_sector = first_sector; + } + return bio; +} + +static int submit_one_bio(int rw, struct bio *bio) +{ + u64 maxsector; + int ret = 0; + + bio_get(bio); + + maxsector = bio->bi_bdev->bd_inode->i_size >> 9; + if (maxsector < bio->bi_sector) { + printk("sector too large max %Lu got %llu\n", maxsector, + (unsigned long long)bio->bi_sector); + WARN_ON(1); + } + + submit_bio(rw, bio); + if (bio_flagged(bio, BIO_EOPNOTSUPP)) + ret = -EOPNOTSUPP; + bio_put(bio); + return ret; +} + +static int submit_extent_page(int rw, struct extent_io_tree *tree, + struct page *page, sector_t sector, + size_t size, unsigned long offset, + struct block_device *bdev, + struct bio **bio_ret, + unsigned long max_pages, + bio_end_io_t end_io_func) +{ + int ret = 0; + struct bio *bio; + int nr; + + if (bio_ret && *bio_ret) { + bio = *bio_ret; + if (bio->bi_sector + (bio->bi_size >> 9) != sector || + bio_add_page(bio, page, size, offset) < size) { + ret = submit_one_bio(rw, bio); + bio = NULL; + } else { + return 0; + } + } + nr = min_t(int, max_pages, bio_get_nr_vecs(bdev)); + bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + if (!bio) { + printk("failed to allocate bio nr %d\n", nr); + } + bio_add_page(bio, page, size, offset); + bio->bi_end_io = end_io_func; + bio->bi_private = tree; + if (bio_ret) { + *bio_ret = bio; + } else { + ret = submit_one_bio(rw, bio); + } + + return ret; +} + +void set_page_extent_mapped(struct page *page) +{ + if (!PagePrivate(page)) { + SetPagePrivate(page); + WARN_ON(!page->mapping->a_ops->invalidatepage); + set_page_private(page, EXTENT_PAGE_PRIVATE); + page_cache_get(page); + } +} + +void set_page_extent_head(struct page *page, unsigned long len) +{ + set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); +} + +/* + * basic readpage implementation. 
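
extent_bio_alloc() above degrades gracefully under memory pressure by halving the vector count until the allocation succeeds. A runnable model with a fake allocator rigged to fail above 32 vecs:

```c
#include <stdio.h>

static void *fake_alloc(int nr_vecs)
{
	return nr_vecs > 32 ? NULL : (void *)1;	/* rigged for the demo */
}

int main(void)
{
	int nr_vecs = 256;
	void *bio = fake_alloc(nr_vecs);

	while (!bio && (nr_vecs /= 2))	/* same loop as extent_bio_alloc */
		bio = fake_alloc(nr_vecs);

	printf("allocated with nr_vecs=%d\n", bio ? nr_vecs : 0); /* 32 */
	return 0;
}
```
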
Locked extent state structs are inserted + * into the tree that are removed when the IO is done (by the end_io + * handlers) + */ +static int __extent_read_full_page(struct extent_io_tree *tree, + struct page *page, + get_extent_t *get_extent, + struct bio **bio) +{ + struct inode *inode = page->mapping->host; + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 end; + u64 cur = start; + u64 extent_offset; + u64 last_byte = i_size_read(inode); + u64 block_start; + u64 cur_end; + sector_t sector; + struct extent_map *em; + struct block_device *bdev; + int ret; + int nr = 0; + size_t page_offset = 0; + size_t iosize; + size_t blocksize = inode->i_sb->s_blocksize; + + set_page_extent_mapped(page); + + end = page_end; + lock_extent(tree, start, end, GFP_NOFS); + + while (cur <= end) { + if (cur >= last_byte) { + char *userpage; + iosize = PAGE_CACHE_SIZE - page_offset; + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + page_offset, 0, iosize); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + set_extent_uptodate(tree, cur, cur + iosize - 1, + GFP_NOFS); + unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + break; + } + em = get_extent(inode, page, page_offset, cur, + end - cur + 1, 0); + if (IS_ERR(em) || !em) { + SetPageError(page); + unlock_extent(tree, cur, end, GFP_NOFS); + break; + } + + extent_offset = cur - em->start; + BUG_ON(extent_map_end(em) <= cur); + BUG_ON(end < cur); + + iosize = min(extent_map_end(em) - cur, end - cur + 1); + cur_end = min(extent_map_end(em) - 1, end); + iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); + sector = (em->block_start + extent_offset) >> 9; + bdev = em->bdev; + block_start = em->block_start; + free_extent_map(em); + em = NULL; + + /* we've found a hole, just zero and go on */ + if (block_start == EXTENT_MAP_HOLE) { + char *userpage; + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + page_offset, 0, iosize); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + + set_extent_uptodate(tree, cur, cur + iosize - 1, + GFP_NOFS); + unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + cur = cur + iosize; + page_offset += iosize; + continue; + } + /* the get_extent function already copied into the page */ + if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) { + unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + cur = cur + iosize; + page_offset += iosize; + continue; + } + + ret = 0; + if (tree->ops && tree->ops->readpage_io_hook) { + ret = tree->ops->readpage_io_hook(page, cur, + cur + iosize - 1); + } + if (!ret) { + unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1; + nr -= page->index; + ret = submit_extent_page(READ, tree, page, + sector, iosize, page_offset, + bdev, bio, nr, + end_bio_extent_readpage); + } + if (ret) + SetPageError(page); + cur = cur + iosize; + page_offset += iosize; + nr++; + } + if (!nr) { + if (!PageError(page)) + SetPageUptodate(page); + unlock_page(page); + } + return 0; +} + +int extent_read_full_page(struct extent_io_tree *tree, struct page *page, + get_extent_t *get_extent) +{ + struct bio *bio = NULL; + int ret; + + ret = __extent_read_full_page(tree, page, get_extent, &bio); + if (bio) + submit_one_bio(READ, bio); + return ret; +} +EXPORT_SYMBOL(extent_read_full_page); + +/* + * the writepage semantics are similar to regular writepage. extent + * records are inserted to lock ranges in the tree, and as dirty areas + * are found, they are marked writeback. 
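
Two computations in __extent_read_full_page() above are worth seeing with concrete numbers: the round-up of an IO size to the block size, and the byte-to-512-byte-sector conversion. A runnable sketch:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blocksize = 4096;
	uint64_t iosize = 5000;
	uint64_t block_start = 1 << 20, extent_offset = 8192;

	/* round iosize up to a whole number of blocks */
	iosize = (iosize + blocksize - 1) & ~(blocksize - 1);
	printf("iosize -> %llu\n", (unsigned long long)iosize);	/* 8192 */

	/* byte address to 512-byte sector */
	printf("sector %llu\n",
	       (unsigned long long)((block_start + extent_offset) >> 9));
	/* prints 2064 */
	return 0;
}
```
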
Then the lock bits are removed + * and the end_io handler clears the writeback ranges + */ +static int __extent_writepage(struct page *page, struct writeback_control *wbc, + void *data) +{ + struct inode *inode = page->mapping->host; + struct extent_page_data *epd = data; + struct extent_io_tree *tree = epd->tree; + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 delalloc_start; + u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 end; + u64 cur = start; + u64 extent_offset; + u64 last_byte = i_size_read(inode); + u64 block_start; + u64 iosize; + sector_t sector; + struct extent_map *em; + struct block_device *bdev; + int ret; + int nr = 0; + size_t page_offset = 0; + size_t blocksize; + loff_t i_size = i_size_read(inode); + unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; + u64 nr_delalloc; + u64 delalloc_end; + + WARN_ON(!PageLocked(page)); + if (page->index > end_index) { + clear_extent_dirty(tree, start, page_end, GFP_NOFS); + unlock_page(page); + return 0; + } + + if (page->index == end_index) { + char *userpage; + + size_t offset = i_size & (PAGE_CACHE_SIZE - 1); + + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + } + + set_page_extent_mapped(page); + + delalloc_start = start; + delalloc_end = 0; + while(delalloc_end < page_end) { + nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start, + &delalloc_end, + 128 * 1024 * 1024); + if (nr_delalloc == 0) { + delalloc_start = delalloc_end + 1; + continue; + } + tree->ops->fill_delalloc(inode, delalloc_start, + delalloc_end); + clear_extent_bit(tree, delalloc_start, + delalloc_end, + EXTENT_LOCKED | EXTENT_DELALLOC, + 1, 0, GFP_NOFS); + delalloc_start = delalloc_end + 1; + } + lock_extent(tree, start, page_end, GFP_NOFS); + + end = page_end; + if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { + printk("found delalloc bits after lock_extent\n"); + } + + if (last_byte <= start) { + clear_extent_dirty(tree, start, page_end, GFP_NOFS); + goto done; + } + + set_extent_uptodate(tree, start, page_end, GFP_NOFS); + blocksize = inode->i_sb->s_blocksize; + + while (cur <= end) { + if (cur >= last_byte) { + clear_extent_dirty(tree, cur, page_end, GFP_NOFS); + break; + } + em = epd->get_extent(inode, page, page_offset, cur, + end - cur + 1, 1); + if (IS_ERR(em) || !em) { + SetPageError(page); + break; + } + + extent_offset = cur - em->start; + BUG_ON(extent_map_end(em) <= cur); + BUG_ON(end < cur); + iosize = min(extent_map_end(em) - cur, end - cur + 1); + iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); + sector = (em->block_start + extent_offset) >> 9; + bdev = em->bdev; + block_start = em->block_start; + free_extent_map(em); + em = NULL; + + if (block_start == EXTENT_MAP_HOLE || + block_start == EXTENT_MAP_INLINE) { + clear_extent_dirty(tree, cur, + cur + iosize - 1, GFP_NOFS); + cur = cur + iosize; + page_offset += iosize; + continue; + } + + /* leave this out until we have a page_mkwrite call */ + if (0 && !test_range_bit(tree, cur, cur + iosize - 1, + EXTENT_DIRTY, 0)) { + cur = cur + iosize; + page_offset += iosize; + continue; + } + clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); + if (tree->ops && tree->ops->writepage_io_hook) { + ret = tree->ops->writepage_io_hook(page, cur, + cur + iosize - 1); + } else { + ret = 0; + } + if (ret) + SetPageError(page); + else { + unsigned long max_nr = end_index + 1; + set_range_writeback(tree, cur, cur + iosize - 1); + if 
(!PageWriteback(page)) { + printk("warning page %lu not writeback, " + "cur %llu end %llu\n", page->index, + (unsigned long long)cur, + (unsigned long long)end); + } + + ret = submit_extent_page(WRITE, tree, page, sector, + iosize, page_offset, bdev, + &epd->bio, max_nr, + end_bio_extent_writepage); + if (ret) + SetPageError(page); + } + cur = cur + iosize; + page_offset += iosize; + nr++; + } +done: + if (nr == 0) { + /* make sure the mapping tag for page dirty gets cleared */ + set_page_writeback(page); + end_page_writeback(page); + } + unlock_extent(tree, start, page_end, GFP_NOFS); + unlock_page(page); + return 0; +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) + +/* Taken directly from 2.6.23 for 2.6.18 back port */ +typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, + void *data); + +/** + * write_cache_pages - walk the list of dirty pages of the given address space + * and write all of them. + * @mapping: address space structure to write + * @wbc: subtract the number of written pages from *@wbc->nr_to_write + * @writepage: function called for each page + * @data: data passed to writepage function + * + * If a page is already under I/O, write_cache_pages() skips it, even + * if it's dirty. This is desirable behaviour for memory-cleaning writeback, + * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() + * and msync() need to guarantee that all the data which was dirty at the time + * the call was made get new I/O started against them. If wbc->sync_mode is + * WB_SYNC_ALL then we were called for data integrity and we must wait for + * existing IO to complete. + */ +static int write_cache_pages(struct address_space *mapping, + struct writeback_control *wbc, writepage_t writepage, + void *data) +{ + struct backing_dev_info *bdi = mapping->backing_dev_info; + int ret = 0; + int done = 0; + struct pagevec pvec; + int nr_pages; + pgoff_t index; + pgoff_t end; /* Inclusive */ + int scanned = 0; + int range_whole = 0; + + if (wbc->nonblocking && bdi_write_congested(bdi)) { + wbc->encountered_congestion = 1; + return 0; + } + + pagevec_init(&pvec, 0); + if (wbc->range_cyclic) { + index = mapping->writeback_index; /* Start from prev offset */ + end = -1; + } else { + index = wbc->range_start >> PAGE_CACHE_SHIFT; + end = wbc->range_end >> PAGE_CACHE_SHIFT; + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) + range_whole = 1; + scanned = 1; + } +retry: + while (!done && (index <= end) && + (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { + unsigned i; + + scanned = 1; + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + /* + * At this point we hold neither mapping->tree_lock nor + * lock on the page itself: the page may be truncated or + * invalidated (changing page->mapping to NULL), or even + * swizzled back from swapper_space to tmpfs file + * mapping + */ + lock_page(page); + + if (unlikely(page->mapping != mapping)) { + unlock_page(page); + continue; + } + + if (!wbc->range_cyclic && page->index > end) { + done = 1; + unlock_page(page); + continue; + } + + if (wbc->sync_mode != WB_SYNC_NONE) + wait_on_page_writeback(page); + + if (PageWriteback(page) || + !clear_page_dirty_for_io(page)) { + unlock_page(page); + continue; + } + + ret = (*writepage)(page, wbc, data); + + if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) { + unlock_page(page); + ret = 0; + } + if (ret || (--(wbc->nr_to_write) <= 0)) + done = 1; + if 
(wbc->nonblocking && bdi_write_congested(bdi)) { + wbc->encountered_congestion = 1; + done = 1; + } + } + pagevec_release(&pvec); + cond_resched(); + } + if (!scanned && !done) { + /* + * We hit the last page and there is more work to be done: wrap + * back to the start of the file + */ + scanned = 1; + index = 0; + goto retry; + } + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) + mapping->writeback_index = index; + return ret; +} +#endif + +int extent_write_full_page(struct extent_io_tree *tree, struct page *page, + get_extent_t *get_extent, + struct writeback_control *wbc) +{ + int ret; + struct address_space *mapping = page->mapping; + struct extent_page_data epd = { + .bio = NULL, + .tree = tree, + .get_extent = get_extent, + }; + struct writeback_control wbc_writepages = { + .bdi = wbc->bdi, + .sync_mode = WB_SYNC_NONE, + .older_than_this = NULL, + .nr_to_write = 64, + .range_start = page_offset(page) + PAGE_CACHE_SIZE, + .range_end = (loff_t)-1, + }; + + + ret = __extent_writepage(page, wbc, &epd); + + write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd); + if (epd.bio) { + submit_one_bio(WRITE, epd.bio); + } + return ret; +} +EXPORT_SYMBOL(extent_write_full_page); + + +int extent_writepages(struct extent_io_tree *tree, + struct address_space *mapping, + get_extent_t *get_extent, + struct writeback_control *wbc) +{ + int ret = 0; + struct extent_page_data epd = { + .bio = NULL, + .tree = tree, + .get_extent = get_extent, + }; + + ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd); + if (epd.bio) { + submit_one_bio(WRITE, epd.bio); + } + return ret; +} +EXPORT_SYMBOL(extent_writepages); + +int extent_readpages(struct extent_io_tree *tree, + struct address_space *mapping, + struct list_head *pages, unsigned nr_pages, + get_extent_t get_extent) +{ + struct bio *bio = NULL; + unsigned page_idx; + struct pagevec pvec; + + pagevec_init(&pvec, 0); + for (page_idx = 0; page_idx < nr_pages; page_idx++) { + struct page *page = list_entry(pages->prev, struct page, lru); + + prefetchw(&page->flags); + list_del(&page->lru); + /* + * what we want to do here is call add_to_page_cache_lru, + * but that isn't exported, so we reproduce it here + */ + if (!add_to_page_cache(page, mapping, + page->index, GFP_KERNEL)) { + + /* open coding of lru_cache_add, also not exported */ + page_cache_get(page); + if (!pagevec_add(&pvec, page)) + __pagevec_lru_add(&pvec); + __extent_read_full_page(tree, page, get_extent, &bio); + } + page_cache_release(page); + } + if (pagevec_count(&pvec)) + __pagevec_lru_add(&pvec); + BUG_ON(!list_empty(pages)); + if (bio) + submit_one_bio(READ, bio); + return 0; +} +EXPORT_SYMBOL(extent_readpages); + +/* + * basic invalidatepage code, this waits on any locked or writeback + * ranges corresponding to the page, and then deletes any extent state + * records from the tree + */ +int extent_invalidatepage(struct extent_io_tree *tree, + struct page *page, unsigned long offset) +{ + u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); + u64 end = start + PAGE_CACHE_SIZE - 1; + size_t blocksize = page->mapping->host->i_sb->s_blocksize; + + start += (offset + blocksize -1) & ~(blocksize - 1); + if (start > end) + return 0; + + lock_extent(tree, start, end, GFP_NOFS); + wait_on_extent_writeback(tree, start, end); + clear_extent_bit(tree, start, end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC, + 1, 1, GFP_NOFS); + return 0; +} +EXPORT_SYMBOL(extent_invalidatepage); + +/* + * simple commit_write call, set_range_dirty is used to mark both + 
* the pages and the extent records as dirty + */ +int extent_commit_write(struct extent_io_tree *tree, + struct inode *inode, struct page *page, + unsigned from, unsigned to) +{ + loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; + + set_page_extent_mapped(page); + set_page_dirty(page); + + if (pos > inode->i_size) { + i_size_write(inode, pos); + mark_inode_dirty(inode); + } + return 0; +} +EXPORT_SYMBOL(extent_commit_write); + +int extent_prepare_write(struct extent_io_tree *tree, + struct inode *inode, struct page *page, + unsigned from, unsigned to, get_extent_t *get_extent) +{ + u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + u64 block_start; + u64 orig_block_start; + u64 block_end; + u64 cur_end; + struct extent_map *em; + unsigned blocksize = 1 << inode->i_blkbits; + size_t page_offset = 0; + size_t block_off_start; + size_t block_off_end; + int err = 0; + int iocount = 0; + int ret = 0; + int isnew; + + set_page_extent_mapped(page); + + block_start = (page_start + from) & ~((u64)blocksize - 1); + block_end = (page_start + to - 1) | (blocksize - 1); + orig_block_start = block_start; + + lock_extent(tree, page_start, page_end, GFP_NOFS); + while(block_start <= block_end) { + em = get_extent(inode, page, page_offset, block_start, + block_end - block_start + 1, 1); + if (IS_ERR(em) || !em) { + goto err; + } + cur_end = min(block_end, extent_map_end(em) - 1); + block_off_start = block_start & (PAGE_CACHE_SIZE - 1); + block_off_end = block_off_start + blocksize; + isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); + + if (!PageUptodate(page) && isnew && + (block_off_end > to || block_off_start < from)) { + void *kaddr; + + kaddr = kmap_atomic(page, KM_USER0); + if (block_off_end > to) + memset(kaddr + to, 0, block_off_end - to); + if (block_off_start < from) + memset(kaddr + block_off_start, 0, + from - block_off_start); + flush_dcache_page(page); + kunmap_atomic(kaddr, KM_USER0); + } + if ((em->block_start != EXTENT_MAP_HOLE && + em->block_start != EXTENT_MAP_INLINE) && + !isnew && !PageUptodate(page) && + (block_off_end > to || block_off_start < from) && + !test_range_bit(tree, block_start, cur_end, + EXTENT_UPTODATE, 1)) { + u64 sector; + u64 extent_offset = block_start - em->start; + size_t iosize; + sector = (em->block_start + extent_offset) >> 9; + iosize = (cur_end - block_start + blocksize) & + ~((u64)blocksize - 1); + /* + * we've already got the extent locked, but we + * need to split the state such that our end_bio + * handler can clear the lock. + */ + set_extent_bit(tree, block_start, + block_start + iosize - 1, + EXTENT_LOCKED, 0, NULL, GFP_NOFS); + ret = submit_extent_page(READ, tree, page, + sector, iosize, page_offset, em->bdev, + NULL, 1, + end_bio_extent_preparewrite); + iocount++; + block_start = block_start + iosize; + } else { + set_extent_uptodate(tree, block_start, cur_end, + GFP_NOFS); + unlock_extent(tree, block_start, cur_end, GFP_NOFS); + block_start = cur_end + 1; + } + page_offset = block_start & (PAGE_CACHE_SIZE - 1); + free_extent_map(em); + } + if (iocount) { + wait_extent_bit(tree, orig_block_start, + block_end, EXTENT_LOCKED); + } + check_page_uptodate(tree, page); +err: + /* FIXME, zero out newly allocated blocks on error */ + return err; +} +EXPORT_SYMBOL(extent_prepare_write); + +/* + * a helper for releasepage. 
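
extent_prepare_write() above widens the dirtied [from, to) span to whole blocks: block_start rounds down with a mask, and block_end rounds up by OR-ing in the low bits. A runnable check with a 1K block size:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_start = 4096, blocksize = 1024;
	unsigned from = 100, to = 1500;

	uint64_t block_start = (page_start + from) & ~(blocksize - 1);
	uint64_t block_end = (page_start + to - 1) | (blocksize - 1);

	printf("[%llu, %llu]\n", (unsigned long long)block_start,
	       (unsigned long long)block_end);	/* [4096, 6143] */
	return 0;
}
```
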
As long as there are no locked extents + * in the range corresponding to the page, both state records and extent + * map records are removed + */ +int try_release_extent_mapping(struct extent_map_tree *map, + struct extent_io_tree *tree, struct page *page) +{ + struct extent_map *em; + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 end = start + PAGE_CACHE_SIZE - 1; + u64 orig_start = start; + int ret = 1; + + while (start <= end) { + spin_lock(&map->lock); + em = lookup_extent_mapping(map, start, end); + if (!em || IS_ERR(em)) { + spin_unlock(&map->lock); + break; + } + if (!test_range_bit(tree, em->start, extent_map_end(em) - 1, + EXTENT_LOCKED, 0)) { + remove_extent_mapping(map, em); + /* once for the rb tree */ + free_extent_map(em); + } + start = extent_map_end(em); + spin_unlock(&map->lock); + + /* once for us */ + free_extent_map(em); + } + if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0)) + ret = 0; + else + clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, + 1, 1, GFP_NOFS); + return ret; +} +EXPORT_SYMBOL(try_release_extent_mapping); + +sector_t extent_bmap(struct address_space *mapping, sector_t iblock, + get_extent_t *get_extent) +{ + struct inode *inode = mapping->host; + u64 start = iblock << inode->i_blkbits; + sector_t sector = 0; + struct extent_map *em; + + em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0); + if (!em || IS_ERR(em)) + return 0; + + if (em->block_start == EXTENT_MAP_INLINE || + em->block_start == EXTENT_MAP_HOLE) + goto out; + + sector = (em->block_start + start - em->start) >> inode->i_blkbits; +printk("bmap finds %Lu %Lu block %Lu\n", em->start, em->len, em->block_start); +out: + free_extent_map(em); + return sector; +} + +static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb) +{ + if (list_empty(&eb->lru)) { + extent_buffer_get(eb); + list_add(&eb->lru, &tree->buffer_lru); + tree->lru_size++; + if (tree->lru_size >= BUFFER_LRU_MAX) { + struct extent_buffer *rm; + rm = list_entry(tree->buffer_lru.prev, + struct extent_buffer, lru); + tree->lru_size--; + list_del_init(&rm->lru); + free_extent_buffer(rm); + } + } else + list_move(&eb->lru, &tree->buffer_lru); + return 0; +} +static struct extent_buffer *find_lru(struct extent_io_tree *tree, + u64 start, unsigned long len) +{ + struct list_head *lru = &tree->buffer_lru; + struct list_head *cur = lru->next; + struct extent_buffer *eb; + + if (list_empty(lru)) + return NULL; + + do { + eb = list_entry(cur, struct extent_buffer, lru); + if (eb->start == start && eb->len == len) { + extent_buffer_get(eb); + return eb; + } + cur = cur->next; + } while (cur != lru); + return NULL; +} + +static inline unsigned long num_extent_pages(u64 start, u64 len) +{ + return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - + (start >> PAGE_CACHE_SHIFT); +} + +static inline struct page *extent_buffer_page(struct extent_buffer *eb, + unsigned long i) +{ + struct page *p; + struct address_space *mapping; + + if (i == 0) + return eb->first_page; + i += eb->start >> PAGE_CACHE_SHIFT; + mapping = eb->first_page->mapping; + read_lock_irq(&mapping->tree_lock); + p = radix_tree_lookup(&mapping->page_tree, i); + read_unlock_irq(&mapping->tree_lock); + return p; +} + +static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, + u64 start, + unsigned long len, + gfp_t mask) +{ + struct extent_buffer *eb = NULL; + + spin_lock(&tree->lru_lock); + eb = find_lru(tree, start, len); + spin_unlock(&tree->lru_lock); + if (eb) { + return eb; + } + + eb = 
kmem_cache_zalloc(extent_buffer_cache, mask); + INIT_LIST_HEAD(&eb->lru); + eb->start = start; + eb->len = len; + atomic_set(&eb->refs, 1); + + return eb; +} + +static void __free_extent_buffer(struct extent_buffer *eb) +{ + kmem_cache_free(extent_buffer_cache, eb); +} + +struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, + u64 start, unsigned long len, + struct page *page0, + gfp_t mask) +{ + unsigned long num_pages = num_extent_pages(start, len); + unsigned long i; + unsigned long index = start >> PAGE_CACHE_SHIFT; + struct extent_buffer *eb; + struct page *p; + struct address_space *mapping = tree->mapping; + int uptodate = 1; + + eb = __alloc_extent_buffer(tree, start, len, mask); + if (!eb || IS_ERR(eb)) + return NULL; + + if (eb->flags & EXTENT_BUFFER_FILLED) + goto lru_add; + + if (page0) { + eb->first_page = page0; + i = 1; + index++; + page_cache_get(page0); + mark_page_accessed(page0); + set_page_extent_mapped(page0); + WARN_ON(!PageUptodate(page0)); + set_page_extent_head(page0, len); + } else { + i = 0; + } + for (; i < num_pages; i++, index++) { + p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); + if (!p) { + WARN_ON(1); + goto fail; + } + set_page_extent_mapped(p); + mark_page_accessed(p); + if (i == 0) { + eb->first_page = p; + set_page_extent_head(p, len); + } else { + set_page_private(p, EXTENT_PAGE_PRIVATE); + } + if (!PageUptodate(p)) + uptodate = 0; + unlock_page(p); + } + if (uptodate) + eb->flags |= EXTENT_UPTODATE; + eb->flags |= EXTENT_BUFFER_FILLED; + +lru_add: + spin_lock(&tree->lru_lock); + add_lru(tree, eb); + spin_unlock(&tree->lru_lock); + return eb; + +fail: + spin_lock(&tree->lru_lock); + list_del_init(&eb->lru); + spin_unlock(&tree->lru_lock); + if (!atomic_dec_and_test(&eb->refs)) + return NULL; + for (index = 1; index < i; index++) { + page_cache_release(extent_buffer_page(eb, index)); + } + if (i > 0) + page_cache_release(extent_buffer_page(eb, 0)); + __free_extent_buffer(eb); + return NULL; +} +EXPORT_SYMBOL(alloc_extent_buffer); + +struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, + u64 start, unsigned long len, + gfp_t mask) +{ + unsigned long num_pages = num_extent_pages(start, len); + unsigned long i; + unsigned long index = start >> PAGE_CACHE_SHIFT; + struct extent_buffer *eb; + struct page *p; + struct address_space *mapping = tree->mapping; + int uptodate = 1; + + eb = __alloc_extent_buffer(tree, start, len, mask); + if (!eb || IS_ERR(eb)) + return NULL; + + if (eb->flags & EXTENT_BUFFER_FILLED) + goto lru_add; + + for (i = 0; i < num_pages; i++, index++) { + p = find_lock_page(mapping, index); + if (!p) { + goto fail; + } + set_page_extent_mapped(p); + mark_page_accessed(p); + + if (i == 0) { + eb->first_page = p; + set_page_extent_head(p, len); + } else { + set_page_private(p, EXTENT_PAGE_PRIVATE); + } + + if (!PageUptodate(p)) + uptodate = 0; + unlock_page(p); + } + if (uptodate) + eb->flags |= EXTENT_UPTODATE; + eb->flags |= EXTENT_BUFFER_FILLED; + +lru_add: + spin_lock(&tree->lru_lock); + add_lru(tree, eb); + spin_unlock(&tree->lru_lock); + return eb; +fail: + spin_lock(&tree->lru_lock); + list_del_init(&eb->lru); + spin_unlock(&tree->lru_lock); + if (!atomic_dec_and_test(&eb->refs)) + return NULL; + for (index = 1; index < i; index++) { + page_cache_release(extent_buffer_page(eb, index)); + } + if (i > 0) + page_cache_release(extent_buffer_page(eb, 0)); + __free_extent_buffer(eb); + return NULL; +} +EXPORT_SYMBOL(find_extent_buffer); + +void free_extent_buffer(struct extent_buffer *eb) 
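+/* drop one reference; the final put releases every page and frees the buffer */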
+{ + unsigned long i; + unsigned long num_pages; + + if (!eb) + return; + + if (!atomic_dec_and_test(&eb->refs)) + return; + + WARN_ON(!list_empty(&eb->lru)); + num_pages = num_extent_pages(eb->start, eb->len); + + for (i = 1; i < num_pages; i++) { + page_cache_release(extent_buffer_page(eb, i)); + } + page_cache_release(extent_buffer_page(eb, 0)); + __free_extent_buffer(eb); +} +EXPORT_SYMBOL(free_extent_buffer); + +int clear_extent_buffer_dirty(struct extent_io_tree *tree, + struct extent_buffer *eb) +{ + int set; + unsigned long i; + unsigned long num_pages; + struct page *page; + + u64 start = eb->start; + u64 end = start + eb->len - 1; + + set = clear_extent_dirty(tree, start, end, GFP_NOFS); + num_pages = num_extent_pages(eb->start, eb->len); + + for (i = 0; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + lock_page(page); + if (i == 0) + set_page_extent_head(page, eb->len); + else + set_page_private(page, EXTENT_PAGE_PRIVATE); + + /* + * if we're on the last page or the first page and the + * block isn't aligned on a page boundary, do extra checks + * to make sure we don't clean page that is partially dirty + */ + if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || + ((i == num_pages - 1) && + ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) { + start = (u64)page->index << PAGE_CACHE_SHIFT; + end = start + PAGE_CACHE_SIZE - 1; + if (test_range_bit(tree, start, end, + EXTENT_DIRTY, 0)) { + unlock_page(page); + continue; + } + } + clear_page_dirty_for_io(page); + write_lock_irq(&page->mapping->tree_lock); + if (!PageDirty(page)) { + radix_tree_tag_clear(&page->mapping->page_tree, + page_index(page), + PAGECACHE_TAG_DIRTY); + } + write_unlock_irq(&page->mapping->tree_lock); + unlock_page(page); + } + return 0; +} +EXPORT_SYMBOL(clear_extent_buffer_dirty); + +int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, + struct extent_buffer *eb) +{ + return wait_on_extent_writeback(tree, eb->start, + eb->start + eb->len - 1); +} +EXPORT_SYMBOL(wait_on_extent_buffer_writeback); + +int set_extent_buffer_dirty(struct extent_io_tree *tree, + struct extent_buffer *eb) +{ + unsigned long i; + unsigned long num_pages; + + num_pages = num_extent_pages(eb->start, eb->len); + for (i = 0; i < num_pages; i++) { + struct page *page = extent_buffer_page(eb, i); + /* writepage may need to do something special for the + * first page, we have to make sure page->private is + * properly set. releasepage may drop page->private + * on us if the page isn't already dirty. 
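+ * So keep the first page locked until after it has been re-marked and + * dirtied, and re-mark any other page whose private field was dropped.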
+ */ + if (i == 0) { + lock_page(page); + set_page_extent_head(page, eb->len); + } else if (PagePrivate(page) && + page->private != EXTENT_PAGE_PRIVATE) { + lock_page(page); + set_page_extent_mapped(page); + unlock_page(page); + } + __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); + if (i == 0) + unlock_page(page); + } + return set_extent_dirty(tree, eb->start, + eb->start + eb->len - 1, GFP_NOFS); +} +EXPORT_SYMBOL(set_extent_buffer_dirty); + +int set_extent_buffer_uptodate(struct extent_io_tree *tree, + struct extent_buffer *eb) +{ + unsigned long i; + struct page *page; + unsigned long num_pages; + + num_pages = num_extent_pages(eb->start, eb->len); + + set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, + GFP_NOFS); + for (i = 0; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || + ((i == num_pages - 1) && + ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) { + check_page_uptodate(tree, page); + continue; + } + SetPageUptodate(page); + } + return 0; +} +EXPORT_SYMBOL(set_extent_buffer_uptodate); + +int extent_buffer_uptodate(struct extent_io_tree *tree, + struct extent_buffer *eb) +{ + if (eb->flags & EXTENT_UPTODATE) + return 1; + return test_range_bit(tree, eb->start, eb->start + eb->len - 1, + EXTENT_UPTODATE, 1); +} +EXPORT_SYMBOL(extent_buffer_uptodate); + +int read_extent_buffer_pages(struct extent_io_tree *tree, + struct extent_buffer *eb, + u64 start, + int wait) +{ + unsigned long i; + unsigned long start_i; + struct page *page; + int err; + int ret = 0; + unsigned long num_pages; + + if (eb->flags & EXTENT_UPTODATE) + return 0; + + if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1, + EXTENT_UPTODATE, 1)) { + return 0; + } + + if (start) { + WARN_ON(start < eb->start); + start_i = (start >> PAGE_CACHE_SHIFT) - + (eb->start >> PAGE_CACHE_SHIFT); + } else { + start_i = 0; + } + + num_pages = num_extent_pages(eb->start, eb->len); + for (i = start_i; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + if (PageUptodate(page)) { + continue; + } + if (!wait) { + if (TestSetPageLocked(page)) { + continue; + } + } else { + lock_page(page); + } + if (!PageUptodate(page)) { + err = page->mapping->a_ops->readpage(NULL, page); + if (err) { + ret = err; + } + } else { + unlock_page(page); + } + } + + if (ret || !wait) { + return ret; + } + + for (i = start_i; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + wait_on_page_locked(page); + if (!PageUptodate(page)) { + ret = -EIO; + } + } + if (!ret) + eb->flags |= EXTENT_UPTODATE; + return ret; +} +EXPORT_SYMBOL(read_extent_buffer_pages); + +void read_extent_buffer(struct extent_buffer *eb, void *dstv, + unsigned long start, + unsigned long len) +{ + size_t cur; + size_t offset; + struct page *page; + char *kaddr; + char *dst = (char *)dstv; + size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + unsigned long num_pages = num_extent_pages(eb->start, eb->len); + + WARN_ON(start > eb->len); + WARN_ON(start + len > eb->start + eb->len); + + offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); + + while(len > 0) { + page = extent_buffer_page(eb, i); + if (!PageUptodate(page)) { + printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len); + WARN_ON(1); + } + WARN_ON(!PageUptodate(page)); + + cur = min(len, (PAGE_CACHE_SIZE - offset)); + kaddr = kmap_atomic(page, KM_USER1); + memcpy(dst, kaddr + offset, 
cur); + kunmap_atomic(kaddr, KM_USER1); + + dst += cur; + len -= cur; + offset = 0; + i++; + } +} +EXPORT_SYMBOL(read_extent_buffer); + +int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, + unsigned long min_len, char **token, char **map, + unsigned long *map_start, + unsigned long *map_len, int km) +{ + size_t offset = start & (PAGE_CACHE_SIZE - 1); + char *kaddr; + struct page *p; + size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + unsigned long end_i = (start_offset + start + min_len - 1) >> + PAGE_CACHE_SHIFT; + + if (i != end_i) + return -EINVAL; + + if (i == 0) { + offset = start_offset; + *map_start = 0; + } else { + offset = 0; + *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; + } + if (start + min_len > eb->len) { +printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len); + WARN_ON(1); + } + + p = extent_buffer_page(eb, i); + WARN_ON(!PageUptodate(p)); + kaddr = kmap_atomic(p, km); + *token = kaddr; + *map = kaddr + offset; + *map_len = PAGE_CACHE_SIZE - offset; + return 0; +} +EXPORT_SYMBOL(map_private_extent_buffer); + +int map_extent_buffer(struct extent_buffer *eb, unsigned long start, + unsigned long min_len, + char **token, char **map, + unsigned long *map_start, + unsigned long *map_len, int km) +{ + int err; + int save = 0; + if (eb->map_token) { + unmap_extent_buffer(eb, eb->map_token, km); + eb->map_token = NULL; + save = 1; + } + err = map_private_extent_buffer(eb, start, min_len, token, map, + map_start, map_len, km); + if (!err && save) { + eb->map_token = *token; + eb->kaddr = *map; + eb->map_start = *map_start; + eb->map_len = *map_len; + } + return err; +} +EXPORT_SYMBOL(map_extent_buffer); + +void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km) +{ + kunmap_atomic(token, km); +} +EXPORT_SYMBOL(unmap_extent_buffer); + +int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, + unsigned long start, + unsigned long len) +{ + size_t cur; + size_t offset; + struct page *page; + char *kaddr; + char *ptr = (char *)ptrv; + size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + int ret = 0; + + WARN_ON(start > eb->len); + WARN_ON(start + len > eb->start + eb->len); + + offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); + + while(len > 0) { + page = extent_buffer_page(eb, i); + WARN_ON(!PageUptodate(page)); + + cur = min(len, (PAGE_CACHE_SIZE - offset)); + + kaddr = kmap_atomic(page, KM_USER0); + ret = memcmp(ptr, kaddr + offset, cur); + kunmap_atomic(kaddr, KM_USER0); + if (ret) + break; + + ptr += cur; + len -= cur; + offset = 0; + i++; + } + return ret; +} +EXPORT_SYMBOL(memcmp_extent_buffer); + +void write_extent_buffer(struct extent_buffer *eb, const void *srcv, + unsigned long start, unsigned long len) +{ + size_t cur; + size_t offset; + struct page *page; + char *kaddr; + char *src = (char *)srcv; + size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + + WARN_ON(start > eb->len); + WARN_ON(start + len > eb->start + eb->len); + + offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); + + while(len > 0) { + page = extent_buffer_page(eb, i); + WARN_ON(!PageUptodate(page)); + + cur = min(len, PAGE_CACHE_SIZE - offset); + kaddr = kmap_atomic(page, KM_USER1); + memcpy(kaddr + offset, src, cur); + 
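/* an atomic kmap covers only this page, so unmap before moving on */ +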
kunmap_atomic(kaddr, KM_USER1); + + src += cur; + len -= cur; + offset = 0; + i++; + } +} +EXPORT_SYMBOL(write_extent_buffer); + +void memset_extent_buffer(struct extent_buffer *eb, char c, + unsigned long start, unsigned long len) +{ + size_t cur; + size_t offset; + struct page *page; + char *kaddr; + size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + + WARN_ON(start > eb->len); + WARN_ON(start + len > eb->start + eb->len); + + offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1); + + while(len > 0) { + page = extent_buffer_page(eb, i); + WARN_ON(!PageUptodate(page)); + + cur = min(len, PAGE_CACHE_SIZE - offset); + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr + offset, c, cur); + kunmap_atomic(kaddr, KM_USER0); + + len -= cur; + offset = 0; + i++; + } +} +EXPORT_SYMBOL(memset_extent_buffer); + +void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, + unsigned long dst_offset, unsigned long src_offset, + unsigned long len) +{ + u64 dst_len = dst->len; + size_t cur; + size_t offset; + struct page *page; + char *kaddr; + size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; + + WARN_ON(src->len != dst_len); + + offset = (start_offset + dst_offset) & + ((unsigned long)PAGE_CACHE_SIZE - 1); + + while(len > 0) { + page = extent_buffer_page(dst, i); + WARN_ON(!PageUptodate(page)); + + cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); + + kaddr = kmap_atomic(page, KM_USER0); + read_extent_buffer(src, kaddr + offset, src_offset, cur); + kunmap_atomic(kaddr, KM_USER0); + + src_offset += cur; + len -= cur; + offset = 0; + i++; + } +} +EXPORT_SYMBOL(copy_extent_buffer); + +static void move_pages(struct page *dst_page, struct page *src_page, + unsigned long dst_off, unsigned long src_off, + unsigned long len) +{ + char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); + if (dst_page == src_page) { + memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len); + } else { + char *src_kaddr = kmap_atomic(src_page, KM_USER1); + char *p = dst_kaddr + dst_off + len; + char *s = src_kaddr + src_off + len; + + while (len--) + *--p = *--s; + + kunmap_atomic(src_kaddr, KM_USER1); + } + kunmap_atomic(dst_kaddr, KM_USER0); +} + +static void copy_pages(struct page *dst_page, struct page *src_page, + unsigned long dst_off, unsigned long src_off, + unsigned long len) +{ + char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); + char *src_kaddr; + + if (dst_page != src_page) + src_kaddr = kmap_atomic(src_page, KM_USER1); + else + src_kaddr = dst_kaddr; + + memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); + kunmap_atomic(dst_kaddr, KM_USER0); + if (dst_page != src_page) + kunmap_atomic(src_kaddr, KM_USER1); +} + +void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, + unsigned long src_offset, unsigned long len) +{ + size_t cur; + size_t dst_off_in_page; + size_t src_off_in_page; + size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long dst_i; + unsigned long src_i; + + if (src_offset + len > dst->len) { + printk("memmove bogus src_offset %lu move len %lu len %lu\n", + src_offset, len, dst->len); + BUG_ON(1); + } + if (dst_offset + len > dst->len) { + printk("memmove bogus dst_offset %lu move len %lu len %lu\n", + dst_offset, len, dst->len); + BUG_ON(1); + } + + while(len > 0) { + dst_off_in_page = (start_offset + dst_offset) & + ((unsigned long)PAGE_CACHE_SIZE - 1); + 
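/* the copy length below is clamped so each pass stays inside one + * page on both the source and the destination side */ +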
src_off_in_page = (start_offset + src_offset) & + ((unsigned long)PAGE_CACHE_SIZE - 1); + + dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; + src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT; + + cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - + src_off_in_page)); + cur = min_t(unsigned long, cur, + (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page)); + + copy_pages(extent_buffer_page(dst, dst_i), + extent_buffer_page(dst, src_i), + dst_off_in_page, src_off_in_page, cur); + + src_offset += cur; + dst_offset += cur; + len -= cur; + } +} +EXPORT_SYMBOL(memcpy_extent_buffer); + +void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, + unsigned long src_offset, unsigned long len) +{ + size_t cur; + size_t dst_off_in_page; + size_t src_off_in_page; + unsigned long dst_end = dst_offset + len - 1; + unsigned long src_end = src_offset + len - 1; + size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + unsigned long dst_i; + unsigned long src_i; + + if (src_offset + len > dst->len) { + printk("memmove bogus src_offset %lu move len %lu len %lu\n", + src_offset, len, dst->len); + BUG_ON(1); + } + if (dst_offset + len > dst->len) { + printk("memmove bogus dst_offset %lu move len %lu len %lu\n", + dst_offset, len, dst->len); + BUG_ON(1); + } + if (dst_offset < src_offset) { + memcpy_extent_buffer(dst, dst_offset, src_offset, len); + return; + } + while(len > 0) { + dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; + src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; + + dst_off_in_page = (start_offset + dst_end) & + ((unsigned long)PAGE_CACHE_SIZE - 1); + src_off_in_page = (start_offset + src_end) & + ((unsigned long)PAGE_CACHE_SIZE - 1); + + cur = min_t(unsigned long, len, src_off_in_page + 1); + cur = min(cur, dst_off_in_page + 1); + move_pages(extent_buffer_page(dst, dst_i), + extent_buffer_page(dst, src_i), + dst_off_in_page - cur + 1, + src_off_in_page - cur + 1, cur); + + dst_end -= cur; + src_end -= cur; + len -= cur; + } +} +EXPORT_SYMBOL(memmove_extent_buffer); -- cgit v1.2.3-70-g09d2 From 70dec8079d78691e476cc6c7cede40656078ad30 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 29 Jan 2008 09:59:12 -0500 Subject: Btrfs: extent_io and extent_state optimizations The end_bio routines are changed to take a pointer to the extent state struct, and the state tree is walked in order to set/clear appropriate bits as IO completes. This greatly reduces the number of rbtree searches done by the end_bio handlers, and reduces lock contention. The extent_io releasepage function is changed to avoid expensive searches for locked state. 
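In rough outline, each end_io handler now does the following walk under the tree lock. This is a simplified sketch only: irq flag handling, the per-page bookkeeping and the end_io hooks are left out, find_state() and prev_state() are stand-ins for the __tree_search() and rb_prev() calls in the patch, and bits is EXTENT_WRITEBACK for writes or EXTENT_LOCKED for reads:

	/* bio->bi_private carries the extent_state covering the last
	 * byte of the bio, so the common case needs no rbtree search */
	if (!state || state->end != end)
		state = find_state(tree, start, end);	/* rare slow path */
	while (state) {
		struct extent_state *clear = state;
		u64 cur = clear->start;

		/* step back before clear_state_bit() can free 'clear' */
		state = prev_state(state);
		clear_state_bit(tree, clear, bits, 1, 0);
		if (cur <= start)
			break;
	}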
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 +- fs/btrfs/extent_io.c | 358 +++++++++++++++++++++++++++++++++++++-------------- fs/btrfs/extent_io.h | 15 ++- fs/btrfs/inode.c | 35 +++-- 4 files changed, 298 insertions(+), 114 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4c4ebea0b2a..e29a0716f6a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -274,7 +274,7 @@ int btree_readpage(struct file *file, struct page *page) return extent_read_full_page(tree, page, btree_get_extent); } -static int btree_releasepage(struct page *page, gfp_t unused_gfp_flags) +static int btree_releasepage(struct page *page, gfp_t gfp_flags) { struct extent_io_tree *tree; struct extent_map_tree *map; @@ -282,7 +282,7 @@ static int btree_releasepage(struct page *page, gfp_t unused_gfp_flags) tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; - ret = try_release_extent_mapping(map, tree, page); + ret = try_release_extent_mapping(map, tree, page, gfp_flags); if (ret == 1) { ClearPagePrivate(page); set_page_private(page, 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 15cc158a049..1f734c34dc2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -27,13 +27,11 @@ static struct kmem_cache *extent_buffer_cache; static LIST_HEAD(buffers); static LIST_HEAD(states); -static spinlock_t state_lock = SPIN_LOCK_UNLOCKED; #define BUFFER_LRU_MAX 64 struct tree_entry { u64 start; u64 end; - int in_tree; struct rb_node rb_node; }; @@ -69,7 +67,7 @@ void extent_io_exit(void) while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, list); - printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs)); + printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs)); list_del(&state->list); kmem_cache_free(extent_state_cache, state); @@ -87,7 +85,7 @@ void extent_io_tree_init(struct extent_io_tree *tree, tree->state.rb_node = NULL; tree->ops = NULL; tree->dirty_bytes = 0; - rwlock_init(&tree->lock); + spin_lock_init(&tree->lock); spin_lock_init(&tree->lru_lock); tree->mapping = mapping; INIT_LIST_HEAD(&tree->buffer_lru); @@ -110,18 +108,13 @@ EXPORT_SYMBOL(extent_io_tree_empty_lru); struct extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; - unsigned long flags; state = kmem_cache_alloc(extent_state_cache, mask); if (!state || IS_ERR(state)) return state; state->state = 0; - state->in_tree = 0; state->private = 0; - - spin_lock_irqsave(&state_lock, flags); - list_add(&state->list, &states); - spin_unlock_irqrestore(&state_lock, flags); + state->tree = NULL; atomic_set(&state->refs, 1); init_waitqueue_head(&state->wq); @@ -131,14 +124,10 @@ EXPORT_SYMBOL(alloc_extent_state); void free_extent_state(struct extent_state *state) { - unsigned long flags; if (!state) return; if (atomic_dec_and_test(&state->refs)) { - WARN_ON(state->in_tree); - spin_lock_irqsave(&state_lock, flags); - list_del(&state->list); - spin_unlock_irqrestore(&state_lock, flags); + WARN_ON(state->tree); kmem_cache_free(extent_state_cache, state); } } @@ -164,7 +153,6 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset, } entry = rb_entry(node, struct tree_entry, rb_node); - entry->in_tree = 1; rb_link_node(node, parent, p); rb_insert_color(node, root); return NULL; @@ 
-216,8 +204,9 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) { - struct rb_node *prev; + struct rb_node *prev = NULL; struct rb_node *ret; + ret = __tree_search(root, offset, &prev, NULL); if (!ret) return prev; @@ -248,7 +237,7 @@ static int merge_state(struct extent_io_tree *tree, if (other->end == state->start - 1 && other->state == state->state) { state->start = other->start; - other->in_tree = 0; + other->tree = NULL; rb_erase(&other->rb_node, &tree->state); free_extent_state(other); } @@ -259,7 +248,7 @@ static int merge_state(struct extent_io_tree *tree, if (other->start == state->end + 1 && other->state == state->state) { other->start = state->start; - state->in_tree = 0; + state->tree = NULL; rb_erase(&state->rb_node, &tree->state); free_extent_state(state); } @@ -300,6 +289,7 @@ static int insert_state(struct extent_io_tree *tree, free_extent_state(state); return -EEXIST; } + state->tree = tree; merge_state(tree, state); return 0; } @@ -335,6 +325,7 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, free_extent_state(prealloc); return -EEXIST; } + prealloc->tree = tree; return 0; } @@ -361,9 +352,9 @@ static int clear_state_bit(struct extent_io_tree *tree, if (wake) wake_up(&state->wq); if (delete || state->state == 0) { - if (state->in_tree) { + if (state->tree) { rb_erase(&state->rb_node, &tree->state); - state->in_tree = 0; + state->tree = NULL; free_extent_state(state); } else { WARN_ON(1); @@ -404,7 +395,7 @@ again: return -ENOMEM; } - write_lock_irqsave(&tree->lock, flags); + spin_lock_irqsave(&tree->lock, flags); /* * this search will find the extents that end after * our range starts @@ -434,6 +425,8 @@ again: */ if (state->start < start) { + if (!prealloc) + prealloc = alloc_extent_state(GFP_ATOMIC); err = split_state(tree, state, prealloc, start); BUG_ON(err == -EEXIST); prealloc = NULL; @@ -455,6 +448,8 @@ again: * on the first half */ if (state->start <= end && state->end > end) { + if (!prealloc) + prealloc = alloc_extent_state(GFP_ATOMIC); err = split_state(tree, state, prealloc, end + 1); BUG_ON(err == -EEXIST); @@ -471,7 +466,7 @@ again: goto search_again; out: - write_unlock_irqrestore(&tree->lock, flags); + spin_unlock_irqrestore(&tree->lock, flags); if (prealloc) free_extent_state(prealloc); @@ -480,7 +475,7 @@ out: search_again: if (start > end) goto out; - write_unlock_irqrestore(&tree->lock, flags); + spin_unlock_irqrestore(&tree->lock, flags); if (mask & __GFP_WAIT) cond_resched(); goto again; @@ -492,9 +487,9 @@ static int wait_on_state(struct extent_io_tree *tree, { DEFINE_WAIT(wait); prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); - read_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); schedule(); - read_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); finish_wait(&state->wq, &wait); return 0; } @@ -509,7 +504,7 @@ int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) struct extent_state *state; struct rb_node *node; - read_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); again: while (1) { /* @@ -538,13 +533,13 @@ again: break; if (need_resched()) { - read_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); cond_resched(); - read_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); } } out: - read_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); return 0; } EXPORT_SYMBOL(wait_extent_bit); @@ -589,7 +584,7 @@ again: return -ENOMEM; } - 
write_lock_irqsave(&tree->lock, flags); + spin_lock_irqsave(&tree->lock, flags); /* * this search will find all the extents that end after * our range starts. @@ -709,7 +704,7 @@ again: goto search_again; out: - write_unlock_irqrestore(&tree->lock, flags); + spin_unlock_irqrestore(&tree->lock, flags); if (prealloc) free_extent_state(prealloc); @@ -718,7 +713,7 @@ out: search_again: if (start > end) goto out; - write_unlock_irqrestore(&tree->lock, flags); + spin_unlock_irqrestore(&tree->lock, flags); if (mask & __GFP_WAIT) cond_resched(); goto again; @@ -817,10 +812,6 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(wait_on_extent_writeback); -/* - * locks a range in ascending order, waiting for any locked regions - * it hits on the way. [start,end] are inclusive, and this will sleep. - */ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { int err; @@ -896,7 +887,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, struct extent_state *state; int ret = 1; - read_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); /* * this search will find all the extents that end after * our range starts. @@ -919,7 +910,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, break; } out: - read_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); return ret; } EXPORT_SYMBOL(find_first_extent_bit); @@ -933,7 +924,7 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree, u64 found = 0; u64 total_bytes = 0; - write_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); /* * this search will find all the extents that end after * our range starts. @@ -976,9 +967,9 @@ search_again: atomic_inc(&state->refs); prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); - write_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); schedule(); - write_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); finish_wait(&state->wq, &wait); free_extent_state(state); goto search_again; @@ -997,7 +988,7 @@ search_again: break; } out: - write_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); return found; } @@ -1017,7 +1008,7 @@ u64 count_range_bits(struct extent_io_tree *tree, return 0; } - write_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); if (cur_start == 0 && bits == EXTENT_DIRTY) { total_bytes = tree->dirty_bytes; goto out; @@ -1050,7 +1041,7 @@ u64 count_range_bits(struct extent_io_tree *tree, break; } out: - write_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); return total_bytes; } /* @@ -1122,7 +1113,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) struct extent_state *state; int ret = 0; - write_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); /* * this search will find all the extents that end after * our range starts. @@ -1139,7 +1130,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) } state->private = private; out: - write_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); return ret; } @@ -1149,7 +1140,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) struct extent_state *state; int ret = 0; - read_lock_irq(&tree->lock); + spin_lock_irq(&tree->lock); /* * this search will find all the extents that end after * our range starts. 
@@ -1166,13 +1157,13 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) } *private = state->private; out: - read_unlock_irq(&tree->lock); + spin_unlock_irq(&tree->lock); return ret; } /* * searches a range in the state tree for a given mask. - * If 'filled' == 1, this returns 1 only if ever extent in the tree + * If 'filled' == 1, this returns 1 only if every extent in the tree * has the bits set. Otherwise, 1 is returned if any bit in the * range is found set. */ @@ -1184,7 +1175,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, int bitset = 0; unsigned long flags; - read_lock_irqsave(&tree->lock, flags); + spin_lock_irqsave(&tree->lock, flags); node = tree_search(&tree->state, start); while (node && start <= end) { state = rb_entry(node, struct extent_state, rb_node); @@ -1215,7 +1206,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, break; } } - read_unlock_irqrestore(&tree->lock, flags); + spin_unlock_irqrestore(&tree->lock, flags); return bitset; } EXPORT_SYMBOL(test_range_bit); @@ -1282,16 +1273,19 @@ static int end_bio_extent_writepage(struct bio *bio, { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_io_tree *tree = bio->bi_private; + struct extent_state *state = bio->bi_private; + struct extent_io_tree *tree = state->tree; + struct rb_node *node; u64 start; u64 end; + u64 cur; int whole_page; + unsigned long flags; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) if (bio->bi_size) return 1; #endif - do { struct page *page = bvec->bv_page; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1311,16 +1305,80 @@ static int end_bio_extent_writepage(struct bio *bio, ClearPageUptodate(page); SetPageError(page); } - clear_extent_writeback(tree, start, end, GFP_ATOMIC); + + if (tree->ops && tree->ops->writepage_end_io_hook) { + tree->ops->writepage_end_io_hook(page, start, end, + state); + } + + /* + * bios can get merged in funny ways, and so we need to + * be careful with the state variable. We know the + * state won't be merged with others because it has + * WRITEBACK set, but we can't be sure each biovec is + * sequential in the file. So, if our cached state + * doesn't match the expected end, search the tree + * for the correct one. 
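+ * The cached state is only a hint; the tree walk below is what + * keeps the bits correct when the hint turns out to be stale.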
+ */ + + spin_lock_irqsave(&tree->lock, flags); + if (!state || state->end != end) { + state = NULL; + node = __tree_search(&tree->state, start, NULL, NULL); + if (node) { + state = rb_entry(node, struct extent_state, + rb_node); + if (state->end != end || + !(state->state & EXTENT_WRITEBACK)) + state = NULL; + } + if (!state) { + spin_unlock_irqrestore(&tree->lock, flags); + clear_extent_writeback(tree, start, + end, GFP_ATOMIC); + goto next_io; + } + } + cur = end; + while(1) { + struct extent_state *clear = state; + cur = state->start; + node = rb_prev(&state->rb_node); + if (node) { + state = rb_entry(node, + struct extent_state, + rb_node); + } else { + state = NULL; + } + + clear_state_bit(tree, clear, EXTENT_WRITEBACK, + 1, 0); + if (cur == start) + break; + if (cur < start) { + WARN_ON(1); + break; + } + if (!node) + break; + } + /* before releasing the lock, make sure the next state + * variable has the expected bits set and corresponds + * to the correct offsets in the file + */ + if (state && (state->end + 1 != start || + !state->state & EXTENT_WRITEBACK)) { + state = NULL; + } + spin_unlock_irqrestore(&tree->lock, flags); +next_io: if (whole_page) end_page_writeback(page); else check_page_writeback(tree, page); - if (tree->ops && tree->ops->writepage_end_io_hook) - tree->ops->writepage_end_io_hook(page, start, end); } while (bvec >= bio->bi_io_vec); - bio_put(bio); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) return 0; @@ -1347,9 +1405,13 @@ static int end_bio_extent_readpage(struct bio *bio, { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_io_tree *tree = bio->bi_private; + struct extent_state *state = bio->bi_private; + struct extent_io_tree *tree = state->tree; + struct rb_node *node; u64 start; u64 end; + u64 cur; + unsigned long flags; int whole_page; int ret; @@ -1373,27 +1435,83 @@ static int end_bio_extent_readpage(struct bio *bio, prefetchw(&bvec->bv_page->flags); if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { - ret = tree->ops->readpage_end_io_hook(page, start, end); + ret = tree->ops->readpage_end_io_hook(page, start, end, + state); if (ret) uptodate = 0; } - if (uptodate) { - set_extent_uptodate(tree, start, end, GFP_ATOMIC); - if (whole_page) - SetPageUptodate(page); - else - check_page_uptodate(tree, page); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - unlock_extent(tree, start, end, GFP_ATOMIC); + spin_lock_irqsave(&tree->lock, flags); + if (!state || state->end != end) { + state = NULL; + node = __tree_search(&tree->state, start, NULL, NULL); + if (node) { + state = rb_entry(node, struct extent_state, + rb_node); + if (state->end != end || + !(state->state & EXTENT_LOCKED)) + state = NULL; + } + if (!state) { + spin_unlock_irqrestore(&tree->lock, flags); + set_extent_uptodate(tree, start, end, + GFP_ATOMIC); + unlock_extent(tree, start, end, GFP_ATOMIC); + goto next_io; + } + } - if (whole_page) + cur = end; + while(1) { + struct extent_state *clear = state; + cur = state->start; + node = rb_prev(&state->rb_node); + if (node) { + state = rb_entry(node, + struct extent_state, + rb_node); + } else { + state = NULL; + } + clear->state |= EXTENT_UPTODATE; + clear_state_bit(tree, clear, EXTENT_LOCKED, + 1, 0); + if (cur == start) + break; + if (cur < start) { + WARN_ON(1); + break; + } + if (!node) + break; + } + /* before releasing the lock, make sure the next state + * variable has the expected bits set and corresponds + * to the correct 
offsets in the file + */ + if (state && (state->end + 1 != start || + !state->state & EXTENT_WRITEBACK)) { + state = NULL; + } + spin_unlock_irqrestore(&tree->lock, flags); +next_io: + if (whole_page) { + if (uptodate) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } unlock_page(page); - else + } else { + if (uptodate) { + check_page_uptodate(tree, page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } check_page_locked(tree, page); + } } while (bvec >= bio->bi_io_vec); bio_put(bio); @@ -1416,7 +1534,8 @@ static int end_bio_extent_preparewrite(struct bio *bio, { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_io_tree *tree = bio->bi_private; + struct extent_state *state = bio->bi_private; + struct extent_io_tree *tree = state->tree; u64 start; u64 end; @@ -1475,6 +1594,29 @@ static int submit_one_bio(int rw, struct bio *bio) { u64 maxsector; int ret = 0; + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct page *page = bvec->bv_page; + struct extent_io_tree *tree = bio->bi_private; + struct rb_node *node; + struct extent_state *state; + u64 start; + u64 end; + + start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; + end = start + bvec->bv_len - 1; + + spin_lock_irq(&tree->lock); + node = __tree_search(&tree->state, start, NULL, NULL); + BUG_ON(!node); + state = rb_entry(node, struct extent_state, rb_node); + while(state->end < end) { + node = rb_next(node); + state = rb_entry(node, struct extent_state, rb_node); + } + BUG_ON(state->end != end); + spin_unlock_irq(&tree->lock); + + bio->bi_private = state; bio_get(bio); @@ -1519,9 +1661,12 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, if (!bio) { printk("failed to allocate bio nr %d\n", nr); } + + bio_add_page(bio, page, size, offset); bio->bi_end_io = end_io_func; bio->bi_private = tree; + if (bio_ret) { *bio_ret = bio; } else { @@ -1635,6 +1780,16 @@ static int __extent_read_full_page(struct extent_io_tree *tree, page_offset += iosize; continue; } + /* we have an inline extent but it didn't get marked up + * to date. 
Error out + */ + if (block_start == EXTENT_MAP_INLINE) { + SetPageError(page); + unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + cur = cur + iosize; + page_offset += iosize; + continue; + } ret = 0; if (tree->ops && tree->ops->readpage_io_hook) { @@ -2205,7 +2360,8 @@ EXPORT_SYMBOL(extent_prepare_write); * map records are removed */ int try_release_extent_mapping(struct extent_map_tree *map, - struct extent_io_tree *tree, struct page *page) + struct extent_io_tree *tree, struct page *page, + gfp_t mask) { struct extent_map *em; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; @@ -2213,30 +2369,42 @@ int try_release_extent_mapping(struct extent_map_tree *map, u64 orig_start = start; int ret = 1; - while (start <= end) { - spin_lock(&map->lock); - em = lookup_extent_mapping(map, start, end); - if (!em || IS_ERR(em)) { + if ((mask & __GFP_WAIT) && + page->mapping->host->i_size > 16 * 1024 * 1024) { + while (start <= end) { + spin_lock(&map->lock); + em = lookup_extent_mapping(map, start, end); + if (!em || IS_ERR(em)) { + spin_unlock(&map->lock); + break; + } + if (em->start != start) { + spin_unlock(&map->lock); + free_extent_map(em); + break; + } + if (!test_range_bit(tree, em->start, + extent_map_end(em) - 1, + EXTENT_LOCKED, 0)) { + remove_extent_mapping(map, em); + /* once for the rb tree */ + free_extent_map(em); + } + start = extent_map_end(em); spin_unlock(&map->lock); - break; - } - if (!test_range_bit(tree, em->start, extent_map_end(em) - 1, - EXTENT_LOCKED, 0)) { - remove_extent_mapping(map, em); - /* once for the rb tree */ + + /* once for us */ free_extent_map(em); } - start = extent_map_end(em); - spin_unlock(&map->lock); - - /* once for us */ - free_extent_map(em); } - if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0)) + if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0)) ret = 0; - else + else { + if ((mask & GFP_NOFS) == GFP_NOFS) + mask = GFP_NOFS; clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, - 1, 1, GFP_NOFS); + 1, 1, mask); + } return ret; } EXPORT_SYMBOL(try_release_extent_mapping); @@ -2553,13 +2721,13 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, } } clear_page_dirty_for_io(page); - write_lock_irq(&page->mapping->tree_lock); + read_lock_irq(&page->mapping->tree_lock); if (!PageDirty(page)) { radix_tree_tag_clear(&page->mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } - write_unlock_irq(&page->mapping->tree_lock); + read_unlock_irq(&page->mapping->tree_lock); unlock_page(page); } return 0; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 06be1fe84b2..f1e8bf251f3 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -23,19 +23,23 @@ #define EXTENT_PAGE_PRIVATE 1 #define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3 +struct extent_state; + struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end); - void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end); + int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, + struct extent_state *state); + void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, + struct extent_state *state); }; struct extent_io_tree { struct rb_root state; struct address_space *mapping; u64 dirty_bytes; - rwlock_t lock; + spinlock_t lock; struct extent_io_ops *ops; spinlock_t lru_lock; struct list_head buffer_lru; 
@@ -45,8 +49,8 @@ struct extent_io_tree { struct extent_state { u64 start; u64 end; /* inclusive */ - int in_tree; struct rb_node rb_node; + struct extent_io_tree *tree; wait_queue_head_t wq; atomic_t refs; unsigned long state; @@ -82,7 +86,8 @@ void extent_io_tree_init(struct extent_io_tree *tree, struct address_space *mapping, gfp_t mask); void extent_io_tree_empty_lru(struct extent_io_tree *tree); int try_release_extent_mapping(struct extent_map_tree *map, - struct extent_io_tree *tree, struct page *page); + struct extent_io_tree *tree, struct page *page, + gfp_t mask); int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 48455127410..bac8722e14e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -331,7 +331,8 @@ out: return ret; } -int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end) +int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, + struct extent_state *state) { size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); struct inode *inode = page->mapping->host; @@ -347,7 +348,12 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end) btrfs_test_flag(inode, NODATASUM)) return 0; - ret = get_state_private(io_tree, start, &private); + if (state->start == start) { + private = state->private; + ret = 0; + } else { + ret = get_state_private(io_tree, start, &private); + } local_irq_save(flags); kaddr = kmap_atomic(page, KM_IRQ0); if (ret) { @@ -1830,7 +1836,7 @@ out_unlock: } struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t page_offset, u64 start, u64 len, + size_t pg_offset, u64 start, u64 len, int create) { int ret; @@ -1865,7 +1871,10 @@ again: start, len, em->start, em->len); WARN_ON(1); } - goto out; + if (em->block_start == EXTENT_MAP_INLINE && page) + free_extent_map(em); + else + goto out; } em = alloc_extent_map(GFP_NOFS); if (!em) { @@ -1930,6 +1939,7 @@ again: em->len = extent_end - extent_start; goto insert; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { + u64 page_start; unsigned long ptr; char *map; size_t size; @@ -1959,16 +1969,17 @@ again: goto out; } - extent_offset = ((u64)page->index << PAGE_CACHE_SHIFT) - - extent_start + page_offset; - copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset, + page_start = page_offset(page) + pg_offset; + extent_offset = page_start - extent_start; + copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, size - extent_offset); em->start = extent_start + extent_offset; - em->len = copy_size; + em->len = (copy_size + root->sectorsize - 1) & + ~((u64)root->sectorsize - 1); map = kmap(page); ptr = btrfs_file_extent_inline_start(item) + extent_offset; if (create == 0 && !PageUptodate(page)) { - read_extent_buffer(leaf, map + page_offset, ptr, + read_extent_buffer(leaf, map + pg_offset, ptr, copy_size); flush_dcache_page(page); } else if (create && PageUptodate(page)) { @@ -1980,7 +1991,7 @@ again: trans = btrfs_start_transaction(root, 1); goto again; } - write_extent_buffer(leaf, map + page_offset, ptr, + write_extent_buffer(leaf, map + pg_offset, ptr, copy_size); btrfs_mark_buffer_dirty(leaf); } @@ -2077,7 +2088,7 @@ btrfs_readpages(struct file *file, struct address_space *mapping, btrfs_get_extent); } -static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags) +static int btrfs_releasepage(struct 
page *page, gfp_t gfp_flags) { struct extent_io_tree *tree; struct extent_map_tree *map; @@ -2085,7 +2096,7 @@ static int btrfs_releasepage(struct page *page, gfp_t unused_gfp_flags) tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; - ret = try_release_extent_mapping(map, tree, page); + ret = try_release_extent_mapping(map, tree, page, gfp_flags); if (ret == 1) { ClearPagePrivate(page); set_page_private(page, 0); -- cgit v1.2.3-70-g09d2 From 85e21bac165b4ba1f6f90431ad6fc658ffcbaf3a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 29 Jan 2008 15:11:36 -0500 Subject: Btrfs: During deletes and truncate, remove many items at once from the tree Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 38 +++++++++-------- fs/btrfs/ctree.h | 12 +++++- fs/btrfs/extent_io.c | 1 - fs/btrfs/inode.c | 114 +++++++++++++++++++++++++++++---------------------- 4 files changed, 95 insertions(+), 70 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 43d23148a4f..84ad53e06b3 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2514,34 +2514,36 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, * delete the item at the leaf level in path. If that empties * the leaf, remove it from the tree */ -int btrfs_del_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_path *path) +int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, + struct btrfs_path *path, int slot, int nr) { - int slot; struct extent_buffer *leaf; struct btrfs_item *item; - int doff; - int dsize; + int last_off; + int dsize = 0; int ret = 0; int wret; + int i; u32 nritems; leaf = path->nodes[0]; - slot = path->slots[0]; - doff = btrfs_item_offset_nr(leaf, slot); - dsize = btrfs_item_size_nr(leaf, slot); + last_off = btrfs_item_offset_nr(leaf, slot + nr - 1); + + for (i = 0; i < nr; i++) + dsize += btrfs_item_size_nr(leaf, slot + i); + nritems = btrfs_header_nritems(leaf); - if (slot != nritems - 1) { + if (slot + nr != nritems) { int i; int data_end = leaf_data_end(root, leaf); memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + data_end + dsize, btrfs_leaf_data(leaf) + data_end, - doff - data_end); + last_off - data_end); - for (i = slot + 1; i < nritems; i++) { + for (i = slot + nr; i < nritems; i++) { u32 ioff; item = btrfs_item_nr(leaf, i); @@ -2562,12 +2564,12 @@ int btrfs_del_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, } memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot), - btrfs_item_nr_offset(slot + 1), + btrfs_item_nr_offset(slot + nr), sizeof(struct btrfs_item) * - (nritems - slot - 1)); + (nritems - slot - nr)); } - btrfs_set_header_nritems(leaf, nritems - 1); - nritems--; + btrfs_set_header_nritems(leaf, nritems - nr); + nritems -= nr; /* delete the leaf if we've emptied it */ if (nritems == 0) { @@ -2600,7 +2602,7 @@ int btrfs_del_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, } /* delete the leaf if it is mostly empty */ - if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) { + if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) { /* push_leaf_left fixes the path. 
* make sure the path still points to our leaf * for possible call to del_ptr below @@ -2608,13 +2610,13 @@ int btrfs_del_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, slot = path->slots[1]; extent_buffer_get(leaf); - wret = push_leaf_right(trans, root, path, 1, 1); + wret = push_leaf_left(trans, root, path, 1, 1); if (wret < 0 && wret != -ENOSPC) ret = wret; if (path->nodes[0] == leaf && btrfs_header_nritems(leaf)) { - wret = push_leaf_left(trans, root, path, 1, 1); + wret = push_leaf_right(trans, root, path, 1, 1); if (wret < 0 && wret != -ENOSPC) ret = wret; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6c65473e0fe..098cf088315 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1038,8 +1038,16 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); struct btrfs_path *btrfs_alloc_path(void); void btrfs_free_path(struct btrfs_path *p); void btrfs_init_path(struct btrfs_path *p); -int btrfs_del_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_path *path); +int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, + struct btrfs_path *path, int slot, int nr); + +static inline int btrfs_del_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path) +{ + return btrfs_del_items(trans, root, path, path->slots[0], 1); +} + int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, void *data, u32 data_size); int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, struct btrfs_root diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1f734c34dc2..8aec72253a1 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2863,7 +2863,6 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (ret || !wait) { return ret; } - for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); wait_on_page_locked(page); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bac8722e14e..0a2fe51c412 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -692,27 +692,6 @@ fail: return err; } -static int btrfs_free_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode) -{ - struct btrfs_path *path; - int ret; - - clear_inode(inode); - - path = btrfs_alloc_path(); - BUG_ON(!path); - ret = btrfs_lookup_inode(trans, root, path, - &BTRFS_I(inode)->location, -1); - if (ret > 0) - ret = -ENOENT; - if (!ret) - ret = btrfs_del_item(trans, root, path); - btrfs_free_path(path); - return ret; -} - /* * this can truncate away extent items, csum items and directory items. 
* It starts at a high offset and removes keys until it can't find @@ -723,7 +702,8 @@ static int btrfs_free_inode(struct btrfs_trans_handle *trans, */ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct inode *inode) + struct inode *inode, + u32 min_type) { int ret; struct btrfs_path *path; @@ -739,6 +719,8 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, u64 root_owner = 0; int found_extent; int del_item; + int pending_del_nr = 0; + int pending_del_slot = 0; int extent_type = -1; btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1); @@ -751,17 +733,19 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, key.offset = (u64)-1; key.type = (u8)-1; + btrfs_init_path(path); +search_again: + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret < 0) { + goto error; + } + if (ret > 0) { + BUG_ON(path->slots[0] == 0); + path->slots[0]--; + } + while(1) { - btrfs_init_path(path); fi = NULL; - ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) { - goto error; - } - if (ret > 0) { - BUG_ON(path->slots[0] == 0); - path->slots[0]--; - } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); @@ -769,10 +753,7 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, if (found_key.objectid != inode->i_ino) break; - if (found_type != BTRFS_CSUM_ITEM_KEY && - found_type != BTRFS_DIR_ITEM_KEY && - found_type != BTRFS_DIR_INDEX_KEY && - found_type != BTRFS_EXTENT_DATA_KEY) + if (found_type < min_type) break; item_end = found_key.offset; @@ -801,14 +782,17 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, found_type = BTRFS_INODE_ITEM_KEY; } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { found_type = BTRFS_CSUM_ITEM_KEY; + } else if (found_type == BTRFS_EXTENT_DATA_KEY) { + found_type = BTRFS_XATTR_ITEM_KEY; + } else if (found_type == BTRFS_XATTR_ITEM_KEY) { + found_type = BTRFS_INODE_REF_KEY; } else if (found_type) { found_type--; } else { break; } btrfs_set_key_type(&key, found_type); - btrfs_release_path(root, path); - continue; + goto next; } if (found_key.offset >= inode->i_size) del_item = 1; @@ -860,13 +844,21 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, } delete: if (del_item) { - ret = btrfs_del_item(trans, root, path); - if (ret) - goto error; + if (!pending_del_nr) { + /* no pending yet, add ourselves */ + pending_del_slot = path->slots[0]; + pending_del_nr = 1; + } else if (pending_del_nr && + path->slots[0] + 1 == pending_del_slot) { + /* hop on the pending chunk */ + pending_del_nr++; + pending_del_slot = path->slots[0]; + } else { + printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot); + } } else { break; } - btrfs_release_path(root, path); if (found_extent) { ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, @@ -875,9 +867,36 @@ delete: found_key.offset, 0); BUG_ON(ret); } +next: + if (path->slots[0] == 0) { + if (pending_del_nr) + goto del_pending; + btrfs_release_path(root, path); + goto search_again; + } + + path->slots[0]--; + if (pending_del_nr && + path->slots[0] + 1 != pending_del_slot) { + struct btrfs_key debug; +del_pending: + btrfs_item_key_to_cpu(path->nodes[0], &debug, + pending_del_slot); + ret = btrfs_del_items(trans, root, path, + pending_del_slot, + pending_del_nr); + BUG_ON(ret); + pending_del_nr = 0; + btrfs_release_path(root, path); + 
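/* the path was released just above, so restart the walk from the root */ +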
goto search_again; + } } ret = 0; error: + if (pending_del_nr) { + ret = btrfs_del_items(trans, root, path, pending_del_slot, + pending_del_nr); + } btrfs_release_path(root, path); btrfs_free_path(path); inode->i_sb->s_dirt = 1; @@ -1067,16 +1086,12 @@ void btrfs_delete_inode(struct inode *inode) trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); - ret = btrfs_truncate_in_trans(trans, root, inode); - if (ret) - goto no_delete_lock; - ret = btrfs_delete_xattrs(trans, root, inode); - if (ret) - goto no_delete_lock; - ret = btrfs_free_inode(trans, root, inode); + ret = btrfs_truncate_in_trans(trans, root, inode, 0); if (ret) goto no_delete_lock; + nr = trans->blocks_used; + clear_inode(inode); btrfs_end_transaction(trans, root); mutex_unlock(&root->fs_info->fs_mutex); @@ -2190,7 +2205,8 @@ static void btrfs_truncate(struct inode *inode) btrfs_set_trans_block_group(trans, inode); /* FIXME, add redo link to tree so we don't leak on crash */ - ret = btrfs_truncate_in_trans(trans, root, inode); + ret = btrfs_truncate_in_trans(trans, root, inode, + BTRFS_EXTENT_DATA_KEY); btrfs_update_inode(trans, root, inode); nr = trans->blocks_used; -- cgit v1.2.3-70-g09d2 From 291d673e6a22d9c6834e939f66c7cfef90669021 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 29 Jan 2008 15:55:23 -0500 Subject: Btrfs: Do delalloc accounting via hooks in the extent_state code Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 6 ----- fs/btrfs/extent_io.c | 25 +++++++++++++++++++++ fs/btrfs/extent_io.h | 4 ++++ fs/btrfs/file.c | 4 ---- fs/btrfs/inode.c | 60 ++++++++++++++++++++------------------------ 5 files changed, 53 insertions(+), 46 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c4fe37791d3..90718f77bbc 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2180,7 +2180,6 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start, unsigned long last_index; unsigned long i; struct page *page; - struct btrfs_root *root = BTRFS_I(inode)->root; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct file_ra_state *ra; @@ -2220,11 +2219,6 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start, set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS); - spin_lock(&root->fs_info->delalloc_lock); - root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - - existing_delalloc; - spin_unlock(&root->fs_info->delalloc_lock); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); set_page_dirty(page); unlock_page(page); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8aec72253a1..e81b35b0618 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -256,6 +256,26 @@ static int merge_state(struct extent_io_tree *tree, return 0; } +static void set_state_cb(struct extent_io_tree *tree, + struct extent_state *state, + unsigned long bits) +{ + if (tree->ops && tree->ops->set_bit_hook) { + tree->ops->set_bit_hook(tree->mapping->host, state->start, + state->end, bits); + } +} + +static void clear_state_cb(struct extent_io_tree *tree, + struct extent_state *state, + unsigned long bits) +{ + if (tree->ops && tree->ops->clear_bit_hook) { + tree->ops->clear_bit_hook(tree->mapping->host, state->start, + state->end, bits); + } +} + /* * insert an extent_state struct into the tree. 'bits' are set on the + * struct before it is inserted. 
@@ -281,6 +301,7 @@ static int insert_state(struct extent_io_tree *tree, state->state |= bits; state->start = start; state->end = end; + set_state_cb(tree, state, bits); node = tree_insert(&tree->state, end, &state->rb_node); if (node) { struct extent_state *found; @@ -349,6 +370,7 @@ static int clear_state_bit(struct extent_io_tree *tree, tree->dirty_bytes -= range; } state->state &= ~bits; + clear_state_cb(tree, state, bits); if (wake) wake_up(&state->wq); if (delete || state->state == 0) { @@ -553,6 +575,7 @@ static void set_state_bits(struct extent_io_tree *tree, tree->dirty_bytes += range; } state->state |= bits; + set_state_cb(tree, state, bits); } /* @@ -975,6 +998,7 @@ search_again: goto search_again; } state->state |= EXTENT_LOCKED; + set_state_cb(tree, state, EXTENT_LOCKED); if (!found) *start = state->start; found++; @@ -1474,6 +1498,7 @@ static int end_bio_extent_readpage(struct bio *bio, state = NULL; } clear->state |= EXTENT_UPTODATE; + set_state_cb(tree, clear, EXTENT_UPTODATE); clear_state_bit(tree, clear, EXTENT_LOCKED, 1, 0); if (cur == start) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f1e8bf251f3..a96c5a14134 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -33,6 +33,10 @@ struct extent_io_ops { struct extent_state *state); void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); + int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, + unsigned long bits); + int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, + unsigned long bits); }; struct extent_io_tree { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c5bb00f9239..8e210616d70 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -323,10 +323,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, } set_extent_delalloc(io_tree, start_pos, end_of_last_block, GFP_NOFS); - spin_lock(&root->fs_info->delalloc_lock); - root->fs_info->delalloc_bytes += (end_of_last_block + 1 - - start_pos) - existing_delalloc; - spin_unlock(&root->fs_info->delalloc_lock); btrfs_add_ordered_inode(inode); } else { u64 aligned_end; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 413b1012de5..5a38443a24e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -80,8 +80,6 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, u64 thresh; int ret = 0; - return 0; - if (for_del) thresh = total * 90; else @@ -249,7 +247,6 @@ not_found: static int run_delalloc_range(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes; int ret; mutex_lock(&root->fs_info->fs_mutex); if (btrfs_test_opt(root, NODATACOW) || @@ -258,20 +255,34 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end) else ret = cow_file_range(inode, start, end); - spin_lock(&root->fs_info->delalloc_lock); - num_bytes = end + 1 - start; - if (root->fs_info->delalloc_bytes < num_bytes) { - printk("delalloc accounting error total %llu sub %llu\n", - root->fs_info->delalloc_bytes, num_bytes); - } else { - root->fs_info->delalloc_bytes -= num_bytes; - } - spin_unlock(&root->fs_info->delalloc_lock); - mutex_unlock(&root->fs_info->fs_mutex); return ret; } +int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, + unsigned long bits) +{ + if ((bits & EXTENT_DELALLOC)) { + struct btrfs_root *root = BTRFS_I(inode)->root; + spin_lock(&root->fs_info->delalloc_lock); + root->fs_info->delalloc_bytes += end - start + 1; + spin_unlock(&root->fs_info->delalloc_lock); + } + return 
0; +} + +int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, + unsigned long bits) +{ + if ((bits & EXTENT_DELALLOC)) { + struct btrfs_root *root = BTRFS_I(inode)->root; + spin_lock(&root->fs_info->delalloc_lock); + root->fs_info->delalloc_bytes -= end - start + 1; + spin_unlock(&root->fs_info->delalloc_lock); + } + return 0; +} + int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end) { struct inode *inode = page->mapping->host; @@ -908,28 +919,17 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page, { char *kaddr; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct btrfs_root *root = BTRFS_I(inode)->root; u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - u64 existing_delalloc; - u64 delalloc_start; int ret = 0; WARN_ON(!PageLocked(page)); set_page_extent_mapped(page); lock_extent(io_tree, page_start, page_end, GFP_NOFS); - delalloc_start = page_start; - existing_delalloc = count_range_bits(&BTRFS_I(inode)->io_tree, - &delalloc_start, page_end, - PAGE_CACHE_SIZE, EXTENT_DELALLOC); set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); - spin_lock(&root->fs_info->delalloc_lock); - root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - existing_delalloc; - spin_unlock(&root->fs_info->delalloc_lock); - if (zero_start != PAGE_CACHE_SIZE) { kaddr = kmap(page); memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); @@ -2456,8 +2456,6 @@ int btrfs_defrag_file(struct file *file) { unsigned long ra_index = 0; u64 page_start; u64 page_end; - u64 delalloc_start; - u64 existing_delalloc; unsigned long i; int ret; @@ -2491,19 +2489,9 @@ int btrfs_defrag_file(struct file *file) { page_end = page_start + PAGE_CACHE_SIZE - 1; lock_extent(io_tree, page_start, page_end, GFP_NOFS); - delalloc_start = page_start; - existing_delalloc = - count_range_bits(&BTRFS_I(inode)->io_tree, - &delalloc_start, page_end, - PAGE_CACHE_SIZE, EXTENT_DELALLOC); set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS); - spin_lock(&root->fs_info->delalloc_lock); - root->fs_info->delalloc_bytes += PAGE_CACHE_SIZE - - existing_delalloc; - spin_unlock(&root->fs_info->delalloc_lock); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); set_page_dirty(page); unlock_page(page); -- cgit v1.2.3-70-g09d2 From 6f568d35a045dbb8a13fe71bfc32e85e39a986cb Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 29 Jan 2008 16:03:38 -0500 Subject: Btrfs: mount -o max_inline=size to control the maximum inline extent size Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/extent_io.c | 1 - fs/btrfs/file.c | 3 ++- fs/btrfs/super.c | 19 ++++++++++++++++++- 5 files changed, 22 insertions(+), 3 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 127c86f795d..8f93e066bf8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -325,6 +325,7 @@ struct btrfs_fs_info { u64 last_trans_committed; unsigned long mount_opt; u64 max_extent; + u64 max_inline; u64 alloc_start; struct btrfs_transaction *running_transaction; struct btrfs_super_block super_copy; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e29a0716f6a..6647b90ef74 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -659,6 +659,7 @@ struct btrfs_root *open_ctree(struct super_block *sb) fs_info->throttles = 0; fs_info->mount_opt = 0; fs_info->max_extent = (u64)-1; + fs_info->max_inline = 8192 * 1024; fs_info->delalloc_bytes = 0; 
fs_info->btree_inode = new_inode(sb); fs_info->btree_inode->i_ino = 1; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e81b35b0618..6999b819db6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2451,7 +2451,6 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, goto out; sector = (em->block_start + start - em->start) >> inode->i_blkbits; -printk("bmap finds %Lu %Lu block %Lu\n", em->start, em->len, em->block_start); out: free_extent_map(em); return sector; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8e210616d70..7c10a90362f 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -301,7 +301,8 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, */ inline_size = end_pos; if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) || - inline_size > 8192 || + inline_size > root->fs_info->max_inline || + (inline_size & (root->sectorsize -1)) == 0 || inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { u64 last_end; u64 existing_delalloc = 0; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f8a1016600b..a46300c4753 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -64,7 +64,7 @@ static void btrfs_put_super (struct super_block * sb) enum { Opt_subvol, Opt_nodatasum, Opt_nodatacow, Opt_max_extent, - Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_err, + Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_err, }; static match_table_t tokens = { @@ -73,6 +73,7 @@ static match_table_t tokens = { {Opt_nodatacow, "nodatacow"}, {Opt_nobarrier, "nobarrier"}, {Opt_max_extent, "max_extent=%s"}, + {Opt_max_inline, "max_inline=%s"}, {Opt_alloc_start, "alloc_start=%s"}, {Opt_ssd, "ssd"}, {Opt_err, NULL} @@ -178,6 +179,22 @@ static int parse_options (char * options, } } break; + case Opt_max_inline: + if (info) { + char *num = match_strdup(&args[0]); + if (num) { + info->max_inline = + btrfs_parse_size(num); + kfree(num); + + info->max_inline = max_t(u64, + info->max_inline, + root->sectorsize); + printk("btrfs: max_inline at %Lu\n", + info->max_inline); + } + } + break; case Opt_alloc_start: if (info) { char *num = match_strdup(&args[0]); -- cgit v1.2.3-70-g09d2 From b0c68f8bed058d9f2023b067b16ed06a8c439544 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 31 Jan 2008 11:05:37 -0500 Subject: Btrfs: Enable delalloc accounting Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 ++++ fs/btrfs/extent_io.c | 14 +++++++------- fs/btrfs/extent_io.h | 4 ++-- fs/btrfs/inode.c | 18 +++++++++++++----- 4 files changed, 26 insertions(+), 14 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6647b90ef74..ad72e219201 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -861,6 +861,10 @@ int close_ctree(struct btrfs_root *root) write_ctree_super(NULL, root); mutex_unlock(&fs_info->fs_mutex); + if (fs_info->delalloc_bytes) { + printk("btrfs: at unmount delalloc count %Lu\n", + fs_info->delalloc_bytes); + } if (fs_info->extent_root->node) free_extent_buffer(fs_info->extent_root->node); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6999b819db6..624aabc3e6f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -262,7 +262,7 @@ static void set_state_cb(struct extent_io_tree *tree, { if (tree->ops && tree->ops->set_bit_hook) { tree->ops->set_bit_hook(tree->mapping->host, state->start, - state->end, bits); + state->end, state->state, bits); } } @@ -272,7 +272,7 @@ static void clear_state_cb(struct extent_io_tree *tree, { if (tree->ops && 
tree->ops->set_bit_hook) { tree->ops->clear_bit_hook(tree->mapping->host, state->start, - state->end, bits); + state->end, state->state, bits); } } @@ -298,10 +298,10 @@ static int insert_state(struct extent_io_tree *tree, } if (bits & EXTENT_DIRTY) tree->dirty_bytes += end - start + 1; + set_state_cb(tree, state, bits); state->state |= bits; state->start = start; state->end = end; - set_state_cb(tree, state, bits); node = tree_insert(&tree->state, end, &state->rb_node); if (node) { struct extent_state *found; @@ -369,8 +369,8 @@ static int clear_state_bit(struct extent_io_tree *tree, WARN_ON(range > tree->dirty_bytes); tree->dirty_bytes -= range; } - state->state &= ~bits; clear_state_cb(tree, state, bits); + state->state &= ~bits; if (wake) wake_up(&state->wq); if (delete || state->state == 0) { @@ -574,8 +574,8 @@ static void set_state_bits(struct extent_io_tree *tree, u64 range = state->end - state->start + 1; tree->dirty_bytes += range; } - state->state |= bits; set_state_cb(tree, state, bits); + state->state |= bits; } /* @@ -997,8 +997,8 @@ search_again: free_extent_state(state); goto search_again; } - state->state |= EXTENT_LOCKED; set_state_cb(tree, state, EXTENT_LOCKED); + state->state |= EXTENT_LOCKED; if (!found) *start = state->start; found++; @@ -1497,8 +1497,8 @@ static int end_bio_extent_readpage(struct bio *bio, } else { state = NULL; } - clear->state |= EXTENT_UPTODATE; set_state_cb(tree, clear, EXTENT_UPTODATE); + clear->state |= EXTENT_UPTODATE; clear_state_bit(tree, clear, EXTENT_LOCKED, 1, 0); if (cur == start) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index a96c5a14134..6fd5e2c9061 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -34,9 +34,9 @@ struct extent_io_ops { void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, - unsigned long bits); + unsigned long old, unsigned long bits); int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, - unsigned long bits); + unsigned long old, unsigned long bits); }; struct extent_io_tree { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bcf3b35fb65..f13b1dbc061 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -260,9 +260,9 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end) } int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, - unsigned long bits) + unsigned long old, unsigned long bits) { - if ((bits & EXTENT_DELALLOC)) { + if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; spin_lock(&root->fs_info->delalloc_lock); root->fs_info->delalloc_bytes += end - start + 1; @@ -272,12 +272,18 @@ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, } int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, - unsigned long bits) + unsigned long old, unsigned long bits) { - if ((bits & EXTENT_DELALLOC)) { + if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; spin_lock(&root->fs_info->delalloc_lock); - root->fs_info->delalloc_bytes -= end - start + 1; + if (end - start + 1 > root->fs_info->delalloc_bytes) { + printk("warning: delalloc account %Lu %Lu\n", + end - start + 1, root->fs_info->delalloc_bytes); + root->fs_info->delalloc_bytes = 0; + } else { + root->fs_info->delalloc_bytes -= end - start + 1; + } spin_unlock(&root->fs_info->delalloc_lock); } return 0; @@ -3002,6 +3008,8 @@ static struct extent_io_ops 
btrfs_extent_io_ops = { .writepage_io_hook = btrfs_writepage_io_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .set_bit_hook = btrfs_set_bit_hook, + .clear_bit_hook = btrfs_clear_bit_hook, }; static struct address_space_operations btrfs_aops = { -- cgit v1.2.3-70-g09d2 From 80ea96b1f3bd2431e0d71c9df6ab45c3de0c5840 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 1 Feb 2008 14:51:59 -0500 Subject: Btrfs: Add a lookup cache to the extent state tree Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 57 ++++++++++++++++++++++++++++++++++++---------------- fs/btrfs/extent_io.h | 1 + 2 files changed, 41 insertions(+), 17 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 624aabc3e6f..5f2fbf2054f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -90,6 +90,7 @@ void extent_io_tree_init(struct extent_io_tree *tree, tree->mapping = mapping; INIT_LIST_HEAD(&tree->buffer_lru); tree->lru_size = 0; + tree->last = NULL; } EXPORT_SYMBOL(extent_io_tree_init); @@ -158,16 +159,23 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset, return NULL; } -static struct rb_node *__tree_search(struct rb_root *root, u64 offset, +static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, struct rb_node **prev_ret, struct rb_node **next_ret) { + struct rb_root *root = &tree->state; struct rb_node * n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *orig_prev = NULL; struct tree_entry *entry; struct tree_entry *prev_entry = NULL; + if (tree->last) { + struct extent_state *state; + state = tree->last; + if (state->start <= offset && offset <= state->end) + return &tree->last->rb_node; + } while(n) { entry = rb_entry(n, struct tree_entry, rb_node); prev = n; @@ -177,8 +185,10 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, n = n->rb_left; else if (offset > entry->end) n = n->rb_right; - else + else { + tree->last = rb_entry(n, struct extent_state, rb_node); return n; + } } if (prev_ret) { @@ -202,14 +212,20 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, return NULL; } -static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) +static inline struct rb_node *tree_search(struct extent_io_tree *tree, + u64 offset) { struct rb_node *prev = NULL; struct rb_node *ret; - ret = __tree_search(root, offset, &prev, NULL); - if (!ret) + ret = __etree_search(tree, offset, &prev, NULL); + if (!ret) { + if (prev) { + tree->last = rb_entry(prev, struct extent_state, + rb_node); + } return prev; + } return ret; } @@ -238,6 +254,8 @@ static int merge_state(struct extent_io_tree *tree, other->state == state->state) { state->start = other->start; other->tree = NULL; + if (tree->last == other) + tree->last = NULL; rb_erase(&other->rb_node, &tree->state); free_extent_state(other); } @@ -249,6 +267,8 @@ static int merge_state(struct extent_io_tree *tree, other->state == state->state) { other->start = state->start; state->tree = NULL; + if (tree->last == state) + tree->last = NULL; rb_erase(&state->rb_node, &tree->state); free_extent_state(state); } @@ -311,6 +331,7 @@ static int insert_state(struct extent_io_tree *tree, return -EEXIST; } state->tree = tree; + tree->last = state; merge_state(tree, state); return 0; } @@ -375,6 +396,8 @@ static int clear_state_bit(struct extent_io_tree *tree, wake_up(&state->wq); if (delete || state->state == 0) { if (state->tree) { + if (tree->last == 
state) + tree->last = NULL; rb_erase(&state->rb_node, &tree->state); state->tree = NULL; free_extent_state(state); @@ -422,7 +445,7 @@ again: * this search will find the extents that end after * our range starts */ - node = tree_search(&tree->state, start); + node = tree_search(tree, start); if (!node) goto out; state = rb_entry(node, struct extent_state, rb_node); @@ -533,7 +556,7 @@ again: * this search will find all the extents that end after * our range starts */ - node = tree_search(&tree->state, start); + node = tree_search(tree, start); if (!node) break; @@ -612,7 +635,7 @@ again: * this search will find all the extents that end after * our range starts. */ - node = tree_search(&tree->state, start); + node = tree_search(tree, start); if (!node) { err = insert_state(tree, prealloc, start, end, bits); prealloc = NULL; @@ -915,7 +938,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, * this search will find all the extents that end after * our range starts. */ - node = tree_search(&tree->state, start); + node = tree_search(tree, start); if (!node || IS_ERR(node)) { goto out; } @@ -953,7 +976,7 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree, * our range starts. */ search_again: - node = tree_search(&tree->state, cur_start); + node = tree_search(tree, cur_start); if (!node || IS_ERR(node)) { *end = (u64)-1; goto out; @@ -1041,7 +1064,7 @@ u64 count_range_bits(struct extent_io_tree *tree, * this search will find all the extents that end after * our range starts. */ - node = tree_search(&tree->state, cur_start); + node = tree_search(tree, cur_start); if (!node || IS_ERR(node)) { goto out; } @@ -1142,7 +1165,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) * this search will find all the extents that end after * our range starts. */ - node = tree_search(&tree->state, start); + node = tree_search(tree, start); if (!node || IS_ERR(node)) { ret = -ENOENT; goto out; @@ -1169,7 +1192,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) * this search will find all the extents that end after * our range starts. 
*/ - node = tree_search(&tree->state, start); + node = tree_search(tree, start); if (!node || IS_ERR(node)) { ret = -ENOENT; goto out; @@ -1200,7 +1223,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, unsigned long flags; spin_lock_irqsave(&tree->lock, flags); - node = tree_search(&tree->state, start); + node = tree_search(tree, start); while (node && start <= end) { state = rb_entry(node, struct extent_state, rb_node); @@ -1348,7 +1371,7 @@ static int end_bio_extent_writepage(struct bio *bio, spin_lock_irqsave(&tree->lock, flags); if (!state || state->end != end) { state = NULL; - node = __tree_search(&tree->state, start, NULL, NULL); + node = __etree_search(tree, start, NULL, NULL); if (node) { state = rb_entry(node, struct extent_state, rb_node); @@ -1468,7 +1491,7 @@ static int end_bio_extent_readpage(struct bio *bio, spin_lock_irqsave(&tree->lock, flags); if (!state || state->end != end) { state = NULL; - node = __tree_search(&tree->state, start, NULL, NULL); + node = __etree_search(tree, start, NULL, NULL); if (node) { state = rb_entry(node, struct extent_state, rb_node); @@ -1631,7 +1654,7 @@ static int submit_one_bio(int rw, struct bio *bio) end = start + bvec->bv_len - 1; spin_lock_irq(&tree->lock); - node = __tree_search(&tree->state, start, NULL, NULL); + node = __etree_search(tree, start, NULL, NULL); BUG_ON(!node); state = rb_entry(node, struct extent_state, rb_node); while(state->end < end) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6fd5e2c9061..160da3f9d78 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -48,6 +48,7 @@ struct extent_io_tree { spinlock_t lru_lock; struct list_head buffer_lru; int lru_size; + struct extent_state *last; }; struct extent_state { -- cgit v1.2.3-70-g09d2 From ae9d12853b44f4e0c06732166e8e3399ec01a680 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 1 Feb 2008 15:42:15 -0500 Subject: Btrfs: Fix delalloc account on state deletion Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5f2fbf2054f..5d2fb527fd9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -396,6 +396,7 @@ static int clear_state_bit(struct extent_io_tree *tree, wake_up(&state->wq); if (delete || state->state == 0) { if (state->tree) { + clear_state_cb(tree, state, state->state); if (tree->last == state) tree->last = NULL; rb_erase(&state->rb_node, &tree->state); -- cgit v1.2.3-70-g09d2 From c2e639f02cff721172569add21ba5d058fd1a81c Mon Sep 17 00:00:00 2001 From: Yan Date: Mon, 4 Feb 2008 08:57:25 -0500 Subject: Btrfs: Fix typo in extent_io.c --- Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 4 ++-- fs/btrfs/inode.c | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5d2fb527fd9..36a943e5180 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1416,7 +1416,7 @@ static int end_bio_extent_writepage(struct bio *bio, * to the correct offsets in the file */ if (state && (state->end + 1 != start || - !state->state & EXTENT_WRITEBACK)) { + !(state->state & EXTENT_WRITEBACK))) { state = NULL; } spin_unlock_irqrestore(&tree->lock, flags); @@ -1539,7 +1539,7 @@ static int end_bio_extent_readpage(struct bio *bio, * to the correct offsets in the file */ if (state && (state->end + 1 != start || - !state->state & EXTENT_WRITEBACK)) { + !(state->state & EXTENT_LOCKED))) 
{ state = NULL; } spin_unlock_irqrestore(&tree->lock, flags); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f13b1dbc061..44fc94fee2c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -364,8 +364,7 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, if (btrfs_test_opt(root, NODATASUM) || btrfs_test_flag(inode, NODATASUM)) return 0; - - if (state->start == start) { + if (state && state->start == start) { private = state->private; ret = 0; } else { -- cgit v1.2.3-70-g09d2 From 961d023213b94861480f5d6533f7adc8332eb527 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 6 Feb 2008 11:01:42 -0500 Subject: Btrfs: Don't cast unsigned long to int in bio submission Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 36a943e5180..b68e57f66a9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1705,7 +1705,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, return 0; } } - nr = min_t(int, max_pages, bio_get_nr_vecs(bdev)); + nr = bio_get_nr_vecs(bdev); bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); if (!bio) { printk("failed to allocate bio nr %d\n", nr); -- cgit v1.2.3-70-g09d2 From a86c12c73d982c545a37a8ecdd66528ab260b770 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 7 Feb 2008 10:50:54 -0500 Subject: Btrfs: Create larger bios for btree blocks Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 10 ++++++---- fs/btrfs/extent_io.c | 12 +++++++++--- fs/btrfs/extent_io.h | 3 ++- 3 files changed, 17 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index dda8ad6c007..6f0825b617d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -197,7 +197,8 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) WARN_ON(1); } eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); - read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1); + read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1, + btree_get_extent); btrfs_clear_buffer_defrag(eb); found_start = btrfs_header_bytenr(eb); if (found_start != start) { @@ -339,7 +340,7 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) if (!buf) return 0; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, - buf, 0, 0); + buf, 0, 0, btree_get_extent); free_extent_buffer(buf); return ret; } @@ -358,7 +359,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return NULL; - read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, 1); + read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, 1, + btree_get_extent); if (buf->flags & EXTENT_CSUM) return buf; @@ -1009,7 +1011,7 @@ int btrfs_read_buffer(struct extent_buffer *buf) struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; struct inode *btree_inode = root->fs_info->btree_inode; return read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, - buf, 0, 1); + buf, 0, 1, btree_get_extent); } static struct extent_io_ops btree_extent_io_ops = { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b68e57f66a9..fcae999ab71 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2859,8 +2859,8 @@ EXPORT_SYMBOL(extent_buffer_uptodate); int read_extent_buffer_pages(struct extent_io_tree *tree, struct 
extent_buffer *eb, - u64 start, - int wait) + u64 start, int wait, + get_extent_t *get_extent) { unsigned long i; unsigned long start_i; @@ -2868,6 +2868,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, int err; int ret = 0; unsigned long num_pages; + struct bio *bio = NULL; + if (eb->flags & EXTENT_UPTODATE) return 0; @@ -2899,7 +2901,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, lock_page(page); } if (!PageUptodate(page)) { - err = page->mapping->a_ops->readpage(NULL, page); + err = __extent_read_full_page(tree, page, + get_extent, &bio); if (err) { ret = err; } @@ -2908,6 +2911,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, } } + if (bio) + submit_one_bio(READ, bio); + if (ret || !wait) { return ret; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 160da3f9d78..151fdada4dc 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -157,7 +157,8 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, gfp_t mask); void free_extent_buffer(struct extent_buffer *eb); int read_extent_buffer_pages(struct extent_io_tree *tree, - struct extent_buffer *eb, u64 start, int wait); + struct extent_buffer *eb, u64 start, int wait, + get_extent_t *get_extent); static inline void extent_buffer_get(struct extent_buffer *eb) { -- cgit v1.2.3-70-g09d2 From 39b5637f6f195852259004bb27b58e2dcf9fb378 Mon Sep 17 00:00:00 2001 From: Yan Date: Fri, 15 Feb 2008 10:40:50 -0500 Subject: Btrfs: Fix "no csum found for inode" issue. A few pieces of code were not properly updated for the extent map changes. This may be the cause of the "no csum found for inode" issue. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 5 +++-- fs/btrfs/file.c | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index fcae999ab71..9262ab37a7c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2417,12 +2417,13 @@ int try_release_extent_mapping(struct extent_map_tree *map, u64 end = start + PAGE_CACHE_SIZE - 1; u64 orig_start = start; int ret = 1; - if ((mask & __GFP_WAIT) && page->mapping->host->i_size > 16 * 1024 * 1024) { + u64 len; while (start <= end) { + len = end - start + 1; spin_lock(&map->lock); - em = lookup_extent_mapping(map, start, end); + em = lookup_extent_mapping(map, start, len); if (!em || IS_ERR(em)) { spin_unlock(&map->lock); break; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f8939608254..b0352b5958a 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -357,10 +357,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) { struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 len = end - start + 1; + if (end == (u64)-1) + len = (u64)-1; while(1) { spin_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, start, end); + em = lookup_extent_mapping(em_tree, start, len); if (!em) { spin_unlock(&em_tree->lock); break; } -- cgit v1.2.3-70-g09d2 From d7fc640e6fed46932f7c74e14f9b58b8637c66cf Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 18 Feb 2008 12:12:38 -0500 Subject: Btrfs: Allocator improvements Reduce CPU time searching for free blocks by optimizing find_first_extent_bit. Fix find_free_extent to make better use of the last_alloc hint. Before, it was often finding blocks just before the hint. 
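Condensed from the find_search_start() changes in the diff below, the shape of the optimization looks like this (an illustrative fragment only; declarations, the cache_miss tracking and the new_group exit are elided). The old loop paid a full rbtree descent per dirty extent; the new one descends once, then walks the states in order while holding the lock:

/* before: one rbtree search per free-space extent examined */
while (1) {
        ret = find_first_extent_bit(free_space_cache, last,
                                    &start, &end, EXTENT_DIRTY);
        if (ret)
                break;
        start = max(last, start);
        last = end + 1;
        /* ... check whether [start, end] fits the allocation ... */
}

/* after: a single search, then an in-order walk under the lock */
spin_lock_irq(&free_space_cache->lock);
state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
while (state) {
        start = max(last, state->start);
        last = state->end + 1;
        /* ... check whether [start, state->end] fits ... */
        do {
                state = extent_state_next(state);
        } while (state && !(state->state & EXTENT_DIRTY));
}
spin_unlock_irq(&free_space_cache->lock);

Together with the tree->last cache added a few patches back, restarting a scan near the previous hit avoids a fresh descent almost entirely.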
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent-tree.c | 34 ++++++++++++++++++++++++---------- fs/btrfs/extent_io.c | 38 ++++++++++++++++++++++++++++++++++---- fs/btrfs/extent_io.h | 11 +++++++++++ 4 files changed, 70 insertions(+), 15 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6f0825b617d..88e21bdbc47 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -951,7 +951,7 @@ void btrfs_throttle(struct btrfs_root *root) void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) { balance_dirty_pages_ratelimited_nr( - root->fs_info->btree_inode->i_mapping, 1); + root->fs_info->btree_inode->i_mapping, 1); } void btrfs_set_buffer_defrag(struct extent_buffer *buf) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index af5d4a08528..239e9d8669c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -188,9 +188,10 @@ static u64 noinline find_search_start(struct btrfs_root *root, { int ret; struct btrfs_block_group_cache *cache = *cache_ret; + struct extent_io_tree *free_space_cache; + struct extent_state *state; u64 last; u64 start = 0; - u64 end = 0; u64 cache_miss = 0; u64 total_fs_bytes; int wrapped = 0; @@ -199,6 +200,8 @@ static u64 noinline find_search_start(struct btrfs_root *root, goto out; } total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); + free_space_cache = &root->fs_info->free_space_cache; + again: ret = cache_block_group(root, cache); if (ret) @@ -206,22 +209,27 @@ again: last = max(search_start, cache->key.objectid); + spin_lock_irq(&free_space_cache->lock); + state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY); while(1) { - ret = find_first_extent_bit(&root->fs_info->free_space_cache, - last, &start, &end, EXTENT_DIRTY); - if (ret) { + if (!state) { if (!cache_miss) cache_miss = last; + spin_unlock_irq(&free_space_cache->lock); goto new_group; } - start = max(last, start); - last = end + 1; + start = max(last, state->start); + last = state->end + 1; if (last - start < num) { if (last == cache->key.objectid + cache->key.offset) cache_miss = start; + do { + state = extent_state_next(state); + } while(state && !(state->state & EXTENT_DIRTY)); continue; } + spin_unlock_irq(&free_space_cache->lock); if (data != BTRFS_BLOCK_GROUP_MIXED && start + num > cache->key.objectid + cache->key.offset) goto new_group; @@ -1420,6 +1428,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_block_group_cache *block_group; int full_scan = 0; int wrapped = 0; + int empty_cluster; u64 cached_start; WARN_ON(num_bytes < root->sectorsize); @@ -1431,11 +1440,15 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, data = BTRFS_BLOCK_GROUP_MIXED; } - if (!data) + if (!data) { last_ptr = &root->fs_info->last_alloc; + empty_cluster = 128 * 1024; + } - if (data && btrfs_test_opt(root, SSD)) + if (data && btrfs_test_opt(root, SSD)) { last_ptr = &root->fs_info->last_data_alloc; + empty_cluster = 2 * 1024 * 1024; + } if (last_ptr) { if (*last_ptr) @@ -1443,8 +1456,9 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, else { hint_byte = hint_byte & ~((u64)BTRFS_BLOCK_GROUP_SIZE - 1); - empty_size += 2 * 1024 * 1024; + empty_size += empty_cluster; } + search_start = max(search_start, hint_byte); } search_end = min(search_end, @@ -1476,7 +1490,7 @@ check_failed: if (last_ptr && *last_ptr && search_start != *last_ptr) { *last_ptr = 0; if (!empty_size) { - empty_size += 
2 * 1024 * 1024; + empty_size += empty_cluster; total_needed += empty_size; } search_start = find_search_start(root, &block_group, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9262ab37a7c..fb6400895ed 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -255,7 +255,7 @@ static int merge_state(struct extent_io_tree *tree, state->start = other->start; other->tree = NULL; if (tree->last == other) - tree->last = NULL; + tree->last = state; rb_erase(&other->rb_node, &tree->state); free_extent_state(other); } @@ -268,7 +268,7 @@ static int merge_state(struct extent_io_tree *tree, other->start = state->start; state->tree = NULL; if (tree->last == state) - tree->last = NULL; + tree->last = other; rb_erase(&state->rb_node, &tree->state); free_extent_state(state); } @@ -397,8 +397,9 @@ static int clear_state_bit(struct extent_io_tree *tree, if (delete || state->state == 0) { if (state->tree) { clear_state_cb(tree, state, state->state); - if (tree->last == state) - tree->last = NULL; + if (tree->last == state) { + tree->last = extent_state_next(state); + } rb_erase(&state->rb_node, &tree->state); state->tree = NULL; free_extent_state(state); @@ -962,6 +963,35 @@ out: } EXPORT_SYMBOL(find_first_extent_bit); +struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, + u64 start, int bits) +{ + struct rb_node *node; + struct extent_state *state; + + /* + * this search will find all the extents that end after + * our range starts. + */ + node = tree_search(tree, start); + if (!node || IS_ERR(node)) { + goto out; + } + + while(1) { + state = rb_entry(node, struct extent_state, rb_node); + if (state->end >= start && (state->state & bits)) { + return state; + } + node = rb_next(node); + if (!node) + break; + } +out: + return NULL; +} +EXPORT_SYMBOL(find_first_extent_bit_state); + u64 find_lock_delalloc_range(struct extent_io_tree *tree, u64 *start, u64 *end, u64 max_bytes) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 151fdada4dc..fcc4bb078c2 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -81,6 +81,15 @@ struct extent_buffer { struct extent_map_tree; +static inline struct extent_state *extent_state_next(struct extent_state *state) +{ + struct rb_node *node; + node = rb_next(&state->rb_node); + if (!node) + return NULL; + return rb_entry(node, struct extent_state, rb_node); +} + typedef struct extent_map *(get_extent_t)(struct inode *inode, struct page *page, size_t page_offset, @@ -122,6 +131,8 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits); +struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, + u64 start, int bits); int extent_invalidatepage(struct extent_io_tree *tree, struct page *page, unsigned long offset); int extent_write_full_page(struct extent_io_tree *tree, struct page *page, -- cgit v1.2.3-70-g09d2 From 065631f6dccea07bfad48d8981369f6d9cfd6e2b Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 20 Feb 2008 12:07:25 -0500 Subject: Btrfs: checksum file data at bio submission time instead of during writepage When we checkum file data during writepage, the checksumming is done one page at a time, making it difficult to do bulk metadata modifications to insert checksums for large ranges of the file at once. This patch changes btrfs to checksum on a per-bio basis instead. 
The bios are checksummed before they are handed off to the block layer, so each bio is contiguous and only has pages from the same inode. Checksumming on a bio basis allows us to insert and modify the file checksum items in large groups. It also allows the checksumming to be done more easily by async worker threads. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 8 +++----- fs/btrfs/extent_io.c | 2 ++ fs/btrfs/extent_io.h | 1 + fs/btrfs/file-item.c | 50 +++++++++++++++++++++++++++++++++++++++----------- fs/btrfs/inode.c | 32 ++++++++++++++++++++++++++++++-- 5 files changed, 75 insertions(+), 18 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 086cb0525cd..92d892f9207 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1142,11 +1142,9 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, u64 bytenr, int mod); -int btrfs_csum_file_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode, - u64 objectid, u64 offset, - char *data, size_t len); +int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode, + struct bio *bio); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index fb6400895ed..e8130c87633 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1705,6 +1705,8 @@ static int submit_one_bio(int rw, struct bio *bio) (unsigned long long)bio->bi_sector); WARN_ON(1); } + if (tree->ops && tree->ops->submit_bio_hook) + tree->ops->submit_bio_hook(rw, bio); submit_bio(rw, bio); if (bio_flagged(bio, BIO_EOPNOTSUPP)) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index fcc4bb078c2..9d665466708 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -28,6 +28,7 @@ struct extent_state; struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); + int (*submit_bio_hook)(int rw, struct bio *bio); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 3ebbc058d08..3f0e71b0e5d 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -16,6 +16,9 @@ * Boston, MA 021110-1307, USA. 
*/ +#include +#include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -131,28 +134,35 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, return ret; } -int btrfs_csum_file_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode, - u64 objectid, u64 offset, - char *data, size_t len) +int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode, + struct bio *bio) { + u64 objectid = inode->i_ino; + u64 offset; int ret; struct btrfs_key file_key; struct btrfs_key found_key; - u64 next_offset = (u64)-1; - int found_next = 0; + u64 next_offset; + int found_next; struct btrfs_path *path; struct btrfs_csum_item *item; + struct btrfs_csum_item *item_end; struct extent_buffer *leaf = NULL; u64 csum_offset; - u32 csum_result = ~(u32)0; + u32 csum_result; u32 nritems; u32 ins_size; + int bio_index = 0; + struct bio_vec *bvec = bio->bi_io_vec; + char *data; path = btrfs_alloc_path(); BUG_ON(!path); - +again: + next_offset = (u64)-1; + found_next = 0; + offset = page_offset(bvec->bv_page) + bvec->bv_offset; file_key.objectid = objectid; file_key.offset = offset; btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY); @@ -259,7 +269,15 @@ csum: item = (struct btrfs_csum_item *)((unsigned char *)item + csum_offset * BTRFS_CRC32_SIZE); found: - csum_result = btrfs_csum_data(root, data, csum_result, len); + item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); + item_end = (struct btrfs_csum_item *)((unsigned char *)item_end + + btrfs_item_size_nr(leaf, path->slots[0])); +next_bvec: + data = kmap_atomic(bvec->bv_page, KM_IRQ0); + csum_result = ~(u32)0; + csum_result = btrfs_csum_data(root, data + bvec->bv_offset, + csum_result, bvec->bv_len); + kunmap_atomic(data, KM_IRQ0); btrfs_csum_final(csum_result, (char *)&csum_result); if (csum_result == 0) { printk("csum result is 0 for inode %lu offset %Lu\n", inode->i_ino, offset); @@ -267,9 +285,19 @@ found: write_extent_buffer(leaf, &csum_result, (unsigned long)item, BTRFS_CRC32_SIZE); + bio_index++; + bvec++; + if (bio_index < bio->bi_vcnt) { + item = (struct btrfs_csum_item *)((char *)item + BTRFS_CRC32_SIZE); + if (item < item_end) + goto next_bvec; + } btrfs_mark_buffer_dirty(path->nodes[0]); + if (bio_index < bio->bi_vcnt) { + btrfs_release_path(root, path); + goto again; + } fail: - btrfs_release_path(root, path); btrfs_free_path(path); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3e79572a5cb..bbb71fad861 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -16,6 +16,7 @@ * Boston, MA 021110-1307, USA. 
*/ +#include #include #include #include @@ -294,6 +295,32 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } +int btrfs_submit_bio_hook(int rw, struct bio *bio) +{ + // struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; + struct bio_vec *bvec = bio->bi_io_vec; + struct inode *inode = bvec->bv_page->mapping->host; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + int ret = 0; + + if (rw != WRITE) + return 0; + + if (btrfs_test_opt(root, NODATASUM) || + btrfs_test_flag(inode, NODATASUM)) + return 0; + + mutex_lock(&root->fs_info->fs_mutex); + trans = btrfs_start_transaction(root, 1); + btrfs_set_trans_block_group(trans, inode); + btrfs_csum_file_blocks(trans, root, inode, bio); + ret = btrfs_end_transaction(trans, root); + BUG_ON(ret); + mutex_unlock(&root->fs_info->fs_mutex); + return ret; +} +#if 0 int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end) { struct inode *inode = page->mapping->host; @@ -318,7 +345,7 @@ int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end) mutex_unlock(&root->fs_info->fs_mutex); return ret; } - +#endif int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) { int ret = 0; @@ -3022,7 +3049,8 @@ static struct file_operations btrfs_dir_file_operations = { static struct extent_io_ops btrfs_extent_io_ops = { .fill_delalloc = run_delalloc_range, - .writepage_io_hook = btrfs_writepage_io_hook, + // .writepage_io_hook = btrfs_writepage_io_hook, + .submit_bio_hook = btrfs_submit_bio_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, .set_bit_hook = btrfs_set_bit_hook, -- cgit v1.2.3-70-g09d2 From 0b86a832a1f38abec695864ec2eaedc9d2383f1b Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 24 Mar 2008 15:01:56 -0400 Subject: Btrfs: Add support for multiple devices per filesystem Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.c | 38 ++- fs/btrfs/ctree.h | 313 ++++++++++++++++-- fs/btrfs/disk-io.c | 97 +++++- fs/btrfs/disk-io.h | 2 + fs/btrfs/extent-tree.c | 429 +++++++------------------ fs/btrfs/extent_io.c | 6 +- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 26 +- fs/btrfs/print-tree.c | 48 +++ fs/btrfs/transaction.c | 50 ++- fs/btrfs/volumes.c | 852 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/volumes.h | 78 +++++ 13 files changed, 1563 insertions(+), 380 deletions(-) create mode 100644 fs/btrfs/volumes.c create mode 100644 fs/btrfs/volumes.h (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 9c61609f371..5ac6f2946c2 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -6,7 +6,7 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ hash.o file-item.o inode-item.o inode-map.o disk-io.o \ transaction.o bit-radix.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ - extent_io.o + extent_io.o volumes.o ifeq ($(CONFIG_FS_POSIX_ACL),y) btrfs-y += acl.o diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index ef8809c1e69..70b6ddfe15a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -70,6 +70,14 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) memset(p, 0, sizeof(*p)); } +static void add_root_to_dirty_list(struct btrfs_root *root) +{ + if (root->track_dirty && list_empty(&root->dirty_list)) { + list_add(&root->dirty_list, + &root->fs_info->dirty_cowonly_roots); + } +} + int btrfs_copy_root(struct btrfs_trans_handle *trans, struct 
btrfs_root *root, struct extent_buffer *buf, @@ -196,6 +204,7 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, root_gen, 0, 0, 1); } free_extent_buffer(buf); + add_root_to_dirty_list(root); } else { root_gen = btrfs_header_generation(parent); btrfs_set_node_blockptr(parent, parent_slot, @@ -241,7 +250,7 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans, return 0; } - search_start = buf->start & ~((u64)BTRFS_BLOCK_GROUP_SIZE - 1); + search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); ret = __btrfs_cow_block(trans, root, buf, parent, parent_slot, cow_ret, search_start, 0); return ret; @@ -724,6 +733,7 @@ static int balance_level(struct btrfs_trans_handle *trans, BUG_ON(ret); root->node = child; + add_root_to_dirty_list(root); path->nodes[level] = NULL; clean_tree_block(trans, root, mid); wait_on_tree_block_writeback(root, mid); @@ -1369,6 +1379,7 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, /* the super has an extra ref to root->node */ free_extent_buffer(root->node); root->node = c; + add_root_to_dirty_list(root); extent_buffer_get(c); path->nodes[level] = c; path->slots[level] = 0; @@ -2777,3 +2788,28 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) } return 0; } + +int btrfs_previous_item(struct btrfs_root *root, + struct btrfs_path *path, u64 min_objectid, + int type) +{ + struct btrfs_key found_key; + struct extent_buffer *leaf; + int ret; + + while(1) { + if (path->slots[0] == 0) { + ret = btrfs_prev_leaf(root, path); + if (ret != 0) + return ret; + } else { + path->slots[0]--; + } + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); + if (found_key.type == type) + return 0; + } + return 1; +} + diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 92d892f9207..1453d995fef 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -40,12 +40,44 @@ extern struct kmem_cache *btrfs_path_cachep; #define BTRFS_MAGIC "_B4RfS_M" #define BTRFS_MAX_LEVEL 8 + +/* holds pointers to all of the tree roots */ #define BTRFS_ROOT_TREE_OBJECTID 1ULL + +/* stores information about which extents are in use, and reference counts */ #define BTRFS_EXTENT_TREE_OBJECTID 2ULL + +/* one per subvolume, storing files and directories */ #define BTRFS_FS_TREE_OBJECTID 3ULL + +/* directory objectid inside the root tree */ #define BTRFS_ROOT_TREE_DIR_OBJECTID 4ULL + + +/* + * chunk tree stores translations from logical -> physical block numbering + * the super block points to the chunk tree + */ +#define BTRFS_CHUNK_TREE_OBJECTID 5ULL + +/* + * stores information about which areas of a given device are in use. + * one per device. The tree of tree roots points to the device tree + */ +#define BTRFS_DEV_TREE_OBJECTID 6ULL + +/* + * All files have objectids higher than this. + */ #define BTRFS_FIRST_FREE_OBJECTID 256ULL + +/* + * the device items go into the chunk tree. 
The key is in the form + * [ 1 BTRFS_DEV_ITEM_KEY device_id ] + */ +#define BTRFS_DEV_ITEMS_OBJECTID 1ULL + /* * we can actually store much bigger names, but lets not confuse the rest * of linux @@ -95,6 +127,81 @@ struct btrfs_key { u64 offset; } __attribute__ ((__packed__)); +struct btrfs_mapping_tree { + struct extent_map_tree map_tree; +}; + +#define BTRFS_DEV_UUID_SIZE 16 +struct btrfs_dev_item { + /* the internal btrfs device id */ + __le64 devid; + + /* size of the device */ + __le64 total_bytes; + + /* bytes used */ + __le64 bytes_used; + + /* optimal io alignment for this device */ + __le32 io_align; + + /* optimal io width for this device */ + __le32 io_width; + + /* minimal io size for this device */ + __le32 sector_size; + + /* the kernel device number */ + __le64 rdev; + + /* type and info about this device */ + __le64 type; + + /* partition number, 0 for whole dev */ + __le32 partition; + + /* length of the name data at the end of the item */ + __le16 name_len; + + /* physical drive uuid (or lvm uuid) */ + u8 uuid[BTRFS_DEV_UUID_SIZE]; + /* name goes here */ +} __attribute__ ((__packed__)); + +struct btrfs_stripe { + __le64 devid; + __le64 offset; +} __attribute__ ((__packed__)); + +struct btrfs_chunk { + __le64 owner; + __le64 stripe_len; + __le64 type; + + /* optimal io alignment for this chunk */ + __le32 io_align; + + /* optimal io width for this chunk */ + __le32 io_width; + + /* minimal io size for this chunk */ + __le32 sector_size; + + /* 2^16 stripes is quite a lot, a second limit is the size of a single + * item in the btree + */ + __le16 num_stripes; + struct btrfs_stripe stripe; + /* additional stripes go here */ +} __attribute__ ((__packed__)); + +static inline unsigned long btrfs_chunk_item_size(int num_stripes) +{ + BUG_ON(num_stripes == 0); + return sizeof(struct btrfs_chunk) + + sizeof(struct btrfs_stripe) * (num_stripes - 1); +} + #define BTRFS_FSID_SIZE 16 /* * every tree block (leaf or node) starts with this header. @@ -119,6 +226,13 @@ struct btrfs_header { sizeof(struct btrfs_item) - \ sizeof(struct btrfs_file_extent_item)) + +/* + * this is a very generous portion of the super block, giving us + * room to translate 14 chunks with 3 stripes each. + */ +#define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048 + /* * the super block basically lists the main trees of the FS * it currently lacks any block count etc etc @@ -131,6 +245,7 @@ struct btrfs_super_block { __le64 magic; __le64 generation; __le64 root; + __le64 chunk_root; __le64 total_bytes; __le64 bytes_used; __le64 root_dir_objectid; @@ -138,7 +253,10 @@ struct btrfs_super_block { __le32 nodesize; __le32 leafsize; __le32 stripesize; + __le32 sys_chunk_array_size; u8 root_level; + u8 chunk_root_level; + u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE]; } __attribute__ ((__packed__)); /* @@ -208,12 +326,22 @@ struct btrfs_extent_ref { __le64 offset; } __attribute__ ((__packed__)); +/* dev extents record free space on individual devices. 
The owner + * field points back to the chunk allocation mapping tree that allocated + * the extent + */ +struct btrfs_dev_extent { + __le64 owner; + __le64 length; +} __attribute__ ((__packed__)); + + struct btrfs_inode_ref { __le16 name_len; /* name goes here */ } __attribute__ ((__packed__)); -struct btrfs_inode_timespec { +struct btrfs_timespec { __le64 sec; __le32 nsec; } __attribute__ ((__packed__)); @@ -231,13 +359,13 @@ struct btrfs_inode_item { __le32 uid; __le32 gid; __le32 mode; - __le32 rdev; + __le64 rdev; __le16 flags; __le16 compat_flags; - struct btrfs_inode_timespec atime; - struct btrfs_inode_timespec ctime; - struct btrfs_inode_timespec mtime; - struct btrfs_inode_timespec otime; + struct btrfs_timespec atime; + struct btrfs_timespec ctime; + struct btrfs_timespec mtime; + struct btrfs_timespec otime; } __attribute__ ((__packed__)); struct btrfs_dir_item { @@ -290,29 +418,34 @@ struct btrfs_csum_item { u8 csum; } __attribute__ ((__packed__)); -/* tag for the radix tree of block groups in ram */ -#define BTRFS_BLOCK_GROUP_SIZE (256 * 1024 * 1024) - +/* different types of block groups (and chunks) */ +#define BTRFS_BLOCK_GROUP_DATA (1 << 0) +#define BTRFS_BLOCK_GROUP_SYSTEM (1 << 1) +#define BTRFS_BLOCK_GROUP_METADATA (1 << 2) -#define BTRFS_BLOCK_GROUP_DATA 1 -#define BTRFS_BLOCK_GROUP_MIXED 2 struct btrfs_block_group_item { __le64 used; - u8 flags; + __le64 chunk_tree; + __le64 chunk_objectid; + __le64 flags; } __attribute__ ((__packed__)); struct btrfs_block_group_cache { struct btrfs_key key; struct btrfs_block_group_item item; - int data; - int cached; u64 pinned; + u64 flags; + int cached; }; + +struct btrfs_device; struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; struct btrfs_root *extent_root; struct btrfs_root *tree_root; + struct btrfs_root *chunk_root; + struct btrfs_root *dev_root; struct radix_tree_root fs_roots_radix; struct extent_io_tree free_space_cache; @@ -321,6 +454,9 @@ struct btrfs_fs_info { struct extent_io_tree pending_del; struct extent_io_tree extent_ins; + /* logical->physical extent mapping */ + struct btrfs_mapping_tree mapping_tree; + u64 generation; u64 last_trans_committed; unsigned long mount_opt; @@ -330,6 +466,7 @@ struct btrfs_fs_info { struct btrfs_transaction *running_transaction; struct btrfs_super_block super_copy; struct extent_buffer *sb_buffer; + struct block_device *__bdev; struct super_block *sb; struct inode *btree_inode; spinlock_t hash_lock; @@ -350,12 +487,17 @@ struct btrfs_fs_info { unsigned long throttles; u64 total_pinned; + struct list_head dirty_cowonly_roots; + + struct list_head devices; + struct list_head *last_device; spinlock_t delalloc_lock; spinlock_t new_trans_lock; u64 delalloc_bytes; u64 last_alloc; u64 last_data_alloc; }; + /* * in ram representation of the tree. extent_root is used for all allocations * and for the extent tree extent_root root. @@ -387,14 +529,19 @@ struct btrfs_root { u64 highest_inode; u64 last_inode_alloc; int ref_cows; + int track_dirty; struct btrfs_key defrag_progress; int defrag_running; int defrag_level; char *name; int in_sysfs; + + /* the dirty list is only used by non-reference counted roots */ + struct list_head dirty_list; }; /* + * inode items have the data typically returned from stat and store other * info about object characteristics. 
There is one for every file and dir in * the FS @@ -439,6 +586,10 @@ struct btrfs_root { */ #define BTRFS_BLOCK_GROUP_ITEM_KEY 50 +#define BTRFS_DEV_EXTENT_KEY 75 +#define BTRFS_DEV_ITEM_KEY 76 +#define BTRFS_CHUNK_ITEM_KEY 77 + /* * string items are for debugging. They just store a short string of * data in the FS @@ -518,13 +669,104 @@ static inline void btrfs_set_##name(type *s, u##bits val) \ s->member = cpu_to_le##bits(val); \ } +BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); +BTRFS_SETGET_FUNCS(device_total_bytes, struct btrfs_dev_item, total_bytes, 64); +BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); +BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); +BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); +BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); +BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); +BTRFS_SETGET_FUNCS(device_rdev, struct btrfs_dev_item, rdev, 64); +BTRFS_SETGET_FUNCS(device_partition, struct btrfs_dev_item, partition, 32); +BTRFS_SETGET_FUNCS(device_name_len, struct btrfs_dev_item, name_len, 16); + +static inline char *btrfs_device_uuid(struct btrfs_dev_item *d) +{ + return (char *)d + offsetof(struct btrfs_dev_item, uuid); +} + +static inline char *btrfs_device_name(struct btrfs_dev_item *d) +{ + return (char *)(d + 1); +} + +BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); +BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64); +BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); +BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); +BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); +BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); +BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); +BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); +BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); + +BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); +BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, + stripe_len, 64); +BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, + io_align, 32); +BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, + io_width, 32); +BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, + sector_size, 32); +BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); +BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, + num_stripes, 16); +BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); +BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); + +static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, + int nr) +{ + unsigned long offset = (unsigned long)c; + offset += offsetof(struct btrfs_chunk, stripe); + offset += nr * sizeof(struct btrfs_stripe); + return (struct btrfs_stripe *)offset; +} + +static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, + struct btrfs_chunk *c, int nr) +{ + return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); +} + +static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb, + struct btrfs_chunk *c, int nr, + u64 val) +{ + btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val); +} + +static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, + struct btrfs_chunk *c, int nr) +{ + return 
btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); +} + +static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb, + struct btrfs_chunk *c, int nr, + u64 val) +{ + btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val); +} + /* struct btrfs_block_group_item */ BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, used, 64); BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, used, 64); -BTRFS_SETGET_FUNCS(disk_block_group_flags, struct btrfs_block_group_item, - flags, 8); +BTRFS_SETGET_STACK_FUNCS(block_group_chunk_tree, struct btrfs_block_group_item, + chunk_tree, 64); +BTRFS_SETGET_FUNCS(disk_block_group_chunk_tree, struct btrfs_block_group_item, + chunk_tree, 64); +BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, + struct btrfs_block_group_item, chunk_objectid, 64); +BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, + struct btrfs_block_group_item, chunk_objectid, 64); +BTRFS_SETGET_FUNCS(disk_block_group_flags, + struct btrfs_block_group_item, flags, 64); +BTRFS_SETGET_STACK_FUNCS(block_group_flags, + struct btrfs_block_group_item, flags, 64); /* struct btrfs_inode_ref */ BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); @@ -538,49 +780,53 @@ BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); -BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 32); +BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 16); BTRFS_SETGET_FUNCS(inode_compat_flags, struct btrfs_inode_item, compat_flags, 16); -static inline struct btrfs_inode_timespec * +static inline struct btrfs_timespec * btrfs_inode_atime(struct btrfs_inode_item *inode_item) { unsigned long ptr = (unsigned long)inode_item; ptr += offsetof(struct btrfs_inode_item, atime); - return (struct btrfs_inode_timespec *)ptr; + return (struct btrfs_timespec *)ptr; } -static inline struct btrfs_inode_timespec * +static inline struct btrfs_timespec * btrfs_inode_mtime(struct btrfs_inode_item *inode_item) { unsigned long ptr = (unsigned long)inode_item; ptr += offsetof(struct btrfs_inode_item, mtime); - return (struct btrfs_inode_timespec *)ptr; + return (struct btrfs_timespec *)ptr; } -static inline struct btrfs_inode_timespec * +static inline struct btrfs_timespec * btrfs_inode_ctime(struct btrfs_inode_item *inode_item) { unsigned long ptr = (unsigned long)inode_item; ptr += offsetof(struct btrfs_inode_item, ctime); - return (struct btrfs_inode_timespec *)ptr; + return (struct btrfs_timespec *)ptr; } -static inline struct btrfs_inode_timespec * +static inline struct btrfs_timespec * btrfs_inode_otime(struct btrfs_inode_item *inode_item) { unsigned long ptr = (unsigned long)inode_item; ptr += offsetof(struct btrfs_inode_item, otime); - return (struct btrfs_inode_timespec *)ptr; + return (struct btrfs_timespec *)ptr; } -BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_inode_timespec, sec, 64); -BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_inode_timespec, nsec, 32); +BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); +BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); /* struct btrfs_extent_item */ BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 32); +/* struct btrfs_dev_extent */ +BTRFS_SETGET_FUNCS(dev_extent_owner,
struct btrfs_dev_extent, owner, 64); +BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); + /* struct btrfs_extent_ref */ BTRFS_SETGET_FUNCS(ref_root, struct btrfs_extent_ref, root, 64); BTRFS_SETGET_FUNCS(ref_generation, struct btrfs_extent_ref, generation, 64); @@ -846,8 +1092,14 @@ BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, generation, 64); BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); +BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, + struct btrfs_super_block, sys_chunk_array_size, 32); BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, root_level, 8); +BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, + chunk_root, 64); +BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, + chunk_root_level, 64); BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, total_bytes, 64); BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, @@ -1009,7 +1261,14 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_free_block_groups(struct btrfs_fs_info *info); int btrfs_read_block_groups(struct btrfs_root *root); +int btrfs_make_block_group(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytes_used, + u64 type, u64 chunk_tree, u64 chunk_objectid, + u64 size); /* ctree.c */ +int btrfs_previous_item(struct btrfs_root *root, + struct btrfs_path *path, u64 min_objectid, + int type); int btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 88e21bdbc47..8e37fa120cc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -28,6 +28,7 @@ #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" +#include "volumes.h" #include "print-tree.h" #if 0 @@ -234,6 +235,19 @@ static int btree_writepage_io_hook(struct page *page, u64 start, u64 end) return 0; } +static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + u64 offset; + offset = bio->bi_sector << 9; + if (offset == BTRFS_SUPER_INFO_OFFSET) { + bio->bi_bdev = root->fs_info->sb->s_bdev; + submit_bio(rw, bio); + return 0; + } + return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio); +} + static int btree_writepage(struct page *page, struct writeback_control *wbc) { struct extent_io_tree *tree; @@ -345,6 +359,23 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) return ret; } +static int close_all_devices(struct btrfs_fs_info *fs_info) +{ + struct list_head *list; + struct list_head *next; + struct btrfs_device *device; + + list = &fs_info->devices; + while(!list_empty(list)) { + next = list->next; + list_del(next); + device = list_entry(next, struct btrfs_device, dev_list); + kfree(device->name); + kfree(device); + } + return 0; +} + struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) { @@ -420,6 +451,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->leafsize = leafsize; root->stripesize = stripesize; root->ref_cows = 0; + root->track_dirty = 0; + root->fs_info = fs_info; root->objectid = objectid; root->last_trans = 0; @@ -427,6 +460,8 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, 
root->last_inode_alloc = 0; root->name = NULL; root->in_sysfs = 0; + + INIT_LIST_HEAD(&root->dirty_list); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); @@ -634,6 +669,10 @@ struct btrfs_root *open_ctree(struct super_block *sb) GFP_NOFS); struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info), GFP_NOFS); + struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root), + GFP_NOFS); + struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root), + GFP_NOFS); int ret; int err = -EIO; struct btrfs_super_block *disk_super; @@ -657,6 +696,12 @@ struct btrfs_root *open_ctree(struct super_block *sb) fs_info->last_trans_committed = 0; fs_info->tree_root = tree_root; fs_info->extent_root = extent_root; + fs_info->chunk_root = chunk_root; + fs_info->dev_root = dev_root; + INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); + INIT_LIST_HEAD(&fs_info->devices); + btrfs_mapping_init(&fs_info->mapping_tree); + fs_info->last_device = &fs_info->devices; fs_info->sb = sb; fs_info->throttles = 0; fs_info->mount_opt = 0; @@ -714,12 +759,12 @@ struct btrfs_root *open_ctree(struct super_block *sb) goto fail_iput; } #endif - __setup_root(512, 512, 512, 512, tree_root, + __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); fs_info->sb_buffer = read_tree_block(tree_root, BTRFS_SUPER_INFO_OFFSET, - 512); + 4096); if (!fs_info->sb_buffer) goto fail_iput; @@ -730,6 +775,7 @@ struct btrfs_root *open_ctree(struct super_block *sb) read_extent_buffer(fs_info->sb_buffer, fs_info->fsid, (unsigned long)btrfs_super_fsid(fs_info->sb_buffer), BTRFS_FSID_SIZE); + disk_super = &fs_info->super_copy; if (!btrfs_super_root(disk_super)) goto fail_sb_buffer; @@ -753,23 +799,47 @@ struct btrfs_root *open_ctree(struct super_block *sb) goto fail_sb_buffer; } + mutex_lock(&fs_info->fs_mutex); + ret = btrfs_read_sys_array(tree_root); + BUG_ON(ret); + + blocksize = btrfs_level_size(tree_root, + btrfs_super_chunk_root_level(disk_super)); + + __setup_root(nodesize, leafsize, sectorsize, stripesize, + chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID); + + chunk_root->node = read_tree_block(chunk_root, + btrfs_super_chunk_root(disk_super), + blocksize); + BUG_ON(!chunk_root->node); + + ret = btrfs_read_chunk_tree(chunk_root); + BUG_ON(ret); + blocksize = btrfs_level_size(tree_root, btrfs_super_root_level(disk_super)); + tree_root->node = read_tree_block(tree_root, btrfs_super_root(disk_super), blocksize); if (!tree_root->node) goto fail_sb_buffer; - mutex_lock(&fs_info->fs_mutex); ret = find_and_setup_root(tree_root, fs_info, BTRFS_EXTENT_TREE_OBJECTID, extent_root); - if (ret) { - mutex_unlock(&fs_info->fs_mutex); + if (ret) goto fail_tree_root; - } + extent_root->track_dirty = 1; + + ret = find_and_setup_root(tree_root, fs_info, + BTRFS_DEV_TREE_OBJECTID, dev_root); + dev_root->track_dirty = 1; + + if (ret) + goto fail_extent_root; btrfs_read_block_groups(extent_root); @@ -777,7 +847,10 @@ struct btrfs_root *open_ctree(struct super_block *sb) mutex_unlock(&fs_info->fs_mutex); return tree_root; +fail_extent_root: + free_extent_buffer(extent_root->node); fail_tree_root: + mutex_unlock(&fs_info->fs_mutex); free_extent_buffer(tree_root->node); fail_sb_buffer: free_extent_buffer(fs_info->sb_buffer); @@ -874,6 +947,12 @@ int close_ctree(struct btrfs_root *root) if (fs_info->tree_root->node) free_extent_buffer(fs_info->tree_root->node); + if (root->fs_info->chunk_root->node) +
free_extent_buffer(root->fs_info->chunk_root->node); + + if (root->fs_info->dev_root->node) + free_extent_buffer(root->fs_info->dev_root->node); + free_extent_buffer(fs_info->sb_buffer); btrfs_free_block_groups(root->fs_info); @@ -901,8 +980,13 @@ int close_ctree(struct btrfs_root *root) kfree(hasher); } #endif + close_all_devices(fs_info); + btrfs_mapping_tree_free(&fs_info->mapping_tree); + kfree(fs_info->extent_root); kfree(fs_info->tree_root); + kfree(fs_info->chunk_root); + kfree(fs_info->dev_root); return 0; } @@ -1016,4 +1100,5 @@ int btrfs_read_buffer(struct extent_buffer *buf) static struct extent_io_ops btree_extent_io_ops = { .writepage_io_hook = btree_writepage_io_hook, + .submit_bio_hook = btree_submit_bio_hook, }; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 828f3a2081b..206cb48638f 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -20,6 +20,7 @@ #define __DISKIO__ #define BTRFS_SUPER_INFO_OFFSET (16 * 1024) +struct btrfs_device; struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); @@ -65,4 +66,5 @@ int btrfs_read_buffer(struct extent_buffer *buf); u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); void btrfs_csum_final(u32 crc, char *result); void btrfs_throttle(struct btrfs_root *root); +int btrfs_open_device(struct btrfs_device *dev); #endif diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ebfd304138c..2cd957d6e8d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -24,37 +24,19 @@ #include "disk-io.h" #include "print-tree.h" #include "transaction.h" +#include "volumes.h" -#define BLOCK_GROUP_DATA EXTENT_WRITEBACK +#define BLOCK_GROUP_DATA EXTENT_WRITEBACK #define BLOCK_GROUP_METADATA EXTENT_UPTODATE +#define BLOCK_GROUP_SYSTEM EXTENT_NEW + #define BLOCK_GROUP_DIRTY EXTENT_DIRTY static int finish_current_insert(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root); static int del_pending_extents(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root); -static int find_previous_extent(struct btrfs_root *root, - struct btrfs_path *path) -{ - struct btrfs_key found_key; - struct extent_buffer *leaf; - int ret; - while(1) { - if (path->slots[0] == 0) { - ret = btrfs_prev_leaf(root, path); - if (ret != 0) - return ret; - } else { - path->slots[0]--; - } - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.type == BTRFS_EXTENT_ITEM_KEY) - return 0; - } - return 1; -} static int cache_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *block_group) @@ -91,7 +73,7 @@ static int cache_block_group(struct btrfs_root *root, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; - ret = find_previous_extent(root, path); + ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY); if (ret < 0) return ret; if (ret == 0) { @@ -168,7 +150,8 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct block_group_cache = &info->block_group_cache; ret = find_first_extent_bit(block_group_cache, bytenr, &start, &end, - BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA); + BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA | + BLOCK_GROUP_SYSTEM); if (ret) { return NULL; } @@ -182,23 +165,38 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct return block_group; return NULL; } -static u64 noinline find_search_start(struct btrfs_root *root, + +static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) +{ + if ((bits & BLOCK_GROUP_DATA) &&
+ (cache->flags & BTRFS_BLOCK_GROUP_DATA)) + return 1; + if ((bits & BLOCK_GROUP_METADATA) && + (cache->flags & BTRFS_BLOCK_GROUP_METADATA)) + return 1; + if ((bits & BLOCK_GROUP_SYSTEM) && + (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) + return 1; + return 0; +} + +static int noinline find_search_start(struct btrfs_root *root, struct btrfs_block_group_cache **cache_ret, - u64 search_start, int num, int data) + u64 *start_ret, int num, int data) { int ret; struct btrfs_block_group_cache *cache = *cache_ret; struct extent_io_tree *free_space_cache; - struct extent_state *state; u64 last; u64 start = 0; + u64 end = 0; u64 cache_miss = 0; u64 total_fs_bytes; + u64 search_start = *start_ret; int wrapped = 0; - if (!cache) { + if (!cache) goto out; - } total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); free_space_cache = &root->fs_info->free_space_cache; @@ -208,6 +206,9 @@ again: goto out; last = max(search_start, cache->key.objectid); + if (!block_group_bits(cache, data)) { + goto new_group; + } while(1) { ret = find_first_extent_bit(&root->fs_info->free_space_cache, @@ -225,22 +226,20 @@ again: cache_miss = start; continue; } - if (data != BTRFS_BLOCK_GROUP_MIXED && - start + num > cache->key.objectid + cache->key.offset) + if (start + num > cache->key.objectid + cache->key.offset) goto new_group; if (start + num > total_fs_bytes) goto new_group; - return start; + *start_ret = start; + return 0; } out: cache = btrfs_lookup_block_group(root->fs_info, search_start); if (!cache) { - printk("Unable to find block group for %Lu\n", - search_start); + printk("Unable to find block group for %Lu\n", search_start); WARN_ON(1); - return search_start; } - return search_start; + return -ENOSPC; new_group: last = cache->key.objectid + cache->key.offset; @@ -251,7 +250,6 @@ no_cache: if (!wrapped) { wrapped = 1; last = search_start; - data = BTRFS_BLOCK_GROUP_MIXED; goto wrapped; } goto out; @@ -299,7 +297,6 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, int ret; int full_search = 0; int factor = 8; - int data_swap = 0; block_group_cache = &info->block_group_cache; total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); @@ -307,19 +304,12 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, if (!owner) factor = 8; - if (data == BTRFS_BLOCK_GROUP_MIXED) { - bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA; - factor = 10; - } else if (data) - bit = BLOCK_GROUP_DATA; - else - bit = BLOCK_GROUP_METADATA; + bit = data; if (search_start && search_start < total_fs_bytes) { struct btrfs_block_group_cache *shint; shint = btrfs_lookup_block_group(info, search_start); - if (shint && (shint->data == data || - shint->data == BTRFS_BLOCK_GROUP_MIXED)) { + if (shint && block_group_bits(shint, data)) { used = btrfs_block_group_used(&shint->item); if (used + shint->pinned < div_factor(shint->key.offset, factor)) { @@ -327,8 +317,8 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, } } } - if (hint && hint->key.objectid < total_fs_bytes && - (hint->data == data || hint->data == BTRFS_BLOCK_GROUP_MIXED)) { + if (hint && block_group_bits(hint, data) && + hint->key.objectid < total_fs_bytes) { used = btrfs_block_group_used(&hint->item); if (used + hint->pinned < div_factor(hint->key.offset, factor)) { @@ -379,12 +369,6 @@ again: full_search = 1; goto again; } - if (!data_swap) { - data_swap = 1; - bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA; - last = search_start; - goto again; - } found: return 
found_group; } @@ -1002,7 +986,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc, - int mark_free, int data) + int mark_free) { struct btrfs_block_group_cache *cache; struct btrfs_fs_info *info = root->fs_info; @@ -1027,41 +1011,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, old_val = btrfs_block_group_used(&cache->item); num_bytes = min(total, cache->key.offset - byte_in_group); if (alloc) { - if (cache->data != data && - old_val < (cache->key.offset >> 1)) { - int bit_to_clear; - int bit_to_set; - cache->data = data; - if (data) { - bit_to_clear = BLOCK_GROUP_METADATA; - bit_to_set = BLOCK_GROUP_DATA; - cache->item.flags &= - ~BTRFS_BLOCK_GROUP_MIXED; - cache->item.flags |= - BTRFS_BLOCK_GROUP_DATA; - } else { - bit_to_clear = BLOCK_GROUP_DATA; - bit_to_set = BLOCK_GROUP_METADATA; - cache->item.flags &= - ~BTRFS_BLOCK_GROUP_MIXED; - cache->item.flags &= - ~BTRFS_BLOCK_GROUP_DATA; - } - clear_extent_bits(&info->block_group_cache, - start, end, bit_to_clear, - GFP_NOFS); - set_extent_bits(&info->block_group_cache, - start, end, bit_to_set, - GFP_NOFS); - } else if (cache->data != data && - cache->data != BTRFS_BLOCK_GROUP_MIXED) { - cache->data = BTRFS_BLOCK_GROUP_MIXED; - set_extent_bits(&info->block_group_cache, - start, end, - BLOCK_GROUP_DATA | - BLOCK_GROUP_METADATA, - GFP_NOFS); - } old_val += num_bytes; } else { old_val -= num_bytes; @@ -1357,7 +1306,7 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root return ret; } ret = update_block_group(trans, root, bytenr, num_bytes, 0, - mark_free, 0); + mark_free); BUG_ON(ret); } btrfs_free_path(path); @@ -1450,38 +1399,21 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, u64 exclude_start, u64 exclude_nr, int data) { - struct btrfs_path *path; - struct btrfs_key key; - u64 hole_size = 0; - u64 aligned; int ret; - int slot = 0; - u64 last_byte = 0; - u64 *last_ptr = NULL; u64 orig_search_start = search_start; - int start_found; - struct extent_buffer *l; struct btrfs_root * root = orig_root->fs_info->extent_root; struct btrfs_fs_info *info = root->fs_info; u64 total_needed = num_bytes; - int level; struct btrfs_block_group_cache *block_group; int full_scan = 0; int wrapped = 0; - int empty_cluster; - u64 cached_start; WARN_ON(num_bytes < root->sectorsize); btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); - level = btrfs_header_level(root->node); - - if (num_bytes >= 32 * 1024 * 1024 && hint_byte) { - data = BTRFS_BLOCK_GROUP_MIXED; - } - if (search_end == (u64)-1) search_end = btrfs_super_total_bytes(&info->super_copy); + if (hint_byte) { block_group = btrfs_lookup_block_group(info, hint_byte); if (!block_group) @@ -1495,7 +1427,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, } total_needed += empty_size; - path = btrfs_alloc_path(); + check_failed: if (!block_group) { block_group = btrfs_lookup_block_group(info, search_start); @@ -1503,135 +1435,49 @@ check_failed: block_group = btrfs_lookup_block_group(info, orig_search_start); } - search_start = find_search_start(root, &block_group, search_start, - total_needed, data); - search_start = stripe_align(root, search_start); - cached_start = search_start; - btrfs_init_path(path); - ins->objectid = search_start; - ins->offset = 0; - start_found = 0; - path->reada = 2; - - ret = btrfs_search_slot(trans, root, ins, path, 0, 0); - if (ret < 0) - goto 
error; - ret = find_previous_extent(root, path); - if (ret < 0) + ret = find_search_start(root, &block_group, &search_start, + total_needed, data); + if (ret) goto error; - l = path->nodes[0]; - btrfs_item_key_to_cpu(l, &key, path->slots[0]); - while (1) { - l = path->nodes[0]; - slot = path->slots[0]; - if (slot >= btrfs_header_nritems(l)) { - ret = btrfs_next_leaf(root, path); - if (ret == 0) - continue; - if (ret < 0) - goto error; - search_start = max(search_start, - block_group->key.objectid); - if (!start_found) { - aligned = stripe_align(root, search_start); - ins->objectid = aligned; - if (aligned >= search_end) { - ret = -ENOSPC; - goto error; - } - ins->offset = search_end - aligned; - start_found = 1; - goto check_pending; - } - ins->objectid = stripe_align(root, - last_byte > search_start ? - last_byte : search_start); - if (search_end <= ins->objectid) { - ret = -ENOSPC; - goto error; - } - ins->offset = search_end - ins->objectid; - BUG_ON(ins->objectid >= search_end); - goto check_pending; - } - btrfs_item_key_to_cpu(l, &key, slot); - - if (key.objectid >= search_start && key.objectid > last_byte && - start_found) { - if (last_byte < search_start) - last_byte = search_start; - aligned = stripe_align(root, last_byte); - hole_size = key.objectid - aligned; - if (key.objectid > aligned && hole_size >= num_bytes) { - ins->objectid = aligned; - ins->offset = hole_size; - goto check_pending; - } - } - if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) { - if (!start_found && btrfs_key_type(&key) == - BTRFS_BLOCK_GROUP_ITEM_KEY) { - last_byte = key.objectid; - start_found = 1; - } - goto next; - } - - - start_found = 1; - last_byte = key.objectid + key.offset; - - if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED && - last_byte >= block_group->key.objectid + - block_group->key.offset) { - btrfs_release_path(root, path); - search_start = block_group->key.objectid + - block_group->key.offset; - goto new_group; - } -next: - path->slots[0]++; - cond_resched(); - } -check_pending: - /* we have to make sure we didn't find an extent that has already - * been allocated by the map tree or the original allocation - */ - btrfs_release_path(root, path); - BUG_ON(ins->objectid < search_start); + search_start = stripe_align(root, search_start); + ins->objectid = search_start; + ins->offset = num_bytes; if (ins->objectid + num_bytes >= search_end) goto enospc; - if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED && - ins->objectid + num_bytes > block_group-> - key.objectid + block_group->key.offset) { + + if (ins->objectid + num_bytes > + block_group->key.objectid + block_group->key.offset) { search_start = block_group->key.objectid + block_group->key.offset; goto new_group; } + if (test_range_bit(&info->extent_ins, ins->objectid, ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) { search_start = ins->objectid + num_bytes; goto new_group; } + if (test_range_bit(&info->pinned_extents, ins->objectid, ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) { search_start = ins->objectid + num_bytes; goto new_group; } + if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start && ins->objectid < exclude_start + exclude_nr)) { search_start = exclude_start + exclude_nr; goto new_group; } - if (!data) { + + if (!(data & BLOCK_GROUP_DATA)) { block_group = btrfs_lookup_block_group(info, ins->objectid); if (block_group) trans->block_group = block_group; } ins->offset = num_bytes; - btrfs_free_path(path); return 0; new_group: @@ -1646,7 +1492,6 @@ enospc: if (!full_scan) total_needed -= empty_size; full_scan = 
1; - data = BTRFS_BLOCK_GROUP_MIXED; } else wrapped = 1; } @@ -1657,8 +1502,6 @@ enospc: goto check_failed; error: - btrfs_release_path(root, path); - btrfs_free_path(path); return ret; } /* @@ -1689,6 +1532,13 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path; struct btrfs_key keys[2]; + if (data) + data = BLOCK_GROUP_DATA; + else if (root == root->fs_info->chunk_root) + data = BLOCK_GROUP_SYSTEM; + else + data = BLOCK_GROUP_METADATA; + new_hint = max(hint_byte, root->fs_info->alloc_start); if (new_hint < btrfs_super_total_bytes(&info->super_copy)) hint_byte = new_hint; @@ -1718,7 +1568,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, set_extent_bits(&root->fs_info->extent_ins, ins->objectid, ins->objectid + ins->offset - 1, EXTENT_LOCKED, GFP_NOFS); - WARN_ON(data == 1); goto update_block; } @@ -1768,8 +1617,7 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, } update_block: - ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0, - data); + ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0); if (ret) { printk("update block group failed for %Lu %Lu\n", ins->objectid, ins->offset); @@ -2457,7 +2305,7 @@ again: if (ret < 0) goto out; - ret = find_previous_extent(root, path); + ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY); if (ret < 0) goto out; if (ret == 0) { @@ -2604,95 +2452,48 @@ out: int btrfs_grow_extent_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 new_size) { - struct btrfs_path *path; - u64 nr = 0; - u64 cur_byte; - u64 old_size; - unsigned long rem; - struct btrfs_block_group_cache *cache; - struct btrfs_block_group_item *item; - struct btrfs_fs_info *info = root->fs_info; - struct extent_io_tree *block_group_cache; - struct btrfs_key key; - struct extent_buffer *leaf; - int ret; - int bit; - - old_size = btrfs_super_total_bytes(&info->super_copy); - block_group_cache = &info->block_group_cache; - - root = info->extent_root; - - cache = btrfs_lookup_block_group(root->fs_info, old_size - 1); - - cur_byte = cache->key.objectid + cache->key.offset; - if (cur_byte >= new_size) - goto set_size; - - key.offset = BTRFS_BLOCK_GROUP_SIZE; - btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY); + btrfs_set_super_total_bytes(&root->fs_info->super_copy, new_size); + return 0; +} - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; +int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *key) +{ + int ret; + struct btrfs_key found_key; + struct extent_buffer *leaf; + int slot; - while(cur_byte < new_size) { - key.objectid = cur_byte; - ret = btrfs_insert_empty_item(trans, root, path, &key, - sizeof(struct btrfs_block_group_item)); - BUG_ON(ret); + ret = btrfs_search_slot(NULL, root, key, path, 0, 0); + if (ret < 0) + return ret; + while(1) { + slot = path->slots[0]; leaf = path->nodes[0]; - item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_block_group_item); - - btrfs_set_disk_block_group_used(leaf, item, 0); - div_long_long_rem(nr, 3, &rem); - if (rem) { - btrfs_set_disk_block_group_flags(leaf, item, - BTRFS_BLOCK_GROUP_DATA); - } else { - btrfs_set_disk_block_group_flags(leaf, item, 0); - } - nr++; - - cache = kmalloc(sizeof(*cache), GFP_NOFS); - BUG_ON(!cache); - - read_extent_buffer(leaf, &cache->item, (unsigned long)item, - sizeof(cache->item)); - - memcpy(&cache->key, &key, sizeof(key)); - cache->cached = 0; - cache->pinned = 0; - cur_byte = key.objectid + key.offset; - btrfs_release_path(root, 
path); - - if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) { - bit = BLOCK_GROUP_DATA; - cache->data = BTRFS_BLOCK_GROUP_DATA; - } else { - bit = BLOCK_GROUP_METADATA; - cache->data = 0; + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto error; + break; } + btrfs_item_key_to_cpu(leaf, &found_key, slot); - /* use EXTENT_LOCKED to prevent merging */ - set_extent_bits(block_group_cache, key.objectid, - key.objectid + key.offset - 1, - bit | EXTENT_LOCKED, GFP_NOFS); - set_state_private(block_group_cache, key.objectid, - (unsigned long)cache); + if (found_key.objectid >= key->objectid && + found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) + return 0; + path->slots[0]++; } - btrfs_free_path(path); -set_size: - btrfs_set_super_total_bytes(&info->super_copy, new_size); - return 0; + ret = -ENOENT; +error: + return ret; } int btrfs_read_block_groups(struct btrfs_root *root) { struct btrfs_path *path; int ret; - int err = 0; int bit; struct btrfs_block_group_cache *cache; struct btrfs_fs_info *info = root->fs_info; @@ -2702,28 +2503,28 @@ int btrfs_read_block_groups(struct btrfs_root *root) struct extent_buffer *leaf; block_group_cache = &info->block_group_cache; - root = info->extent_root; key.objectid = 0; - key.offset = BTRFS_BLOCK_GROUP_SIZE; + key.offset = 0; btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY); - path = btrfs_alloc_path(); if (!path) return -ENOMEM; while(1) { - ret = btrfs_search_slot(NULL, info->extent_root, - &key, path, 0, 0); - if (ret != 0) { - err = ret; - break; + ret = find_first_block_group(root, path, &key); + if (ret > 0) { + ret = 0; + goto error; } + if (ret != 0) + goto error; + leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); cache = kmalloc(sizeof(*cache), GFP_NOFS); if (!cache) { - err = -1; + ret = -ENOMEM; break; } @@ -2733,18 +2534,17 @@ int btrfs_read_block_groups(struct btrfs_root *root) memcpy(&cache->key, &found_key, sizeof(found_key)); cache->cached = 0; cache->pinned = 0; + key.objectid = found_key.objectid + found_key.offset; btrfs_release_path(root, path); - - if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) { - bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA; - cache->data = BTRFS_BLOCK_GROUP_MIXED; - } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) { + cache->flags = btrfs_block_group_flags(&cache->item); + bit = 0; + if (cache->flags & BTRFS_BLOCK_GROUP_DATA) { bit = BLOCK_GROUP_DATA; - cache->data = BTRFS_BLOCK_GROUP_DATA; - } else { + } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { + bit = BLOCK_GROUP_SYSTEM; + } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) { bit = BLOCK_GROUP_METADATA; - cache->data = 0; } /* use EXTENT_LOCKED to prevent merging */ @@ -2758,7 +2558,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) btrfs_super_total_bytes(&info->super_copy)) break; } - + ret = 0; +error: btrfs_free_path(path); - return 0; + return ret; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e8130c87633..7e3a1ebde9f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1706,9 +1706,9 @@ static int submit_one_bio(int rw, struct bio *bio) WARN_ON(1); } if (tree->ops && tree->ops->submit_bio_hook) - tree->ops->submit_bio_hook(rw, bio); - - submit_bio(rw, bio); + tree->ops->submit_bio_hook(page->mapping->host, rw, bio); + else + submit_bio(rw, bio); if (bio_flagged(bio, BIO_EOPNOTSUPP)) ret = -EOPNOTSUPP; bio_put(bio); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 
9d665466708..0dca89328f9 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -28,7 +28,7 @@ struct extent_state; struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); - int (*submit_bio_hook)(int rw, struct bio *bio); + int (*submit_bio_hook)(struct inode *inode, int rw, struct bio *bio); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 315dd550d56..17063cd2cb7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -40,6 +40,7 @@ #include "btrfs_inode.h" #include "ioctl.h" #include "print-tree.h" +#include "volumes.h" struct btrfs_iget_args { u64 ino; @@ -295,20 +296,20 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } -int btrfs_submit_bio_hook(int rw, struct bio *bio) +int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) { - struct bio_vec *bvec = bio->bi_io_vec; - struct inode *inode = bvec->bv_page->mapping->host; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; int ret = 0; - if (rw != WRITE) - return 0; + if (rw != WRITE) { + goto mapit; + } if (btrfs_test_opt(root, NODATASUM) || - btrfs_test_flag(inode, NODATASUM)) - return 0; + btrfs_test_flag(inode, NODATASUM)) { + goto mapit; + } mutex_lock(&root->fs_info->fs_mutex); trans = btrfs_start_transaction(root, 1); @@ -317,7 +318,8 @@ int btrfs_submit_bio_hook(int rw, struct bio *bio) ret = btrfs_end_transaction(trans, root); BUG_ON(ret); mutex_unlock(&root->fs_info->fs_mutex); - return ret; +mapit: + return btrfs_map_bio(root, rw, bio); } int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) @@ -406,7 +408,7 @@ void btrfs_read_locked_inode(struct inode *inode) struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_inode_item *inode_item; - struct btrfs_inode_timespec *tspec; + struct btrfs_timespec *tspec; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key location; u64 alloc_group_block; @@ -455,7 +457,8 @@ void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); if (!BTRFS_I(inode)->block_group) { BTRFS_I(inode)->block_group = btrfs_find_block_group(root, - NULL, 0, 0, 0); + NULL, 0, + BTRFS_BLOCK_GROUP_METADATA, 0); } btrfs_free_path(path); inode_item = NULL; @@ -1550,7 +1553,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, owner = 0; else owner = 1; - group = btrfs_find_block_group(root, group, 0, 0, owner); + group = btrfs_find_block_group(root, group, 0, + BTRFS_BLOCK_GROUP_METADATA, owner); BTRFS_I(inode)->block_group = group; BTRFS_I(inode)->flags = 0; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index da0b4dcf361..9c1335dad40 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -20,6 +20,40 @@ #include "disk-io.h" #include "print-tree.h" +static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) +{ + int num_stripes = btrfs_chunk_num_stripes(eb, chunk); + int i; + printk("\t\tchunk owner %llu type %llu num_stripes %d\n", + (unsigned long long)btrfs_chunk_owner(eb, chunk), + (unsigned long long)btrfs_chunk_type(eb, chunk), + num_stripes); + for (i = 0 ; i < num_stripes ; i++) { + printk("\t\t\tstripe %d devid %llu offset %llu\n", i, + (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i), + (unsigned long 
long)btrfs_stripe_offset_nr(eb, chunk, i)); + } +} +static void print_dev_item(struct extent_buffer *eb, + struct btrfs_dev_item *dev_item) +{ + char *name; + int name_len; + + name_len = btrfs_device_name_len(eb, dev_item); + name = kmalloc(name_len, GFP_NOFS); + if (name) { + read_extent_buffer(eb, name, + (unsigned long)btrfs_device_name(dev_item), + name_len); + } + printk("\t\tdev item name %.*s devid %llu " + "total_bytes %llu bytes used %Lu\n", name_len, name, + (unsigned long long)btrfs_device_id(eb, dev_item), + (unsigned long long)btrfs_device_total_bytes(eb, dev_item), + (unsigned long long)btrfs_device_bytes_used(eb, dev_item)); + kfree(name); +} void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) { int i; @@ -34,6 +68,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) struct btrfs_key key; struct btrfs_key found_key; struct btrfs_extent_ref *ref; + struct btrfs_dev_extent *dev_extent; u32 type; printk("leaf %llu total ptrs %d free space %d\n", @@ -106,6 +141,19 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) printk("\t\tblock group used %llu\n", (unsigned long long)btrfs_disk_block_group_used(l, bi)); break; + case BTRFS_CHUNK_ITEM_KEY: + print_chunk(l, btrfs_item_ptr(l, i, struct btrfs_chunk)); + break; + case BTRFS_DEV_ITEM_KEY: + print_dev_item(l, btrfs_item_ptr(l, i, + struct btrfs_dev_item)); + break; + case BTRFS_DEV_EXTENT_KEY: + dev_extent = btrfs_item_ptr(l, i, + struct btrfs_dev_extent); + printk("\t\tdev extent owner %llu length %llu\n", + (unsigned long long)btrfs_dev_extent_owner(l, dev_extent), + (unsigned long long)btrfs_dev_extent_length(l, dev_extent)); }; } } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index e9a0983897f..5e9f69244f9 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -198,29 +198,42 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, return werr; } -int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, - struct btrfs_root *root) +static int update_cowonly_root(struct btrfs_trans_handle *trans, + struct btrfs_root *root) { int ret; - u64 old_extent_block; - struct btrfs_fs_info *fs_info = root->fs_info; - struct btrfs_root *tree_root = fs_info->tree_root; - struct btrfs_root *extent_root = fs_info->extent_root; + u64 old_root_bytenr; + struct btrfs_root *tree_root = root->fs_info->tree_root; - btrfs_write_dirty_block_groups(trans, extent_root); + btrfs_write_dirty_block_groups(trans, root); while(1) { - old_extent_block = btrfs_root_bytenr(&extent_root->root_item); - if (old_extent_block == extent_root->node->start) + old_root_bytenr = btrfs_root_bytenr(&root->root_item); + if (old_root_bytenr == root->node->start) break; - btrfs_set_root_bytenr(&extent_root->root_item, - extent_root->node->start); - btrfs_set_root_level(&extent_root->root_item, - btrfs_header_level(extent_root->node)); + btrfs_set_root_bytenr(&root->root_item, + root->node->start); + btrfs_set_root_level(&root->root_item, + btrfs_header_level(root->node)); ret = btrfs_update_root(trans, tree_root, - &extent_root->root_key, - &extent_root->root_item); + &root->root_key, + &root->root_item); BUG_ON(ret); - btrfs_write_dirty_block_groups(trans, extent_root); + btrfs_write_dirty_block_groups(trans, root); + } + return 0; +} + +int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct list_head *next; + + while(!list_empty(&fs_info->dirty_cowonly_roots)) { 
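+ /* each pass of this loop pops one root off the dirty list; + * update_cowonly_root() above keeps rewriting the root item in the + * tree root until the root's bytenr stops moving, since every + * update can COW the root node again */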
+ next = fs_info->dirty_cowonly_roots.next; + list_del_init(next); + root = list_entry(next, struct btrfs_root, dirty_list); + update_cowonly_root(trans, root); } return 0; } @@ -616,6 +629,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, unsigned long timeout = 1; struct btrfs_transaction *cur_trans; struct btrfs_transaction *prev_trans = NULL; + struct btrfs_root *chunk_root = root->fs_info->chunk_root; struct list_head dirty_fs_roots; struct extent_io_tree *pinned_copy; DEFINE_WAIT(wait); @@ -714,6 +728,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_set_super_root_level(&root->fs_info->super_copy, btrfs_header_level(root->fs_info->tree_root->node)); + btrfs_set_super_chunk_root(&root->fs_info->super_copy, + chunk_root->node->start); + btrfs_set_super_chunk_root_level(&root->fs_info->super_copy, + btrfs_header_level(chunk_root->node)); write_extent_buffer(root->fs_info->sb_buffer, &root->fs_info->super_copy, 0, sizeof(root->fs_info->super_copy)); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c new file mode 100644 index 00000000000..90a8d45dc6d --- /dev/null +++ b/fs/btrfs/volumes.c @@ -0,0 +1,852 @@ +/* + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ +#include +#include +#include "ctree.h" +#include "extent_map.h" +#include "disk-io.h" +#include "transaction.h" +#include "print-tree.h" +#include "volumes.h" + +struct map_lookup { + struct btrfs_device *dev; + u64 physical; +}; + +/* + * this uses a pretty simple search, the expectation is that it is + * called very infrequently and that a given device has a small number + * of extents + */ +static int find_free_dev_extent(struct btrfs_trans_handle *trans, + struct btrfs_device *device, + struct btrfs_path *path, + u64 num_bytes, u64 *start) +{ + struct btrfs_key key; + struct btrfs_root *root = device->dev_root; + struct btrfs_dev_extent *dev_extent = NULL; + u64 hole_size = 0; + u64 last_byte = 0; + u64 search_start = 0; + u64 search_end = device->total_bytes; + int ret; + int slot = 0; + int start_found; + struct extent_buffer *l; + + start_found = 0; + path->reada = 2; + + /* FIXME use last free of some kind */ + + key.objectid = device->devid; + key.offset = search_start; + key.type = BTRFS_DEV_EXTENT_KEY; + ret = btrfs_search_slot(trans, root, &key, path, 0, 0); + if (ret < 0) + goto error; + ret = btrfs_previous_item(root, path, 0, key.type); + if (ret < 0) + goto error; + l = path->nodes[0]; + btrfs_item_key_to_cpu(l, &key, path->slots[0]); + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto error; +no_more_items: + if (!start_found) { + if (search_start >= search_end) { + ret = -ENOSPC; + goto error; + } + *start = search_start; + start_found = 1; + goto check_pending; + } + *start = last_byte > search_start ? + last_byte : search_start; + if (search_end <= *start) { + ret = -ENOSPC; + goto error; + } + goto check_pending; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid < device->devid) + goto next; + + if (key.objectid > device->devid) + goto no_more_items; + + if (key.offset >= search_start && key.offset > last_byte && + start_found) { + if (last_byte < search_start) + last_byte = search_start; + hole_size = key.offset - last_byte; + if (key.offset > last_byte && + hole_size >= num_bytes) { + *start = last_byte; + goto check_pending; + } + } + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) { + goto next; + } + + start_found = 1; + dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); + last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent); +next: + path->slots[0]++; + cond_resched(); + } +check_pending: + /* we have to make sure we didn't find an extent that has already + * been allocated by the map tree or the original allocation + */ + btrfs_release_path(root, path); + BUG_ON(*start < search_start); + + if (*start + num_bytes >= search_end) { + ret = -ENOSPC; + goto error; + } + /* check for pending inserts here */ + return 0; + +error: + btrfs_release_path(root, path); + return ret; +} + +int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, + struct btrfs_device *device, + u64 owner, u64 num_bytes, u64 *start) +{ + int ret; + struct btrfs_path *path; + struct btrfs_root *root = device->dev_root; + struct btrfs_dev_extent *extent; + struct extent_buffer *leaf; + struct btrfs_key key; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = find_free_dev_extent(trans, device, path, num_bytes, start); + if (ret) + goto err; + + key.objectid = device->devid; + key.offset = *start; + key.type = BTRFS_DEV_EXTENT_KEY; + ret = btrfs_insert_empty_item(trans, root, path, &key, + 
sizeof(*extent)); + BUG_ON(ret); + + leaf = path->nodes[0]; + extent = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_dev_extent); + btrfs_set_dev_extent_owner(leaf, extent, owner); + btrfs_set_dev_extent_length(leaf, extent, num_bytes); + btrfs_mark_buffer_dirty(leaf); +err: + btrfs_free_path(path); + return ret; +} + +static int find_next_chunk(struct btrfs_root *root, u64 *objectid) +{ + struct btrfs_path *path; + int ret; + struct btrfs_key key; + struct btrfs_key found_key; + + path = btrfs_alloc_path(); + BUG_ON(!path); + + key.objectid = (u64)-1; + key.offset = (u64)-1; + key.type = BTRFS_CHUNK_ITEM_KEY; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto error; + + BUG_ON(ret == 0); + + ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY); + if (ret) { + *objectid = 0; + } else { + btrfs_item_key_to_cpu(path->nodes[0], &found_key, + path->slots[0]); + *objectid = found_key.objectid + found_key.offset; + } + ret = 0; +error: + btrfs_free_path(path); + return ret; +} + +static struct btrfs_device *next_device(struct list_head *head, + struct list_head *last) +{ + struct list_head *next = last->next; + struct btrfs_device *dev; + + if (list_empty(head)) + return NULL; + + if (next == head) + next = next->next; + + dev = list_entry(next, struct btrfs_device, dev_list); + return dev; +} + +static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path, + u64 *objectid) +{ + int ret; + struct btrfs_key key; + struct btrfs_key found_key; + + key.objectid = BTRFS_DEV_ITEMS_OBJECTID; + key.type = BTRFS_DEV_ITEM_KEY; + key.offset = (u64)-1; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto error; + + BUG_ON(ret == 0); + + ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID, + BTRFS_DEV_ITEM_KEY); + if (ret) { + *objectid = 1; + } else { + btrfs_item_key_to_cpu(path->nodes[0], &found_key, + path->slots[0]); + *objectid = found_key.offset + 1; + } + ret = 0; +error: + btrfs_release_path(root, path); + return ret; +} + +/* + * the device information is stored in the chunk root + * the btrfs_device struct should be fully filled in + */ +int btrfs_add_device(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_device *device) +{ + int ret; + struct btrfs_path *path; + struct btrfs_dev_item *dev_item; + struct extent_buffer *leaf; + struct btrfs_key key; + unsigned long ptr; + u64 free_devid; + + root = root->fs_info->chunk_root; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = find_next_devid(root, path, &free_devid); + if (ret) + goto out; + + key.objectid = BTRFS_DEV_ITEMS_OBJECTID; + key.type = BTRFS_DEV_ITEM_KEY; + key.offset = free_devid; + + ret = btrfs_insert_empty_item(trans, root, path, &key, + sizeof(*dev_item) + device->name_len); + if (ret) + goto out; + + leaf = path->nodes[0]; + dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); + + btrfs_set_device_id(leaf, dev_item, device->devid); + btrfs_set_device_type(leaf, dev_item, device->type); + btrfs_set_device_io_align(leaf, dev_item, device->io_align); + btrfs_set_device_io_width(leaf, dev_item, device->io_width); + btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); + btrfs_set_device_rdev(leaf, dev_item, device->rdev); + btrfs_set_device_partition(leaf, dev_item, device->partition); + btrfs_set_device_name_len(leaf, dev_item, device->name_len); + btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); + btrfs_set_device_bytes_used(leaf, 
dev_item, device->bytes_used); + + ptr = (unsigned long)btrfs_device_name(dev_item); + write_extent_buffer(leaf, device->name, ptr, device->name_len); + + ptr = (unsigned long)btrfs_device_uuid(dev_item); + write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE); + btrfs_mark_buffer_dirty(leaf); + ret = 0; + +out: + btrfs_free_path(path); + return ret; +} +int btrfs_update_device(struct btrfs_trans_handle *trans, + struct btrfs_device *device) +{ + int ret; + struct btrfs_path *path; + struct btrfs_root *root; + struct btrfs_dev_item *dev_item; + struct extent_buffer *leaf; + struct btrfs_key key; + + root = device->dev_root->fs_info->chunk_root; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + key.objectid = BTRFS_DEV_ITEMS_OBJECTID; + key.type = BTRFS_DEV_ITEM_KEY; + key.offset = device->devid; + + ret = btrfs_search_slot(trans, root, &key, path, 0, 1); + if (ret < 0) + goto out; + + if (ret > 0) { + ret = -ENOENT; + goto out; + } + + leaf = path->nodes[0]; + dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item); + + btrfs_set_device_id(leaf, dev_item, device->devid); + btrfs_set_device_type(leaf, dev_item, device->type); + btrfs_set_device_io_align(leaf, dev_item, device->io_align); + btrfs_set_device_io_width(leaf, dev_item, device->io_width); + btrfs_set_device_sector_size(leaf, dev_item, device->sector_size); + btrfs_set_device_rdev(leaf, dev_item, device->rdev); + btrfs_set_device_partition(leaf, dev_item, device->partition); + btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes); + btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used); + btrfs_mark_buffer_dirty(leaf); + +out: + btrfs_free_path(path); + return ret; +} + +int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_key *key, + struct btrfs_chunk *chunk, int item_size) +{ + struct btrfs_super_block *super_copy = &root->fs_info->super_copy; + struct btrfs_disk_key disk_key; + u32 array_size; + u8 *ptr; + + array_size = btrfs_super_sys_array_size(super_copy); + if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) + return -EFBIG; + + ptr = super_copy->sys_chunk_array + array_size; + btrfs_cpu_key_to_disk(&disk_key, key); + memcpy(ptr, &disk_key, sizeof(disk_key)); + ptr += sizeof(disk_key); + memcpy(ptr, chunk, item_size); + item_size += sizeof(disk_key); + btrfs_set_super_sys_array_size(super_copy, array_size + item_size); + return 0; +} + +int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, u64 *start, + u64 *num_bytes, u32 type) +{ + u64 dev_offset; + struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root; + struct btrfs_stripe *stripes; + struct btrfs_device *device = NULL; + struct btrfs_chunk *chunk; + struct list_head *dev_list = &extent_root->fs_info->devices; + struct list_head *last_dev = extent_root->fs_info->last_device; + struct extent_map_tree *em_tree; + struct map_lookup *map; + struct extent_map *em; + u64 physical; + u64 calc_size = 1024 * 1024 * 1024; + int num_stripes; + int ret; + int index = 0; + struct btrfs_key key; + + + ret = find_next_chunk(chunk_root, &key.objectid); + if (ret) + return ret; + + num_stripes = 1; + chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS); + if (!chunk) + return -ENOMEM; + + stripes = &chunk->stripe; + + *num_bytes = calc_size; + while(index < num_stripes) { + device = next_device(dev_list, last_dev); + BUG_ON(!device); + last_dev = &device->dev_list; + extent_root->fs_info->last_device = 
last_dev; + + ret = btrfs_alloc_dev_extent(trans, device, + key.objectid, + calc_size, &dev_offset); + BUG_ON(ret); + + device->bytes_used += calc_size; + ret = btrfs_update_device(trans, device); + BUG_ON(ret); + + btrfs_set_stack_stripe_devid(stripes + index, device->devid); + btrfs_set_stack_stripe_offset(stripes + index, dev_offset); + physical = dev_offset; + index++; + } + + /* key.objectid was set above */ + key.offset = *num_bytes; + key.type = BTRFS_CHUNK_ITEM_KEY; + btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid); + btrfs_set_stack_chunk_stripe_len(chunk, 64 * 1024); + btrfs_set_stack_chunk_type(chunk, type); + btrfs_set_stack_chunk_num_stripes(chunk, num_stripes); + btrfs_set_stack_chunk_io_align(chunk, extent_root->sectorsize); + btrfs_set_stack_chunk_io_width(chunk, extent_root->sectorsize); + btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize); + + ret = btrfs_insert_item(trans, chunk_root, &key, chunk, + btrfs_chunk_item_size(num_stripes)); + BUG_ON(ret); + *start = key.objectid; + + em = alloc_extent_map(GFP_NOFS); + if (!em) + return -ENOMEM; + map = kmalloc(sizeof(*map), GFP_NOFS); + if (!map) { + free_extent_map(em); + return -ENOMEM; + } + + em->bdev = (struct block_device *)map; + em->start = key.objectid; + em->len = key.offset; + em->block_start = 0; + + map->physical = physical; + map->dev = device; + + if (!map->dev) { + kfree(map); + free_extent_map(em); + return -EIO; + } + kfree(chunk); + + em_tree = &extent_root->fs_info->mapping_tree.map_tree; + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + BUG_ON(ret); + spin_unlock(&em_tree->lock); + free_extent_map(em); + return ret; +} + +void btrfs_mapping_init(struct btrfs_mapping_tree *tree) +{ + extent_map_tree_init(&tree->map_tree, GFP_NOFS); +} + +void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) +{ + struct extent_map *em; + + while(1) { + spin_lock(&tree->map_tree.lock); + em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1); + if (em) + remove_extent_mapping(&tree->map_tree, em); + spin_unlock(&tree->map_tree.lock); + if (!em) + break; + kfree(em->bdev); + /* once for us */ + free_extent_map(em); + /* once for the tree */ + free_extent_map(em); + } +} + +int btrfs_map_block(struct btrfs_mapping_tree *map_tree, + u64 logical, u64 *phys, u64 *length, + struct btrfs_device **dev) +{ + struct extent_map *em; + struct map_lookup *map; + struct extent_map_tree *em_tree = &map_tree->map_tree; + u64 offset; + + + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, logical, *length); + BUG_ON(!em); + + BUG_ON(em->start > logical || em->start + em->len < logical); + map = (struct map_lookup *)em->bdev; + offset = logical - em->start; + *phys = map->physical + offset; + *length = em->len - offset; + *dev = map->dev; + free_extent_map(em); + spin_unlock(&em_tree->lock); + return 0; +} + +int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio) +{ + struct btrfs_mapping_tree *map_tree; + struct btrfs_device *dev; + u64 logical = bio->bi_sector << 9; + u64 physical; + u64 length = 0; + u64 map_length; + struct bio_vec *bvec; + int i; + int ret; + + bio_for_each_segment(bvec, bio, i) { + length += bvec->bv_len; + } + map_tree = &root->fs_info->mapping_tree; + map_length = length; + ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev); + BUG_ON(map_length < length); + bio->bi_sector = physical >> 9; + bio->bi_bdev = dev->bdev; + submit_bio(rw, bio); + return 0; +} + +struct btrfs_device *btrfs_find_device(struct 
btrfs_root *root, u64 devid) +{ + struct btrfs_device *dev; + struct list_head *cur = root->fs_info->devices.next; + struct list_head *head = &root->fs_info->devices; + + while(cur != head) { + dev = list_entry(cur, struct btrfs_device, dev_list); + if (dev->devid == devid) + return dev; + cur = cur->next; + } + return NULL; +} + +static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, + struct extent_buffer *leaf, + struct btrfs_chunk *chunk) +{ + struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; + struct map_lookup *map; + struct extent_map *em; + u64 logical; + u64 length; + u64 devid; + int ret; + + logical = key->objectid; + length = key->offset; + spin_lock(&map_tree->map_tree.lock); + em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); + + /* already mapped? */ + if (em && em->start <= logical && em->start + em->len > logical) { + free_extent_map(em); + spin_unlock(&map_tree->map_tree.lock); + return 0; + } else if (em) { + free_extent_map(em); + } + spin_unlock(&map_tree->map_tree.lock); + + em = alloc_extent_map(GFP_NOFS); + if (!em) + return -ENOMEM; + map = kmalloc(sizeof(*map), GFP_NOFS); + if (!map) { + free_extent_map(em); + return -ENOMEM; + } + + em->bdev = (struct block_device *)map; + em->start = logical; + em->len = length; + em->block_start = 0; + + map->physical = btrfs_stripe_offset_nr(leaf, chunk, 0); + devid = btrfs_stripe_devid_nr(leaf, chunk, 0); + map->dev = btrfs_find_device(root, devid); + if (!map->dev) { + kfree(map); + free_extent_map(em); + return -EIO; + } + + spin_lock(&map_tree->map_tree.lock); + ret = add_extent_mapping(&map_tree->map_tree, em); + BUG_ON(ret); + spin_unlock(&map_tree->map_tree.lock); + free_extent_map(em); + + return 0; +} + +static int fill_device_from_item(struct extent_buffer *leaf, + struct btrfs_dev_item *dev_item, + struct btrfs_device *device) +{ + unsigned long ptr; + char *name; + + device->devid = btrfs_device_id(leaf, dev_item); + device->total_bytes = btrfs_device_total_bytes(leaf, dev_item); + device->bytes_used = btrfs_device_bytes_used(leaf, dev_item); + device->type = btrfs_device_type(leaf, dev_item); + device->io_align = btrfs_device_io_align(leaf, dev_item); + device->io_width = btrfs_device_io_width(leaf, dev_item); + device->sector_size = btrfs_device_sector_size(leaf, dev_item); + device->rdev = btrfs_device_rdev(leaf, dev_item); + device->partition = btrfs_device_partition(leaf, dev_item); + device->name_len = btrfs_device_name_len(leaf, dev_item); + + ptr = (unsigned long)btrfs_device_uuid(dev_item); + read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE); + + name = kmalloc(device->name_len + 1, GFP_NOFS); + if (!name) + return -ENOMEM; + device->name = name; + ptr = (unsigned long)btrfs_device_name(dev_item); + read_extent_buffer(leaf, name, ptr, device->name_len); + name[device->name_len] = '\0'; + return 0; +} + +static int read_one_dev(struct btrfs_root *root, struct btrfs_key *key, + struct extent_buffer *leaf, + struct btrfs_dev_item *dev_item) +{ + struct btrfs_device *device; + u64 devid; + int ret; + + devid = btrfs_device_id(leaf, dev_item); + if (btrfs_find_device(root, devid)) + return 0; + + device = kmalloc(sizeof(*device), GFP_NOFS); + if (!device) + return -ENOMEM; + + fill_device_from_item(leaf, dev_item, device); + device->dev_root = root->fs_info->dev_root; + device->bdev = root->fs_info->sb->s_bdev; + list_add(&device->dev_list, &root->fs_info->devices); +
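/* record the key this device item was found under */ +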
memcpy(&device->dev_key, key, sizeof(*key)); + ret = 0; +#if 0 + ret = btrfs_open_device(device); + if (ret) { + kfree(device); + } +#endif + return ret; +} + +int btrfs_read_sys_array(struct btrfs_root *root) +{ + struct btrfs_super_block *super_copy = &root->fs_info->super_copy; + struct extent_buffer *sb = root->fs_info->sb_buffer; + struct btrfs_disk_key *disk_key; + struct btrfs_dev_item *dev_item; + struct btrfs_chunk *chunk; + struct btrfs_key key; + u32 num_stripes; + u32 array_size; + u32 len = 0; + u8 *ptr; + unsigned long sb_ptr; + u32 cur; + int ret; + int dev_only = 1; + + array_size = btrfs_super_sys_array_size(super_copy); + + /* + * we do this loop twice, once for the device items and + * once for all of the chunks. This way there are device + * structs filled in for every chunk + */ +again: + ptr = super_copy->sys_chunk_array; + sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); + cur = 0; + + while (cur < array_size) { + disk_key = (struct btrfs_disk_key *)ptr; + btrfs_disk_key_to_cpu(&key, disk_key); + + len = sizeof(*disk_key); + ptr += len; + sb_ptr += len; + cur += len; + + if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID && + key.type == BTRFS_DEV_ITEM_KEY) { + dev_item = (struct btrfs_dev_item *)sb_ptr; + if (dev_only) { + ret = read_one_dev(root, &key, sb, dev_item); + BUG_ON(ret); + } + len = sizeof(*dev_item); + len += btrfs_device_name_len(sb, dev_item); + } else if (key.type == BTRFS_CHUNK_ITEM_KEY) { + + chunk = (struct btrfs_chunk *)sb_ptr; + if (!dev_only) { + ret = read_one_chunk(root, &key, sb, chunk); + BUG_ON(ret); + } + num_stripes = btrfs_chunk_num_stripes(sb, chunk); + len = btrfs_chunk_item_size(num_stripes); + } else { + BUG(); + } + ptr += len; + sb_ptr += len; + cur += len; + } + if (dev_only == 1) { + dev_only = 0; + goto again; + } + return 0; +} + +int btrfs_read_chunk_tree(struct btrfs_root *root) +{ + struct btrfs_path *path; + struct extent_buffer *leaf; + struct btrfs_key key; + struct btrfs_key found_key; + int ret; + int slot; + + root = root->fs_info->chunk_root; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + /* first we search for all of the device items, and then we + * read in all of the chunk items. 
This way we can create chunk + * mappings that reference all of the devices that are found + */ + key.objectid = BTRFS_DEV_ITEMS_OBJECTID; + key.offset = 0; + key.type = 0; +again: + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + while(1) { + leaf = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto error; + break; + } + btrfs_item_key_to_cpu(leaf, &found_key, slot); + if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { + if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID) + break; + if (found_key.type == BTRFS_DEV_ITEM_KEY) { + struct btrfs_dev_item *dev_item; + dev_item = btrfs_item_ptr(leaf, slot, + struct btrfs_dev_item); + ret = read_one_dev(root, &found_key, leaf, + dev_item); + BUG_ON(ret); + } + } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { + struct btrfs_chunk *chunk; + chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); + ret = read_one_chunk(root, &found_key, leaf, chunk); + } + path->slots[0]++; + } + if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { + key.objectid = 0; + btrfs_release_path(root, path); + goto again; + } + + btrfs_free_path(path); + ret = 0; +error: + return ret; +} + diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h new file mode 100644 index 00000000000..4a47dcb00a7 --- /dev/null +++ b/fs/btrfs/volumes.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2007 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef __BTRFS_VOLUMES_ +#define __BTRFS_VOLUMES_ +struct btrfs_device { + struct list_head dev_list; + struct btrfs_root *dev_root; + struct btrfs_key dev_key; + + struct block_device *bdev; + + /* the internal btrfs device id */ + u64 devid; + + /* size of the device */ + u64 total_bytes; + + /* bytes used */ + u64 bytes_used; + + /* optimal io alignment for this device */ + u32 io_align; + + /* optimal io width for this device */ + u32 io_width; + + /* minimal io size for this device */ + u32 sector_size; + + /* the kernel device number */ + u64 rdev; + + /* type and info about this device */ + u64 type; + + /* partition number, 0 for whole dev */ + int partition; + + /* length of the name data at the end of the item */ + int name_len; + + /* physical drive uuid (or lvm uuid) */ + u8 uuid[BTRFS_DEV_UUID_SIZE]; + + char *name; +}; + +int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, + struct btrfs_device *device, + u64 owner, u64 num_bytes, u64 *start); +int btrfs_map_block(struct btrfs_mapping_tree *map_tree, + u64 logical, u64 *phys, u64 *length, + struct btrfs_device **dev); +int btrfs_read_sys_array(struct btrfs_root *root); +int btrfs_read_chunk_tree(struct btrfs_root *root); +int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, u64 *start, + u64 *num_bytes, u32 type); +void btrfs_mapping_init(struct btrfs_mapping_tree *tree); +void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); +int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio); +#endif -- cgit v1.2.3-70-g09d2 From 239b14b32dc39232ebf9cce29ff77c4c564355fd Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 24 Mar 2008 15:02:07 -0400 Subject: Btrfs: Bring back mount -o ssd optimizations Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 +++ fs/btrfs/disk-io.c | 2 ++ fs/btrfs/extent-tree.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/extent_io.c | 2 ++ fs/btrfs/extent_io.h | 2 ++ fs/btrfs/inode.c | 29 +++++++++++++++++++++++++ fs/btrfs/volumes.c | 5 +++++ fs/btrfs/volumes.h | 3 +++ 8 files changed, 103 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 96a49321786..acf22ad6115 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1405,6 +1405,9 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); /* inode.c */ +int btrfs_merge_bio_hook(struct page *page, unsigned long offset, + size_t size, struct bio *bio); + static inline void dec_i_blocks(struct inode *inode, u64 dec) { dec = dec >> 9; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 26185d46712..4890151cd68 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1103,4 +1103,6 @@ int btrfs_read_buffer(struct extent_buffer *buf) static struct extent_io_ops btree_extent_io_ops = { .writepage_io_hook = btree_writepage_io_hook, .submit_bio_hook = btree_submit_bio_hook, + /* note we're sharing with inode.c for the merge bio hook */ + .merge_bio_hook = btrfs_merge_bio_hook, }; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 14eb8fc8701..e9ef644ff56 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1473,13 +1473,31 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root * root = orig_root->fs_info->extent_root; struct btrfs_fs_info *info = root->fs_info; u64 total_needed = num_bytes; + u64 *last_ptr = NULL; struct btrfs_block_group_cache *block_group; 
int full_scan = 0; int wrapped = 0; + int empty_cluster = 2 * 1024 * 1024; WARN_ON(num_bytes < root->sectorsize); btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY); + if (data & BTRFS_BLOCK_GROUP_METADATA) { + last_ptr = &root->fs_info->last_alloc; + } + + if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) { + last_ptr = &root->fs_info->last_data_alloc; + } + + if (last_ptr) { + if (*last_ptr) + hint_byte = *last_ptr; + else { + empty_size += empty_cluster; + } + } + if (search_end == (u64)-1) search_end = btrfs_super_total_bytes(&info->super_copy); @@ -1489,11 +1507,14 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, hint_byte = search_start; block_group = btrfs_find_block_group(root, block_group, hint_byte, data, 1); + if (last_ptr && *last_ptr == 0 && block_group) + hint_byte = block_group->key.objectid; } else { block_group = btrfs_find_block_group(root, trans->block_group, search_start, data, 1); } + search_start = max(search_start, hint_byte); total_needed += empty_size; @@ -1506,9 +1527,36 @@ check_failed: } ret = find_search_start(root, &block_group, &search_start, total_needed, data); + if (ret == -ENOSPC && last_ptr && *last_ptr) { + *last_ptr = 0; + block_group = btrfs_lookup_block_group(info, + orig_search_start); + search_start = orig_search_start; + ret = find_search_start(root, &block_group, &search_start, + total_needed, data); + } + if (ret == -ENOSPC) + goto enospc; if (ret) goto error; + if (last_ptr && *last_ptr && search_start != *last_ptr) { + *last_ptr = 0; + if (!empty_size) { + empty_size += empty_cluster; + total_needed += empty_size; + } + block_group = btrfs_lookup_block_group(info, + orig_search_start); + search_start = orig_search_start; + ret = find_search_start(root, &block_group, + &search_start, total_needed, data); + if (ret == -ENOSPC) + goto enospc; + if (ret) + goto error; + } + search_start = stripe_align(root, search_start); ins->objectid = search_start; ins->offset = num_bytes; @@ -1547,6 +1595,13 @@ check_failed: trans->block_group = block_group; } ins->offset = num_bytes; + if (last_ptr) { + *last_ptr = ins->objectid + ins->offset; + if (*last_ptr == + btrfs_super_total_bytes(&root->fs_info->super_copy)) { + *last_ptr = 0; + } + } return 0; new_group: @@ -1612,12 +1667,12 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, if (root->ref_cows) { if (data != BTRFS_BLOCK_GROUP_METADATA) { ret = do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes, + 2 * 1024 * 1024, BTRFS_BLOCK_GROUP_METADATA); BUG_ON(ret); } ret = do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes, data); + num_bytes + 2 * 1024 * 1024, data); BUG_ON(ret); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7e3a1ebde9f..6dab664529c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1730,6 +1730,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, if (bio_ret && *bio_ret) { bio = *bio_ret; if (bio->bi_sector + (bio->bi_size >> 9) != sector || + (tree->ops && tree->ops->merge_bio_hook && + tree->ops->merge_bio_hook(page, offset, size, bio)) || bio_add_page(bio, page, size, offset) < size) { ret = submit_one_bio(rw, bio); bio = NULL; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 0dca89328f9..8b5319db251 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -29,6 +29,8 @@ struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); int (*submit_bio_hook)(struct 
inode *inode, int rw, struct bio *bio); + int (*merge_bio_hook)(struct page *page, unsigned long offset, + size_t size, struct bio *bio); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 109576b57f6..5140d680184 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -296,6 +296,34 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } +int btrfs_merge_bio_hook(struct page *page, unsigned long offset, + size_t size, struct bio *bio) +{ + struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; + struct btrfs_mapping_tree *map_tree; + struct btrfs_device *dev; + u64 logical = bio->bi_sector << 9; + u64 physical; + u64 length = 0; + u64 map_length; + struct bio_vec *bvec; + int i; + int ret; + + bio_for_each_segment(bvec, bio, i) { + length += bvec->bv_len; + } + map_tree = &root->fs_info->mapping_tree; + map_length = length; + ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev); + if (map_length < length + size) { + printk("merge bio hook logical %Lu bio len %Lu physical %Lu " + "len %Lu\n", logical, length, physical, map_length); + return 1; + } + return 0; +} + int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -3033,6 +3061,7 @@ static struct file_operations btrfs_dir_file_operations = { static struct extent_io_ops btrfs_extent_io_ops = { .fill_delalloc = run_delalloc_range, .submit_bio_hook = btrfs_submit_bio_hook, + .merge_bio_hook = btrfs_merge_bio_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, .set_bit_hook = btrfs_set_bit_hook, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ae22d01ecf5..16fb6bbe6e2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -578,6 +578,11 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio) map_tree = &root->fs_info->mapping_tree; map_length = length; ret = btrfs_map_block(map_tree, logical, &physical, &map_length, &dev); + if (map_length < length) { + printk("mapping failed logical %Lu bio len %Lu physical %Lu " + "len %Lu\n", logical, length, physical, map_length); + BUG(); + } BUG_ON(map_length < length); bio->bi_sector = physical >> 9; bio->bi_bdev = dev->bdev; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 77fa6efd79c..20259128152 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -64,4 +64,7 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree); void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio); int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf); +int btrfs_map_block(struct btrfs_mapping_tree *map_tree, + u64 logical, u64 *phys, u64 *length, + struct btrfs_device **dev); #endif -- cgit v1.2.3-70-g09d2 From 2d2ae547979854d10b75d557b3abdb3eb7511bbc Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 26 Mar 2008 16:24:23 -0400 Subject: Btrfs: Add leak debugging for extent_buffer and extent_state This also fixes one leak around the super block when failing to mount the FS. 
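The debugging scheme in this patch is simple shadow accounting: every extent_state and extent_buffer is linked onto a global list under a spinlock when allocated, unlinked when freed, and anything still on the list at extent_io_exit() time is by definition leaked and gets printed. A minimal userspace sketch of the same pattern, with hypothetical obj/alloc_obj/free_obj names and a pthread mutex standing in for the kernel's IRQ-safe spinlock:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct obj {
	int id;
	struct obj *next;	/* plays the role of the new leak_list member */
};

static struct obj *leak_list;	/* like the global 'states' and 'buffers' lists */
static pthread_mutex_t leak_lock = PTHREAD_MUTEX_INITIALIZER;

static struct obj *alloc_obj(int id)
{
	struct obj *o = malloc(sizeof(*o));
	if (!o)
		return NULL;
	o->id = id;
	pthread_mutex_lock(&leak_lock);
	o->next = leak_list;	/* record the allocation */
	leak_list = o;
	pthread_mutex_unlock(&leak_lock);
	return o;
}

static void free_obj(struct obj *o)
{
	struct obj **p;
	pthread_mutex_lock(&leak_lock);
	for (p = &leak_list; *p; p = &(*p)->next) {
		if (*p == o) {
			*p = o->next;	/* unlink before freeing */
			break;
		}
	}
	pthread_mutex_unlock(&leak_lock);
	free(o);
}

static void report_leaks(void)	/* analogous to the loops in extent_io_exit() */
{
	struct obj *o;
	for (o = leak_list; o; o = o->next)
		printf("leak: obj id %d\n", o->id);
}

int main(void)
{
	struct obj *a = alloc_obj(1);
	struct obj *b = alloc_obj(2);
	(void)b;		/* deliberately leaked */
	free_obj(a);
	report_leaks();		/* prints: leak: obj id 2 */
	return 0;
}

The kernel version takes the lock with spin_lock_irqsave() because free_extent_state() can run from bio completion context; the singly linked list above stands in for the kernel's list_head.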
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 ++ fs/btrfs/extent_io.c | 28 ++++++++++++++++++++++++++-- fs/btrfs/extent_io.h | 3 ++- 3 files changed, 30 insertions(+), 3 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 134b722587e..1c5e097a4df 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -915,12 +915,14 @@ fail_tree_root: free_extent_buffer(tree_root->node); fail_sb_buffer: free_extent_buffer(fs_info->sb_buffer); + extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); fail_iput: iput(fs_info->btree_inode); fail: close_all_devices(fs_info); kfree(extent_root); kfree(tree_root); + bdi_destroy(&fs_info->bdi); kfree(fs_info); return ERR_PTR(err); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6dab664529c..b99cfabd008 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -26,6 +26,7 @@ static struct kmem_cache *extent_buffer_cache; static LIST_HEAD(buffers); static LIST_HEAD(states); +static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; #define BUFFER_LRU_MAX 64 @@ -64,15 +65,22 @@ free_state_cache: void extent_io_exit(void) { struct extent_state *state; + struct extent_buffer *eb; while (!list_empty(&states)) { - state = list_entry(states.next, struct extent_state, list); + state = list_entry(states.next, struct extent_state, leak_list); printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs)); - list_del(&state->list); + list_del(&state->leak_list); kmem_cache_free(extent_state_cache, state); } + while (!list_empty(&buffers)) { + eb = list_entry(buffers.next, struct extent_buffer, leak_list); + printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs)); + list_del(&eb->leak_list); + kmem_cache_free(extent_buffer_cache, eb); + } if (extent_state_cache) kmem_cache_destroy(extent_state_cache); if (extent_buffer_cache) @@ -109,6 +117,7 @@ EXPORT_SYMBOL(extent_io_tree_empty_lru); struct extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; + unsigned long flags; state = kmem_cache_alloc(extent_state_cache, mask); if (!state || IS_ERR(state)) @@ -116,6 +125,9 @@ struct extent_state *alloc_extent_state(gfp_t mask) state->state = 0; state->private = 0; state->tree = NULL; + spin_lock_irqsave(&leak_lock, flags); + list_add(&state->leak_list, &states); + spin_unlock_irqrestore(&leak_lock, flags); atomic_set(&state->refs, 1); init_waitqueue_head(&state->wq); @@ -128,7 +140,11 @@ void free_extent_state(struct extent_state *state) if (!state) return; if (atomic_dec_and_test(&state->refs)) { + unsigned long flags; WARN_ON(state->tree); + spin_lock_irqsave(&leak_lock, flags); + list_del(&state->leak_list); + spin_unlock_irqrestore(&leak_lock, flags); kmem_cache_free(extent_state_cache, state); } } @@ -2582,6 +2598,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, gfp_t mask) { struct extent_buffer *eb = NULL; + unsigned long flags; spin_lock(&tree->lru_lock); eb = find_lru(tree, start, len); @@ -2594,6 +2611,9 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, INIT_LIST_HEAD(&eb->lru); eb->start = start; eb->len = len; + spin_lock_irqsave(&leak_lock, flags); + list_add(&eb->leak_list, &buffers); + spin_unlock_irqrestore(&leak_lock, flags); atomic_set(&eb->refs, 1); return eb; @@ -2601,6 +2621,10 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, 
static void __free_extent_buffer(struct extent_buffer *eb) { + unsigned long flags; + spin_lock_irqsave(&leak_lock, flags); + list_del(&eb->leak_list); + spin_unlock_irqrestore(&leak_lock, flags); kmem_cache_free(extent_buffer_cache, eb); } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 8b5319db251..16d67a61a25 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -66,7 +66,7 @@ struct extent_state { /* for use by the FS */ u64 private; - struct list_head list; + struct list_head leak_list; }; struct extent_buffer { @@ -80,6 +80,7 @@ struct extent_buffer { struct list_head lru; atomic_t refs; int flags; + struct list_head leak_list; }; struct extent_map_tree; -- cgit v1.2.3-70-g09d2 From 2b114d1d33551ab1dcaa1d0b3a956df3776fb767 Mon Sep 17 00:00:00 2001 From: Peter Date: Tue, 1 Apr 2008 11:21:40 -0400 Subject: Btrfs: Correct usage of IS_ERR() in extent_io.c Signed-off-by: Peter Teoh Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b99cfabd008..776c33f1e6f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -120,7 +120,7 @@ struct extent_state *alloc_extent_state(gfp_t mask) unsigned long flags; state = kmem_cache_alloc(extent_state_cache, mask); - if (!state || IS_ERR(state)) + if (!state) return state; state->state = 0; state->private = 0; @@ -957,7 +957,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, * our range starts. */ node = tree_search(tree, start); - if (!node || IS_ERR(node)) { + if (!node) { goto out; } @@ -990,7 +990,7 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, * our range starts. */ node = tree_search(tree, start); - if (!node || IS_ERR(node)) { + if (!node) { goto out; } @@ -1024,7 +1024,7 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree, */ search_again: node = tree_search(tree, cur_start); - if (!node || IS_ERR(node)) { + if (!node) { *end = (u64)-1; goto out; } @@ -1112,7 +1112,7 @@ u64 count_range_bits(struct extent_io_tree *tree, * our range starts. */ node = tree_search(tree, cur_start); - if (!node || IS_ERR(node)) { + if (!node) { goto out; } @@ -1213,7 +1213,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) * our range starts. */ node = tree_search(tree, start); - if (!node || IS_ERR(node)) { + if (!node) { ret = -ENOENT; goto out; } @@ -1240,7 +1240,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) * our range starts. 
*/ node = tree_search(tree, start); - if (!node || IS_ERR(node)) { + if (!node) { ret = -ENOENT; goto out; } @@ -2642,7 +2642,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, int uptodate = 1; eb = __alloc_extent_buffer(tree, start, len, mask); - if (!eb || IS_ERR(eb)) + if (!eb) return NULL; if (eb->flags & EXTENT_BUFFER_FILLED) @@ -2717,7 +2717,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, int uptodate = 1; eb = __alloc_extent_buffer(tree, start, len, mask); - if (!eb || IS_ERR(eb)) + if (!eb) return NULL; if (eb->flags & EXTENT_BUFFER_FILLED) -- cgit v1.2.3-70-g09d2 From 728131d8e40c2a47c59ca91a806299c4708029f9 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 9 Apr 2008 16:28:12 -0400 Subject: Btrfs: Add additional debugging for metadata checksum failures Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 3 ++- fs/btrfs/extent_io.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++--- fs/btrfs/extent_io.h | 2 ++ 3 files changed, 55 insertions(+), 4 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index bc5bd5abb28..e444b99e02d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -283,6 +283,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) map = &BTRFS_I(page->mapping->host)->extent_tree; ret = try_release_extent_mapping(map, tree, page, gfp_flags); if (ret == 1) { + invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -376,7 +377,6 @@ int btrfs_verify_block_csum(struct btrfs_root *root, buf->flags |= EXTENT_CSUM; return 0; } - lock_extent(io_tree, buf->start, end, GFP_NOFS); if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) { @@ -384,6 +384,7 @@ int btrfs_verify_block_csum(struct btrfs_root *root, ret = 0; goto out_unlock; } +WARN_ON(buf->flags & EXTENT_CSUM); ret = csum_tree_block(root, buf, 1); set_extent_bits(io_tree, buf->start, end, EXTENT_CSUM, GFP_NOFS); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 776c33f1e6f..13cc2360e37 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2592,6 +2592,33 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, return p; } +int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, + unsigned long len) +{ + struct list_head *lru = &tree->buffer_lru; + struct list_head *cur = lru->next; + struct extent_buffer *eb; + int found = 0; + + spin_lock(&tree->lru_lock); + if (list_empty(lru)) + goto out; + + do { + eb = list_entry(cur, struct extent_buffer, lru); + if (eb->start <= start && eb->start + eb->len > start) { + eb->flags &= ~EXTENT_UPTODATE; + } + if (eb->start == start) { + eb->flags &= ~EXTENT_CSUM; + } + cur = cur->next; + } while (cur != lru); +out: + spin_unlock(&tree->lru_lock); + return found; +} + static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, @@ -2909,10 +2936,32 @@ EXPORT_SYMBOL(set_extent_buffer_uptodate); int extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb) { + int ret = 0; + int ret2; + int num_pages; + int i; + struct page *page; + int pg_uptodate = 1; + if (eb->flags & EXTENT_UPTODATE) - return 1; - return test_range_bit(tree, eb->start, eb->start + eb->len - 1, + ret = 1; + + ret2 = test_range_bit(tree, eb->start, eb->start + eb->len - 1, EXTENT_UPTODATE, 1); + + num_pages = num_extent_pages(eb->start, eb->len); + for (i = 0; i < 
num_pages; i++) { + page = extent_buffer_page(eb, i); + if (!PageUptodate(page)) { + pg_uptodate = 0; + break; + } + } + if ((ret || ret2) && !pg_uptodate) { +printk("uptodate error2 eb %Lu ret %d ret2 %d pg_uptodate %d\n", eb->start, ret, ret2, pg_uptodate); + WARN_ON(1); + } + return (ret || ret2); } EXPORT_SYMBOL(extent_buffer_uptodate); @@ -2929,7 +2978,6 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, unsigned long num_pages; struct bio *bio = NULL; - if (eb->flags & EXTENT_UPTODATE) return 0; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 16d67a61a25..d9f5bc4dbd7 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -216,4 +216,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long *map_start, unsigned long *map_len, int km); void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); +int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, + unsigned long len); #endif -- cgit v1.2.3-70-g09d2 From ce9adaa5a792c2099a83246265eb4055bc38b6b8 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 9 Apr 2008 16:28:12 -0400 Subject: Btrfs: Do metadata checksums for reads via a workqueue Before, metadata checksumming was done by the callers of read_tree_block, which would set EXTENT_CSUM bits in the extent tree to show that a given range of pages was already checksummed and didn't need to be verified again. But, those bits could go away via try_to_releasepage, and the end result was bogus checksum failures on pages that never left the cache. The new code validates checksums when the page is read. It is a little tricky because metadata blocks can span pages and a single read may end up going via multiple bios. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 5 - fs/btrfs/ctree.h | 4 + fs/btrfs/disk-io.c | 258 ++++++++++++++++++++++++++++++++++++++++++------- fs/btrfs/extent-tree.c | 4 - fs/btrfs/extent_io.c | 100 ++++++++++++++----- fs/btrfs/extent_io.h | 4 +- fs/btrfs/transaction.c | 2 +- 7 files changed, 310 insertions(+), 67 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index c92c6b0ee58..efce173a935 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -158,9 +158,6 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, } else { root_gen = 0; } - if (!(buf->flags & EXTENT_CSUM)) - WARN_ON(1); - WARN_ON(root->ref_cows && trans->transid != root->fs_info->running_transaction->transid); WARN_ON(root->ref_cows && trans->transid != root->last_trans); @@ -247,8 +244,6 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans, root->fs_info->generation); WARN_ON(1); } - if (!(buf->flags & EXTENT_CSUM)) - WARN_ON(1); header_trans = btrfs_header_generation(buf); spin_lock(&root->fs_info->hash_lock); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4b3b20459f6..e803c4daad2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -485,6 +485,10 @@ struct btrfs_fs_info { struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; + struct list_head end_io_work_list; + struct work_struct end_io_work; + spinlock_t end_io_work_lock; + #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) struct work_struct trans_work; #else diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e444b99e02d..82109204788 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -24,6 +24,7 @@ #include #include #include // for block_sync_page +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -45,6 +46,16 @@ 
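/*
 * A sketch of the deferral pattern the hunks below implement: the bio's
 * completion callback is replaced, the completed bio is parked on a list,
 * and a worker dequeues it in process context, where checksumming is
 * allowed to sleep, before restoring and invoking the original callback.
 * This is a simplified userspace model using pthreads; 'struct request'
 * and queue_completion() are hypothetical stand-ins for struct bio and
 * end_workqueue_bio(), and the re-queueing that bio_ready_for_csum() does
 * for buffers spanning several bios is omitted.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>

struct request {
	void (*end_io)(struct request *req, int err);	/* saved original callback */
	int error;
	struct request *next;
};

static struct request *pending;		/* like fs_info->end_io_work_list */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pending_cond = PTHREAD_COND_INITIALIZER;

/* "Interrupt" side: record the status and hand off, completing nothing yet. */
static void queue_completion(struct request *req, int err)
{
	pthread_mutex_lock(&pending_lock);
	req->error = err;
	req->next = pending;
	pending = req;
	pthread_cond_signal(&pending_cond);
	pthread_mutex_unlock(&pending_lock);
}

/* Worker side: verify in a context that may sleep, then really complete. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		struct request *req;
		pthread_mutex_lock(&pending_lock);
		while (!pending)
			pthread_cond_wait(&pending_cond, &pending_lock);
		req = pending;
		pending = req->next;
		pthread_mutex_unlock(&pending_lock);
		/* checksum verification would happen here */
		req->end_io(req, req->error);
	}
	return NULL;
}

static void my_end_io(struct request *req, int err)
{
	printf("request completed, err=%d\n", err);
	free(req);
}

int main(void)
{
	pthread_t t;
	struct request *req = calloc(1, sizeof(*req));
	if (!req)
		return 1;
	req->end_io = my_end_io;
	pthread_create(&t, NULL, worker, NULL);
	queue_completion(req, 0);	/* as if the device interrupt fired */
	sleep(1);			/* let the worker drain the list */
	return 0;
}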
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf) #endif static struct extent_io_ops btree_extent_io_ops; +static struct workqueue_struct *end_io_workqueue; + +struct end_io_wq { + struct bio *bio; + bio_end_io_t *end_io; + void *private; + struct btrfs_fs_info *info; + int error; + struct list_head list; +}; struct extent_map *btree_get_extent(struct inode *inode, struct page *page, size_t page_offset, u64 start, u64 len, @@ -219,11 +230,108 @@ static int btree_writepage_io_hook(struct page *page, u64 start, u64 end) return 0; } +int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, + struct extent_state *state) +{ + struct extent_io_tree *tree; + u64 found_start; + int found_level; + unsigned long len; + struct extent_buffer *eb; + struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; + int ret; + + tree = &BTRFS_I(page->mapping->host)->io_tree; + if (page->private == EXTENT_PAGE_PRIVATE) + goto out; + if (!page->private) + goto out; + len = page->private >> 2; + if (len == 0) { + WARN_ON(1); + } + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1, + btree_get_extent); + btrfs_clear_buffer_defrag(eb); + found_start = btrfs_header_bytenr(eb); + if (found_start != start) { + printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n", + start, found_start, len); + WARN_ON(1); + goto err; + } + if (eb->first_page != page) { + printk("bad first page %lu %lu\n", eb->first_page->index, + page->index); + WARN_ON(1); + goto err; + } + found_level = btrfs_header_level(eb); + + ret = csum_tree_block(root, eb, 1); + + end = min_t(u64, eb->len, PAGE_CACHE_SIZE); + end = eb->start + end - 1; + release_extent_buffer_tail_pages(eb); +err: + free_extent_buffer(eb); +out: + return 0; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +static void end_workqueue_bio(struct bio *bio, int err) +#else +static int end_workqueue_bio(struct bio *bio, + unsigned int bytes_done, int err) +#endif +{ + struct end_io_wq *end_io_wq = bio->bi_private; + struct btrfs_fs_info *fs_info; + unsigned long flags; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + if (bio->bi_size) + return 1; +#endif + + fs_info = end_io_wq->info; + spin_lock_irqsave(&fs_info->end_io_work_lock, flags); + end_io_wq->error = err; + list_add_tail(&end_io_wq->list, &fs_info->end_io_work_list); + spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags); + queue_work(end_io_workqueue, &fs_info->end_io_work); + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) + return 0; +#endif +} + static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) { struct btrfs_root *root = BTRFS_I(inode)->root; + struct end_io_wq *end_io_wq; u64 offset; offset = bio->bi_sector << 9; + + if (rw & (1 << BIO_RW)) { + return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio); + } + + end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS); + if (!end_io_wq) + return -ENOMEM; + + end_io_wq->private = bio->bi_private; + end_io_wq->end_io = bio->bi_end_io; + end_io_wq->info = root->fs_info; + end_io_wq->error = 0; + end_io_wq->bio = bio; + + bio->bi_private = end_io_wq; + bio->bi_end_io = end_workqueue_bio; + if (offset == BTRFS_SUPER_INFO_OFFSET) { bio->bi_bdev = root->fs_info->sb->s_bdev; submit_bio(rw, bio); @@ -363,36 +471,7 @@ static int close_all_devices(struct btrfs_fs_info *fs_info) int btrfs_verify_block_csum(struct btrfs_root *root, struct extent_buffer *buf) { - struct extent_io_tree *io_tree; - u64 end; - int ret; - - 
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; - if (buf->flags & EXTENT_CSUM) - return 0; - - end = min_t(u64, buf->len, PAGE_CACHE_SIZE); - end = buf->start + end - 1; - if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) { - buf->flags |= EXTENT_CSUM; - return 0; - } - lock_extent(io_tree, buf->start, end, GFP_NOFS); - - if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) { - buf->flags |= EXTENT_CSUM; - ret = 0; - goto out_unlock; - } -WARN_ON(buf->flags & EXTENT_CSUM); - - ret = csum_tree_block(root, buf, 1); - set_extent_bits(io_tree, buf->start, end, EXTENT_CSUM, GFP_NOFS); - buf->flags |= EXTENT_CSUM; - -out_unlock: - unlock_extent(io_tree, buf->start, end, GFP_NOFS); - return ret; + return btrfs_buffer_uptodate(buf); } struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, @@ -430,11 +509,15 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, buf = btrfs_find_create_tree_block(root, bytenr, blocksize); if (!buf) return NULL; - read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, 1, - btree_get_extent); - ret = btrfs_verify_block_csum(root, buf); + ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, + 1, btree_get_extent); + + if (ret == 0) { + buf->flags |= EXTENT_UPTODATE; + } return buf; + } int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -724,6 +807,99 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) return 0; } +static int bio_ready_for_csum(struct bio *bio) +{ + u64 length = 0; + u64 buf_len = 0; + u64 start = 0; + struct page *page; + struct extent_io_tree *io_tree = NULL; + struct btrfs_fs_info *info = NULL; + struct bio_vec *bvec; + int i; + int ret; + + bio_for_each_segment(bvec, bio, i) { + page = bvec->bv_page; + if (page->private == EXTENT_PAGE_PRIVATE) { + length += bvec->bv_len; + continue; + } + if (!page->private) { + length += bvec->bv_len; + continue; + } + length = bvec->bv_len; + buf_len = page->private >> 2; + start = page_offset(page) + bvec->bv_offset; + io_tree = &BTRFS_I(page->mapping->host)->io_tree; + info = BTRFS_I(page->mapping->host)->root->fs_info; + } + /* are we fully contained in this bio? 
*/ + if (buf_len <= length) + return 1; + + ret = extent_range_uptodate(io_tree, start + length, + start + buf_len - 1); + if (ret == 1) + return ret; + return ret; +} + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) +void btrfs_end_io_csum(void *p) +#else +void btrfs_end_io_csum(struct work_struct *work) +#endif +{ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) + struct btrfs_fs_info *fs_info = p; +#else + struct btrfs_fs_info *fs_info = container_of(work, + struct btrfs_fs_info, + end_io_work); +#endif + unsigned long flags; + struct end_io_wq *end_io_wq; + struct bio *bio; + struct list_head *next; + int error; + int was_empty; + + while(1) { + spin_lock_irqsave(&fs_info->end_io_work_lock, flags); + if (list_empty(&fs_info->end_io_work_list)) { + spin_unlock_irqrestore(&fs_info->end_io_work_lock, + flags); + return; + } + next = fs_info->end_io_work_list.next; + list_del(next); + spin_unlock_irqrestore(&fs_info->end_io_work_lock, flags); + + end_io_wq = list_entry(next, struct end_io_wq, list); + + bio = end_io_wq->bio; + if (!bio_ready_for_csum(bio)) { + spin_lock_irqsave(&fs_info->end_io_work_lock, flags); + was_empty = list_empty(&fs_info->end_io_work_list); + list_add_tail(&end_io_wq->list, + &fs_info->end_io_work_list); + spin_unlock_irqrestore(&fs_info->end_io_work_lock, + flags); + if (was_empty) + return; + continue; + } + error = end_io_wq->error; + bio->bi_private = end_io_wq->private; + bio->bi_end_io = end_io_wq->end_io; + kfree(end_io_wq); + bio_endio(bio, error); + } +} + + struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices) { @@ -750,11 +926,16 @@ struct btrfs_root *open_ctree(struct super_block *sb, err = -ENOMEM; goto fail; } + end_io_workqueue = create_workqueue("btrfs-end-io"); + BUG_ON(!end_io_workqueue); + INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS); INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->hashers); + INIT_LIST_HEAD(&fs_info->end_io_work_list); spin_lock_init(&fs_info->hash_lock); + spin_lock_init(&fs_info->end_io_work_lock); spin_lock_init(&fs_info->delalloc_lock); spin_lock_init(&fs_info->new_trans_lock); @@ -799,6 +980,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->btree_inode->i_mapping, GFP_NOFS); fs_info->do_barriers = 1; + INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info); #else @@ -1044,6 +1226,8 @@ int close_ctree(struct btrfs_root *root) extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); + flush_workqueue(end_io_workqueue); + destroy_workqueue(end_io_workqueue); iput(fs_info->btree_inode); #if 0 @@ -1171,12 +1355,18 @@ int btrfs_read_buffer(struct extent_buffer *buf) { struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; struct inode *btree_inode = root->fs_info->btree_inode; - return read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, + int ret; + ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, 1, btree_get_extent); + if (ret == 0) { + buf->flags |= EXTENT_UPTODATE; + } + return ret; } static struct extent_io_ops btree_extent_io_ops = { .writepage_io_hook = btree_writepage_io_hook, + .readpage_end_io_hook = btree_readpage_end_io_hook, .submit_bio_hook = btree_submit_bio_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, diff --git 
a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0cb742884cd..283b08a32a4 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1898,10 +1898,6 @@ struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans, set_extent_dirty(&trans->transaction->dirty_pages, buf->start, buf->start + buf->len - 1, GFP_NOFS); - set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->io_tree, - buf->start, buf->start + buf->len - 1, - EXTENT_CSUM, GFP_NOFS); - buf->flags |= EXTENT_CSUM; if (!btrfs_test_opt(root, SSD)) btrfs_set_buffer_defrag(buf); trans->blocks_used++; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 13cc2360e37..cfc383c17a3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2592,6 +2592,22 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, return p; } +int release_extent_buffer_tail_pages(struct extent_buffer *eb) +{ + unsigned long num_pages = num_extent_pages(eb->start, eb->len); + struct page *page; + unsigned long i; + + if (num_pages == 1) + return 0; + for (i = 1; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + page_cache_release(page); + } + return 0; +} + + int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, unsigned long len) { @@ -2609,9 +2625,6 @@ int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, if (eb->start <= start && eb->start + eb->len > start) { eb->flags &= ~EXTENT_UPTODATE; } - if (eb->start == start) { - eb->flags &= ~EXTENT_CSUM; - } cur = cur->next; } while (cur != lru); out: @@ -2682,7 +2695,6 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, page_cache_get(page0); mark_page_accessed(page0); set_page_extent_mapped(page0); - WARN_ON(!PageUptodate(page0)); set_page_extent_head(page0, len); } else { i = 0; @@ -2933,13 +2945,39 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree, } EXPORT_SYMBOL(set_extent_buffer_uptodate); +int extent_range_uptodate(struct extent_io_tree *tree, + u64 start, u64 end) +{ + struct page *page; + int ret; + int pg_uptodate = 1; + int uptodate; + unsigned long index; + + ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1); + if (ret) + return 1; + while(start <= end) { + index = start >> PAGE_CACHE_SHIFT; + page = find_get_page(tree->mapping, index); + uptodate = PageUptodate(page); + page_cache_release(page); + if (!uptodate) { + pg_uptodate = 0; + break; + } + start += PAGE_CACHE_SIZE; + } + return pg_uptodate; +} + int extent_buffer_uptodate(struct extent_io_tree *tree, - struct extent_buffer *eb) + struct extent_buffer *eb) { int ret = 0; int ret2; - int num_pages; - int i; + unsigned long num_pages; + unsigned long i; struct page *page; int pg_uptodate = 1; @@ -2975,13 +3013,16 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, struct page *page; int err; int ret = 0; + int locked_pages = 0; + int all_uptodate = 1; + int inc_all_pages = 0; unsigned long num_pages; struct bio *bio = NULL; if (eb->flags & EXTENT_UPTODATE) return 0; - if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1, + if (test_range_bit(tree, eb->start, eb->start + eb->len - 1, EXTENT_UPTODATE, 1)) { return 0; } @@ -2997,17 +3038,30 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, num_pages = num_extent_pages(eb->start, eb->len); for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); - if (PageUptodate(page)) { - continue; - } if (!wait) { - if (TestSetPageLocked(page)) { - continue; - } + if (TestSetPageLocked(page)) + goto unlock_exit; } else { 
lock_page(page); } + locked_pages++; + if (!PageUptodate(page)) { + all_uptodate = 0; + } + } + if (all_uptodate) { + if (start_i == 0) + eb->flags |= EXTENT_UPTODATE; + goto unlock_exit; + } + + for (i = start_i; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + if (inc_all_pages) + page_cache_get(page); if (!PageUptodate(page)) { + if (start_i == 0) + inc_all_pages = 1; err = __extent_read_full_page(tree, page, get_extent, &bio); if (err) { @@ -3034,6 +3088,16 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (!ret) eb->flags |= EXTENT_UPTODATE; return ret; + +unlock_exit: + i = start_i; + while(locked_pages > 0) { + page = extent_buffer_page(eb, i); + i++; + unlock_page(page); + locked_pages--; + } + return ret; } EXPORT_SYMBOL(read_extent_buffer_pages); @@ -3048,7 +3112,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv, char *dst = (char *)dstv; size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; - unsigned long num_pages = num_extent_pages(eb->start, eb->len); WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); @@ -3057,11 +3120,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv, while(len > 0) { page = extent_buffer_page(eb, i); - if (!PageUptodate(page)) { - printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len); - WARN_ON(1); - } - WARN_ON(!PageUptodate(page)); cur = min(len, (PAGE_CACHE_SIZE - offset)); kaddr = kmap_atomic(page, KM_USER1); @@ -3105,7 +3163,6 @@ printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, } p = extent_buffer_page(eb, i); - WARN_ON(!PageUptodate(p)); kaddr = kmap_atomic(p, km); *token = kaddr; *map = kaddr + offset; @@ -3165,7 +3222,6 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, while(len > 0) { page = extent_buffer_page(eb, i); - WARN_ON(!PageUptodate(page)); cur = min(len, (PAGE_CACHE_SIZE - offset)); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index d9f5bc4dbd7..9d2991d1d3c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -13,7 +13,6 @@ #define EXTENT_DEFRAG (1 << 6) #define EXTENT_DEFRAG_DONE (1 << 7) #define EXTENT_BUFFER_FILLED (1 << 8) -#define EXTENT_CSUM (1 << 9) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) /* @@ -218,4 +217,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, unsigned long len); +int release_extent_buffer_tail_pages(struct extent_buffer *eb); +int extent_range_uptodate(struct extent_io_tree *tree, + u64 start, u64 end); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5e9f69244f9..1ed179c020a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -843,7 +843,7 @@ void btrfs_transaction_flush_work(struct btrfs_root *root) void __init btrfs_init_transaction_sys(void) { - trans_wq = create_workqueue("btrfs"); + trans_wq = create_workqueue("btrfs-transaction"); } void btrfs_exit_transaction_sys(void) -- cgit v1.2.3-70-g09d2 From f188591e987e21b6f7f8864c66a02858b95b530e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 9 Apr 2008 16:28:12 -0400 Subject: Btrfs: Retry metadata reads in the face of checksum failures Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 11 +++++++- fs/btrfs/disk-io.c | 72 ++++++++++++++++++++++++++++++++++++++-------------- 
fs/btrfs/extent_io.c | 50 +++++++++++++++++++++--------------- fs/btrfs/extent_io.h | 5 ++-- fs/btrfs/inode.c | 7 ++--- fs/btrfs/volumes.c | 39 +++++++++++++++++++++++++--- fs/btrfs/volumes.h | 6 +++-- 7 files changed, 138 insertions(+), 52 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index efce173a935..ff4e9c6859d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -569,7 +569,16 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, static int noinline check_block(struct btrfs_root *root, struct btrfs_path *path, int level) { - return 0; + u64 found_start; + if (btrfs_header_level(path->nodes[level]) != level) + printk("warning: bad level %Lu wanted %d found %d\n", + path->nodes[level]->start, level, + btrfs_header_level(path->nodes[level])); + found_start = btrfs_header_bytenr(path->nodes[level]); + if (found_start != path->nodes[level]->start) { + printk("warning: bad bytentr %Lu found %Lu\n", + path->nodes[level]->start, found_start); + } #if 0 struct extent_buffer *buf = path->nodes[level]; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 94ff87d0eae..59bdf0474be 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -156,7 +156,6 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, memcpy(&found, result, BTRFS_CRC32_SIZE); read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE); - WARN_ON(1); printk("btrfs: %s checksum verify failed on %llu " "wanted %X found %X from_this_trans %d " "level %d\n", @@ -171,6 +170,40 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, return 0; } +static int btree_read_extent_buffer_pages(struct btrfs_root *root, + struct extent_buffer *eb, + u64 start) +{ + struct extent_io_tree *io_tree; + int ret; + int num_copies = 0; + int mirror_num = 0; + + io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; + while (1) { + ret = read_extent_buffer_pages(io_tree, eb, start, 1, + btree_get_extent, mirror_num); + if (!ret) { + if (mirror_num) +printk("good read %Lu mirror %d total %d\n", eb->start, mirror_num, num_copies); + return ret; + } + num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, + eb->start, eb->len); +printk("failed to read %Lu mirror %d total %d\n", eb->start, mirror_num, num_copies); + if (num_copies == 1) { +printk("reading %Lu failed only one copy\n", eb->start); + return ret; + } + mirror_num++; + if (mirror_num > num_copies) { +printk("bailing at mirror %d of %d\n", mirror_num, num_copies); + return ret; + } + } +printk("read extent buffer page last\n"); + return -EIO; +} int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { @@ -180,6 +213,8 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) int found_level; unsigned long len; struct extent_buffer *eb; + int ret; + tree = &BTRFS_I(page->mapping->host)->io_tree; if (page->private == EXTENT_PAGE_PRIVATE) @@ -191,8 +226,8 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) WARN_ON(1); } eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); - read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1, - btree_get_extent); + ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE); + BUG_ON(ret); btrfs_clear_buffer_defrag(eb); found_start = btrfs_header_bytenr(eb); if (found_start != start) { @@ -240,7 +275,7 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, unsigned long len; struct extent_buffer *eb; struct btrfs_root *root = 
BTRFS_I(page->mapping->host)->root; - int ret; + int ret = 0; tree = &BTRFS_I(page->mapping->host)->io_tree; if (page->private == EXTENT_PAGE_PRIVATE) @@ -252,25 +287,26 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, WARN_ON(1); } eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); - read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1, - btree_get_extent); + btrfs_clear_buffer_defrag(eb); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n", - start, found_start, len); - WARN_ON(1); +printk("bad start on %Lu found %Lu\n", eb->start, found_start); + ret = -EIO; goto err; } if (eb->first_page != page) { printk("bad first page %lu %lu\n", eb->first_page->index, page->index); WARN_ON(1); + ret = -EIO; goto err; } found_level = btrfs_header_level(eb); ret = csum_tree_block(root, eb, 1); + if (ret) + ret = -EIO; end = min_t(u64, eb->len, PAGE_CACHE_SIZE); end = eb->start + end - 1; @@ -278,7 +314,7 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, err: free_extent_buffer(eb); out: - return 0; + return ret; } #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) @@ -329,7 +365,8 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, return 0; } -static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) +static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, + int mirror_num) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 offset; @@ -338,7 +375,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) offset = bio->bi_sector << 9; if (rw & (1 << BIO_RW)) { - return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio); + return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num); } ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1); @@ -349,7 +386,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) submit_bio(rw, bio); return 0; } - return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio); + return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num); } static int btree_writepage(struct page *page, struct writeback_control *wbc) @@ -459,7 +496,7 @@ int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) if (!buf) return 0; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, - buf, 0, 0, btree_get_extent); + buf, 0, 0, btree_get_extent, 0); free_extent_buffer(buf); return ret; } @@ -522,8 +559,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, if (!buf) return NULL; - ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, - 1, btree_get_extent); + ret = btree_read_extent_buffer_pages(root, buf, 0); if (ret == 0) { buf->flags |= EXTENT_UPTODATE; @@ -1366,10 +1402,8 @@ int btrfs_clear_buffer_defrag(struct extent_buffer *buf) int btrfs_read_buffer(struct extent_buffer *buf) { struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root; - struct inode *btree_inode = root->fs_info->btree_inode; int ret; - ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, - buf, 0, 1, btree_get_extent); + ret = btree_read_extent_buffer_pages(root, buf, 0); if (ret == 0) { buf->flags |= EXTENT_UPTODATE; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cfc383c17a3..2f159375c87 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1546,7 +1546,7 @@ static int end_bio_extent_readpage(struct bio *bio, !(state->state & EXTENT_LOCKED)) state = NULL; } - 
if (!state) { + if (!state && uptodate) { spin_unlock_irqrestore(&tree->lock, flags); set_extent_uptodate(tree, start, end, GFP_ATOMIC); @@ -1567,8 +1567,10 @@ static int end_bio_extent_readpage(struct bio *bio, } else { state = NULL; } - set_state_cb(tree, clear, EXTENT_UPTODATE); - clear->state |= EXTENT_UPTODATE; + if (uptodate) { + set_state_cb(tree, clear, EXTENT_UPTODATE); + clear->state |= EXTENT_UPTODATE; + } clear_state_bit(tree, clear, EXTENT_LOCKED, 1, 0); if (cur == start) @@ -1685,7 +1687,7 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, return bio; } -static int submit_one_bio(int rw, struct bio *bio) +static int submit_one_bio(int rw, struct bio *bio, int mirror_num) { u64 maxsector; int ret = 0; @@ -1722,7 +1724,8 @@ static int submit_one_bio(int rw, struct bio *bio) WARN_ON(1); } if (tree->ops && tree->ops->submit_bio_hook) - tree->ops->submit_bio_hook(page->mapping->host, rw, bio); + tree->ops->submit_bio_hook(page->mapping->host, rw, bio, + mirror_num); else submit_bio(rw, bio); if (bio_flagged(bio, BIO_EOPNOTSUPP)) @@ -1737,7 +1740,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, struct block_device *bdev, struct bio **bio_ret, unsigned long max_pages, - bio_end_io_t end_io_func) + bio_end_io_t end_io_func, + int mirror_num) { int ret = 0; struct bio *bio; @@ -1749,7 +1753,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, (tree->ops && tree->ops->merge_bio_hook && tree->ops->merge_bio_hook(page, offset, size, bio)) || bio_add_page(bio, page, size, offset) < size) { - ret = submit_one_bio(rw, bio); + ret = submit_one_bio(rw, bio, mirror_num); bio = NULL; } else { return 0; @@ -1769,7 +1773,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, if (bio_ret) { *bio_ret = bio; } else { - ret = submit_one_bio(rw, bio); + ret = submit_one_bio(rw, bio, mirror_num); } return ret; @@ -1798,7 +1802,7 @@ void set_page_extent_head(struct page *page, unsigned long len) static int __extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, - struct bio **bio) + struct bio **bio, int mirror_num) { struct inode *inode = page->mapping->host; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; @@ -1901,7 +1905,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, ret = submit_extent_page(READ, tree, page, sector, iosize, page_offset, bdev, bio, nr, - end_bio_extent_readpage); + end_bio_extent_readpage, mirror_num); } if (ret) SetPageError(page); @@ -1923,9 +1927,9 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, struct bio *bio = NULL; int ret; - ret = __extent_read_full_page(tree, page, get_extent, &bio); + ret = __extent_read_full_page(tree, page, get_extent, &bio, 0); if (bio) - submit_one_bio(READ, bio); + submit_one_bio(READ, bio, 0); return ret; } EXPORT_SYMBOL(extent_read_full_page); @@ -2077,7 +2081,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ret = submit_extent_page(WRITE, tree, page, sector, iosize, page_offset, bdev, &epd->bio, max_nr, - end_bio_extent_writepage); + end_bio_extent_writepage, 0); if (ret) SetPageError(page); } @@ -2244,7 +2248,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd); if (epd.bio) { - submit_one_bio(WRITE, epd.bio); + submit_one_bio(WRITE, epd.bio, 0); } return ret; } @@ -2265,7 +2269,7 @@ int extent_writepages(struct extent_io_tree *tree, ret = 
write_cache_pages(mapping, wbc, __extent_writepage, &epd); if (epd.bio) { - submit_one_bio(WRITE, epd.bio); + submit_one_bio(WRITE, epd.bio, 0); } return ret; } @@ -2297,7 +2301,8 @@ int extent_readpages(struct extent_io_tree *tree, page_cache_get(page); if (!pagevec_add(&pvec, page)) __pagevec_lru_add(&pvec); - __extent_read_full_page(tree, page, get_extent, &bio); + __extent_read_full_page(tree, page, get_extent, + &bio, 0); } page_cache_release(page); } @@ -2305,7 +2310,7 @@ int extent_readpages(struct extent_io_tree *tree, __pagevec_lru_add(&pvec); BUG_ON(!list_empty(pages)); if (bio) - submit_one_bio(READ, bio); + submit_one_bio(READ, bio, 0); return 0; } EXPORT_SYMBOL(extent_readpages); @@ -2430,7 +2435,7 @@ int extent_prepare_write(struct extent_io_tree *tree, ret = submit_extent_page(READ, tree, page, sector, iosize, page_offset, em->bdev, NULL, 1, - end_bio_extent_preparewrite); + end_bio_extent_preparewrite, 0); iocount++; block_start = block_start + iosize; } else { @@ -2696,6 +2701,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, mark_page_accessed(page0); set_page_extent_mapped(page0); set_page_extent_head(page0, len); + uptodate = PageUptodate(page0); } else { i = 0; } @@ -3006,7 +3012,7 @@ EXPORT_SYMBOL(extent_buffer_uptodate); int read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, - get_extent_t *get_extent) + get_extent_t *get_extent, int mirror_num) { unsigned long i; unsigned long start_i; @@ -3062,8 +3068,10 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (!PageUptodate(page)) { if (start_i == 0) inc_all_pages = 1; + ClearPageError(page); err = __extent_read_full_page(tree, page, - get_extent, &bio); + get_extent, &bio, + mirror_num); if (err) { ret = err; } @@ -3073,7 +3081,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, } if (bio) - submit_one_bio(READ, bio); + submit_one_bio(READ, bio, mirror_num); if (ret || !wait) { return ret; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 9d2991d1d3c..8d6b8a14cc3 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -27,7 +27,8 @@ struct extent_state; struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); - int (*submit_bio_hook)(struct inode *inode, int rw, struct bio *bio); + int (*submit_bio_hook)(struct inode *inode, int rw, struct bio *bio, + int mirror_num); int (*merge_bio_hook)(struct page *page, unsigned long offset, size_t size, struct bio *bio); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); @@ -172,7 +173,7 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, void free_extent_buffer(struct extent_buffer *eb); int read_extent_buffer_pages(struct extent_io_tree *tree, struct extent_buffer *eb, u64 start, int wait, - get_extent_t *get_extent); + get_extent_t *get_extent, int mirror_num); static inline void extent_buffer_get(struct extent_buffer *eb) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e1ef1acdb35..8c2d5d036bd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -314,7 +314,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, map_tree = &root->fs_info->mapping_tree; map_length = length; ret = btrfs_map_block(map_tree, READ, logical, - &map_length, NULL); + &map_length, NULL, 0); if (map_length < length + size) { return 1; @@ -322,7 +322,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, return 0; } 
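/*
 * The retry policy that this mirror_num plumbing enables, sketched as a
 * standalone program: try the default mirror first, and on a checksum
 * failure walk the remaining copies explicitly until one verifies or all
 * are exhausted. read_block() and num_copies() are hypothetical stand-ins
 * for read_extent_buffer_pages() and btrfs_num_copies(); here mirror 1 is
 * simulated as corrupt so the read succeeds on mirror 2.
 */
#include <stdio.h>

static int read_block(unsigned long long start, int mirror_num)
{
	(void)start;
	return (mirror_num <= 1) ? -5 /* -EIO */ : 0;	/* simulate a bad first copy */
}

static int num_copies(unsigned long long start)
{
	(void)start;
	return 2;	/* e.g. RAID1/DUP: two copies of each metadata block */
}

static int read_with_retries(unsigned long long start)
{
	int mirror_num = 0;	/* 0 means "no preference, pick the least busy" */
	int copies;
	int ret;

	for (;;) {
		ret = read_block(start, mirror_num);
		if (!ret)
			return 0;	/* checksum verified */
		copies = num_copies(start);
		if (copies == 1)
			return ret;	/* only one copy exists: give up */
		if (++mirror_num > copies)
			return ret;	/* every mirror failed */
	}
}

int main(void)
{
	printf("read returned %d\n", read_with_retries(4096));
	return 0;
}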
-int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) +int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, + int mirror_num) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -347,7 +348,7 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio) BUG_ON(ret); mutex_unlock(&root->fs_info->fs_mutex); mapit: - return btrfs_map_bio(root, rw, bio); + return btrfs_map_bio(root, rw, bio, mirror_num); } int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 008d3640e8c..3b927f69832 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -788,9 +788,31 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) } } +int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len) +{ + struct extent_map *em; + struct map_lookup *map; + struct extent_map_tree *em_tree = &map_tree->map_tree; + int ret; + + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, logical, len); + BUG_ON(!em); + + BUG_ON(em->start > logical || em->start + em->len < logical); + map = (struct map_lookup *)em->bdev; + if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1)) + ret = map->num_stripes; + else + ret = 1; + free_extent_map(em); + spin_unlock(&em_tree->lock); + return ret; +} + int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, u64 logical, u64 *length, - struct btrfs_multi_bio **multi_ret) + struct btrfs_multi_bio **multi_ret, int mirror_num) { struct extent_map *em; struct map_lookup *map; @@ -822,6 +844,9 @@ again: map = (struct map_lookup *)em->bdev; offset = logical - em->start; + if (mirror_num > map->num_stripes) + mirror_num = 0; + /* if our multi bio struct is too small, back off and try again */ if (multi_ret && (rw & (1 << BIO_RW)) && stripes_allocated < map->num_stripes && @@ -862,7 +887,9 @@ again: if (map->type & BTRFS_BLOCK_GROUP_RAID1) { if (rw & (1 << BIO_RW)) multi->num_stripes = map->num_stripes; - else { + else if (mirror_num) { + stripe_index = mirror_num - 1; + } else { int i; u64 least = (u64)-1; struct btrfs_device *cur; @@ -880,6 +907,8 @@ again: } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { if (rw & (1 << BIO_RW)) multi->num_stripes = map->num_stripes; + else if (mirror_num) + stripe_index = mirror_num - 1; } else { /* * after this do_div call, stripe_nr is the number of stripes @@ -938,7 +967,8 @@ static int end_bio_multi_stripe(struct bio *bio, #endif } -int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio) +int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, + int mirror_num) { struct btrfs_mapping_tree *map_tree; struct btrfs_device *dev; @@ -960,7 +990,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio) map_tree = &root->fs_info->mapping_tree; map_length = length; - ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi); + ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi, + mirror_num); BUG_ON(ret); total_devs = multi->num_stripes; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 10ca0104750..3d5d0a9cb82 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -93,7 +93,7 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, u64 owner, u64 num_bytes, u64 *start); int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, u64 logical, u64 *length, - struct btrfs_multi_bio **multi_ret); + struct btrfs_multi_bio **multi_ret, int mirror_num); int 
btrfs_read_sys_array(struct btrfs_root *root); int btrfs_read_chunk_tree(struct btrfs_root *root); int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, @@ -101,7 +101,8 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 *num_bytes, u64 type); void btrfs_mapping_init(struct btrfs_mapping_tree *tree); void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); -int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio); +int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, + int mirror_num); int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf); int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int flags, void *holder); @@ -112,4 +113,5 @@ int btrfs_add_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device); int btrfs_cleanup_fs_uuids(void); +int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); #endif -- cgit v1.2.3-70-g09d2 From 7e38326f5b125288a2baea1d815c500502ab9fc0 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 9 Apr 2008 16:28:12 -0400 Subject: Btrfs: Handle checksumming errors while reading data blocks Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 1 + fs/btrfs/extent_io.c | 9 ++++ fs/btrfs/extent_io.h | 3 ++ fs/btrfs/inode.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 126 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 9b9db9cbc01..fe6ef8e3416 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -29,6 +29,7 @@ struct btrfs_inode { struct btrfs_key location; struct extent_map_tree extent_tree; struct extent_io_tree io_tree; + struct extent_io_tree io_failure_tree; struct inode vfs_inode; u64 ordered_trans; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2f159375c87..866460c3d72 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1534,6 +1534,15 @@ static int end_bio_extent_readpage(struct bio *bio, if (ret) uptodate = 0; } + if (!uptodate && tree->ops && + tree->ops->readpage_io_failed_hook) { + ret = tree->ops->readpage_io_failed_hook(bio, page, + start, end, state); + if (ret == 0) { + state = NULL; + continue; + } + } spin_lock_irqsave(&tree->lock, flags); if (!state || state->end != end) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 8d6b8a14cc3..b47859ccd78 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -32,6 +32,9 @@ struct extent_io_ops { int (*merge_bio_hook)(struct page *page, unsigned long offset, size_t size, struct bio *bio); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); + int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, + u64 start, u64 end, + struct extent_state *state); int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8c2d5d036bd..48f1d1b9645 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -385,6 +385,86 @@ out: return ret; } +struct io_failure_record { + struct page *page; + u64 start; + u64 len; + u64 logical; + int last_mirror; +}; + +int btrfs_readpage_io_failed_hook(struct bio *failed_bio, + struct page *page, u64 start, u64 end, + struct extent_state *state) +{ + struct io_failure_record *failrec = NULL; + u64 private; + struct extent_map *em; + struct inode *inode = page->mapping->host; + struct 
extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct bio *bio; + int num_copies; + int ret; + u64 logical; + + ret = get_state_private(failure_tree, start, &private); + if (ret) { + size_t pg_offset = start - page_offset(page); + failrec = kmalloc(sizeof(*failrec), GFP_NOFS); + if (!failrec) + return -ENOMEM; + failrec->start = start; + failrec->len = end - start + 1; + failrec->last_mirror = 0; + + em = btrfs_get_extent(inode, NULL, pg_offset, start, + failrec->len, 0); + + if (!em || IS_ERR(em)) { + kfree(failrec); + return -EIO; + } + logical = start - em->start; + logical = em->block_start + logical; + failrec->logical = logical; + free_extent_map(em); + set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | + EXTENT_DIRTY, GFP_NOFS); + set_state_private(failure_tree, start, (u64)failrec); + } else { + failrec = (struct io_failure_record *)private; + } + num_copies = btrfs_num_copies( + &BTRFS_I(inode)->root->fs_info->mapping_tree, + failrec->logical, failrec->len); + failrec->last_mirror++; + if (!state) { + spin_lock_irq(&BTRFS_I(inode)->io_tree.lock); + state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, + failrec->start, + EXTENT_LOCKED); + if (state && state->start != failrec->start) + state = NULL; + spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock); + } + if (!state || failrec->last_mirror > num_copies) { + set_state_private(failure_tree, failrec->start, 0); + clear_extent_bits(failure_tree, failrec->start, + failrec->start + failrec->len - 1, + EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS); + kfree(failrec); + return -EIO; + } + bio = bio_alloc(GFP_NOFS, 1); + bio->bi_private = state; + bio->bi_end_io = failed_bio->bi_end_io; + bio->bi_sector = failrec->logical >> 9; + bio->bi_bdev = failed_bio->bi_bdev; + bio_add_page(bio, page, failrec->len, start - page_offset(page)); + btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror); + return 0; +} + int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { @@ -419,6 +499,29 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); + + /* if the io failure tree for this inode is non-empty, + * check to see if we've recovered from a failed IO + */ + private = 0; + if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, + (u64)-1, 1, EXTENT_DIRTY)) { + u64 private_failure; + struct io_failure_record *failure; + ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, + start, &private_failure); + if (ret == 0) { + failure = (struct io_failure_record *)private_failure; + set_state_private(&BTRFS_I(inode)->io_failure_tree, + failure->start, 0); + clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, + failure->start, + failure->start + failure->len - 1, + EXTENT_DIRTY | EXTENT_LOCKED, + GFP_NOFS); + kfree(failure); + } + } return 0; zeroit: @@ -429,7 +532,7 @@ zeroit: flush_dcache_page(page); kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); - return 0; + return -EIO; } void btrfs_read_locked_inode(struct inode *inode) @@ -1271,6 +1374,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); + extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, + inode->i_mapping, GFP_NOFS); return 0; } @@ -1578,6 +1683,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, extent_map_tree_init(&BTRFS_I(inode)->extent_tree, 
GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); + extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, + inode->i_mapping, GFP_NOFS); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->root = root; @@ -1803,6 +1910,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); + extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, + inode->i_mapping, GFP_NOFS); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } @@ -2972,6 +3081,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS); extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS); + extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, + inode->i_mapping, GFP_NOFS); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } @@ -3070,6 +3181,7 @@ static struct extent_io_ops btrfs_extent_io_ops = { .merge_bio_hook = btrfs_merge_bio_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .readpage_io_failed_hook = btrfs_readpage_io_failed_hook, .set_bit_hook = btrfs_set_bit_hook, .clear_bit_hook = btrfs_clear_bit_hook, }; -- cgit v1.2.3-70-g09d2 From 594994aa3ea4cb52cba9c83a4b6f03e6ff3a646e Mon Sep 17 00:00:00 2001 From: Miguel Date: Fri, 11 Apr 2008 15:46:48 -0400 Subject: Btrfs: define write_cache_pages for linux kernel <= 2.6.20 instead write_cache_pages doesn't exist in linux 2.6.20, change the #if condition to match that. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 866460c3d72..88322684be6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2109,8 +2109,7 @@ done: return 0; } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20) /* Taken directly from 2.6.23 for 2.6.18 back port */ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, void *data); -- cgit v1.2.3-70-g09d2 From 3b951516ed703af0f6d82053937655ad69b60864 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Apr 2008 11:29:12 -0400 Subject: Btrfs: Use the extent map cache to find the logical disk block during data retries The data read retry code needs to find the logical disk block before it can resubmit new bios. But, finding this block isn't allowed to take the fs_mutex because that will deadlock with a number of different callers. This changes the retry code to use the extent map cache instead, but that requires the extent map cache to have the extent we're looking for. This is a problem because btrfs_drop_extent_cache just drops the entire extent instead of the little tiny part it is invalidating. The bulk of the code in this patch changes btrfs_drop_extent_cache to invalidate only a portion of the extent cache, and changes btrfs_get_extent to deal with the results. 
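As a rough sketch of the new invalidation behavior (a simplified user-space model, not the kernel code; the struct and helper names here are made up for illustration): dropping [start, start + len) from the middle of a cached mapping keeps the surviving head and tail pieces instead of throwing the whole extent away, and the tail's block_start advances by the same distance its logical start moved.

    #include <stdio.h>
    #include <stdint.h>

    /* toy stand-in for struct extent_map: [start, start + len) -> block_start */
    struct toy_em {
        uint64_t start;
        uint64_t len;
        uint64_t block_start;
    };

    /*
     * Drop [start, start + len) from one cached mapping, keeping the
     * surviving head and tail pieces.  Returns how many pieces survive.
     */
    static int toy_drop_range(const struct toy_em *em, uint64_t start,
                              uint64_t len, struct toy_em *head,
                              struct toy_em *tail)
    {
        int n = 0;

        if (em->start < start) {
            /* piece in front of the dropped range */
            head->start = em->start;
            head->len = start - em->start;
            head->block_start = em->block_start;
            n++;
        }
        if (em->start + em->len > start + len) {
            /* piece behind it; the disk offset moves by the same distance */
            uint64_t diff = start + len - em->start;

            tail->start = start + len;
            tail->len = em->start + em->len - (start + len);
            tail->block_start = em->block_start + diff;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct toy_em em = { 0, 16384, 1048576 };
        struct toy_em head, tail;
        int n = toy_drop_range(&em, 4096, 4096, &head, &tail);

        /* expect 2 pieces: [0,4096) and [8192,16384) at disk 1056768 */
        printf("%d pieces, tail disk offset %llu\n", n,
               (unsigned long long)tail.block_start);
        return 0;
    }

With the hole punched this way, a later retry can still find the untouched parts of the extent in the cache without taking the fs_mutex.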
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 1 + fs/btrfs/extent_io.c | 12 +++-- fs/btrfs/file.c | 47 ++++++++++++++++++- fs/btrfs/inode.c | 120 ++++++++++++++++++++++++++++++++++++++++++++----- fs/btrfs/volumes.c | 3 ++ 5 files changed, 168 insertions(+), 15 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 76fd5d7146e..593011e5d45 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1747,6 +1747,7 @@ again: search_start, search_end, hint_byte, ins, trans->alloc_exclude_start, trans->alloc_exclude_nr, data); + if (ret == -ENOSPC && num_bytes > min_alloc_size) { num_bytes = num_bytes >> 1; num_bytes = max(num_bytes, min_alloc_size); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 88322684be6..21597bea21f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1025,7 +1025,8 @@ u64 find_lock_delalloc_range(struct extent_io_tree *tree, search_again: node = tree_search(tree, cur_start); if (!node) { - *end = (u64)-1; + if (!found) + *end = (u64)-1; goto out; } @@ -1540,6 +1541,8 @@ static int end_bio_extent_readpage(struct bio *bio, start, end, state); if (ret == 0) { state = NULL; + uptodate = + test_bit(BIO_UPTODATE, &bio->bi_flags); continue; } } @@ -1555,10 +1558,11 @@ static int end_bio_extent_readpage(struct bio *bio, !(state->state & EXTENT_LOCKED)) state = NULL; } - if (!state && uptodate) { + if (!state) { spin_unlock_irqrestore(&tree->lock, flags); - set_extent_uptodate(tree, start, end, - GFP_ATOMIC); + if (uptodate) + set_extent_uptodate(tree, start, end, + GFP_ATOMIC); unlock_extent(tree, start, end, GFP_ATOMIC); goto next_io; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 9fbda655206..3f5525f0834 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -356,12 +356,23 @@ out_unlock: int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) { struct extent_map *em; + struct extent_map *split = NULL; + struct extent_map *split2 = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; u64 len = end - start + 1; + int ret; + int testend = 1; - if (end == (u64)-1) + if (end == (u64)-1) { len = (u64)-1; + testend = 0; + } while(1) { + if (!split) + split = alloc_extent_map(GFP_NOFS); + if (!split2) + split2 = alloc_extent_map(GFP_NOFS); + spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (!em) { @@ -369,6 +380,36 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) break; } remove_extent_mapping(em_tree, em); + + if (em->block_start < EXTENT_MAP_LAST_BYTE && + em->start < start) { + split->start = em->start; + split->len = start - em->start; + split->block_start = em->block_start; + split->bdev = em->bdev; + split->flags = em->flags; + ret = add_extent_mapping(em_tree, split); + BUG_ON(ret); + free_extent_map(split); + split = split2; + split2 = NULL; + } + if (em->block_start < EXTENT_MAP_LAST_BYTE && + testend && em->start + em->len > start + len) { + u64 diff = start + len - em->start; + + split->start = start + len; + split->len = em->start + em->len - (start + len); + split->bdev = em->bdev; + split->flags = em->flags; + + split->block_start = em->block_start + diff; + + ret = add_extent_mapping(em_tree, split); + BUG_ON(ret); + free_extent_map(split); + split = NULL; + } spin_unlock(&em_tree->lock); /* once for us */ @@ -376,6 +417,10 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) /* once for the tree*/ free_extent_map(em); } + if (split) + 
free_extent_map(split); + if (split2) + free_extent_map(split2); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5632ea76007..40f8da88409 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -122,6 +122,8 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) if (alloc_hint == EXTENT_MAP_INLINE) goto out; + BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); + while(num_bytes > 0) { cur_alloc_size = min(num_bytes, root->fs_info->max_extent); ret = btrfs_alloc_extent(trans, root, cur_alloc_size, @@ -140,6 +142,11 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) ins.offset); inode->i_blocks += ins.offset >> 9; btrfs_check_file(root, inode); + if (num_bytes < cur_alloc_size) { + printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes, + cur_alloc_size); + break; + } num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; @@ -427,6 +434,7 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio, struct extent_map *em; struct inode *inode = page->mapping->host; struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct bio *bio; int num_copies; int ret; @@ -434,7 +442,6 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio, ret = get_state_private(failure_tree, start, &private); if (ret) { - size_t pg_offset = start - page_offset(page); failrec = kmalloc(sizeof(*failrec), GFP_NOFS); if (!failrec) return -ENOMEM; @@ -442,8 +449,13 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio, failrec->len = end - start + 1; failrec->last_mirror = 0; - em = btrfs_get_extent(inode, NULL, pg_offset, start, - failrec->len, 0); + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, start, failrec->len); + if (em->start > start || em->start + em->len < start) { + free_extent_map(em); + em = NULL; + } + spin_unlock(&em_tree->lock); if (!em || IS_ERR(em)) { kfree(failrec); @@ -559,6 +571,8 @@ zeroit: flush_dcache_page(page); kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); + if (private == 0) + return 0; return -EIO; } @@ -908,8 +922,9 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, int pending_del_nr = 0; int pending_del_slot = 0; int extent_type = -1; + u64 mask = root->sectorsize - 1; - btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1); + btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1); path = btrfs_alloc_path(); path->reada = -1; BUG_ON(!path); @@ -1212,7 +1227,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) hole_start, 0, 0, hole_size); btrfs_drop_extent_cache(inode, hole_start, - hole_size - 1); + (u64)-1); btrfs_check_file(root, inode); } btrfs_end_transaction(trans, root); @@ -2083,6 +2098,68 @@ out_unlock: return err; } +static int merge_extent_mapping(struct extent_map_tree *em_tree, + struct extent_map *existing, + struct extent_map *em) +{ + u64 start_diff; + u64 new_end; + int ret = 0; + int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE; + + if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE) + goto invalid; + + if (!real_blocks && em->block_start != existing->block_start) + goto invalid; + + new_end = max(existing->start + existing->len, em->start + em->len); + + if (existing->start >= em->start) { + if (em->start + em->len < existing->start) + goto invalid; + + start_diff = existing->start - em->start; + if (real_blocks && em->block_start + start_diff != + existing->block_start) 
+ goto invalid; + + em->len = new_end - em->start; + + remove_extent_mapping(em_tree, existing); + /* free for the tree */ + free_extent_map(existing); + ret = add_extent_mapping(em_tree, em); + + } else if (em->start > existing->start) { + + if (existing->start + existing->len < em->start) + goto invalid; + + start_diff = em->start - existing->start; + if (real_blocks && existing->block_start + start_diff != + em->block_start) + goto invalid; + + remove_extent_mapping(em_tree, existing); + em->block_start = existing->block_start; + em->start = existing->start; + em->len = new_end - existing->start; + free_extent_map(existing); + + ret = add_extent_mapping(em_tree, em); + } else { + goto invalid; + } + return ret; + +invalid: + printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n", + existing->start, existing->len, existing->block_start, + em->start, em->len, em->block_start); + return -EIO; +} + struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) @@ -2267,12 +2344,35 @@ insert: err = 0; spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); + + /* it is possible that someone inserted the extent into the tree + * while we had the lock dropped. It is also possible that + * an overlapping map exists in the tree + */ if (ret == -EEXIST) { - free_extent_map(em); - em = lookup_extent_mapping(em_tree, start, len); - if (!em) { - err = -EIO; - printk("failing to insert %Lu %Lu\n", start, len); + struct extent_map *existing; + existing = lookup_extent_mapping(em_tree, start, len); + if (!existing) { + existing = lookup_extent_mapping(em_tree, em->start, + em->len); + if (existing) { + err = merge_extent_mapping(em_tree, existing, + em); + free_extent_map(existing); + if (err) { + free_extent_map(em); + em = NULL; + } + } else { + err = -EIO; + printk("failing to insert %Lu %Lu\n", + start, len); + free_extent_map(em); + em = NULL; + } + } else { + free_extent_map(em); + em = existing; } } spin_unlock(&em_tree->lock); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e6417a573d4..0e658c1d821 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -883,6 +883,9 @@ again: spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, logical, *length); spin_unlock(&em_tree->lock); + if (!em) { + printk("unable to find logical %Lu\n", logical); + } BUG_ON(!em); BUG_ON(em->start > logical || em->start + em->len < logical); -- cgit v1.2.3-70-g09d2 From 41471e8341a6b476bcd8ecc765a8b297c22a74f9 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Apr 2008 14:08:30 -0400 Subject: Btrfs: Remove bogus max_sector warnings from the extent_io code It was testing the bio before doing logical->physical mapping, so the test was always wrong. 
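For illustration only (a toy user-space model with invented names, not the kernel structures): the removed warning compared bi_sector against the device size while bi_sector still held a btrfs logical address, before btrfs_map_bio translated it, so it could trip on perfectly valid IO.

    #include <assert.h>
    #include <stdint.h>

    struct toy_bio {
        uint64_t sector;    /* logical before mapping, physical after */
    };

    /* pretend a chunk at logical sector 1<<30 sits at physical sector 4096 */
    static void toy_map_block(struct toy_bio *bio)
    {
        bio->sector = bio->sector - (1ULL << 30) + 4096;
    }

    int main(void)
    {
        uint64_t dev_sectors = 1ULL << 22;  /* small 2GiB device */
        struct toy_bio bio = { .sector = (1ULL << 30) + 100 };

        /*
         * A size check here would fire: the logical address space is far
         * larger than any single device, so bio.sector > dev_sectors even
         * though the IO is fine.
         */
        toy_map_block(&bio);

        /* only after mapping is the comparison meaningful */
        assert(bio.sector < dev_sectors);
        return 0;
    }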
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 21597bea21f..a984a7bc119 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1702,7 +1702,6 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, static int submit_one_bio(int rw, struct bio *bio, int mirror_num) { - u64 maxsector; int ret = 0; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct page *page = bvec->bv_page; @@ -1730,12 +1729,6 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num) bio_get(bio); - maxsector = bio->bi_bdev->bd_inode->i_size >> 9; - if (maxsector < bio->bi_sector) { - printk("sector too large max %Lu got %llu\n", maxsector, - (unsigned long long)bio->bi_sector); - WARN_ON(1); - } if (tree->ops && tree->ops->submit_bio_hook) tree->ops->submit_bio_hook(page->mapping->host, rw, bio, mirror_num); -- cgit v1.2.3-70-g09d2 From 7b13b7b119c932a5eca486db4113f4c1fe3b97a8 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 18 Apr 2008 10:29:50 -0400 Subject: Btrfs: Don't drop extent_map cache during releasepage on the btree inode The btree inode should only have a single extent_map in the cache, it doesn't make sense to ever drop it. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 23 ++++++++++++++--------- fs/btrfs/extent_io.c | 38 +++++++++++++++++++++++++++----------- fs/btrfs/extent_io.h | 3 +++ 3 files changed, 44 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 76ee7a4ae00..2de2b00afeb 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -76,13 +76,12 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, struct extent_map *em; int ret; -again: spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); spin_unlock(&em_tree->lock); - if (em) { + if (em) goto out; - } + em = alloc_extent_map(GFP_NOFS); if (!em) { em = ERR_PTR(-ENOMEM); @@ -95,15 +94,21 @@ again: spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); - spin_unlock(&em_tree->lock); - if (ret == -EEXIST) { free_extent_map(em); - em = NULL; - goto again; + em = lookup_extent_mapping(em_tree, start, len); + if (em) + ret = 0; + else + ret = -EIO; } else if (ret) { - em = ERR_PTR(ret); + free_extent_map(em); + em = NULL; } + spin_unlock(&em_tree->lock); + + if (ret) + em = ERR_PTR(ret); out: return em; } @@ -496,7 +501,7 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) } tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; - ret = try_release_extent_mapping(map, tree, page, gfp_flags); + ret = try_release_extent_state(map, tree, page, gfp_flags); if (ret == 1) { invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); ClearPagePrivate(page); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a984a7bc119..81f8b4fd069 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2463,6 +2463,31 @@ err: } EXPORT_SYMBOL(extent_prepare_write); +/* + * a helper for releasepage, this tests for areas of the page that + * are locked or under IO and drops the related state bits if it is safe + * to drop the page. 
+ */ +int try_release_extent_state(struct extent_map_tree *map, + struct extent_io_tree *tree, struct page *page, + gfp_t mask) +{ + u64 start = (u64)page->index << PAGE_CACHE_SHIFT; + u64 end = start + PAGE_CACHE_SIZE - 1; + int ret = 1; + + if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0)) + ret = 0; + else { + if ((mask & GFP_NOFS) == GFP_NOFS) + mask = GFP_NOFS; + clear_extent_bit(tree, start, end, EXTENT_UPTODATE, + 1, 1, mask); + } + return ret; +} +EXPORT_SYMBOL(try_release_extent_state); + /* * a helper for releasepage. As long as there are no locked extents * in the range corresponding to the page, both state records and extent @@ -2475,8 +2500,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, struct extent_map *em; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; u64 end = start + PAGE_CACHE_SIZE - 1; - u64 orig_start = start; - int ret = 1; + if ((mask & __GFP_WAIT) && page->mapping->host->i_size > 16 * 1024 * 1024) { u64 len; @@ -2507,15 +2531,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, free_extent_map(em); } } - if (test_range_bit(tree, orig_start, end, EXTENT_IOBITS, 0)) - ret = 0; - else { - if ((mask & GFP_NOFS) == GFP_NOFS) - mask = GFP_NOFS; - clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE, - 1, 1, mask); - } - return ret; + return try_release_extent_state(map, tree, page, mask); } EXPORT_SYMBOL(try_release_extent_mapping); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 232300d13be..e4834614751 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -110,6 +110,9 @@ void extent_io_tree_empty_lru(struct extent_io_tree *tree); int try_release_extent_mapping(struct extent_map_tree *map, struct extent_io_tree *tree, struct page *page, gfp_t mask); +int try_release_extent_state(struct extent_map_tree *map, + struct extent_io_tree *tree, struct page *page, + gfp_t mask); int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, -- cgit v1.2.3-70-g09d2 From e1c4b7451e22f5b0a9fbccfa560ee7b80c35b8cd Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 22 Apr 2008 13:26:46 -0400 Subject: Fix btrfs_get_extent and get_block corner cases, and disable O_DIRECT reads The generic O_DIRECT code assumes all the bios have the same bdev, which isn't true for multi-device btrfs. 
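A toy illustration of the assumption being violated (hypothetical RAID0-style striping math, not the btrfs code): consecutive logical stripes land on different devices, so a single direct IO can need more than one bdev.

    #include <stdio.h>
    #include <stdint.h>

    #define TOY_NUM_DEVS   2
    #define TOY_STRIPE_LEN 65536ULL

    /* which device a logical byte lands on under simple striping */
    static int toy_dev_for(uint64_t logical)
    {
        return (int)((logical / TOY_STRIPE_LEN) % TOY_NUM_DEVS);
    }

    int main(void)
    {
        /* one O_DIRECT read crossing a stripe boundary spans two devices */
        printf("byte 0     -> dev %d\n", toy_dev_for(0));
        printf("byte 65536 -> dev %d\n", toy_dev_for(TOY_STRIPE_LEN));
        return 0;
    }

Since blockdev_direct_IO is handed exactly one block device for the whole request, there is no way to express this, hence the temporary -EINVAL.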
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 1 + fs/btrfs/inode.c | 32 +++++++++++++++++++++----------- fs/btrfs/volumes.c | 2 +- 3 files changed, 23 insertions(+), 12 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 81f8b4fd069..6e4bf029c6d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1694,6 +1694,7 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, } if (bio) { + bio->bi_size = 0; bio->bi_bdev = bdev; bio->bi_sector = first_sector; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e875c7c8a64..abfe86df02d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -494,6 +494,7 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio, bio->bi_end_io = failed_bio->bi_end_io; bio->bi_sector = failrec->logical >> 9; bio->bi_bdev = failed_bio->bi_bdev; + bio->bi_size = 0; bio_add_page(bio, page, failrec->len, start - page_offset(page)); btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror); return 0; @@ -2187,12 +2188,9 @@ again: spin_unlock(&em_tree->lock); if (em) { - if (em->start > start) { - printk("get_extent lookup [%Lu %Lu] em [%Lu %Lu]\n", - start, len, em->start, em->len); - WARN_ON(1); - } - if (em->block_start == EXTENT_MAP_INLINE && page) + if (em->start > start || em->start + em->len <= start) + free_extent_map(em); + else if (em->block_start == EXTENT_MAP_INLINE && page) free_extent_map(em); else goto out; @@ -2340,7 +2338,6 @@ insert: err = 0; spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); - /* it is possible that someone inserted the extent into the tree * while we had the lock dropped. It is also possible that * an overlapping map exists in the tree @@ -2348,6 +2345,11 @@ insert: if (ret == -EEXIST) { struct extent_map *existing; existing = lookup_extent_mapping(em_tree, start, len); + if (existing && (existing->start > start || + existing->start + existing->len <= start)) { + free_extent_map(existing); + existing = NULL; + } if (!existing) { existing = lookup_extent_mapping(em_tree, em->start, em->len); @@ -2388,6 +2390,7 @@ out: return em; } +#if 0 /* waiting for O_DIRECT reads */ static int btrfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { @@ -2405,22 +2408,24 @@ static int btrfs_get_block(struct inode *inode, sector_t iblock, if (!em || IS_ERR(em)) goto out; - if (em->start > start || em->start + em->len <= start) + if (em->start > start || em->start + em->len <= start) { goto out; + } if (em->block_start == EXTENT_MAP_INLINE) { ret = -EINVAL; goto out; } + len = em->start + em->len - start; + len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size))); + if (em->block_start == EXTENT_MAP_HOLE || em->block_start == EXTENT_MAP_DELALLOC) { + bh_result->b_size = len; goto out; } - len = em->start + em->len - start; - len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size))); - logical = start - em->start; logical = em->block_start + logical; @@ -2430,6 +2435,7 @@ static int btrfs_get_block(struct inode *inode, sector_t iblock, BUG_ON(ret); bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits; bh_result->b_size = min(map_length, len); + bh_result->b_bdev = multi->stripes[0].dev->bdev; set_buffer_mapped(bh_result); kfree(multi); @@ -2437,11 +2443,14 @@ out: free_extent_map(em); return ret; } +#endif static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { + return -EINVAL; +#if 0 struct file 
*file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -2450,6 +2459,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, btrfs_get_block, NULL); +#endif } static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5619e50583e..bccb5566fd8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1161,7 +1161,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, int total_devs = 1; length = bio->bi_size; - map_tree = &root->fs_info->mapping_tree; map_length = length; @@ -1192,6 +1191,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, } bio->bi_sector = multi->stripes[dev_nr].physical >> 9; dev = multi->stripes[dev_nr].dev; + bio->bi_bdev = dev->bdev; spin_lock(&dev->io_lock); dev->total_ios++; -- cgit v1.2.3-70-g09d2 From 004fb5750615bb3cf53e2aa50f6ef1ea57d97df9 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 25 Apr 2008 09:00:55 -0400 Subject: Btrfs: write_extent_pages came in 2.6.23 Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6e4bf029c6d..c1c980d1fbb 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2107,7 +2107,7 @@ done: return 0; } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,20) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) /* Taken directly from 2.6.23 for 2.6.18 back port */ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, void *data); -- cgit v1.2.3-70-g09d2 From 5e478dc9828ad33d7b08dcdf277e13f14a7c1be7 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 25 Apr 2008 09:10:45 -0400 Subject: Btrfs: write_cache_pages came in 2.6.22 Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c1c980d1fbb..2368536629e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2107,7 +2107,7 @@ done: return 0; } -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) /* Taken directly from 2.6.23 for 2.6.18 back port */ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, void *data); -- cgit v1.2.3-70-g09d2 From 4235298e4fc3c1a09f659cfe2fd285024eeb2241 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 28 Apr 2008 16:40:52 -0400 Subject: Btrfs: Drop some verbose printks Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 18 +++++------------- fs/btrfs/extent_io.c | 13 +++++-------- fs/btrfs/volumes.c | 2 -- 3 files changed, 10 insertions(+), 23 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a9ce491d279..1c7d84aff86 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -214,25 +214,18 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, while (1) { ret = read_extent_buffer_pages(io_tree, eb, start, 1, btree_get_extent, mirror_num); - if (!ret) { - if (mirror_num) -printk("good read %Lu mirror %d total %d\n", eb->start, mirror_num, num_copies); + if (!ret) return ret; - } + num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); -printk("failed to read %Lu mirror %d total %d\n", eb->start, mirror_num, num_copies); - if 
(num_copies == 1) { -printk("reading %Lu failed only one copy\n", eb->start); + if (num_copies == 1) return ret; - } + mirror_num++; - if (mirror_num > num_copies) { -printk("bailing at mirror %d of %d\n", mirror_num, num_copies); + if (mirror_num > num_copies) return ret; - } } -printk("read extent buffer page last\n"); return -EIO; } @@ -322,7 +315,6 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, btrfs_clear_buffer_defrag(eb); found_start = btrfs_header_bytenr(eb); if (found_start != start) { -printk("bad start on %Lu found %Lu\n", eb->start, found_start); ret = -EIO; goto err; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2368536629e..dd403b426ff 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3003,17 +3003,18 @@ int extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb) { int ret = 0; - int ret2; unsigned long num_pages; unsigned long i; struct page *page; int pg_uptodate = 1; if (eb->flags & EXTENT_UPTODATE) - ret = 1; + return 1; - ret2 = test_range_bit(tree, eb->start, eb->start + eb->len - 1, + ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1, EXTENT_UPTODATE, 1); + if (ret) + return ret; num_pages = num_extent_pages(eb->start, eb->len); for (i = 0; i < num_pages; i++) { @@ -3023,11 +3024,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree, page = extent_buffer_page(eb, i); if (!PageUptodate(page)) { pg_uptodate = 0; break; } } - if ((ret || ret2) && !pg_uptodate) { -printk("uptodate error2 eb %Lu ret %d ret2 %d pg_uptodate %d\n", eb->start, ret, ret2, pg_uptodate); - WARN_ON(1); - } - return (ret || ret2); + return pg_uptodate; } EXPORT_SYMBOL(extent_buffer_uptodate); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6476ecbf132..9a724113456 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1282,7 +1282,6 @@ again: num_stripes, sub_stripes); index = 0; -printk("new chunk type %Lu start %Lu size %Lu\n", type, key.offset, *num_bytes); while(index < num_stripes) { struct btrfs_stripe *stripe; BUG_ON(list_empty(&private_devs)); @@ -1299,7 +1298,6 @@ printk("new chunk type %Lu start %Lu size %Lu\n", type, key.offset, *num_bytes); BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset, calc_size, &dev_offset); BUG_ON(ret); -printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n", key.offset, calc_size, device->devid, type); device->bytes_used += calc_size; ret = btrfs_update_device(trans, device); BUG_ON(ret); -- cgit v1.2.3-70-g09d2 From 1259ab75c62462b8ffad90067b5e1f6312786a18 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 12 May 2008 13:39:03 -0400 Subject: Btrfs: Handle write errors on raid1 and raid10 When duplicate copies exist, writes are allowed to fail to one of those copies. This changeset includes a few changes that allow the FS to continue even when some IOs fail. It also adds verification of the parent generation number for btree blocks. This generation is stored in the pointer to a block, and it ensures that missed writes are detected.
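The tolerance rule can be summarized with a small user-space sketch (a simplified model of the multi-stripe completion path; names are invented for illustration): the overall write only reports an error once more mirrors fail than max_errors allows.

    #include <stdio.h>

    struct toy_multi {
        int stripes_pending;    /* writes still in flight */
        int error;              /* failed copies so far */
        int max_errors;         /* how many failures we can absorb */
    };

    /*
     * Called once per finished stripe: returns 1 while copies are still
     * in flight, 0 when the whole bio completes OK, -5 (EIO) when too
     * many copies failed.
     */
    static int toy_stripe_done(struct toy_multi *m, int err)
    {
        if (err)
            m->error++;
        if (--m->stripes_pending)
            return 1;
        return m->error > m->max_errors ? -5 : 0;
    }

    int main(void)
    {
        /* raid1: two copies, one failed write is tolerated */
        struct toy_multi m = { .stripes_pending = 2, .error = 0,
                               .max_errors = 1 };

        toy_stripe_done(&m, -5);                        /* first copy fails */
        printf("result: %d\n", toy_stripe_done(&m, 0)); /* 0 == success */
        return 0;
    }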
Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 2 +- fs/btrfs/disk-io.c | 56 ++++++++++++++++++++++++++++++++++++++---- fs/btrfs/disk-io.h | 2 +- fs/btrfs/extent-tree.c | 4 +-- fs/btrfs/extent_io.c | 49 +++++++++++++++++++++++++++++++------ fs/btrfs/extent_io.h | 7 +++++- fs/btrfs/inode.c | 66 +++++++++++++++++++++++++++++++------------------- fs/btrfs/tree-defrag.c | 7 +++--- fs/btrfs/volumes.c | 11 ++++++--- 9 files changed, 155 insertions(+), 49 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 289d71d8653..02e571e6ee6 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -379,7 +379,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, cur = btrfs_find_tree_block(root, blocknr, blocksize); if (cur) - uptodate = btrfs_buffer_uptodate(cur); + uptodate = btrfs_buffer_uptodate(cur, gen); else uptodate = 0; if (!cur || !uptodate) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index edee7a44f86..574b1245964 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -205,6 +205,33 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, return 0; } +static int verify_parent_transid(struct extent_io_tree *io_tree, + struct extent_buffer *eb, u64 parent_transid) +{ + int ret; + + if (!parent_transid || btrfs_header_generation(eb) == parent_transid) + return 0; + + lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); + if (extent_buffer_uptodate(io_tree, eb) && + btrfs_header_generation(eb) == parent_transid) { + ret = 0; + goto out; + } + printk("parent transid verify failed on %llu wanted %llu found %llu\n", + (unsigned long long)eb->start, + (unsigned long long)parent_transid, + (unsigned long long)btrfs_header_generation(eb)); + ret = 1; +out: + clear_extent_buffer_uptodate(io_tree, eb); + unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, + GFP_NOFS); + return ret; + +} + static int btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) @@ -218,7 +245,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, while (1) { ret = read_extent_buffer_pages(io_tree, eb, start, 1, btree_get_extent, mirror_num); - if (!ret) + if (!ret && + !verify_parent_transid(io_tree, eb, parent_transid)) return ret; num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, @@ -330,6 +358,13 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, ret = -EIO; goto err; } + if (memcmp_extent_buffer(eb, root->fs_info->fsid, + (unsigned long)btrfs_header_fsid(eb), + BTRFS_FSID_SIZE)) { + printk("bad fsid on block %Lu\n", eb->start); + ret = -EIO; + goto err; + } found_level = btrfs_header_level(eb); ret = csum_tree_block(root, eb, 1); @@ -1363,7 +1398,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) "I/O error on %s\n", bdevname(bh->b_bdev, b)); } - set_buffer_write_io_error(bh); + /* note, we dont' set_buffer_write_io_error because we have + * our own ways of dealing with the IO errors + */ clear_buffer_uptodate(bh); } unlock_buffer(bh); @@ -1459,7 +1496,8 @@ int write_all_supers(struct btrfs_root *root) ret = submit_bh(WRITE, bh); BUG_ON(ret); wait_on_buffer(bh); - BUG_ON(!buffer_uptodate(bh)); + if (!buffer_uptodate(bh)) + total_errors++; } else { total_errors++; } @@ -1607,10 +1645,18 @@ int close_ctree(struct btrfs_root *root) return 0; } -int btrfs_buffer_uptodate(struct extent_buffer *buf) +int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) { + 
int ret; struct inode *btree_inode = buf->first_page->mapping->host; - return extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf); + + ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf); + if (!ret) + return ret; + + ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, + parent_transid); + return !ret; } int btrfs_set_buffer_uptodate(struct extent_buffer *buf) diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index e29c895d523..30d1ed293c2 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -56,7 +56,7 @@ int btrfs_insert_dev_radix(struct btrfs_root *root, void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); -int btrfs_buffer_uptodate(struct extent_buffer *buf); +int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); int btrfs_set_buffer_uptodate(struct extent_buffer *buf); int wait_on_tree_block_writeback(struct btrfs_root *root, struct extent_buffer *buf); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index db07dde4a87..605018c6045 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1366,7 +1366,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, if (!pending) { buf = btrfs_find_tree_block(root, bytenr, num_bytes); if (buf) { - if (btrfs_buffer_uptodate(buf)) { + if (btrfs_buffer_uptodate(buf, 0)) { u64 transid = root->fs_info->running_transaction->transid; u64 header_transid = @@ -2151,7 +2151,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, continue; } next = btrfs_find_tree_block(root, bytenr, blocksize); - if (!next || !btrfs_buffer_uptodate(next)) { + if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) { free_extent_buffer(next); reada_walk_down(root, cur, path->slots[*level]); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index dd403b426ff..2a3624adc0c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1366,7 +1366,7 @@ static int end_bio_extent_writepage(struct bio *bio, unsigned int bytes_done, int err) #endif { - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + int uptodate = err == 0; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct extent_state *state = bio->bi_private; struct extent_io_tree *tree = state->tree; @@ -1375,6 +1375,7 @@ static int end_bio_extent_writepage(struct bio *bio, u64 end; u64 cur; int whole_page; + int ret; unsigned long flags; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) @@ -1395,17 +1396,30 @@ static int end_bio_extent_writepage(struct bio *bio, if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); + if (tree->ops && tree->ops->writepage_end_io_hook) { + ret = tree->ops->writepage_end_io_hook(page, start, + end, state); + if (ret) + uptodate = 0; + } + + if (!uptodate && tree->ops && + tree->ops->writepage_io_failed_hook) { + ret = tree->ops->writepage_io_failed_hook(bio, page, + start, end, state); + if (ret == 0) { + state = NULL; + uptodate = (err == 0); + continue; + } + } + if (!uptodate) { clear_extent_uptodate(tree, start, end, GFP_ATOMIC); ClearPageUptodate(page); SetPageError(page); } - if (tree->ops && tree->ops->writepage_end_io_hook) { - tree->ops->writepage_end_io_hook(page, start, end, - state); - } - /* * bios can get merged in funny ways, and so we need to * be careful with the state variable. 
We know the @@ -2073,9 +2087,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } else { ret = 0; } - if (ret) + if (ret) { SetPageError(page); - else { + } else { unsigned long max_nr = end_index + 1; set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { @@ -2948,6 +2962,25 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, } EXPORT_SYMBOL(set_extent_buffer_dirty); +int clear_extent_buffer_uptodate(struct extent_io_tree *tree, + struct extent_buffer *eb) +{ + unsigned long i; + struct page *page; + unsigned long num_pages; + + num_pages = num_extent_pages(eb->start, eb->len); + eb->flags &= ~EXTENT_UPTODATE; + + clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, + GFP_NOFS); + for (i = 0; i < num_pages; i++) { + page = extent_buffer_page(eb, i); + ClearPageUptodate(page); + } + return 0; +} + int set_extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e4834614751..f1960dafaa1 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -36,9 +36,12 @@ struct extent_io_ops { int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, u64 start, u64 end, struct extent_state *state); + int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, + u64 start, u64 end, + struct extent_state *state); int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); - void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, + int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits); @@ -212,6 +215,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); int set_extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb); +int clear_extent_buffer_uptodate(struct extent_io_tree *tree, + struct extent_buffer *eb); int extent_buffer_uptodate(struct extent_io_tree *tree, struct extent_buffer *eb); int map_extent_buffer(struct extent_buffer *eb, unsigned long offset, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a492fd238c8..08760ff9bab 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -430,9 +430,9 @@ struct io_failure_record { int last_mirror; }; -int btrfs_readpage_io_failed_hook(struct bio *failed_bio, - struct page *page, u64 start, u64 end, - struct extent_state *state) +int btrfs_io_failed_hook(struct bio *failed_bio, + struct page *page, u64 start, u64 end, + struct extent_state *state) { struct io_failure_record *failrec = NULL; u64 private; @@ -443,6 +443,7 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio, struct bio *bio; int num_copies; int ret; + int rw; u64 logical; ret = get_state_private(failure_tree, start, &private); @@ -505,7 +506,41 @@ int btrfs_readpage_io_failed_hook(struct bio *failed_bio, bio->bi_bdev = failed_bio->bi_bdev; bio->bi_size = 0; bio_add_page(bio, page, failrec->len, start - page_offset(page)); - btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror); + if (failed_bio->bi_rw & (1 << BIO_RW)) + rw = WRITE; + else + rw = READ; + + BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, + failrec->last_mirror); + return 0; +} + +int btrfs_clean_io_failures(struct inode *inode, u64 start) +{ + u64 private; + u64 private_failure; + struct io_failure_record *failure; + int ret; + + private = 0; + if 
(count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, + (u64)-1, 1, EXTENT_DIRTY)) { + ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, + start, &private_failure); + if (ret == 0) { + failure = (struct io_failure_record *)(unsigned long) + private_failure; + set_state_private(&BTRFS_I(inode)->io_failure_tree, + failure->start, 0); + clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, + failure->start, + failure->start + failure->len - 1, + EXTENT_DIRTY | EXTENT_LOCKED, + GFP_NOFS); + kfree(failure); + } + } return 0; } @@ -547,26 +582,7 @@ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, /* if the io failure tree for this inode is non-empty, * check to see if we've recovered from a failed IO */ - private = 0; - if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, - (u64)-1, 1, EXTENT_DIRTY)) { - u64 private_failure; - struct io_failure_record *failure; - ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, - start, &private_failure); - if (ret == 0) { - failure = (struct io_failure_record *)(unsigned long) - private_failure; - set_state_private(&BTRFS_I(inode)->io_failure_tree, - failure->start, 0); - clear_extent_bits(&BTRFS_I(inode)->io_failure_tree, - failure->start, - failure->start + failure->len - 1, - EXTENT_DIRTY | EXTENT_LOCKED, - GFP_NOFS); - kfree(failure); - } - } + btrfs_clean_io_failures(inode, start); return 0; zeroit: @@ -3657,7 +3673,7 @@ static struct extent_io_ops btrfs_extent_io_ops = { .merge_bio_hook = btrfs_merge_bio_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, - .readpage_io_failed_hook = btrfs_readpage_io_failed_hook, + .readpage_io_failed_hook = btrfs_io_failed_hook, .set_bit_hook = btrfs_set_bit_hook, .clear_bit_hook = btrfs_clear_bit_hook, }; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 5085e9e693b..c02e2bf2f02 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -51,6 +51,7 @@ static int defrag_walk_down(struct btrfs_trans_handle *trans, struct extent_buffer *next; struct extent_buffer *cur; u64 bytenr; + u64 ptr_gen; int ret = 0; int is_extent = 0; @@ -93,11 +94,12 @@ static int defrag_walk_down(struct btrfs_trans_handle *trans, break; } bytenr = btrfs_node_blockptr(cur, path->slots[*level]); + ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); if (cache_only) { next = btrfs_find_tree_block(root, bytenr, btrfs_level_size(root, *level - 1)); - if (!next || !btrfs_buffer_uptodate(next) || + if (!next || !btrfs_buffer_uptodate(next, ptr_gen) || !btrfs_buffer_defrag(next)) { free_extent_buffer(next); path->slots[*level]++; @@ -106,8 +108,7 @@ static int defrag_walk_down(struct btrfs_trans_handle *trans, } else { next = read_tree_block(root, bytenr, btrfs_level_size(root, *level - 1), - btrfs_node_ptr_generation(cur, - path->slots[*level])); + ptr_gen); } ret = btrfs_cow_block(trans, root, next, path->nodes[*level], path->slots[*level], &next); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b5d7bd1915b..5fc7fb48147 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1807,14 +1807,19 @@ static int end_bio_multi_stripe(struct bio *bio, if (atomic_dec_and_test(&multi->stripes_pending)) { bio->bi_private = multi->private; bio->bi_end_io = multi->end_io; - /* only send an error to the higher layers if it is * beyond the tolerance of the multi-bio */ - if (atomic_read(&multi->error) > multi->max_errors) + if (atomic_read(&multi->error) > multi->max_errors) { err = -EIO; - else + } else if 
(err) { + /* + * this bio is actually up to date, we didn't + * go over the max number of errors + */ + set_bit(BIO_UPTODATE, &bio->bi_flags); err = 0; + } kfree(multi); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) -- cgit v1.2.3-70-g09d2 From 211c17f51f46dc6c308c742098273dd46b5ca59c Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 15 May 2008 09:13:45 -0400 Subject: Fix corners in writepage and btrfs_truncate_page The extent_io writepage calls needed an extra check for discarding pages that started on the last byte in the file. btrfs_truncate_page needed checks to make sure the page was still part of the file after reading it, and most importantly, needed to wait for all IO to the page to finish before freeing the corresponding extents on disk. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 15 +++++++++------ fs/btrfs/file.c | 8 -------- fs/btrfs/inode.c | 9 ++++++++- 3 files changed, 17 insertions(+), 15 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2a3624adc0c..17c508a941e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1988,9 +1988,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 nr_delalloc; u64 delalloc_end; + WARN_ON(!PageLocked(page)); - if (page->index > end_index) { - clear_extent_dirty(tree, start, page_end, GFP_NOFS); + page_offset = i_size & (PAGE_CACHE_SIZE - 1); + if (page->index > end_index || + (page->index == end_index && !page_offset)) { + page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); return 0; } @@ -1998,13 +2001,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (page->index == end_index) { char *userpage; - size_t offset = i_size & (PAGE_CACHE_SIZE - 1); - userpage = kmap_atomic(page, KM_USER0); - memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset); - flush_dcache_page(page); + memset(userpage + page_offset, 0, + PAGE_CACHE_SIZE - page_offset); kunmap_atomic(userpage, KM_USER0); + flush_dcache_page(page); } + page_offset = 0; set_page_extent_mapped(page); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index df97d470812..d968900c697 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -305,7 +305,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, (inline_size & (root->sectorsize -1)) == 0 || inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { u64 last_end; - u64 existing_delalloc = 0; for (i = 0; i < num_pages; i++) { struct page *p = pages[i]; @@ -315,13 +314,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, last_end = (u64)(pages[num_pages -1]->index) << PAGE_CACHE_SHIFT; last_end += PAGE_CACHE_SIZE - 1; - if (start_pos < isize) { - u64 delalloc_start = start_pos; - existing_delalloc = count_range_bits(io_tree, - &delalloc_start, - end_of_last_block, (u64)-1, - EXTENT_DELALLOC); - } set_extent_delalloc(io_tree, start_pos, end_of_last_block, GFP_NOFS); btrfs_add_ordered_inode(inode); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 08760ff9bab..40b4a8ec17f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1180,19 +1180,26 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) goto out; ret = -ENOMEM; +again: page = grab_cache_page(mapping, index); if (!page) goto out; if (!PageUptodate(page)) { ret = btrfs_readpage(NULL, page); lock_page(page); + if (page->mapping != mapping) { + unlock_page(page); + page_cache_release(page); + goto again; + } if (!PageUptodate(page)) {
ret = -EIO; goto out; } } - page_start = (u64)page->index << PAGE_CACHE_SHIFT; + page_start = (u64)page->index << PAGE_CACHE_SHIFT; + wait_on_page_writeback(page); ret = btrfs_cow_one_page(inode, page, offset); unlock_page(page); -- cgit v1.2.3-70-g09d2 From 925baeddc5b0764a53f2214a1253251bab0e0324 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 25 Jun 2008 16:01:30 -0400 Subject: Btrfs: Start btree concurrency work. The allocation trees and the chunk trees are serialized via their own dedicated mutexes. This means allocation locking is still not very fine grained. The main FS btree is protected by locks on each block in the btree. Locks are taken top / down, and as processing finishes on a given level of the tree, the lock is released after locking the lower level. The end result of a search is now a path where only the lowest level is locked. Releasing or freeing the path drops any locks held. Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.c | 386 ++++++++++++++++++++++++++++++++++++------------- fs/btrfs/ctree.h | 23 ++- fs/btrfs/disk-io.c | 13 +- fs/btrfs/extent-tree.c | 252 +++++++++++++++++++++----------- fs/btrfs/extent_io.c | 8 - fs/btrfs/inode.c | 8 +- fs/btrfs/ioctl.c | 8 +- fs/btrfs/locking.c | 50 +++++++ fs/btrfs/locking.h | 26 ++++ fs/btrfs/transaction.c | 15 +- fs/btrfs/tree-defrag.c | 2 + 12 files changed, 579 insertions(+), 214 deletions(-) create mode 100644 fs/btrfs/locking.c create mode 100644 fs/btrfs/locking.h (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 7ed6b39e42d..0e2dcc718de 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -6,7 +6,7 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ hash.o file-item.o inode-item.o inode-map.o disk-io.o \ transaction.o bit-radix.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ - extent_io.o volumes.o async-thread.o ioctl.o + extent_io.o volumes.o async-thread.o ioctl.o locking.o btrfs-$(CONFIG_FS_POSIX_ACL) += acl.o else diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 1d404bde3cf..75625c68fdd 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -21,6 +21,7 @@ #include "disk-io.h" #include "transaction.h" #include "print-tree.h" +#include "locking.h" static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level); @@ -64,12 +65,47 @@ void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) int i; for (i = 0; i < BTRFS_MAX_LEVEL; i++) { if (!p->nodes[i]) - break; + continue; + if (p->locks[i]) { + btrfs_tree_unlock(p->nodes[i]); + p->locks[i] = 0; + } free_extent_buffer(p->nodes[i]); } memset(p, 0, sizeof(*p)); } +struct extent_buffer *btrfs_root_node(struct btrfs_root *root) +{ + struct extent_buffer *eb; + spin_lock(&root->node_lock); + eb = root->node; + extent_buffer_get(eb); + spin_unlock(&root->node_lock); + return eb; +} + +struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) +{ + struct extent_buffer *eb; + + while(1) { + eb = btrfs_root_node(root); + btrfs_tree_lock(eb); + + spin_lock(&root->node_lock); + if (eb == root->node) { + spin_unlock(&root->node_lock); + break; + } + spin_unlock(&root->node_lock); + + btrfs_tree_unlock(eb); + free_extent_buffer(eb); + } + return eb; +} + static void add_root_to_dirty_list(struct btrfs_root *root) { if (root->track_dirty && list_empty(&root->dirty_list)) { @@ -111,7 +147,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, }
else { first_key.objectid = 0; } - cow = __btrfs_alloc_free_block(trans, new_root, buf->len, + cow = btrfs_alloc_free_block(trans, new_root, buf->len, new_root_objectid, trans->transid, first_key.objectid, level, buf->start, 0); @@ -151,8 +187,14 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, int ret = 0; int different_trans = 0; int level; + int unlock_orig = 0; struct btrfs_key first_key; + if (*cow_ret == buf) + unlock_orig = 1; + + WARN_ON(!btrfs_tree_locked(buf)); + if (root->ref_cows) { root_gen = trans->transid; } else { @@ -172,7 +214,7 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, } else { first_key.objectid = 0; } - cow = __btrfs_alloc_free_block(trans, root, buf->len, + cow = btrfs_alloc_free_block(trans, root, buf->len, root->root_key.objectid, root_gen, first_key.objectid, level, search_start, empty_size); @@ -196,9 +238,14 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, } if (buf == root->node) { + WARN_ON(parent && parent != buf); root_gen = btrfs_header_generation(buf); + + spin_lock(&root->node_lock); root->node = cow; extent_buffer_get(cow); + spin_unlock(&root->node_lock); + if (buf != root->commit_root) { btrfs_free_extent(trans, root, buf->start, buf->len, root->root_key.objectid, @@ -219,6 +266,8 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_header_owner(parent), root_gen, 0, 0, 1); } + if (unlock_orig) + btrfs_tree_unlock(buf); free_extent_buffer(buf); btrfs_mark_buffer_dirty(cow); *cow_ret = cow; @@ -316,6 +365,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, int progress_passed = 0; struct btrfs_disk_key disk_key; + /* FIXME this code needs locking */ + return 0; + parent_level = btrfs_header_level(parent); if (cache_only && parent_level != 1) return 0; @@ -729,6 +781,7 @@ static int balance_level(struct btrfs_trans_handle *trans, return 0; mid = path->nodes[level]; + WARN_ON(!path->locks[level]); WARN_ON(btrfs_header_generation(mid) != trans->transid); orig_ptr = btrfs_node_blockptr(mid, orig_slot); @@ -749,14 +802,21 @@ static int balance_level(struct btrfs_trans_handle *trans, /* promote the child to a root */ child = read_node_slot(root, mid, 0); + btrfs_tree_lock(child); BUG_ON(!child); ret = btrfs_cow_block(trans, root, child, mid, 0, &child); BUG_ON(ret); + spin_lock(&root->node_lock); root->node = child; + spin_unlock(&root->node_lock); + add_root_to_dirty_list(root); + btrfs_tree_unlock(child); + path->locks[level] = 0; path->nodes[level] = NULL; clean_tree_block(trans, root, mid); + btrfs_tree_unlock(mid); /* once for the path */ free_extent_buffer(mid); ret = btrfs_free_extent(trans, root, mid->start, mid->len, @@ -775,6 +835,7 @@ static int balance_level(struct btrfs_trans_handle *trans, left = read_node_slot(root, parent, pslot - 1); if (left) { + btrfs_tree_lock(left); wret = btrfs_cow_block(trans, root, left, parent, pslot - 1, &left); if (wret) { @@ -784,6 +845,7 @@ static int balance_level(struct btrfs_trans_handle *trans, } right = read_node_slot(root, parent, pslot + 1); if (right) { + btrfs_tree_lock(right); wret = btrfs_cow_block(trans, root, right, parent, pslot + 1, &right); if (wret) { @@ -815,6 +877,7 @@ static int balance_level(struct btrfs_trans_handle *trans, u32 blocksize = right->len; clean_tree_block(trans, root, right); + btrfs_tree_unlock(right); free_extent_buffer(right); right = NULL; wret = del_ptr(trans, root, path, level + 1, pslot + @@ -862,7 +925,9 @@ static int balance_level(struct btrfs_trans_handle *trans, u64 root_gen = btrfs_header_generation(parent); 
u64 bytenr = mid->start; u32 blocksize = mid->len; + clean_tree_block(trans, root, mid); + btrfs_tree_unlock(mid); free_extent_buffer(mid); mid = NULL; wret = del_ptr(trans, root, path, level + 1, pslot); @@ -885,11 +950,14 @@ static int balance_level(struct btrfs_trans_handle *trans, if (left) { if (btrfs_header_nritems(left) > orig_slot) { extent_buffer_get(left); + /* left was locked after cow */ path->nodes[level] = left; path->slots[level + 1] -= 1; path->slots[level] = orig_slot; - if (mid) + if (mid) { + btrfs_tree_unlock(mid); free_extent_buffer(mid); + } } else { orig_slot -= btrfs_header_nritems(left); path->slots[level] = orig_slot; @@ -901,10 +969,15 @@ static int balance_level(struct btrfs_trans_handle *trans, btrfs_node_blockptr(path->nodes[level], path->slots[level])) BUG(); enospc: - if (right) + if (right) { + btrfs_tree_unlock(right); free_extent_buffer(right); - if (left) + } + if (left) { + if (path->nodes[level] != left) + btrfs_tree_unlock(left); free_extent_buffer(left); + } return ret; } @@ -942,6 +1015,8 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, /* first, try to make some room in the middle buffer */ if (left) { u32 left_nr; + + btrfs_tree_lock(left); left_nr = btrfs_header_nritems(left); if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { wret = 1; @@ -967,24 +1042,28 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, path->nodes[level] = left; path->slots[level + 1] -= 1; path->slots[level] = orig_slot; + btrfs_tree_unlock(mid); free_extent_buffer(mid); } else { orig_slot -= btrfs_header_nritems(left); path->slots[level] = orig_slot; + btrfs_tree_unlock(left); free_extent_buffer(left); } return 0; } + btrfs_tree_unlock(left); free_extent_buffer(left); } - right= read_node_slot(root, parent, pslot + 1); + right = read_node_slot(root, parent, pslot + 1); /* * then try to empty the right most buffer into the middle */ if (right) { u32 right_nr; + btrfs_tree_lock(right); right_nr = btrfs_header_nritems(right); if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { wret = 1; @@ -1013,12 +1092,15 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, path->slots[level + 1] += 1; path->slots[level] = orig_slot - btrfs_header_nritems(mid); + btrfs_tree_unlock(mid); free_extent_buffer(mid); } else { + btrfs_tree_unlock(right); free_extent_buffer(right); } return 0; } + btrfs_tree_unlock(right); free_extent_buffer(right); } return 1; @@ -1050,6 +1132,8 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, return; node = path->nodes[level]; + WARN_ON(!path->skip_locking && !btrfs_tree_locked(node)); + search = btrfs_node_blockptr(node, slot); blocksize = btrfs_level_size(root, level - 1); eb = btrfs_find_tree_block(root, search, blocksize); @@ -1098,6 +1182,39 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, highest_read = search; } } + +static void unlock_up(struct btrfs_path *path, int level, int lowest_unlock) +{ + int i; + int skip_level = level; + struct extent_buffer *t; + + for (i = level; i < BTRFS_MAX_LEVEL; i++) { + if (!path->nodes[i]) + break; + if (!path->locks[i]) + break; + if (path->slots[i] == 0) { + skip_level = i + 1; + continue; + } + if (path->keep_locks) { + u32 nritems; + t = path->nodes[i]; + nritems = btrfs_header_nritems(t); + if (path->slots[i] >= nritems - 1) { + skip_level = i + 1; + continue; + } + } + t = path->nodes[i]; + if (i >= lowest_unlock && i > skip_level && path->locks[i]) { + 
btrfs_tree_unlock(t); + path->locks[i] = 0; + } + } +} + /* * look for key in the tree. path is filled in with nodes along the way * if key is found, we return zero and you can find the item in the leaf @@ -1120,15 +1237,27 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root int ret; int level; int should_reada = p->reada; + int lowest_unlock = 1; u8 lowest_level = 0; lowest_level = p->lowest_level; WARN_ON(lowest_level && ins_len); WARN_ON(p->nodes[0] != NULL); - WARN_ON(!mutex_is_locked(&root->fs_info->fs_mutex)); + // WARN_ON(!mutex_is_locked(&root->fs_info->fs_mutex)); + WARN_ON(root == root->fs_info->extent_root && + !mutex_is_locked(&root->fs_info->alloc_mutex)); + WARN_ON(root == root->fs_info->chunk_root && + !mutex_is_locked(&root->fs_info->chunk_mutex)); + WARN_ON(root == root->fs_info->dev_root && + !mutex_is_locked(&root->fs_info->chunk_mutex)); + if (ins_len < 0) + lowest_unlock = 2; again: - b = root->node; - extent_buffer_get(b); + if (!p->skip_locking) + b = btrfs_lock_root_node(root); + else + b = btrfs_root_node(root); + while (b) { level = btrfs_header_level(b); if (cow) { @@ -1147,9 +1276,12 @@ again: WARN_ON(1); level = btrfs_header_level(b); p->nodes[level] = b; + if (!p->skip_locking) + p->locks[level] = 1; ret = check_block(root, p, level); if (ret) return -1; + ret = bin_search(b, key, level, &slot); if (level != 0) { if (ret && slot > 0) @@ -1177,14 +1309,19 @@ again: BUG_ON(btrfs_header_nritems(b) == 1); } /* this is only true while dropping a snapshot */ - if (level == lowest_level) + if (level == lowest_level) { + unlock_up(p, level, lowest_unlock); break; + } if (should_reada) reada_for_search(root, p, level, slot, key->objectid); b = read_node_slot(root, b, slot); + if (!p->skip_locking) + btrfs_tree_lock(b); + unlock_up(p, level, lowest_unlock); } else { p->slots[level] = slot; if (ins_len > 0 && btrfs_leaf_free_space(root, b) < @@ -1195,6 +1332,7 @@ again: if (sret) return sret; } + unlock_up(p, level, lowest_unlock); return ret; } } @@ -1225,6 +1363,13 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans, break; t = path->nodes[i]; btrfs_set_node_key(t, key, tslot); + if (!btrfs_tree_locked(path->nodes[i])) { + int ii; +printk("fixup without lock on level %d\n", btrfs_header_level(path->nodes[i])); + for (ii = 0; ii < BTRFS_MAX_LEVEL; ii++) { +printk("level %d slot %d\n", ii, path->slots[ii]); + } + } btrfs_mark_buffer_dirty(path->nodes[i]); if (tslot != 0) break; @@ -1370,6 +1515,7 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, u64 lower_gen; struct extent_buffer *lower; struct extent_buffer *c; + struct extent_buffer *old; struct btrfs_disk_key lower_key; BUG_ON(path->nodes[level]); @@ -1386,12 +1532,13 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, else btrfs_node_key(lower, &lower_key, 0); - c = __btrfs_alloc_free_block(trans, root, root->nodesize, + c = btrfs_alloc_free_block(trans, root, root->nodesize, root->root_key.objectid, root_gen, lower_key.objectid, level, root->node->start, 0); if (IS_ERR(c)) return PTR_ERR(c); + memset_extent_buffer(c, 0, 0, root->nodesize); btrfs_set_header_nritems(c, 1); btrfs_set_header_level(c, level); @@ -1416,23 +1563,31 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(c); - /* the super has an extra ref to root->node */ - free_extent_buffer(root->node); + spin_lock(&root->node_lock); + old = root->node; root->node = c; + spin_unlock(&root->node_lock); + + /* the super has an extra ref 
to root->node */ + free_extent_buffer(old); + add_root_to_dirty_list(root); extent_buffer_get(c); path->nodes[level] = c; + path->locks[level] = 1; path->slots[level] = 0; if (root->ref_cows && lower_gen != trans->transid) { struct btrfs_path *back_path = btrfs_alloc_path(); int ret; + mutex_lock(&root->fs_info->alloc_mutex); ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root, path, lower->start, root->root_key.objectid, trans->transid, 0, 0); BUG_ON(ret); + mutex_unlock(&root->fs_info->alloc_mutex); btrfs_free_path(back_path); } return 0; @@ -1521,7 +1676,7 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root root_gen = 0; btrfs_node_key(c, &disk_key, 0); - split = __btrfs_alloc_free_block(trans, root, root->nodesize, + split = btrfs_alloc_free_block(trans, root, root->nodesize, root->root_key.objectid, root_gen, btrfs_disk_key_objectid(&disk_key), @@ -1564,10 +1719,12 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root if (path->slots[level] >= mid) { path->slots[level] -= mid; + btrfs_tree_unlock(c); free_extent_buffer(c); path->nodes[level] = split; path->slots[level + 1] += 1; } else { + btrfs_tree_unlock(split); free_extent_buffer(split); } return ret; @@ -1648,30 +1805,24 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root return 1; right = read_node_slot(root, upper, slot + 1); + btrfs_tree_lock(right); free_space = btrfs_leaf_free_space(root, right); - if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(right); - return 1; - } + if (free_space < data_size + sizeof(struct btrfs_item)) + goto out_unlock; /* cow and double check */ ret = btrfs_cow_block(trans, root, right, upper, slot + 1, &right); - if (ret) { - free_extent_buffer(right); - return 1; - } + if (ret) + goto out_unlock; + free_space = btrfs_leaf_free_space(root, right); - if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(right); - return 1; - } + if (free_space < data_size + sizeof(struct btrfs_item)) + goto out_unlock; left_nritems = btrfs_header_nritems(left); - if (left_nritems == 0) { - free_extent_buffer(right); - return 1; - } + if (left_nritems == 0) + goto out_unlock; if (empty) nr = 0; @@ -1707,10 +1858,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root left->map_token = NULL; } - if (push_items == 0) { - free_extent_buffer(right); - return 1; - } + if (push_items == 0) + goto out_unlock; if (!empty && push_items == left_nritems) WARN_ON(1); @@ -1778,14 +1927,24 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root /* then fixup the leaf pointer in the path */ if (path->slots[0] >= left_nritems) { path->slots[0] -= left_nritems; + if (btrfs_header_nritems(path->nodes[0]) == 0) + clean_tree_block(trans, root, path->nodes[0]); + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[1] += 1; } else { + btrfs_tree_unlock(right); free_extent_buffer(right); } return 0; + +out_unlock: + btrfs_tree_unlock(right); + free_extent_buffer(right); + return 1; } + /* * push some data in the path leaf to the left, trying to free up at * least data_size bytes. 
returns zero if the push worked, nonzero otherwise @@ -1823,10 +1982,11 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root } left = read_node_slot(root, path->nodes[1], slot - 1); + btrfs_tree_lock(left); free_space = btrfs_leaf_free_space(root, left); if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } /* cow and double check */ @@ -1834,14 +1994,14 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root path->nodes[1], slot - 1, &left); if (ret) { /* we hit -ENOSPC, but it isn't fatal here */ - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } free_space = btrfs_leaf_free_space(root, left); if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } if (empty) @@ -1876,8 +2036,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root } if (push_items == 0) { - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } if (!empty && push_items == btrfs_header_nritems(right)) WARN_ON(1); @@ -1975,15 +2135,23 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root /* then fixup the leaf pointer in the path */ if (path->slots[0] < push_items) { path->slots[0] += old_left_nritems; + if (btrfs_header_nritems(path->nodes[0]) == 0) + clean_tree_block(trans, root, path->nodes[0]); + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = left; path->slots[1] -= 1; } else { + btrfs_tree_unlock(left); free_extent_buffer(left); path->slots[0] -= push_items; } BUG_ON(path->slots[0] < 0); return ret; +out: + btrfs_tree_unlock(left); + free_extent_buffer(left); + return ret; } /* @@ -2052,7 +2220,7 @@ again: btrfs_item_key(l, &disk_key, 0); - right = __btrfs_alloc_free_block(trans, root, root->leafsize, + right = btrfs_alloc_free_block(trans, root, root->leafsize, root->root_key.objectid, root_gen, disk_key.objectid, 0, l->start, 0); @@ -2085,6 +2253,8 @@ again: path->slots[1] + 1, 1); if (wret) ret = wret; + + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] = 0; @@ -2111,6 +2281,7 @@ again: path->slots[1], 1); if (wret) ret = wret; + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] = 0; @@ -2184,12 +2355,15 @@ again: BUG_ON(path->slots[0] != slot); if (mid <= slot) { + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] -= mid; path->slots[1] += 1; - } else + } else { + btrfs_tree_unlock(right); free_extent_buffer(right); + } BUG_ON(path->slots[0] < 0); @@ -2418,10 +2592,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, total_data += data_size[i]; } - /* create a root if there isn't one */ - if (!root->node) - BUG(); - total_size = total_data + (nr - 1) * sizeof(struct btrfs_item); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); if (ret == 0) { @@ -2516,7 +2686,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, btrfs_print_leaf(root, leaf); BUG(); } - out: return ret; } @@ -2655,7 +2824,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, btrfs_set_header_level(leaf, 0); } else { u64 root_gen = btrfs_header_generation(path->nodes[1]); - clean_tree_block(trans, root, leaf); wret = del_ptr(trans, root, path, 1, path->slots[1]); if (wret) ret = wret; @@ 
-2706,8 +2874,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, root_gen = btrfs_header_generation( path->nodes[1]); - clean_tree_block(trans, root, leaf); - wret = del_ptr(trans, root, path, 1, slot); if (wret) ret = wret; @@ -2720,7 +2886,13 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (wret) ret = wret; } else { - btrfs_mark_buffer_dirty(leaf); + /* if we're still in the path, make sure + * we're dirty. Otherwise, one of the + * push_leaf functions must have already + * dirtied this buffer + */ + if (path->nodes[0] == leaf) + btrfs_mark_buffer_dirty(leaf); free_extent_buffer(leaf); } } else { @@ -2731,56 +2903,40 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, } /* - * walk up the tree as far as required to find the previous leaf. + * search the tree again to find a leaf with lesser keys * returns 0 if it found something or 1 if there are no lesser leaves. * returns < 0 on io errors. */ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) { - int slot; - int level = 1; - struct extent_buffer *c; - struct extent_buffer *next = NULL; + struct btrfs_key key; + struct btrfs_disk_key found_key; + int ret; - while(level < BTRFS_MAX_LEVEL) { - if (!path->nodes[level]) - return 1; + btrfs_item_key_to_cpu(path->nodes[0], &key, 0); - slot = path->slots[level]; - c = path->nodes[level]; - if (slot == 0) { - level++; - if (level == BTRFS_MAX_LEVEL) - return 1; - continue; - } - slot--; - - if (next) - free_extent_buffer(next); + if (key.offset > 0) + key.offset--; + else if (key.type > 0) + key.type--; + else if (key.objectid > 0) + key.objectid--; + else + return 1; - next = read_node_slot(root, c, slot); - break; - } - path->slots[level] = slot; - while(1) { - level--; - c = path->nodes[level]; - free_extent_buffer(c); - slot = btrfs_header_nritems(next); - if (slot != 0) - slot--; - path->nodes[level] = next; - path->slots[level] = slot; - if (!level) - break; - next = read_node_slot(root, next, slot); - } - return 0; + btrfs_release_path(root, path); + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + return ret; + btrfs_item_key(path->nodes[0], &found_key, 0); + ret = comp_keys(&found_key, &key); + if (ret < 0) + return 0; + return 1; } /* - * walk up the tree as far as required to find the next leaf. + * search the tree again to find a leaf with greater keys * returns 0 if it found something or 1 if there are no greater leaves. * returns < 0 on io errors. 
*/ @@ -2790,6 +2946,28 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) int level = 1; struct extent_buffer *c; struct extent_buffer *next = NULL; + struct btrfs_key key; + u32 nritems; + int ret; + + nritems = btrfs_header_nritems(path->nodes[0]); + if (nritems == 0) { + return 1; + } + + btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); + + path->keep_locks = 1; + btrfs_release_path(root, path); + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + path->keep_locks = 0; + + if (ret < 0) + return ret; + + if (path->slots[0] < nritems - 1) { + goto done; + } while(level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) @@ -2799,33 +2977,45 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) c = path->nodes[level]; if (slot >= btrfs_header_nritems(c)) { level++; - if (level == BTRFS_MAX_LEVEL) + if (level == BTRFS_MAX_LEVEL) { return 1; + } continue; } - if (next) + if (next) { + btrfs_tree_unlock(next); free_extent_buffer(next); + } - if (path->reada) + if (level == 1 && path->locks[1] && path->reada) reada_for_search(root, path, level, slot, 0); next = read_node_slot(root, c, slot); + if (!path->skip_locking) + btrfs_tree_lock(next); break; } path->slots[level] = slot; while(1) { level--; c = path->nodes[level]; + if (path->locks[level]) + btrfs_tree_unlock(c); free_extent_buffer(c); path->nodes[level] = next; path->slots[level] = 0; + path->locks[level] = 1; if (!level) break; - if (path->reada) - reada_for_search(root, path, level, 0, 0); + if (level == 1 && path->locks[1] && path->reada) + reada_for_search(root, path, level, slot, 0); next = read_node_slot(root, next, 0); + if (!path->skip_locking) + btrfs_tree_lock(next); } +done: + unlock_up(path, 0, 1); return 0; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index dcea9d706d9..50891b39f36 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -330,8 +330,13 @@ struct btrfs_node { struct btrfs_path { struct extent_buffer *nodes[BTRFS_MAX_LEVEL]; int slots[BTRFS_MAX_LEVEL]; + /* if there is real range locking, this locks field will change */ + int locks[BTRFS_MAX_LEVEL]; int reada; + /* keep some upper locks as we walk down */ + int keep_locks; int lowest_level; + int skip_locking; }; /* @@ -515,6 +520,8 @@ struct btrfs_fs_info { spinlock_t hash_lock; struct mutex trans_mutex; struct mutex fs_mutex; + struct mutex alloc_mutex; + struct mutex chunk_mutex; struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -576,6 +583,10 @@ struct btrfs_fs_info { */ struct btrfs_root { struct extent_buffer *node; + + /* the node lock is held while changing the node pointer */ + spinlock_t node_lock; + struct extent_buffer *commit_root; struct btrfs_root_item root_item; struct btrfs_key root_key; @@ -1353,13 +1364,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *hint, u64 search_start, int data, int owner); -int btrfs_inc_root_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 owner_objectid); struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u32 size, - u64 root_objectid, - u64 hint, u64 empty_size); -struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize, u64 root_objectid, @@ -1368,8 +1373,6 @@ struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans, int level, u64 hint, u64 empty_size); -int btrfs_grow_extent_tree(struct 
btrfs_trans_handle *trans, - struct btrfs_root *root, u64 new_size); int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 new_size); int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -1409,6 +1412,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type); + +struct extent_buffer *btrfs_root_node(struct btrfs_root *root); +struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); + int btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e5c758e306d..fe40bdd984f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -32,6 +32,7 @@ #include "volumes.h" #include "print-tree.h" #include "async-thread.h" +#include "locking.h" #if 0 static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf) @@ -681,9 +682,11 @@ int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, { struct inode *btree_inode = root->fs_info->btree_inode; if (btrfs_header_generation(buf) == - root->fs_info->running_transaction->transid) + root->fs_info->running_transaction->transid) { + WARN_ON(!btrfs_tree_locked(buf)); clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf); + } return 0; } @@ -720,6 +723,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->in_sysfs = 0; INIT_LIST_HEAD(&root->dirty_list); + spin_lock_init(&root->node_lock); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); @@ -1196,6 +1200,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_init(&fs_info->trans_mutex); mutex_init(&fs_info->fs_mutex); + mutex_init(&fs_info->alloc_mutex); + mutex_init(&fs_info->chunk_mutex); #if 0 ret = add_hasher(fs_info, "crc32c"); @@ -1274,7 +1280,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_lock(&fs_info->fs_mutex); + mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_sys_array(tree_root); + mutex_unlock(&fs_info->chunk_mutex); if (ret) { printk("btrfs: failed to read the system array on %s\n", sb->s_id); @@ -1296,7 +1304,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node), BTRFS_UUID_SIZE); + mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_chunk_tree(chunk_root); + mutex_unlock(&fs_info->chunk_mutex); BUG_ON(ret); btrfs_close_extra_devices(fs_devices); @@ -1654,6 +1664,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) u64 transid = btrfs_header_generation(buf); struct inode *btree_inode = root->fs_info->btree_inode; + WARN_ON(!btrfs_tree_locked(buf)); if (transid != root->fs_info->generation) { printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n", (unsigned long long)buf->start, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 41a63462d3e..7e40c516fe6 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -25,6 +25,7 @@ #include "print-tree.h" #include "transaction.h" #include "volumes.h" +#include "locking.h" #define BLOCK_GROUP_DATA EXTENT_WRITEBACK #define BLOCK_GROUP_METADATA EXTENT_UPTODATE @@ -36,7 +37,28 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root); static int 
del_pending_extents(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root); +static struct btrfs_block_group_cache * +__btrfs_find_block_group(struct btrfs_root *root, + struct btrfs_block_group_cache *hint, + u64 search_start, int data, int owner); +void maybe_lock_mutex(struct btrfs_root *root) +{ + if (root != root->fs_info->extent_root && + root != root->fs_info->chunk_root && + root != root->fs_info->dev_root) { + mutex_lock(&root->fs_info->alloc_mutex); + } +} + +void maybe_unlock_mutex(struct btrfs_root *root) +{ + if (root != root->fs_info->extent_root && + root != root->fs_info->chunk_root && + root != root->fs_info->dev_root) { + mutex_unlock(&root->fs_info->alloc_mutex); + } +} static int cache_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *block_group) @@ -66,6 +88,7 @@ static int cache_block_group(struct btrfs_root *root, return -ENOMEM; path->reada = 2; + path->skip_locking = 1; first_free = block_group->key.objectid; key.objectid = block_group->key.objectid; key.offset = 0; @@ -290,7 +313,7 @@ no_cache: cache = btrfs_lookup_first_block_group(root->fs_info, last); } cache_miss = 0; - cache = btrfs_find_block_group(root, cache, last, data, 0); + cache = __btrfs_find_block_group(root, cache, last, data, 0); if (!cache) goto no_cache; *cache_ret = cache; @@ -318,10 +341,10 @@ static int block_group_state_bits(u64 flags) return bits; } -struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, - struct btrfs_block_group_cache - *hint, u64 search_start, - int data, int owner) +static struct btrfs_block_group_cache * +__btrfs_find_block_group(struct btrfs_root *root, + struct btrfs_block_group_cache *hint, + u64 search_start, int data, int owner) { struct btrfs_block_group_cache *cache; struct extent_io_tree *block_group_cache; @@ -411,6 +434,18 @@ found: return found_group; } +struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, + struct btrfs_block_group_cache + *hint, u64 search_start, + int data, int owner) +{ + + struct btrfs_block_group_cache *ret; + mutex_lock(&root->fs_info->alloc_mutex); + ret = __btrfs_find_block_group(root, hint, search_start, data, owner); + mutex_unlock(&root->fs_info->alloc_mutex); + return ret; +} static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset) { @@ -646,7 +681,7 @@ out: return ret; } -int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, +static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 root_objectid, u64 ref_generation, @@ -696,6 +731,22 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, return 0; } +int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset) +{ + int ret; + + mutex_lock(&root->fs_info->alloc_mutex); + ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, + root_objectid, ref_generation, + owner, owner_offset); + mutex_unlock(&root->fs_info->alloc_mutex); + return ret; +} + int btrfs_extent_post_op(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -760,6 +811,10 @@ u32 btrfs_count_snapshots_in_path(struct btrfs_root *root, struct btrfs_extent_ref *ref_item; int level = -1; + /* FIXME, needs locking */ + BUG(); + + mutex_lock(&root->fs_info->alloc_mutex); path = btrfs_alloc_path(); again: if (level == -1) @@ -854,33 +909,9 @@ again: out: btrfs_free_path(path); + 
mutex_unlock(&root->fs_info->alloc_mutex); return total_count; } -int btrfs_inc_root_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 owner_objectid) -{ - u64 generation; - u64 key_objectid; - u64 level; - u32 nritems; - struct btrfs_disk_key disk_key; - - level = btrfs_header_level(root->node); - generation = trans->transid; - nritems = btrfs_header_nritems(root->node); - if (nritems > 0) { - if (level == 0) - btrfs_item_key(root->node, &disk_key, 0); - else - btrfs_node_key(root->node, &disk_key, 0); - key_objectid = btrfs_disk_key_objectid(&disk_key); - } else { - key_objectid = 0; - } - return btrfs_inc_extent_ref(trans, root, root->node->start, - root->node->len, owner_objectid, - generation, level, key_objectid); -} int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf) @@ -897,6 +928,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (!root->ref_cows) return 0; + mutex_lock(&root->fs_info->alloc_mutex); level = btrfs_header_level(buf); nritems = btrfs_header_nritems(buf); for (i = 0; i < nritems; i++) { @@ -913,7 +945,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); if (disk_bytenr == 0) continue; - ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, + ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr, btrfs_file_extent_disk_num_bytes(buf, fi), root->root_key.objectid, trans->transid, key.objectid, key.offset); @@ -924,7 +956,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, } else { bytenr = btrfs_node_blockptr(buf, i); btrfs_node_key_to_cpu(buf, &key, i); - ret = btrfs_inc_extent_ref(trans, root, bytenr, + ret = __btrfs_inc_extent_ref(trans, root, bytenr, btrfs_level_size(root, level - 1), root->root_key.objectid, trans->transid, @@ -935,6 +967,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, } } } + mutex_unlock(&root->fs_info->alloc_mutex); return 0; fail: WARN_ON(1); @@ -965,6 +998,7 @@ fail: } } #endif + mutex_unlock(&root->fs_info->alloc_mutex); return ret; } @@ -1019,6 +1053,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; + mutex_lock(&root->fs_info->alloc_mutex); while(1) { ret = find_first_extent_bit(block_group_cache, last, &start, &end, BLOCK_GROUP_DIRTY); @@ -1045,6 +1080,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, BLOCK_GROUP_DIRTY, GFP_NOFS); } btrfs_free_path(path); + mutex_unlock(&root->fs_info->alloc_mutex); return werr; } @@ -1162,26 +1198,28 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, space_info->force_alloc = 0; } if (space_info->full) - return 0; + goto out; thresh = div_factor(space_info->total_bytes, 6); if (!force && (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) < thresh) - return 0; + goto out; + mutex_lock(&extent_root->fs_info->chunk_mutex); ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags); if (ret == -ENOSPC) { printk("space info full %Lu\n", flags); space_info->full = 1; - return 0; + goto out; } BUG_ON(ret); ret = btrfs_make_block_group(trans, extent_root, 0, flags, BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes); BUG_ON(ret); - + mutex_unlock(&extent_root->fs_info->chunk_mutex); +out: return 0; } @@ -1318,6 +1356,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct extent_io_tree *free_space_cache; free_space_cache = 
&root->fs_info->free_space_cache; + mutex_lock(&root->fs_info->alloc_mutex); while(1) { ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY); @@ -1327,6 +1366,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, clear_extent_dirty(unpin, start, end, GFP_NOFS); set_extent_dirty(free_space_cache, start, end, GFP_NOFS); } + mutex_unlock(&root->fs_info->alloc_mutex); return 0; } @@ -1363,18 +1403,24 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, GFP_NOFS); eb = read_tree_block(extent_root, ins.objectid, ins.offset, trans->transid); + btrfs_tree_lock(eb); level = btrfs_header_level(eb); if (level == 0) { btrfs_item_key(eb, &first, 0); } else { btrfs_node_key(eb, &first, 0); } + btrfs_tree_unlock(eb); + free_extent_buffer(eb); + /* + * the first key is just a hint, so the race we've created + * against reading it is fine + */ err = btrfs_insert_extent_backref(trans, extent_root, path, start, extent_root->root_key.objectid, 0, level, btrfs_disk_key_objectid(&first)); BUG_ON(err); - free_extent_buffer(eb); } btrfs_free_path(path); return 0; @@ -1384,12 +1430,14 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, int pending) { int err = 0; - struct extent_buffer *buf; if (!pending) { +#if 0 + struct extent_buffer *buf; buf = btrfs_find_tree_block(root, bytenr, num_bytes); if (buf) { - if (btrfs_buffer_uptodate(buf, 0)) { + if (!btrfs_try_tree_lock(buf) && + btrfs_buffer_uptodate(buf, 0)) { u64 transid = root->fs_info->running_transaction->transid; u64 header_transid = @@ -1398,12 +1446,15 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { clean_tree_block(NULL, root, buf); + btrfs_tree_unlock(buf); free_extent_buffer(buf); return 1; } + btrfs_tree_unlock(buf); } free_extent_buffer(buf); } +#endif update_pinned_extents(root, bytenr, num_bytes, 1); } else { set_extent_bits(&root->fs_info->pending_del, @@ -1586,10 +1637,11 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct /* * remove an extent from the root, returns 0 on success */ -int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root - *root, u64 bytenr, u64 num_bytes, - u64 root_objectid, u64 ref_generation, - u64 owner_objectid, u64 owner_offset, int pin) +static int __btrfs_free_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u64 root_objectid, + u64 ref_generation, u64 owner_objectid, + u64 owner_offset, int pin) { struct btrfs_root *extent_root = root->fs_info->extent_root; int pending_ret; @@ -1610,6 +1662,22 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root return ret ? 
ret : pending_ret; } +int btrfs_free_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u64 root_objectid, + u64 ref_generation, u64 owner_objectid, + u64 owner_offset, int pin) +{ + int ret; + + maybe_lock_mutex(root); + ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, + root_objectid, ref_generation, + owner_objectid, owner_offset, pin); + maybe_unlock_mutex(root); + return ret; +} + static u64 stripe_align(struct btrfs_root *root, u64 val) { u64 mask = ((u64)root->stripesize - 1); @@ -1679,12 +1747,12 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, block_group = btrfs_lookup_first_block_group(info, hint_byte); if (!block_group) hint_byte = search_start; - block_group = btrfs_find_block_group(root, block_group, + block_group = __btrfs_find_block_group(root, block_group, hint_byte, data, 1); if (last_ptr && *last_ptr == 0 && block_group) hint_byte = block_group->key.objectid; } else { - block_group = btrfs_find_block_group(root, + block_group = __btrfs_find_block_group(root, trans->block_group, search_start, data, 1); } @@ -1806,7 +1874,7 @@ enospc: } block_group = btrfs_lookup_first_block_group(info, search_start); cond_resched(); - block_group = btrfs_find_block_group(root, block_group, + block_group = __btrfs_find_block_group(root, block_group, search_start, data, 0); goto check_failed; @@ -1843,6 +1911,8 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path; struct btrfs_key keys[2]; + maybe_lock_mutex(root); + if (data) { alloc_profile = info->avail_data_alloc_bits & info->data_alloc_profile; @@ -1892,9 +1962,10 @@ again: if (ret) { printk("allocation failed flags %Lu\n", data); } - BUG_ON(ret); - if (ret) - return ret; + if (ret) { + BUG(); + goto out; + } /* block accounting for super block */ super_used = btrfs_super_bytes_used(&info->super_copy); @@ -1953,11 +2024,11 @@ again: finish_current_insert(trans, extent_root); pending_ret = del_pending_extents(trans, extent_root); - if (ret) { - return ret; - } + if (ret) + goto out; if (pending_ret) { - return pending_ret; + ret = pending_ret; + goto out; } update_block: @@ -1967,36 +2038,15 @@ update_block: ins->objectid, ins->offset); BUG(); } - return 0; +out: + maybe_unlock_mutex(root); + return ret; } - /* * helper function to allocate a block for a given tree * returns the tree buffer or NULL. */ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u32 blocksize, - u64 root_objectid, u64 hint, - u64 empty_size) -{ - u64 ref_generation; - - if (root->ref_cows) - ref_generation = trans->transid; - else - ref_generation = 0; - - - return __btrfs_alloc_free_block(trans, root, blocksize, root_objectid, - ref_generation, 0, 0, hint, empty_size); -} - -/* - * helper function to allocate a block for a given tree - * returns the tree buffer or NULL. 
- */ -struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize, u64 root_objectid, @@ -2026,6 +2076,7 @@ struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans, return ERR_PTR(-ENOMEM); } btrfs_set_header_generation(buf, trans->transid); + btrfs_tree_lock(buf); clean_tree_block(trans, root, buf); btrfs_set_buffer_uptodate(buf); @@ -2076,7 +2127,7 @@ static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans, disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); if (disk_bytenr == 0) continue; - ret = btrfs_free_extent(trans, root, disk_bytenr, + ret = __btrfs_free_extent(trans, root, disk_bytenr, btrfs_file_extent_disk_num_bytes(leaf, fi), leaf_owner, leaf_generation, key.objectid, key.offset, 0); @@ -2151,6 +2202,8 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, int ret; u32 refs; + mutex_lock(&root->fs_info->alloc_mutex); + WARN_ON(*level < 0); WARN_ON(*level >= BTRFS_MAX_LEVEL); ret = lookup_extent_ref(trans, root, @@ -2182,6 +2235,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, bytenr = btrfs_node_blockptr(cur, path->slots[*level]); ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); blocksize = btrfs_level_size(root, *level - 1); + ret = lookup_extent_ref(trans, root, bytenr, blocksize, &refs); BUG_ON(ret); if (refs != 1) { @@ -2189,7 +2243,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, root_owner = btrfs_header_owner(parent); root_gen = btrfs_header_generation(parent); path->slots[*level]++; - ret = btrfs_free_extent(trans, root, bytenr, + ret = __btrfs_free_extent(trans, root, bytenr, blocksize, root_owner, root_gen, 0, 0, 1); BUG_ON(ret); @@ -2201,9 +2255,11 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, reada_walk_down(root, cur, path->slots[*level]); mutex_unlock(&root->fs_info->fs_mutex); + mutex_unlock(&root->fs_info->alloc_mutex); next = read_tree_block(root, bytenr, blocksize, ptr_gen); mutex_lock(&root->fs_info->fs_mutex); + mutex_lock(&root->fs_info->alloc_mutex); /* we've dropped the lock, double check */ ret = lookup_extent_ref(trans, root, bytenr, @@ -2216,7 +2272,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, path->slots[*level]++; free_extent_buffer(next); - ret = btrfs_free_extent(trans, root, bytenr, + ret = __btrfs_free_extent(trans, root, bytenr, blocksize, root_owner, root_gen, 0, 0, 1); @@ -2244,13 +2300,14 @@ out: } root_gen = btrfs_header_generation(parent); - ret = btrfs_free_extent(trans, root, path->nodes[*level]->start, + ret = __btrfs_free_extent(trans, root, path->nodes[*level]->start, path->nodes[*level]->len, root_owner, root_gen, 0, 0, 1); free_extent_buffer(path->nodes[*level]); path->nodes[*level] = NULL; *level += 1; BUG_ON(ret); + mutex_unlock(&root->fs_info->alloc_mutex); return 0; } @@ -2350,6 +2407,12 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_node_key(node, &found_key, path->slots[level]); WARN_ON(memcmp(&found_key, &root_item->drop_progress, sizeof(found_key))); + for (i = 0; i < BTRFS_MAX_LEVEL; i++) { + if (path->nodes[i] && path->locks[i]) { + path->locks[i] = 0; + btrfs_tree_unlock(path->nodes[i]); + } + } } while(1) { wret = walk_down_tree(trans, root, path, &level); @@ -2383,6 +2446,8 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) u64 end; u64 ptr; int ret; + + mutex_lock(&info->alloc_mutex); while(1) { ret = 
find_first_extent_bit(&info->block_group_cache, 0, &start, &end, (unsigned int)-1); @@ -2402,6 +2467,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) clear_extent_dirty(&info->free_space_cache, start, end, GFP_NOFS); } + mutex_unlock(&info->alloc_mutex); return 0; } @@ -2678,6 +2744,7 @@ static int noinline relocate_one_reference(struct btrfs_root *extent_root, eb = read_tree_block(found_root, extent_key->objectid, extent_key->offset, 0); + btrfs_tree_lock(eb); level = btrfs_header_level(eb); if (level == 0) @@ -2685,6 +2752,7 @@ static int noinline relocate_one_reference(struct btrfs_root *extent_root, else btrfs_node_key_to_cpu(eb, &found_key, 0); + btrfs_tree_unlock(eb); free_extent_buffer(eb); ret = find_root_for_ref(extent_root, path, &found_key, @@ -2888,6 +2956,7 @@ int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start) int ret; int progress; + mutex_lock(&root->fs_info->alloc_mutex); shrink_block_group = btrfs_lookup_block_group(root->fs_info, shrink_start); BUG_ON(!shrink_block_group); @@ -3044,20 +3113,22 @@ next: (unsigned int)-1, GFP_NOFS); out: btrfs_free_path(path); + mutex_unlock(&root->fs_info->alloc_mutex); return ret; } int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key) { - int ret; + int ret = 0; struct btrfs_key found_key; struct extent_buffer *leaf; int slot; ret = btrfs_search_slot(NULL, root, key, path, 0, 0); if (ret < 0) - return ret; + goto out; + while(1) { slot = path->slots[0]; leaf = path->nodes[0]; @@ -3066,18 +3137,20 @@ int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path, if (ret == 0) continue; if (ret < 0) - goto error; + goto out; break; } btrfs_item_key_to_cpu(leaf, &found_key, slot); if (found_key.objectid >= key->objectid && - found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) - return 0; + found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { + ret = 0; + goto out; + } path->slots[0]++; } ret = -ENOENT; -error: +out: return ret; } @@ -3103,6 +3176,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (!path) return -ENOMEM; + mutex_lock(&root->fs_info->alloc_mutex); while(1) { ret = find_first_block_group(root, path, &key); if (ret > 0) { @@ -3158,6 +3232,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) ret = 0; error: btrfs_free_path(path); + mutex_unlock(&root->fs_info->alloc_mutex); return ret; } @@ -3205,5 +3280,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, ret = del_pending_extents(trans, extent_root); BUG_ON(ret); set_avail_alloc_bits(extent_root->fs_info, type); + return 0; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 17c508a941e..bd15cdcaba9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2889,7 +2889,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); - lock_page(page); if (i == 0) set_page_extent_head(page, eb->len); else @@ -2907,7 +2906,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, end = start + PAGE_CACHE_SIZE - 1; if (test_range_bit(tree, start, end, EXTENT_DIRTY, 0)) { - unlock_page(page); continue; } } @@ -2919,7 +2917,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, PAGECACHE_TAG_DIRTY); } read_unlock_irq(&page->mapping->tree_lock); - unlock_page(page); } return 0; } @@ -2948,17 +2945,12 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, * on us if the page isn't already dirty. 
*/ if (i == 0) { - lock_page(page); set_page_extent_head(page, eb->len); } else if (PagePrivate(page) && page->private != EXTENT_PAGE_PRIVATE) { - lock_page(page); set_page_extent_mapped(page); - unlock_page(page); } __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); - if (i == 0) - unlock_page(page); } return set_extent_dirty(tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0c79346fd2c..61bd8953a68 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -115,6 +115,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) trans = btrfs_start_transaction(root, 1); BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); + mutex_unlock(&root->fs_info->fs_mutex); num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); @@ -159,6 +160,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) btrfs_add_ordered_inode(inode); btrfs_update_inode(trans, root, inode); out: + mutex_lock(&root->fs_info->fs_mutex); btrfs_end_transaction(trans, root); return ret; } @@ -349,10 +351,12 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, mutex_lock(&root->fs_info->fs_mutex); trans = btrfs_start_transaction(root, 1); + mutex_unlock(&root->fs_info->fs_mutex); btrfs_set_trans_block_group(trans, inode); btrfs_csum_file_blocks(trans, root, inode, bio, sums); + mutex_lock(&root->fs_info->fs_mutex); ret = btrfs_end_transaction(trans, root); BUG_ON(ret); mutex_unlock(&root->fs_info->fs_mutex); @@ -807,6 +811,7 @@ static int btrfs_unlink_trans(struct btrfs_trans_handle *trans, goto err; } ret = btrfs_delete_one_dir_name(trans, root, path, di); + btrfs_release_path(root, path); dentry->d_inode->i_ctime = dir->i_ctime; ret = btrfs_del_inode_ref(trans, root, name, name_len, @@ -881,8 +886,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) struct btrfs_trans_handle *trans; unsigned long nr = 0; - if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) + if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) { return -ENOTEMPTY; + } mutex_lock(&root->fs_info->fs_mutex); ret = btrfs_check_free_space(root, 1, 1); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 6fb45580275..3fbf74e93db 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -43,6 +43,7 @@ #include "ioctl.h" #include "print-tree.h" #include "volumes.h" +#include "locking.h" @@ -75,9 +76,9 @@ static noinline int create_subvol(struct btrfs_root *root, char *name, if (ret) goto fail; - leaf = __btrfs_alloc_free_block(trans, root, root->leafsize, - objectid, trans->transid, 0, 0, - 0, 0); + leaf = btrfs_alloc_free_block(trans, root, root->leafsize, + objectid, trans->transid, 0, 0, + 0, 0); if (IS_ERR(leaf)) return PTR_ERR(leaf); @@ -108,6 +109,7 @@ static noinline int create_subvol(struct btrfs_root *root, char *name, memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress)); root_item.drop_level = 0; + btrfs_tree_unlock(leaf); free_extent_buffer(leaf); leaf = NULL; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c new file mode 100644 index 00000000000..80813a307b4 --- /dev/null +++ b/fs/btrfs/locking.c @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ +#include +#include +#include +#include +#include +#include +#include "ctree.h" +#include "extent_io.h" +#include "locking.h" + +int btrfs_tree_lock(struct extent_buffer *eb) +{ + lock_page(eb->first_page); + return 0; +} + +int btrfs_try_tree_lock(struct extent_buffer *eb) +{ + return TestSetPageLocked(eb->first_page); +} + +int btrfs_tree_unlock(struct extent_buffer *eb) +{ + WARN_ON(!PageLocked(eb->first_page)); + unlock_page(eb->first_page); + return 0; +} + +int btrfs_tree_locked(struct extent_buffer *eb) +{ + return PageLocked(eb->first_page); +} + diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h new file mode 100644 index 00000000000..2dab96d8280 --- /dev/null +++ b/fs/btrfs/locking.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#ifndef __BTRFS_LOCKING_ +#define __BTRFS_LOCKING_ + +int btrfs_tree_lock(struct extent_buffer *eb); +int btrfs_tree_unlock(struct extent_buffer *eb); +int btrfs_tree_locked(struct extent_buffer *eb); +int btrfs_try_tree_lock(struct extent_buffer *eb); +#endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f04684f7fea..1ed433a7149 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -23,6 +23,7 @@ #include "ctree.h" #include "disk-io.h" #include "transaction.h" +#include "locking.h" static int total_trans = 0; extern struct kmem_cache *btrfs_trans_handle_cachep; @@ -96,8 +97,7 @@ static noinline int record_root_in_trans(struct btrfs_root *root) radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_DEFRAG_TAG); - root->commit_root = root->node; - extent_buffer_get(root->node); + root->commit_root = btrfs_root_node(root); } else { WARN_ON(1); } @@ -559,6 +559,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root = pending->root; struct extent_buffer *tmp; + struct extent_buffer *old; int ret; int namelen; u64 objectid; @@ -578,16 +579,18 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, key.offset = 1; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); - extent_buffer_get(root->node); - btrfs_cow_block(trans, root, root->node, NULL, 0, &tmp); - free_extent_buffer(tmp); + old = btrfs_lock_root_node(root); + btrfs_cow_block(trans, root, old, NULL, 0, &old); - btrfs_copy_root(trans, root, root->node, &tmp, objectid); + btrfs_copy_root(trans, root, old, &tmp, objectid); + btrfs_tree_unlock(old); + free_extent_buffer(old); btrfs_set_root_bytenr(new_root_item, tmp->start); btrfs_set_root_level(new_root_item, btrfs_header_level(tmp)); ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key, new_root_item); + btrfs_tree_unlock(tmp); free_extent_buffer(tmp); if (ret) goto fail; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 155961c7b4d..fab851d8538 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -181,6 +181,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, if (root->fs_info->extent_root == root) is_extent = 1; + goto out; + if (root->ref_cows == 0 && !is_extent) goto out; -- cgit v1.2.3-70-g09d2 From 079899c2384023cd8efcd3806680b4f1d2abbd54 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 25 Jun 2008 16:01:31 -0400 Subject: Btrfs: Change find_extent_buffer to use TestSetPageLocked This makes it possible for callers to check for extent_buffers in cache without deadlocking against any btree locks held. 
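The pattern at work in this commit is a page-cache try-lock: TestSetPageLocked() atomically sets the page's lock bit and returns the previous value, so a nonzero result means another task already holds the page lock and the lookup can back off instead of sleeping on it. A minimal sketch of that pattern follows, using the same 2.6-era page-cache calls the patch itself uses; the helper name here is hypothetical and not part of the patch:

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch only: find a page in the cache and try to lock it without
 * blocking. Returns the locked page, or NULL if the page is absent
 * or someone else holds its lock.
 */
static struct page *try_lock_cached_page(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *p = find_get_page(mapping, index);

	if (!p)
		return NULL;
	if (TestSetPageLocked(p)) {
		/* contended: drop our reference instead of sleeping */
		page_cache_release(p);
		return NULL;
	}
	return p;
}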
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 -- fs/btrfs/extent_io.c | 7 ++++++- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a9b3a25a45b..dc3c03c6612 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1438,7 +1438,6 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, int err = 0; if (!pending) { -#if 0 struct extent_buffer *buf; buf = btrfs_find_tree_block(root, bytenr, num_bytes); if (buf) { @@ -1460,7 +1459,6 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, } free_extent_buffer(buf); } -#endif update_pinned_extents(root, bytenr, num_bytes, 1); } else { set_extent_bits(&root->fs_info->pending_del, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index bd15cdcaba9..40a5f53cb04 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2808,10 +2808,15 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, goto lru_add; for (i = 0; i < num_pages; i++, index++) { - p = find_lock_page(mapping, index); + p = find_get_page(mapping, index); if (!p) { goto fail; } + if (TestSetPageLocked(p)) { + page_cache_release(p); + goto fail; + } + set_page_extent_mapped(p); mark_page_accessed(p); -- cgit v1.2.3-70-g09d2

From e6dcd2dc9c489108648e2ed543315dd134d50a9a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Jul 2008 12:53:50 -0400 Subject: Btrfs: New data=ordered implementation

The old data=ordered code would force commit to wait until all the data extents from the transaction were fully on disk. This introduced large latencies into the commit and stalled new writers in the transaction for a long time.

The new code changes the way data allocations and extents work:

* When delayed allocation is filled, data extents are reserved, and the extent bit EXTENT_ORDERED is set on the entire range of the extent. A struct btrfs_ordered_extent is allocated and inserted into a per-inode rbtree to track the pending extents.

* As each page is written, EXTENT_ORDERED is cleared on the bytes corresponding to that page.

* When all of the bytes corresponding to a single struct btrfs_ordered_extent are written, the previously reserved extent is inserted into the FS btree and into the extent allocation trees. The checksums for the file data are also updated.
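In code, that lifecycle reduces to roughly the following (a condensed sketch of the hooks this patch adds, not the patch itself; ordered_writeback_done is a hypothetical wrapper around the new helpers from fs/btrfs/ordered-data.h, and the btree insertion step is elided, see btrfs_writepage_end_io_hook in fs/btrfs/inode.c below):

/* sketch only: assumes the btrfs_ordered_* helpers added by this patch */
static int ordered_writeback_done(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_ordered_extent *ordered;

	/*
	 * Clear EXTENT_ORDERED on this page's bytes.  The helper returns
	 * nonzero only once every byte of the ordered extent is on disk.
	 */
	if (!btrfs_dec_test_ordered_pending(inode, start, end - start + 1))
		return 0;

	ordered = btrfs_lookup_ordered_extent(inode, start);

	/*
	 * Only now is the reserved extent inserted into the FS btree and
	 * the extent allocation trees, and the pending checksums written
	 * (elided here; done in btrfs_writepage_end_io_hook).
	 */

	btrfs_put_ordered_extent(ordered);	/* drop the lookup reference */
	return 0;
}

Until that final step runs, the reserved extent is visible only in the per-inode rbtree, so a crash mid-write leaves no dangling references in the btrees.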
Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 4 +- fs/btrfs/ctree.h | 19 +- fs/btrfs/disk-io.c | 13 +- fs/btrfs/extent-tree.c | 132 +++++++++----- fs/btrfs/extent_io.c | 52 +++++- fs/btrfs/extent_io.h | 14 +- fs/btrfs/extent_map.c | 5 +- fs/btrfs/file-item.c | 62 ++++--- fs/btrfs/file.c | 67 ++++--- fs/btrfs/inode.c | 447 ++++++++++++++++++++++++++++++----------------- fs/btrfs/ordered-data.c | 455 ++++++++++++++++++++++++++++++------------------ fs/btrfs/ordered-data.h | 71 ++++++-- fs/btrfs/transaction.c | 67 +------ fs/btrfs/transaction.h | 4 - 14 files changed, 910 insertions(+), 502 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 40b4e0c9cd0..8d03687510e 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -21,6 +21,7 @@ #include "extent_map.h" #include "extent_io.h" +#include "ordered-data.h" /* in memory btrfs inode */ struct btrfs_inode { @@ -32,9 +33,8 @@ struct btrfs_inode { struct extent_io_tree io_failure_tree; struct mutex csum_mutex; struct inode vfs_inode; - atomic_t ordered_writeback; + struct btrfs_ordered_inode_tree ordered_tree; - u64 ordered_trans; /* * transid of the trans_handle that last modified this inode */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f3783dbd9b6..ceebc052ddc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include "bit-radix.h" #include "extent_io.h" @@ -37,6 +38,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep; extern struct kmem_cache *btrfs_transaction_cachep; extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; +struct btrfs_ordered_sum; #define BTRFS_MAGIC "_B5RfS_M" @@ -510,6 +512,7 @@ struct btrfs_fs_info { u64 max_inline; u64 alloc_start; struct btrfs_transaction *running_transaction; + wait_queue_head_t transaction_throttle; struct btrfs_super_block super_copy; struct btrfs_super_block super_for_commit; struct block_device *__bdev; @@ -541,6 +544,7 @@ struct btrfs_fs_info { */ struct btrfs_workers workers; struct btrfs_workers endio_workers; + struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; @@ -1384,6 +1388,17 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, u64 owner, u64 owner_offset, u64 empty_size, u64 hint_byte, u64 search_end, struct btrfs_key *ins, u64 data); +int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + struct btrfs_key *ins); +int btrfs_reserve_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, + u64 data); int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf); int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root @@ -1556,9 +1571,9 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, u64 bytenr, int mod); int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, - struct bio *bio, char *sums); + struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, - struct bio *bio, char **sums_ret); + struct bio *bio, struct btrfs_ordered_sum **sums_ret); struct btrfs_csum_item *btrfs_lookup_csum(struct 
btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b01b3f4f92a..4a5ebafb935 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -407,7 +407,11 @@ static int end_workqueue_bio(struct bio *bio, end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; - btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); + if (bio->bi_rw & (1 << BIO_RW)) + btrfs_queue_worker(&fs_info->endio_write_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) return 0; @@ -1286,6 +1290,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->volume_mutex); + init_waitqueue_head(&fs_info->transaction_throttle); #if 0 ret = add_hasher(fs_info, "crc32c"); @@ -1325,9 +1330,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_write_workers, + fs_info->thread_pool_size); btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_write_workers, + fs_info->thread_pool_size); err = -EINVAL; if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) { @@ -1447,6 +1456,7 @@ fail_sb_buffer: extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: iput(fs_info->btree_inode); @@ -1702,6 +1712,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); iput(fs_info->btree_inode); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8ebfa6be079..343d1101c31 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1895,36 +1895,17 @@ error: return ret; } -/* - * finds a free extent and does all the dirty work required for allocation - * returns the key for the extent through ins, and a tree buffer for - * the first block of the extent through buf. - * - * returns 0 if everything worked, non-zero otherwise. 
- */ -int btrfs_alloc_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 num_bytes, u64 min_alloc_size, - u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset, - u64 empty_size, u64 hint_byte, - u64 search_end, struct btrfs_key *ins, u64 data) +static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, + u64 data) { int ret; - int pending_ret; - u64 super_used; - u64 root_used; u64 search_start = 0; u64 alloc_profile; - u32 sizes[2]; struct btrfs_fs_info *info = root->fs_info; - struct btrfs_root *extent_root = info->extent_root; - struct btrfs_extent_item *extent_item; - struct btrfs_extent_ref *ref; - struct btrfs_path *path; - struct btrfs_key keys[2]; - - maybe_lock_mutex(root); if (data) { alloc_profile = info->avail_data_alloc_bits & @@ -1974,11 +1955,48 @@ again: } if (ret) { printk("allocation failed flags %Lu\n", data); - } - if (ret) { BUG(); - goto out; } + clear_extent_dirty(&root->fs_info->free_space_cache, + ins->objectid, ins->objectid + ins->offset - 1, + GFP_NOFS); + return 0; +} + +int btrfs_reserve_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, + u64 data) +{ + int ret; + maybe_lock_mutex(root); + ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, + empty_size, hint_byte, search_end, ins, + data); + maybe_unlock_mutex(root); + return ret; +} + +static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + struct btrfs_key *ins) +{ + int ret; + int pending_ret; + u64 super_used; + u64 root_used; + u64 num_bytes = ins->offset; + u32 sizes[2]; + struct btrfs_fs_info *info = root->fs_info; + struct btrfs_root *extent_root = info->extent_root; + struct btrfs_extent_item *extent_item; + struct btrfs_extent_ref *ref; + struct btrfs_path *path; + struct btrfs_key keys[2]; /* block accounting for super block */ spin_lock_irq(&info->delalloc_lock); @@ -1990,10 +2008,6 @@ again: root_used = btrfs_root_used(&root->root_item); btrfs_set_root_used(&root->root_item, root_used + num_bytes); - clear_extent_dirty(&root->fs_info->free_space_cache, - ins->objectid, ins->objectid + ins->offset - 1, - GFP_NOFS); - if (root == extent_root) { set_extent_bits(&root->fs_info->extent_ins, ins->objectid, ins->objectid + ins->offset - 1, @@ -2001,10 +2015,6 @@ again: goto update_block; } - WARN_ON(trans->alloc_exclude_nr); - trans->alloc_exclude_start = ins->objectid; - trans->alloc_exclude_nr = ins->offset; - memcpy(&keys[0], ins, sizeof(*ins)); keys[1].offset = hash_extent_ref(root_objectid, ref_generation, owner, owner_offset); @@ -2054,6 +2064,51 @@ update_block: BUG(); } out: + return ret; +} + +int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + struct btrfs_key *ins) +{ + int ret; + maybe_lock_mutex(root); + ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, + ref_generation, owner, + owner_offset, ins); + maybe_unlock_mutex(root); + return ret; +} +/* + * finds a free extent and does all the dirty work required for allocation + * returns the key for the extent through ins, and a tree buffer for + * the first block of the 
extent through buf. + * + * returns 0 if everything worked, non-zero otherwise. + */ +int btrfs_alloc_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 num_bytes, u64 min_alloc_size, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset, + u64 empty_size, u64 hint_byte, + u64 search_end, struct btrfs_key *ins, u64 data) +{ + int ret; + + maybe_lock_mutex(root); + + ret = __btrfs_reserve_extent(trans, root, num_bytes, + min_alloc_size, empty_size, hint_byte, + search_end, ins, data); + BUG_ON(ret); + ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, + ref_generation, owner, + owner_offset, ins); + BUG_ON(ret); + maybe_unlock_mutex(root); return ret; } @@ -2288,8 +2343,8 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->alloc_mutex); /* we've dropped the lock, double check */ - ret = drop_snap_lookup_refcount(root, bytenr, - blocksize, &refs); + ret = lookup_extent_ref(NULL, root, bytenr, blocksize, + &refs); BUG_ON(ret); if (refs != 1) { parent = path->nodes[*level]; @@ -2584,7 +2639,6 @@ out_unlock: kfree(ra); trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1); if (trans) { - btrfs_add_ordered_inode(inode); btrfs_end_transaction(trans, BTRFS_I(inode)->root); mark_inode_dirty(inode); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 40a5f53cb04..3f82a6e9ca4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -793,6 +793,13 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(set_extent_dirty); +int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask); +} +EXPORT_SYMBOL(set_extent_ordered); + int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask) { @@ -812,8 +819,8 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return set_extent_bit(tree, start, end, - EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL, - mask); + EXTENT_DELALLOC | EXTENT_DIRTY, + 0, NULL, mask); } EXPORT_SYMBOL(set_extent_delalloc); @@ -825,6 +832,13 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(clear_extent_dirty); +int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask); +} +EXPORT_SYMBOL(clear_extent_ordered); + int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { @@ -1395,10 +1409,9 @@ static int end_bio_extent_writepage(struct bio *bio, if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); - if (tree->ops && tree->ops->writepage_end_io_hook) { ret = tree->ops->writepage_end_io_hook(page, start, - end, state); + end, state, uptodate); if (ret) uptodate = 0; } @@ -1868,9 +1881,14 @@ static int __extent_read_full_page(struct extent_io_tree *tree, unlock_extent(tree, cur, end, GFP_NOFS); break; } - extent_offset = cur - em->start; + if (extent_map_end(em) <= cur) { +printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur); + } BUG_ON(extent_map_end(em) <= cur); + if (end < cur) { +printk("2bad mapping end %Lu cur %Lu\n", end, cur); + } BUG_ON(end < cur); iosize = min(extent_map_end(em) - cur, end - cur + 1); @@ -1976,6 +1994,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 last_byte = i_size_read(inode); u64 block_start; u64 iosize; 
+ u64 unlock_start; sector_t sector; struct extent_map *em; struct block_device *bdev; @@ -1988,7 +2007,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 nr_delalloc; u64 delalloc_end; - WARN_ON(!PageLocked(page)); page_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || @@ -2030,6 +2048,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_start = delalloc_end + 1; } lock_extent(tree, start, page_end, GFP_NOFS); + unlock_start = start; end = page_end; if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { @@ -2038,6 +2057,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (last_byte <= start) { clear_extent_dirty(tree, start, page_end, GFP_NOFS); + unlock_extent(tree, start, page_end, GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, + page_end, NULL, 1); + unlock_start = page_end + 1; goto done; } @@ -2047,6 +2071,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, while (cur <= end) { if (cur >= last_byte) { clear_extent_dirty(tree, cur, page_end, GFP_NOFS); + unlock_extent(tree, unlock_start, page_end, GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, cur, + page_end, NULL, 1); + unlock_start = page_end + 1; break; } em = epd->get_extent(inode, page, page_offset, cur, @@ -2071,8 +2100,16 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, block_start == EXTENT_MAP_INLINE) { clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); + + unlock_extent(tree, unlock_start, cur + iosize -1, + GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, cur, + cur + iosize - 1, + NULL, 1); cur = cur + iosize; page_offset += iosize; + unlock_start = cur; continue; } @@ -2119,7 +2156,8 @@ done: set_page_writeback(page); end_page_writeback(page); } - unlock_extent(tree, start, page_end, GFP_NOFS); + if (unlock_start <= page_end) + unlock_extent(tree, unlock_start, page_end, GFP_NOFS); unlock_page(page); return 0; } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f1960dafaa1..2268a799589 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -13,6 +13,8 @@ #define EXTENT_DEFRAG (1 << 6) #define EXTENT_DEFRAG_DONE (1 << 7) #define EXTENT_BUFFER_FILLED (1 << 8) +#define EXTENT_ORDERED (1 << 9) +#define EXTENT_ORDERED_METADATA (1 << 10) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) /* @@ -42,7 +44,7 @@ struct extent_io_ops { int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state); int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, - struct extent_state *state); + struct extent_state *state, int uptodate); int (*set_bit_hook)(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits); int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, @@ -131,6 +133,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int filled); int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask); +int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, + int bits, int wake, int delete, gfp_t mask); int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, int bits, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 
end, @@ -141,8 +145,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); +int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, + u64 end, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits); struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, @@ -209,6 +219,8 @@ void memset_extent_buffer(struct extent_buffer *eb, char c, unsigned long start, unsigned long len); int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, struct extent_buffer *eb); +int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end); +int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); int clear_extent_buffer_dirty(struct extent_io_tree *tree, struct extent_buffer *eb); int set_extent_buffer_dirty(struct extent_io_tree *tree, diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index f5a04eb9a2a..81123277c2b 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -206,10 +206,11 @@ int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *merge = NULL; struct rb_node *rb; + BUG_ON(spin_trylock(&tree->lock)); rb = tree_insert(&tree->map, em->start, &em->rb_node); if (rb) { - merge = rb_entry(rb, struct extent_map, rb_node); ret = -EEXIST; + free_extent_map(merge); goto out; } atomic_inc(&em->refs); @@ -268,6 +269,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, struct rb_node *next = NULL; u64 end = range_end(start, len); + BUG_ON(spin_trylock(&tree->lock)); em = tree->last; if (em && end > em->start && start < extent_map_end(em)) goto found; @@ -318,6 +320,7 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) { int ret = 0; + BUG_ON(spin_trylock(&tree->lock)); rb_erase(&em->rb_node, &tree->map); em->in_tree = 0; if (tree->last == em) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f537eb43c2c..345caf8ff51 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -135,26 +135,37 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, } int btrfs_csum_one_bio(struct btrfs_root *root, - struct bio *bio, char **sums_ret) + struct bio *bio, struct btrfs_ordered_sum **sums_ret) { - u32 *sums; + struct btrfs_ordered_sum *sums; + struct btrfs_sector_sum *sector_sum; char *data; struct bio_vec *bvec = bio->bi_io_vec; int bio_index = 0; - sums = kmalloc(bio->bi_vcnt * BTRFS_CRC32_SIZE, GFP_NOFS); + WARN_ON(bio->bi_vcnt <= 0); + sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); if (!sums) return -ENOMEM; - *sums_ret = (char *)sums; + *sums_ret = sums; + sector_sum = &sums->sums; + sums->file_offset = page_offset(bvec->bv_page); + sums->len = bio->bi_size; + INIT_LIST_HEAD(&sums->list); while(bio_index < bio->bi_vcnt) { data = kmap_atomic(bvec->bv_page, KM_USER0); - *sums = ~(u32)0; - *sums = btrfs_csum_data(root, data + bvec->bv_offset, - *sums, bvec->bv_len); + sector_sum->sum = ~(u32)0; + sector_sum->sum = btrfs_csum_data(root, + data + bvec->bv_offset, + sector_sum->sum, + bvec->bv_len); kunmap_atomic(data, KM_USER0); - btrfs_csum_final(*sums, (char 
*)sums); - sums++; + btrfs_csum_final(sector_sum->sum, + (char *)§or_sum->sum); + sector_sum->offset = page_offset(bvec->bv_page) + + bvec->bv_offset; + sector_sum++; bio_index++; bvec++; } @@ -163,7 +174,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, - struct bio *bio, char *sums) + struct btrfs_ordered_sum *sums) { u64 objectid = inode->i_ino; u64 offset; @@ -171,17 +182,16 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_key file_key; struct btrfs_key found_key; u64 next_offset; + u64 total_bytes = 0; int found_next; struct btrfs_path *path; struct btrfs_csum_item *item; struct btrfs_csum_item *item_end; struct extent_buffer *leaf = NULL; u64 csum_offset; - u32 *sums32 = (u32 *)sums; + struct btrfs_sector_sum *sector_sum; u32 nritems; u32 ins_size; - int bio_index = 0; - struct bio_vec *bvec = bio->bi_io_vec; char *eb_map; char *eb_token; unsigned long map_len; @@ -189,10 +199,11 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); BUG_ON(!path); + sector_sum = &sums->sums; again: next_offset = (u64)-1; found_next = 0; - offset = page_offset(bvec->bv_page) + bvec->bv_offset; + offset = sector_sum->offset; file_key.objectid = objectid; file_key.offset = offset; btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY); @@ -303,7 +314,7 @@ found: item_end = (struct btrfs_csum_item *)((unsigned char *)item_end + btrfs_item_size_nr(leaf, path->slots[0])); eb_token = NULL; -next_bvec: +next_sector: if (!eb_token || (unsigned long)item + BTRFS_CRC32_SIZE >= map_start + map_len) { @@ -321,21 +332,20 @@ next_bvec: } if (eb_token) { memcpy(eb_token + ((unsigned long)item & (PAGE_CACHE_SIZE - 1)), - sums32, BTRFS_CRC32_SIZE); + §or_sum->sum, BTRFS_CRC32_SIZE); } else { - write_extent_buffer(leaf, sums32, (unsigned long)item, - BTRFS_CRC32_SIZE); + write_extent_buffer(leaf, §or_sum->sum, + (unsigned long)item, BTRFS_CRC32_SIZE); } - bio_index++; - bvec++; - sums32++; - if (bio_index < bio->bi_vcnt) { + total_bytes += root->sectorsize; + sector_sum++; + if (total_bytes < sums->len) { item = (struct btrfs_csum_item *)((char *)item + BTRFS_CRC32_SIZE); if (item < item_end && offset + PAGE_CACHE_SIZE == - page_offset(bvec->bv_page)) { - offset = page_offset(bvec->bv_page); - goto next_bvec; + sector_sum->offset) { + offset = sector_sum->offset; + goto next_sector; } } if (eb_token) { @@ -343,7 +353,7 @@ next_bvec: eb_token = NULL; } btrfs_mark_buffer_dirty(path->nodes[0]); - if (bio_index < bio->bi_vcnt) { + if (total_bytes < sums->len) { btrfs_release_path(root, path); goto again; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8037792f878..12e765f7e0d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -34,7 +34,6 @@ #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" -#include "ordered-data.h" #include "ioctl.h" #include "print-tree.h" #include "compat.h" @@ -273,7 +272,9 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, u64 mask = root->sectorsize - 1; last_pos_in_file = (isize + mask) & ~mask; hole_size = (start_pos - last_pos_in_file + mask) & ~mask; - if (last_pos_in_file < start_pos) { + if (hole_size > 0) { + btrfs_wait_ordered_range(inode, last_pos_in_file, + last_pos_in_file + hole_size); err = btrfs_drop_extents(trans, root, inode, last_pos_in_file, last_pos_in_file + hole_size, @@ -303,19 +304,17 @@ static int noinline dirty_and_release_pages(struct 
btrfs_trans_handle *trans, inline_size > root->fs_info->max_inline || (inline_size & (root->sectorsize -1)) == 0 || inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { - u64 last_end; - + /* check for reserved extents on each page, we don't want + * to reset the delalloc bit on things that already have + * extents reserved. + */ + set_extent_delalloc(io_tree, start_pos, + end_of_last_block, GFP_NOFS); for (i = 0; i < num_pages; i++) { struct page *p = pages[i]; SetPageUptodate(p); set_page_dirty(p); } - last_end = (u64)(pages[num_pages -1]->index) << - PAGE_CACHE_SHIFT; - last_end += PAGE_CACHE_SIZE - 1; - set_extent_delalloc(io_tree, start_pos, end_of_last_block, - GFP_NOFS); - btrfs_add_ordered_inode(inode); } else { u64 aligned_end; /* step one, delete the existing extents in this range */ @@ -350,10 +349,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) struct extent_map *split = NULL; struct extent_map *split2 = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map *tmp; u64 len = end - start + 1; + u64 next_start; int ret; int testend = 1; + WARN_ON(end < start); if (end == (u64)-1) { len = (u64)-1; testend = 0; @@ -370,6 +372,8 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) spin_unlock(&em_tree->lock); break; } + tmp = rb_entry(&em->rb_node, struct extent_map, rb_node); + next_start = tmp->start; remove_extent_mapping(em_tree, em); if (em->block_start < EXTENT_MAP_LAST_BYTE && @@ -778,37 +782,58 @@ static int prepare_pages(struct btrfs_root *root, struct file *file, struct inode *inode = fdentry(file)->d_inode; int err = 0; u64 start_pos; + u64 last_pos; start_pos = pos & ~((u64)root->sectorsize - 1); + last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; memset(pages, 0, num_pages * sizeof(struct page *)); - +again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { err = -ENOMEM; BUG_ON(1); } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - ClearPageDirty(pages[i]); -#else - cancel_dirty_page(pages[i], PAGE_CACHE_SIZE); -#endif wait_on_page_writeback(pages[i]); - set_page_extent_mapped(pages[i]); - WARN_ON(!PageLocked(pages[i])); } if (start_pos < inode->i_size) { - u64 last_pos; - last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT; + struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1); + if (ordered && + ordered->file_offset + ordered->len > start_pos && + ordered->file_offset < last_pos) { + btrfs_put_ordered_extent(ordered); + unlock_extent(&BTRFS_I(inode)->io_tree, + start_pos, last_pos - 1, GFP_NOFS); + for (i = 0; i < num_pages; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + btrfs_wait_ordered_range(inode, start_pos, + last_pos - start_pos); + goto again; + } + if (ordered) + btrfs_put_ordered_extent(ordered); + clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS); unlock_extent(&BTRFS_I(inode)->io_tree, start_pos, last_pos - 1, GFP_NOFS); } + for (i = 0; i < num_pages; i++) { +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) + ClearPageDirty(pages[i]); +#else + cancel_dirty_page(pages[i], PAGE_CACHE_SIZE); +#endif + set_page_extent_mapped(pages[i]); + WARN_ON(!PageLocked(pages[i])); + } return 0; } @@ -969,13 +994,11 @@ out_nolock: (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); } current->backing_dev_info = NULL; - 
btrfs_ordered_throttle(root, inode); return num_written ? num_written : err; } int btrfs_release_file(struct inode * inode, struct file * filp) { - btrfs_del_ordered_inode(inode, 0); if (filp->private_data) btrfs_ioctl_trans_end(filp); return 0; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d39433dfb2c..c5a62f0b959 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -43,6 +43,7 @@ #include "ioctl.h" #include "print-tree.h" #include "volumes.h" +#include "ordered-data.h" struct btrfs_iget_args { u64 ino; @@ -109,10 +110,11 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) u64 num_bytes; u64 cur_alloc_size; u64 blocksize = root->sectorsize; - u64 orig_start = start; u64 orig_num_bytes; struct btrfs_key ins; - int ret; + struct extent_map *em; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + int ret = 0; trans = btrfs_start_transaction(root, 1); BUG_ON(!trans); @@ -120,33 +122,44 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); - ret = btrfs_drop_extents(trans, root, inode, - start, start + num_bytes, start, &alloc_hint); orig_num_bytes = num_bytes; if (alloc_hint == EXTENT_MAP_INLINE) goto out; BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1); while(num_bytes > 0) { cur_alloc_size = min(num_bytes, root->fs_info->max_extent); - ret = btrfs_alloc_extent(trans, root, cur_alloc_size, - root->sectorsize, - root->root_key.objectid, - trans->transid, - inode->i_ino, start, 0, - alloc_hint, (u64)-1, &ins, 1); + ret = btrfs_reserve_extent(trans, root, cur_alloc_size, + root->sectorsize, 0, 0, + (u64)-1, &ins, 1); if (ret) { WARN_ON(1); goto out; } + em = alloc_extent_map(GFP_NOFS); + em->start = start; + em->len = ins.offset; + em->block_start = ins.objectid; + em->bdev = root->fs_info->fs_devices->latest_bdev; + while(1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, start, + start + ins.offset - 1); + } + cur_alloc_size = ins.offset; - ret = btrfs_insert_file_extent(trans, root, inode->i_ino, - start, ins.objectid, ins.offset, - ins.offset, 0); - inode->i_blocks += ins.offset >> 9; - btrfs_check_file(root, inode); + ret = btrfs_add_ordered_extent(inode, start, ins.objectid, + ins.offset); + BUG_ON(ret); if (num_bytes < cur_alloc_size) { printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes, cur_alloc_size); @@ -156,10 +169,6 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } - btrfs_drop_extent_cache(inode, orig_start, - orig_start + orig_num_bytes - 1); - btrfs_add_ordered_inode(inode); - btrfs_update_inode(trans, root, inode); out: btrfs_end_transaction(trans, root); return ret; @@ -341,25 +350,15 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num) { struct btrfs_root *root = BTRFS_I(inode)->root; - struct btrfs_trans_handle *trans; int ret = 0; - char *sums = NULL; + struct btrfs_ordered_sum *sums; ret = btrfs_csum_one_bio(root, bio, &sums); BUG_ON(ret); - trans = btrfs_start_transaction(root, 1); - - btrfs_set_trans_block_group(trans, inode); - mutex_lock(&BTRFS_I(inode)->csum_mutex); - btrfs_csum_file_blocks(trans, root, inode, bio, sums); - 
mutex_unlock(&BTRFS_I(inode)->csum_mutex); - - ret = btrfs_end_transaction(trans, root); + ret = btrfs_add_ordered_sum(inode, sums); BUG_ON(ret); - kfree(sums); - return btrfs_map_bio(root, rw, bio, mirror_num, 1); } @@ -369,14 +368,10 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - if (!(rw & (1 << BIO_RW))) { - ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); - BUG_ON(ret); - goto mapit; - } + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + BUG_ON(ret); - if (btrfs_test_opt(root, NODATASUM) || - btrfs_test_flag(inode, NODATASUM)) { + if (!(rw & (1 << BIO_RW))) { goto mapit; } @@ -387,6 +382,96 @@ mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } +static int add_pending_csums(struct btrfs_trans_handle *trans, + struct inode *inode, u64 file_offset, + struct list_head *list) +{ + struct list_head *cur; + struct btrfs_ordered_sum *sum; + + btrfs_set_trans_block_group(trans, inode); + while(!list_empty(list)) { + cur = list->next; + sum = list_entry(cur, struct btrfs_ordered_sum, list); + mutex_lock(&BTRFS_I(inode)->csum_mutex); + btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root, + inode, sum); + mutex_unlock(&BTRFS_I(inode)->csum_mutex); + list_del(&sum->list); + kfree(sum); + } + return 0; +} + +int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, + struct extent_state *state, int uptodate) +{ + struct inode *inode = page->mapping->host; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + struct btrfs_ordered_extent *ordered_extent; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + u64 alloc_hint = 0; + struct list_head list; + struct btrfs_key ins; + int ret; + + ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); + if (!ret) { + return 0; + } + + trans = btrfs_start_transaction(root, 1); + + ordered_extent = btrfs_lookup_ordered_extent(inode, start); + BUG_ON(!ordered_extent); + + lock_extent(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + ordered_extent->len - 1, + GFP_NOFS); + + INIT_LIST_HEAD(&list); + + ins.objectid = ordered_extent->start; + ins.offset = ordered_extent->len; + ins.type = BTRFS_EXTENT_ITEM_KEY; + ret = btrfs_alloc_reserved_extent(trans, root, root->root_key.objectid, + trans->transid, inode->i_ino, + ordered_extent->file_offset, &ins); + BUG_ON(ret); + ret = btrfs_drop_extents(trans, root, inode, + ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len, + ordered_extent->file_offset, &alloc_hint); + BUG_ON(ret); + ret = btrfs_insert_file_extent(trans, root, inode->i_ino, + ordered_extent->file_offset, + ordered_extent->start, + ordered_extent->len, + ordered_extent->len, 0); + BUG_ON(ret); + btrfs_drop_extent_cache(inode, ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1); + inode->i_blocks += ordered_extent->len >> 9; + unlock_extent(io_tree, ordered_extent->file_offset, + ordered_extent->file_offset + ordered_extent->len - 1, + GFP_NOFS); + add_pending_csums(trans, inode, ordered_extent->file_offset, + &ordered_extent->list); + + btrfs_remove_ordered_extent(inode, ordered_extent); + /* once for us */ + btrfs_put_ordered_extent(ordered_extent); + /* once for the tree */ + btrfs_put_ordered_extent(ordered_extent); + + btrfs_update_inode(trans, root, inode); + btrfs_end_transaction(trans, root); + return 0; +} + int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) { int ret = 0; @@ 
-409,7 +494,8 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) if (ret == -ENOENT || ret == -EFBIG) ret = 0; csum = 0; - printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start); + printk("no csum found for inode %lu start %Lu\n", inode->i_ino, + start); goto out; } read_extent_buffer(path->nodes[0], &csum, (unsigned long)item, @@ -833,7 +919,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) { struct btrfs_root *root; struct btrfs_trans_handle *trans; - struct inode *inode = dentry->d_inode; int ret; unsigned long nr = 0; @@ -849,14 +934,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) ret = btrfs_unlink_trans(trans, root, dir, dentry); nr = trans->blocks_used; - if (inode->i_nlink == 0) { - /* if the inode isn't linked anywhere, - * we don't need to worry about - * data=ordered - */ - btrfs_del_ordered_inode(inode, 1); - } - btrfs_end_transaction_throttle(trans, root); fail: btrfs_btree_balance_dirty(root, nr); @@ -931,6 +1008,7 @@ static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans, int extent_type = -1; u64 mask = root->sectorsize - 1; + btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1); path = btrfs_alloc_path(); path->reada = -1; @@ -1117,34 +1195,6 @@ error: return ret; } -static int btrfs_cow_one_page(struct inode *inode, struct page *page, - size_t zero_start) -{ - char *kaddr; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - int ret = 0; - - WARN_ON(!PageLocked(page)); - set_page_extent_mapped(page); - - lock_extent(io_tree, page_start, page_end, GFP_NOFS); - set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, - page_end, GFP_NOFS); - - if (zero_start != PAGE_CACHE_SIZE) { - kaddr = kmap(page); - memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); - flush_dcache_page(page); - kunmap(page); - } - set_page_dirty(page); - unlock_extent(io_tree, page_start, page_end, GFP_NOFS); - - return ret; -} - /* * taken from block_truncate_page, but does cow as it zeros out * any bytes left in the last page in the file. 
@@ -1153,12 +1203,16 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from) { struct inode *inode = mapping->host; struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; + char *kaddr; u32 blocksize = root->sectorsize; pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); struct page *page; int ret = 0; u64 page_start; + u64 page_end; if ((offset & (blocksize - 1)) == 0) goto out; @@ -1168,6 +1222,10 @@ again: page = grab_cache_page(mapping, index); if (!page) goto out; + + page_start = page_offset(page); + page_end = page_start + PAGE_CACHE_SIZE - 1; + if (!PageUptodate(page)) { ret = btrfs_readpage(NULL, page); lock_page(page); @@ -1181,10 +1239,32 @@ again: goto out; } } - - page_start = (u64)page->index << PAGE_CACHE_SHIFT; wait_on_page_writeback(page); - ret = btrfs_cow_one_page(inode, page, offset); + + lock_extent(io_tree, page_start, page_end, GFP_NOFS); + set_page_extent_mapped(page); + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + btrfs_wait_ordered_extent(inode, ordered); + btrfs_put_ordered_extent(ordered); + goto again; + } + + set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, + page_end, GFP_NOFS); + ret = 0; + if (offset != PAGE_CACHE_SIZE) { + kaddr = kmap(page); + memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset); + flush_dcache_page(page); + kunmap(page); + } + set_page_dirty(page); + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); unlock_page(page); page_cache_release(page); @@ -1222,8 +1302,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) btrfs_truncate_page(inode->i_mapping, inode->i_size); - lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); hole_size = block_end - hole_start; + btrfs_wait_ordered_range(inode, hole_start, hole_size); + lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); trans = btrfs_start_transaction(root, 1); btrfs_set_trans_block_group(trans, inode); @@ -1258,6 +1339,7 @@ void btrfs_delete_inode(struct inode *inode) unsigned long nr; int ret; + btrfs_wait_ordered_range(inode, 0, (u64)-1); truncate_inode_pages(&inode->i_data, 0); if (is_bad_inode(inode)) { goto no_delete; @@ -1403,7 +1485,6 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); return 0; } @@ -1705,7 +1786,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->delalloc_bytes = 0; BTRFS_I(inode)->root = root; @@ -1930,7 +2010,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -2066,64 +2145,18 @@ out_unlock: static int merge_extent_mapping(struct extent_map_tree *em_tree, struct extent_map *existing, - struct extent_map *em) + struct extent_map *em, + u64 map_start, u64 map_len) { u64 
start_diff; - u64 new_end; - int ret = 0; - int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE; - - if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE) - goto invalid; - - if (!real_blocks && em->block_start != existing->block_start) - goto invalid; - - new_end = max(existing->start + existing->len, em->start + em->len); - - if (existing->start >= em->start) { - if (em->start + em->len < existing->start) - goto invalid; - start_diff = existing->start - em->start; - if (real_blocks && em->block_start + start_diff != - existing->block_start) - goto invalid; - - em->len = new_end - em->start; - - remove_extent_mapping(em_tree, existing); - /* free for the tree */ - free_extent_map(existing); - ret = add_extent_mapping(em_tree, em); - - } else if (em->start > existing->start) { - - if (existing->start + existing->len < em->start) - goto invalid; - - start_diff = em->start - existing->start; - if (real_blocks && existing->block_start + start_diff != - em->block_start) - goto invalid; - - remove_extent_mapping(em_tree, existing); - em->block_start = existing->block_start; - em->start = existing->start; - em->len = new_end - existing->start; - free_extent_map(existing); - - ret = add_extent_mapping(em_tree, em); - } else { - goto invalid; - } - return ret; - -invalid: - printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n", - existing->start, existing->len, existing->block_start, - em->start, em->len, em->block_start); - return -EIO; + BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); + start_diff = map_start - em->start; + em->start = map_start; + em->len = map_len; + if (em->block_start < EXTENT_MAP_LAST_BYTE) + em->block_start += start_diff; + return add_extent_mapping(em_tree, em); } struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, @@ -2170,10 +2203,9 @@ again: err = -ENOMEM; goto out; } - + em->bdev = root->fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->len = (u64)-1; - em->bdev = root->fs_info->fs_devices->latest_bdev; ret = btrfs_lookup_file_extent(trans, root, path, objectid, start, trans != NULL); if (ret < 0) { @@ -2314,6 +2346,9 @@ insert: */ if (ret == -EEXIST) { struct extent_map *existing; + + ret = 0; + existing = lookup_extent_mapping(em_tree, start, len); if (existing && (existing->start > start || existing->start + existing->len <= start)) { @@ -2325,7 +2360,8 @@ insert: em->len); if (existing) { err = merge_extent_mapping(em_tree, existing, - em); + em, start, + root->sectorsize); free_extent_map(existing); if (err) { free_extent_map(em); @@ -2341,6 +2377,7 @@ insert: } else { free_extent_map(em); em = existing; + err = 0; } } spin_unlock(&em_tree->lock); @@ -2348,8 +2385,9 @@ out: btrfs_free_path(path); if (trans) { ret = btrfs_end_transaction(trans, root); - if (!err) + if (!err) { err = ret; + } } if (err) { free_extent_map(em); @@ -2474,8 +2512,7 @@ btrfs_readpages(struct file *file, struct address_space *mapping, return extent_readpages(tree, mapping, pages, nr_pages, btrfs_get_extent); } - -static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) +static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) { struct extent_io_tree *tree; struct extent_map_tree *map; @@ -2493,15 +2530,54 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) return ret; } +static int btrfs_releasepage(struct page *page, gfp_t gfp_flags) +{ + struct btrfs_ordered_extent *ordered; + + ordered = btrfs_lookup_ordered_extent(page->mapping->host, + page_offset(page)); + if 
(ordered) { + btrfs_put_ordered_extent(ordered); + return 0; + } + return __btrfs_releasepage(page, gfp_flags); +} + static void btrfs_invalidatepage(struct page *page, unsigned long offset) { struct extent_io_tree *tree; + struct btrfs_ordered_extent *ordered; + u64 page_start = page_offset(page); + u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + wait_on_page_writeback(page); tree = &BTRFS_I(page->mapping->host)->io_tree; - extent_invalidatepage(tree, page, offset); - btrfs_releasepage(page, GFP_NOFS); + if (offset) { + btrfs_releasepage(page, GFP_NOFS); + return; + } + + lock_extent(tree, page_start, page_end, GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(page->mapping->host, + page_offset(page)); + if (ordered) { + clear_extent_bit(tree, page_start, page_end, + EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_LOCKED, 1, 0, GFP_NOFS); + btrfs_writepage_end_io_hook(page, page_start, + page_end, NULL, 1); + btrfs_put_ordered_extent(ordered); + lock_extent(tree, page_start, page_end, GFP_NOFS); + } + clear_extent_bit(tree, page_start, page_end, + EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | + EXTENT_ORDERED, + 1, 1, GFP_NOFS); + __btrfs_releasepage(page, GFP_NOFS); + if (PagePrivate(page)) { - invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); + invalidate_extent_lru(tree, page_offset(page), + PAGE_CACHE_SIZE); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -2527,35 +2603,63 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page) { struct inode *inode = fdentry(vma->vm_file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; - unsigned long end; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_ordered_extent *ordered; + char *kaddr; + unsigned long zero_start; loff_t size; int ret; u64 page_start; + u64 page_end; ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0); if (ret) goto out; ret = -EINVAL; - +again: lock_page(page); - wait_on_page_writeback(page); size = i_size_read(inode); - page_start = (u64)page->index << PAGE_CACHE_SHIFT; + page_start = page_offset(page); + page_end = page_start + PAGE_CACHE_SIZE - 1; if ((page->mapping != inode->i_mapping) || - (page_start > size)) { + (page_start >= size)) { /* page got truncated out from underneath us */ goto out_unlock; } + wait_on_page_writeback(page); + + lock_extent(io_tree, page_start, page_end, GFP_NOFS); + set_page_extent_mapped(page); + + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) { + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); + unlock_page(page); + btrfs_wait_ordered_extent(inode, ordered); + btrfs_put_ordered_extent(ordered); + goto again; + } + + set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, + page_end, GFP_NOFS); + ret = 0; /* page is wholly or partially inside EOF */ if (page_start + PAGE_CACHE_SIZE > size) - end = size & ~PAGE_CACHE_MASK; + zero_start = size & ~PAGE_CACHE_MASK; else - end = PAGE_CACHE_SIZE; + zero_start = PAGE_CACHE_SIZE; - ret = btrfs_cow_one_page(inode, page, end); + if (zero_start != PAGE_CACHE_SIZE) { + kaddr = kmap(page); + memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); + flush_dcache_page(page); + kunmap(page); + } + set_page_dirty(page); + unlock_extent(io_tree, page_start, page_end, GFP_NOFS); out_unlock: unlock_page(page); @@ -2662,15 +2766,28 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) if (!ei) return NULL; ei->last_trans = 0; - ei->ordered_trans = 0; + btrfs_ordered_inode_tree_init(&ei->ordered_tree); return 
&ei->vfs_inode; } void btrfs_destroy_inode(struct inode *inode) { + struct btrfs_ordered_extent *ordered; WARN_ON(!list_empty(&inode->i_dentry)); WARN_ON(inode->i_data.nrpages); + while(1) { + ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); + if (!ordered) + break; + else { + printk("found ordered extent %Lu %Lu\n", + ordered->file_offset, ordered->len); + btrfs_remove_ordered_extent(inode, ordered); + btrfs_put_ordered_extent(ordered); + btrfs_put_ordered_extent(ordered); + } + } btrfs_drop_extent_cache(inode, 0, (u64)-1); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } @@ -2869,7 +2986,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_mapping, GFP_NOFS); mutex_init(&BTRFS_I(inode)->csum_mutex); BTRFS_I(inode)->delalloc_bytes = 0; - atomic_set(&BTRFS_I(inode)->ordered_writeback, 0); BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } dir->i_sb->s_dirt = 1; @@ -2921,6 +3037,20 @@ out_fail: return err; } +static int btrfs_set_page_dirty(struct page *page) +{ + struct inode *inode = page->mapping->host; + u64 page_start = page_offset(page); + u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + + if (!test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, + EXTENT_DELALLOC, 0)) { +printk("inode %lu page %Lu not delalloc\n", inode->i_ino, page_offset(page)); +WARN_ON(1); + } + return __set_page_dirty_nobuffers(page); +} + static int btrfs_permission(struct inode *inode, int mask, struct nameidata *nd) { @@ -2967,6 +3097,7 @@ static struct extent_io_ops btrfs_extent_io_ops = { .merge_bio_hook = btrfs_merge_bio_hook, .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .writepage_end_io_hook = btrfs_writepage_end_io_hook, .readpage_io_failed_hook = btrfs_io_failed_hook, .set_bit_hook = btrfs_set_bit_hook, .clear_bit_hook = btrfs_clear_bit_hook, @@ -2982,7 +3113,7 @@ static struct address_space_operations btrfs_aops = { .direct_IO = btrfs_direct_IO, .invalidatepage = btrfs_invalidatepage, .releasepage = btrfs_releasepage, - .set_page_dirty = __set_page_dirty_nobuffers, + .set_page_dirty = btrfs_set_page_dirty, }; static struct address_space_operations btrfs_symlink_aops = { diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 254da822566..6513270f054 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -22,48 +22,30 @@ #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" +#include "extent_io.h" -struct tree_entry { - u64 root_objectid; - u64 objectid; - struct inode *inode; - struct rb_node rb_node; -}; -/* - * returns > 0 if entry passed (root, objectid) is > entry, - * < 0 if (root, objectid) < entry and zero if they are equal - */ -static int comp_entry(struct tree_entry *entry, u64 root_objectid, - u64 objectid) +static u64 entry_end(struct btrfs_ordered_extent *entry) { - if (root_objectid < entry->root_objectid) - return -1; - if (root_objectid > entry->root_objectid) - return 1; - if (objectid < entry->objectid) - return -1; - if (objectid > entry->objectid) - return 1; - return 0; + if (entry->file_offset + entry->len < entry->file_offset) + return (u64)-1; + return entry->file_offset + entry->len; } -static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid, - u64 objectid, struct rb_node *node) +static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, + struct rb_node *node) { struct rb_node ** p = &root->rb_node; struct rb_node * parent = NULL; - struct tree_entry *entry; - int comp; + struct 
btrfs_ordered_extent *entry; while(*p) { parent = *p; - entry = rb_entry(parent, struct tree_entry, rb_node); + entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); - comp = comp_entry(entry, root_objectid, objectid); - if (comp < 0) + if (file_offset < entry->file_offset) p = &(*p)->rb_left; - else if (comp > 0) + else if (file_offset >= entry_end(entry)) p = &(*p)->rb_right; else return parent; @@ -74,24 +56,23 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid, return NULL; } -static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid, - u64 objectid, struct rb_node **prev_ret) +static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, + struct rb_node **prev_ret) { struct rb_node * n = root->rb_node; struct rb_node *prev = NULL; - struct tree_entry *entry; - struct tree_entry *prev_entry = NULL; - int comp; + struct rb_node *test; + struct btrfs_ordered_extent *entry; + struct btrfs_ordered_extent *prev_entry = NULL; while(n) { - entry = rb_entry(n, struct tree_entry, rb_node); + entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); prev = n; prev_entry = entry; - comp = comp_entry(entry, root_objectid, objectid); - if (comp < 0) + if (file_offset < entry->file_offset) n = n->rb_left; - else if (comp > 0) + else if (file_offset >= entry_end(entry)) n = n->rb_right; else return n; @@ -99,195 +80,329 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid, if (!prev_ret) return NULL; - while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) { - prev = rb_next(prev); - prev_entry = rb_entry(prev, struct tree_entry, rb_node); + while(prev && file_offset >= entry_end(prev_entry)) { + test = rb_next(prev); + if (!test) + break; + prev_entry = rb_entry(test, struct btrfs_ordered_extent, + rb_node); + if (file_offset < entry_end(prev_entry)) + break; + + prev = test; + } + if (prev) + prev_entry = rb_entry(prev, struct btrfs_ordered_extent, + rb_node); + while(prev && file_offset < entry_end(prev_entry)) { + test = rb_prev(prev); + if (!test) + break; + prev_entry = rb_entry(test, struct btrfs_ordered_extent, + rb_node); + prev = test; } *prev_ret = prev; return NULL; } -static inline struct rb_node *tree_search(struct rb_root *root, - u64 root_objectid, u64 objectid) +static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) +{ + if (file_offset < entry->file_offset || + entry->file_offset + entry->len <= file_offset) + return 0; + return 1; +} + +static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, + u64 file_offset) { + struct rb_root *root = &tree->tree; struct rb_node *prev; struct rb_node *ret; - ret = __tree_search(root, root_objectid, objectid, &prev); + struct btrfs_ordered_extent *entry; + + if (tree->last) { + entry = rb_entry(tree->last, struct btrfs_ordered_extent, + rb_node); + if (offset_in_entry(entry, file_offset)) + return tree->last; + } + ret = __tree_search(root, file_offset, &prev); if (!ret) - return prev; + ret = prev; + if (ret) + tree->last = ret; return ret; } -int btrfs_add_ordered_inode(struct inode *inode) +int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len) { - struct btrfs_root *root = BTRFS_I(inode)->root; - u64 root_objectid = root->root_key.objectid; - u64 transid = root->fs_info->running_transaction->transid; - struct tree_entry *entry; - struct rb_node *node; struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry; 
- if (transid <= BTRFS_I(inode)->ordered_trans) - return 0; - - tree = &root->fs_info->running_transaction->ordered_inode_tree; - - read_lock(&tree->lock); - node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL); - read_unlock(&tree->lock); - if (node) { - return 0; - } - - entry = kmalloc(sizeof(*entry), GFP_NOFS); + tree = &BTRFS_I(inode)->ordered_tree; + entry = kzalloc(sizeof(*entry), GFP_NOFS); if (!entry) return -ENOMEM; - write_lock(&tree->lock); - entry->objectid = inode->i_ino; - entry->root_objectid = root_objectid; + mutex_lock(&tree->mutex); + entry->file_offset = file_offset; + entry->start = start; + entry->len = len; entry->inode = inode; + /* one ref for the tree */ + atomic_set(&entry->refs, 1); + init_waitqueue_head(&entry->wait); + INIT_LIST_HEAD(&entry->list); - node = tree_insert(&tree->tree, root_objectid, - inode->i_ino, &entry->rb_node); - - BTRFS_I(inode)->ordered_trans = transid; - if (!node) - igrab(inode); - - write_unlock(&tree->lock); + node = tree_insert(&tree->tree, file_offset, + &entry->rb_node); + if (node) { + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + atomic_inc(&entry->refs); + } + set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, + entry_end(entry) - 1, GFP_NOFS); - if (node) - kfree(entry); + set_bit(BTRFS_ORDERED_START, &entry->flags); + mutex_unlock(&tree->mutex); + BUG_ON(node); return 0; } -int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode) +int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum) { - struct tree_entry *entry; + struct btrfs_ordered_inode_tree *tree; struct rb_node *node; + struct btrfs_ordered_extent *entry; - write_lock(&tree->lock); - node = tree_search(&tree->tree, *root_objectid, *objectid); + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = tree_search(tree, sum->file_offset); if (!node) { - write_unlock(&tree->lock); - return 0; +search_fail: +printk("add ordered sum failed to find a node for inode %lu offset %Lu\n", inode->i_ino, sum->file_offset); + node = rb_first(&tree->tree); + while(node) { + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + printk("entry %Lu %Lu %Lu\n", entry->file_offset, entry->file_offset + entry->len, entry->start); + node = rb_next(node); + } + BUG(); } - entry = rb_entry(node, struct tree_entry, rb_node); + BUG_ON(!node); - while(comp_entry(entry, *root_objectid, *objectid) >= 0) { - node = rb_next(node); - if (!node) - break; - entry = rb_entry(node, struct tree_entry, rb_node); - } - if (!node) { - write_unlock(&tree->lock); - return 0; + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, sum->file_offset)) { + goto search_fail; } - *root_objectid = entry->root_objectid; - *inode = entry->inode; - atomic_inc(&entry->inode->i_count); - *objectid = entry->objectid; - write_unlock(&tree->lock); - return 1; + list_add_tail(&sum->list, &entry->list); + mutex_unlock(&tree->mutex); + return 0; } -int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode) +int btrfs_dec_test_ordered_pending(struct inode *inode, + u64 file_offset, u64 io_size) { - struct tree_entry *entry; + struct btrfs_ordered_inode_tree *tree; struct rb_node *node; - - write_lock(&tree->lock); - node = tree_search(&tree->tree, *root_objectid, *objectid); + struct btrfs_ordered_extent *entry; + struct extent_io_tree *io_tree = 
&BTRFS_I(inode)->io_tree; + int ret; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1, + GFP_NOFS); + node = tree_search(tree, file_offset); if (!node) { - write_unlock(&tree->lock); - return 0; + ret = 1; + goto out; } - entry = rb_entry(node, struct tree_entry, rb_node); - while(comp_entry(entry, *root_objectid, *objectid) >= 0) { - node = rb_next(node); - if (!node) - break; - entry = rb_entry(node, struct tree_entry, rb_node); + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, file_offset)) { + ret = 1; + goto out; } - if (!node) { - write_unlock(&tree->lock); - return 0; + + ret = test_range_bit(io_tree, entry->file_offset, + entry->file_offset + entry->len - 1, + EXTENT_ORDERED, 0); + if (!test_bit(BTRFS_ORDERED_START, &entry->flags)) { +printk("inode %lu not ready yet for extent %Lu %Lu\n", inode->i_ino, entry->file_offset, entry_end(entry)); } + if (ret == 0) + ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); +out: + mutex_unlock(&tree->mutex); + return ret == 0; +} - *root_objectid = entry->root_objectid; - *objectid = entry->objectid; - *inode = entry->inode; - atomic_inc(&entry->inode->i_count); - rb_erase(node, &tree->tree); - write_unlock(&tree->lock); - kfree(entry); - return 1; +int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) +{ + if (atomic_dec_and_test(&entry->refs)) + kfree(entry); + return 0; } -static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree, - struct inode *inode, - u64 root_objectid, u64 objectid) +int btrfs_remove_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry) { - struct tree_entry *entry; + struct btrfs_ordered_inode_tree *tree; struct rb_node *node; - struct rb_node *prev; - write_lock(&tree->lock); - node = __tree_search(&tree->tree, root_objectid, objectid, &prev); - if (!node) { - write_unlock(&tree->lock); - return; - } + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = &entry->rb_node; rb_erase(node, &tree->tree); - BTRFS_I(inode)->ordered_trans = 0; - write_unlock(&tree->lock); - atomic_dec(&inode->i_count); - entry = rb_entry(node, struct tree_entry, rb_node); - kfree(entry); - return; + tree->last = NULL; + set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); + mutex_unlock(&tree->mutex); + wake_up(&entry->wait); + return 0; } -void btrfs_del_ordered_inode(struct inode *inode, int force) +void btrfs_wait_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry) { - struct btrfs_root *root = BTRFS_I(inode)->root; - u64 root_objectid = root->root_key.objectid; + u64 start = entry->file_offset; + u64 end = start + entry->len - 1; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); +#else + do_sync_mapping_range(inode->i_mapping, start, end, + SYNC_FILE_RANGE_WRITE); +#endif + wait_event(entry->wait, + test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); +} - if (!BTRFS_I(inode)->ordered_trans) { - return; - } +static void btrfs_start_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry, int wait) +{ + u64 start = entry->file_offset; + u64 end = start + entry->len - 1; - if (!force && (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) || - mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) - return; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); +#else + 
do_sync_mapping_range(inode->i_mapping, start, end, + SYNC_FILE_RANGE_WRITE); +#endif + if (wait) + wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, + &entry->flags)); +} - spin_lock(&root->fs_info->new_trans_lock); - if (root->fs_info->running_transaction) { - struct btrfs_ordered_inode_tree *tree; - tree = &root->fs_info->running_transaction->ordered_inode_tree; - __btrfs_del_ordered_inode(tree, inode, root_objectid, - inode->i_ino); +void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) +{ + u64 end; + struct btrfs_ordered_extent *ordered; + int found; + int should_wait = 0; + +again: + if (start + len < start) + end = (u64)-1; + else + end = start + len - 1; + found = 0; + while(1) { + ordered = btrfs_lookup_first_ordered_extent(inode, end); + if (!ordered) { + break; + } + if (ordered->file_offset >= start + len) { + btrfs_put_ordered_extent(ordered); + break; + } + if (ordered->file_offset + ordered->len < start) { + btrfs_put_ordered_extent(ordered); + break; + } + btrfs_start_ordered_extent(inode, ordered, should_wait); + found++; + end = ordered->file_offset; + btrfs_put_ordered_extent(ordered); + if (end == 0) + break; + end--; + } + if (should_wait && found) { + should_wait = 0; + goto again; } - spin_unlock(&root->fs_info->new_trans_lock); } -int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode) +int btrfs_add_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent *ordered, + u64 start, u64 len) { - struct btrfs_transaction *cur = root->fs_info->running_transaction; - while(cur == root->fs_info->running_transaction && - atomic_read(&BTRFS_I(inode)->ordered_writeback)) { -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) - congestion_wait(WRITE, HZ/20); -#else - blk_congestion_wait(WRITE, HZ/20); -#endif - } + WARN_ON(1); return 0; +#if 0 + int ret; + struct btrfs_ordered_inode_tree *tree; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) { + ret = -EAGAIN; + goto out; + } + set_extent_ordered(io_tree, start, start + len - 1, GFP_NOFS); + ret = 0; +out: + mutex_unlock(&tree->mutex); + return ret; +#endif +} + +struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, + u64 file_offset) +{ + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = tree_search(tree, file_offset); + if (!node) + goto out; + + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + if (!offset_in_entry(entry, file_offset)) + entry = NULL; + if (entry) + atomic_inc(&entry->refs); +out: + mutex_unlock(&tree->mutex); + return entry; +} + +struct btrfs_ordered_extent * +btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset) +{ + struct btrfs_ordered_inode_tree *tree; + struct rb_node *node; + struct btrfs_ordered_extent *entry = NULL; + + tree = &BTRFS_I(inode)->ordered_tree; + mutex_lock(&tree->mutex); + node = tree_search(tree, file_offset); + if (!node) + goto out; + + entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); + atomic_inc(&entry->refs); +out: + mutex_unlock(&tree->mutex); + return entry; } diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 4fa78736423..33292c5fe90 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -20,24 +20,73 @@ #define __BTRFS_ORDERED_DATA__ struct 
btrfs_ordered_inode_tree { - rwlock_t lock; + struct mutex mutex; struct rb_root tree; + struct rb_node *last; }; +struct btrfs_sector_sum { + u64 offset; + u32 sum; +}; + +struct btrfs_ordered_sum { + u64 file_offset; + u64 len; + struct list_head list; + struct btrfs_sector_sum sums; +}; + +/* bits for the flags field */ +#define BTRFS_ORDERED_IO_DONE 0 /* set when all the pages are written */ +#define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */ +#define BTRFS_ORDERED_START 2 /* set when tree setup */ + +struct btrfs_ordered_extent { + u64 file_offset; + u64 start; + u64 len; + unsigned long flags; + atomic_t refs; + struct list_head list; + struct inode *inode; + wait_queue_head_t wait; + struct rb_node rb_node; +}; + + +static inline int btrfs_ordered_sum_size(struct btrfs_root *root, u64 bytes) +{ + unsigned long num_sectors = (bytes + root->sectorsize - 1) / + root->sectorsize; + return sizeof(struct btrfs_ordered_sum) + + num_sectors * sizeof(struct btrfs_sector_sum); +} + static inline void btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t) { - rwlock_init(&t->lock); + mutex_init(&t->mutex); t->tree.rb_node = NULL; + t->last = NULL; } -int btrfs_add_ordered_inode(struct inode *inode); -int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode); -int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree, - u64 *root_objectid, u64 *objectid, - struct inode **inode); -void btrfs_del_ordered_inode(struct inode *inode, int force); -int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode); +int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry); +int btrfs_remove_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry); +int btrfs_dec_test_ordered_pending(struct inode *inode, + u64 file_offset, u64 io_size); +int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, + u64 start, u64 len); +int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_sum *sum); +struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, + u64 file_offset); +void btrfs_wait_ordered_extent(struct inode *inode, + struct btrfs_ordered_extent *entry); +void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); +struct btrfs_ordered_extent * +btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); +int btrfs_add_ordered_pending(struct inode *inode, + struct btrfs_ordered_extent *ordered, + u64 start, u64 len); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a8a3cb03de5..86a5acc19ce 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -67,7 +67,6 @@ static noinline int join_transaction(struct btrfs_root *root) cur_trans->start_time = get_seconds(); INIT_LIST_HEAD(&cur_trans->pending_snapshots); list_add_tail(&cur_trans->list, &root->fs_info->trans_list); - btrfs_ordered_inode_tree_init(&cur_trans->ordered_inode_tree); extent_io_tree_init(&cur_trans->dirty_pages, root->fs_info->btree_inode->i_mapping, GFP_NOFS); @@ -158,10 +157,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, wake_up(&cur_trans->writer_wait); if (cur_trans->in_commit && throttle) { - int ret; + DEFINE_WAIT(wait); mutex_unlock(&root->fs_info->trans_mutex); - ret = wait_for_commit(root, cur_trans); - BUG_ON(ret); + prepare_to_wait(&root->fs_info->transaction_throttle, &wait, + TASK_UNINTERRUPTIBLE); + schedule(); + 
finish_wait(&root->fs_info->transaction_throttle, &wait); mutex_lock(&root->fs_info->trans_mutex); } @@ -486,58 +487,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, return ret; } -int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - struct btrfs_transaction *cur_trans = trans->transaction; - struct inode *inode; - u64 root_objectid = 0; - u64 objectid = 0; - int ret; - - atomic_inc(&root->fs_info->throttles); - while(1) { - ret = btrfs_find_first_ordered_inode( - &cur_trans->ordered_inode_tree, - &root_objectid, &objectid, &inode); - if (!ret) - break; - - mutex_unlock(&root->fs_info->trans_mutex); - - if (S_ISREG(inode->i_mode)) { - atomic_inc(&BTRFS_I(inode)->ordered_writeback); - filemap_fdatawrite(inode->i_mapping); - atomic_dec(&BTRFS_I(inode)->ordered_writeback); - } - iput(inode); - - mutex_lock(&root->fs_info->trans_mutex); - } - while(1) { - root_objectid = 0; - objectid = 0; - ret = btrfs_find_del_first_ordered_inode( - &cur_trans->ordered_inode_tree, - &root_objectid, &objectid, &inode); - if (!ret) - break; - mutex_unlock(&root->fs_info->trans_mutex); - - if (S_ISREG(inode->i_mode)) { - atomic_inc(&BTRFS_I(inode)->ordered_writeback); - filemap_write_and_wait(inode->i_mapping); - atomic_dec(&BTRFS_I(inode)->ordered_writeback); - } - atomic_dec(&inode->i_count); - iput(inode); - - mutex_lock(&root->fs_info->trans_mutex); - } - atomic_dec(&root->fs_info->throttles); - return 0; -} - static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_pending_snapshot *pending) @@ -666,6 +615,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, extent_io_tree_init(pinned_copy, root->fs_info->btree_inode->i_mapping, GFP_NOFS); +printk("commit trans %Lu\n", trans->transid); trans->transaction->in_commit = 1; cur_trans = trans->transaction; if (cur_trans->list.prev != &root->fs_info->trans_list) { @@ -699,8 +649,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); - ret = btrfs_write_ordered_inodes(trans, root); - } while (cur_trans->num_writers > 1 || (cur_trans->num_joined != joined)); @@ -736,6 +684,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_copy_pinned(root, pinned_copy); + wake_up(&root->fs_info->transaction_throttle); + mutex_unlock(&root->fs_info->trans_mutex); ret = btrfs_write_and_wait_transaction(trans, root); BUG_ON(ret); @@ -758,6 +708,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots); mutex_unlock(&root->fs_info->trans_mutex); +printk("done commit trans %Lu\n", trans->transid); kmem_cache_free(btrfs_trans_handle_cachep, trans); if (root->fs_info->closing) { diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 9ccd5a5b170..910350cd4cf 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -19,7 +19,6 @@ #ifndef __BTRFS_TRANSACTION__ #define __BTRFS_TRANSACTION__ #include "btrfs_inode.h" -#include "ordered-data.h" struct btrfs_transaction { u64 transid; @@ -31,7 +30,6 @@ struct btrfs_transaction { struct list_head list; struct extent_io_tree dirty_pages; unsigned long start_time; - struct btrfs_ordered_inode_tree ordered_inode_tree; wait_queue_head_t writer_wait; wait_queue_head_t commit_wait; struct list_head pending_snapshots; @@ -88,8 +86,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int 
cacheonly); int btrfs_clean_old_snapshots(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans, - struct btrfs_root *root); int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, struct btrfs_root *root); #endif -- cgit v1.2.3-70-g09d2 From 247e743cbe6e655768c3679f84821e03c1577902 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 17 Jul 2008 12:53:51 -0400 Subject: Btrfs: Use async helpers to deal with pages that have been improperly dirtied Higher layers sometimes call set_page_dirty without asking the filesystem to help. This causes many problems for the data=ordered and cow code. This commit detects pages that haven't been properly setup for IO and kicks off an async helper to deal with them. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 6 ++++ fs/btrfs/disk-io.c | 4 +++ fs/btrfs/extent_io.c | 10 ++++++ fs/btrfs/extent_io.h | 1 + fs/btrfs/file.c | 1 + fs/btrfs/inode.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++----- 6 files changed, 106 insertions(+), 9 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ceebc052ddc..4ddc8a8f82c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -546,6 +546,12 @@ struct btrfs_fs_info { struct btrfs_workers endio_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; + /* + * fixup workers take dirty pages that didn't properly go through + * the cow mechanism and make them safe to write. It happens + * for the sys_munmap function call path + */ + struct btrfs_workers fixup_workers; struct task_struct *transaction_kthread; struct task_struct *cleaner_kthread; int thread_pool_size; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4a5ebafb935..66466d125c0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1329,11 +1329,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->fixup_workers, 1); btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_write_workers, fs_info->thread_pool_size); btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); + btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, fs_info->thread_pool_size); @@ -1454,6 +1456,7 @@ fail_tree_root: fail_sys_array: fail_sb_buffer: extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); + btrfs_stop_workers(&fs_info->fixup_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_write_workers); @@ -1710,6 +1713,7 @@ int close_ctree(struct btrfs_root *root) truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); + btrfs_stop_workers(&fs_info->fixup_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_write_workers); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3f82a6e9ca4..feff16cb9b4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2050,6 +2050,16 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, lock_extent(tree, start, page_end, 
GFP_NOFS); unlock_start = start; + if (tree->ops && tree->ops->writepage_start_hook) { + ret = tree->ops->writepage_start_hook(page, start, page_end); + if (ret == -EAGAIN) { + unlock_extent(tree, start, page_end, GFP_NOFS); + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; + } + } + end = page_end; if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { printk("found delalloc bits after lock_extent\n"); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 2268a799589..23affd27af5 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -30,6 +30,7 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, struct bio *bio, int mirror_num); struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); + int (*writepage_start_hook)(struct page *page, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); extent_submit_bio_hook_t *submit_bio_hook; int (*merge_bio_hook)(struct page *page, unsigned long offset, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 12e765f7e0d..20928639d17 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -313,6 +313,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, for (i = 0; i < num_pages; i++) { struct page *p = pages[i]; SetPageUptodate(p); + ClearPageChecked(p); set_page_dirty(p); } } else { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c5a62f0b959..47a008c1930 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -403,6 +403,87 @@ static int add_pending_csums(struct btrfs_trans_handle *trans, return 0; } +struct btrfs_writepage_fixup { + struct page *page; + struct btrfs_work work; +}; + +/* see btrfs_writepage_start_hook for details on why this is required */ +void btrfs_writepage_fixup_worker(struct btrfs_work *work) +{ + struct btrfs_writepage_fixup *fixup; + struct btrfs_ordered_extent *ordered; + struct page *page; + struct inode *inode; + u64 page_start; + u64 page_end; + + fixup = container_of(work, struct btrfs_writepage_fixup, work); + page = fixup->page; + + lock_page(page); + if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { + ClearPageChecked(page); + goto out_page; + } + + inode = page->mapping->host; + page_start = page_offset(page); + page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; + + lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); + ordered = btrfs_lookup_ordered_extent(inode, page_start); + if (ordered) + goto out; + + set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start, page_end, + GFP_NOFS); + ClearPageChecked(page); +out: + unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); +out_page: + unlock_page(page); + page_cache_release(page); +} + +/* + * There are a few paths in the higher layers of the kernel that directly + * set the page dirty bit without asking the filesystem if it is a + * good idea. This causes problems because we want to make sure COW + * properly happens and the data=ordered rules are followed. + * + * In our case any range that doesn't have the EXTENT_ORDERED bit set + * hasn't been properly setup for IO. We kick off an async process + * to fix it up. The async helper will wait for ordered extents, set + * the delalloc bit and make it safe to write the page. 
+ */ +int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) +{ + struct inode *inode = page->mapping->host; + struct btrfs_writepage_fixup *fixup; + struct btrfs_root *root = BTRFS_I(inode)->root; + int ret; + + ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end, + EXTENT_ORDERED, 0); + if (ret) + return 0; + + if (PageChecked(page)) + return -EAGAIN; + + fixup = kzalloc(sizeof(*fixup), GFP_NOFS); + if (!fixup) + return -EAGAIN; +printk("queueing worker to fixup page %lu %Lu\n", inode->i_ino, page_offset(page)); + SetPageChecked(page); + page_cache_get(page); + fixup->work.func = btrfs_writepage_fixup_worker; + fixup->page = page; + btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work); + return -EAGAIN; +} + int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { @@ -1263,6 +1344,7 @@ again: flush_dcache_page(page); kunmap(page); } + ClearPageChecked(page); set_page_dirty(page); unlock_extent(io_tree, page_start, page_end, GFP_NOFS); @@ -2658,6 +2740,7 @@ again: flush_dcache_page(page); kunmap(page); } + ClearPageChecked(page); set_page_dirty(page); unlock_extent(io_tree, page_start, page_end, GFP_NOFS); @@ -3039,15 +3122,6 @@ out_fail: static int btrfs_set_page_dirty(struct page *page) { - struct inode *inode = page->mapping->host; - u64 page_start = page_offset(page); - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; - - if (!test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end, - EXTENT_DELALLOC, 0)) { -printk("inode %lu page %Lu not delalloc\n", inode->i_ino, page_offset(page)); -WARN_ON(1); - } return __set_page_dirty_nobuffers(page); } @@ -3098,6 +3172,7 @@ static struct extent_io_ops btrfs_extent_io_ops = { .readpage_io_hook = btrfs_readpage_io_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, .writepage_end_io_hook = btrfs_writepage_end_io_hook, + .writepage_start_hook = btrfs_writepage_start_hook, .readpage_io_failed_hook = btrfs_io_failed_hook, .set_bit_hook = btrfs_set_bit_hook, .clear_bit_hook = btrfs_clear_bit_hook, -- cgit v1.2.3-70-g09d2 From 211f90e68b679d27fe23c5505f86d6ce62c98bae Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 18 Jul 2008 11:56:15 -0400 Subject: Btrfs: Don't allow releasepage to succeed if EXTENT_ORDERED is set Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 3 ++- fs/btrfs/inode.c | 14 +++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index feff16cb9b4..3153b4fdd0b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2542,7 +2542,8 @@ int try_release_extent_state(struct extent_map_tree *map, u64 end = start + PAGE_CACHE_SIZE - 1; int ret = 1; - if (test_range_bit(tree, start, end, EXTENT_IOBITS, 0)) + if (test_range_bit(tree, start, end, + EXTENT_IOBITS | EXTENT_ORDERED, 0)) ret = 0; else { if ((mask & GFP_NOFS) == GFP_NOFS) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4d729d90d2b..8803abc89bb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -477,10 +477,8 @@ printk("queueing worker to fixup page %lu %Lu\n", inode->i_ino, page_offset(page return -EAGAIN; } -int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, - struct extent_state *state, int uptodate) +static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) { - struct inode *inode = page->mapping->host; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; struct btrfs_ordered_extent 
*ordered_extent; @@ -550,6 +548,12 @@ int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, return 0; } +int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, + struct extent_state *state, int uptodate) +{ + return btrfs_finish_ordered_io(page->mapping->host, start, end); +} + int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) { int ret = 0; @@ -2663,8 +2667,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) clear_extent_bit(tree, page_start, page_end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_LOCKED, 1, 0, GFP_NOFS); - btrfs_writepage_end_io_hook(page, page_start, - page_end, NULL, 1); + btrfs_finish_ordered_io(page->mapping->host, + page_start, page_end); btrfs_put_ordered_extent(ordered); lock_extent(tree, page_start, page_end, GFP_NOFS); } -- cgit v1.2.3-70-g09d2 From 7f3c74fb831fa19bafe087e817c0a5ff3883f1ea Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 18 Jul 2008 12:01:11 -0400 Subject: Btrfs: Keep extent mappings in ram until pending ordered extents are done It was possible for stale mappings from disk to be used instead of the new pending ordered extent. This adds a flag to the extent map struct to keep it pinned until the pending ordered extent is actually on disk. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 27 +++++++++++++++------------ fs/btrfs/extent_map.c | 4 ++++ fs/btrfs/extent_map.h | 3 +++ fs/btrfs/file-item.c | 5 +---- fs/btrfs/file.c | 14 ++++++++++---- fs/btrfs/inode.c | 15 +++++++++++++++ 6 files changed, 48 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3153b4fdd0b..d4a63ae7ed1 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2000,7 +2000,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, struct block_device *bdev; int ret; int nr = 0; - size_t page_offset = 0; + size_t pg_offset = 0; size_t blocksize; loff_t i_size = i_size_read(inode); unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; @@ -2008,9 +2008,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 delalloc_end; WARN_ON(!PageLocked(page)); - page_offset = i_size & (PAGE_CACHE_SIZE - 1); + pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || - (page->index == end_index && !page_offset)) { + (page->index == end_index && !pg_offset)) { page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); return 0; @@ -2020,12 +2020,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, char *userpage; userpage = kmap_atomic(page, KM_USER0); - memset(userpage + page_offset, 0, - PAGE_CACHE_SIZE - page_offset); + memset(userpage + pg_offset, 0, + PAGE_CACHE_SIZE - pg_offset); kunmap_atomic(userpage, KM_USER0); flush_dcache_page(page); } - page_offset = 0; + pg_offset = 0; set_page_extent_mapped(page); @@ -2088,7 +2088,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unlock_start = page_end + 1; break; } - em = epd->get_extent(inode, page, page_offset, cur, + em = epd->get_extent(inode, page, pg_offset, cur, end - cur + 1, 1); if (IS_ERR(em) || !em) { SetPageError(page); @@ -2113,12 +2113,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unlock_extent(tree, unlock_start, cur + iosize -1, GFP_NOFS); + if (tree->ops && tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, cur, cur + iosize - 1, NULL, 1); cur = cur + 
iosize; - page_offset += iosize; + pg_offset += iosize; unlock_start = cur; continue; } @@ -2127,7 +2128,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, if (0 && !test_range_bit(tree, cur, cur + iosize - 1, EXTENT_DIRTY, 0)) { cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; continue; } clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); @@ -2141,6 +2142,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, SetPageError(page); } else { unsigned long max_nr = end_index + 1; + set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { printk("warning page %lu not writeback, " @@ -2150,14 +2152,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, } ret = submit_extent_page(WRITE, tree, page, sector, - iosize, page_offset, bdev, + iosize, pg_offset, bdev, &epd->bio, max_nr, end_bio_extent_writepage, 0); if (ret) SetPageError(page); } cur = cur + iosize; - page_offset += iosize; + pg_offset += iosize; nr++; } done: @@ -2579,7 +2581,8 @@ int try_release_extent_mapping(struct extent_map_tree *map, spin_unlock(&map->lock); break; } - if (em->start != start) { + if (test_bit(EXTENT_FLAG_PINNED, &em->flags) || + em->start != start) { spin_unlock(&map->lock); free_extent_map(em); break; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 81123277c2b..71b1ac15535 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -173,6 +173,9 @@ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) static int mergable_maps(struct extent_map *prev, struct extent_map *next) { + if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) + return 0; + if (extent_map_end(prev) == next->start && prev->flags == next->flags && prev->bdev == next->bdev && @@ -320,6 +323,7 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) { int ret = 0; + WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags)); BUG_ON(spin_trylock(&tree->lock)); rb_erase(&em->rb_node, &tree->map); em->in_tree = 0; diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 56314217cfc..a3978ec2784 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -8,6 +8,9 @@ #define EXTENT_MAP_INLINE (u64)-2 #define EXTENT_MAP_DELALLOC (u64)-1 +/* bits for the flags field */ +#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */ + struct extent_map { struct rb_node rb_node; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index e02f1e5acb0..d9c69e16d36 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -192,7 +192,6 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, (char *)§or_sum->sum); sector_sum->offset = page_offset(bvec->bv_page) + bvec->bv_offset; - sector_sum++; bio_index++; total_bytes += bvec->bv_len; @@ -201,9 +200,6 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, } btrfs_add_ordered_sum(inode, ordered, sums); btrfs_put_ordered_extent(ordered); - if (total_bytes != bio->bi_size) { -printk("warning, total bytes %lu bio size %u\n", total_bytes, bio->bi_size); - } return 0; } @@ -372,6 +368,7 @@ next_sector: write_extent_buffer(leaf, §or_sum->sum, (unsigned long)item, BTRFS_CRC32_SIZE); } + total_bytes += root->sectorsize; sector_sum++; if (total_bytes < sums->len) { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 40ad1b2958c..eccdb9562ba 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -358,9 +358,7 @@ int btrfs_drop_extent_cache(struct inode 
*inode, u64 start, u64 end) struct extent_map *split = NULL; struct extent_map *split2 = NULL; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - struct extent_map *tmp; u64 len = end - start + 1; - u64 next_start; int ret; int testend = 1; @@ -381,8 +379,16 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) spin_unlock(&em_tree->lock); break; } - tmp = rb_entry(&em->rb_node, struct extent_map, rb_node); - next_start = tmp->start; + if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) { + start = em->start + em->len; + free_extent_map(em); + spin_unlock(&em_tree->lock); + if (start < end) { + len = end - start + 1; + continue; + } + break; + } remove_extent_mapping(em_tree, em); if (em->block_start < EXTENT_MAP_LAST_BYTE && diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8803abc89bb..08dbe738b51 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -144,6 +144,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) em->len = ins.offset; em->block_start = ins.objectid; em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); while(1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -483,6 +484,8 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map *em; u64 alloc_hint = 0; struct list_head list; struct btrfs_key ins; @@ -524,6 +527,17 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len, ordered_extent->len, 0); BUG_ON(ret); + + + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, ordered_extent->file_offset, + ordered_extent->len); + if (em) { + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + free_extent_map(em); + } + spin_unlock(&em_tree->lock); + btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1); @@ -538,6 +552,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) btrfs_ordered_update_i_size(inode, ordered_extent); btrfs_remove_ordered_extent(inode, ordered_extent); + /* once for us */ btrfs_put_ordered_extent(ordered_extent); /* once for the tree */ -- cgit v1.2.3-70-g09d2 From 6af118ce51b52ceda357c671550c79628b9c4a65 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 22 Jul 2008 11:18:07 -0400 Subject: Btrfs: Index extent buffers in an rbtree Before, extent buffers were a temporary object, meant to map a number of pages at once and collect operations on them. But, a few extra fields have crept in, and they are also the best place to store a per-tree block lock field as well. This commit puts the extent buffers into an rbtree, and ensures a single extent buffer for each tree block. 
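The key property of the new index is that insertion is collision-aware: if a buffer already exists at the same start offset, the insert returns it so the caller takes a reference on the existing buffer instead of allocating a second one for the same block. A stand-alone sketch of that pattern follows; it is plain C with an unbalanced BST standing in for the kernel rbtree, invented names (struct buffer, buffer_insert, buffer_search), and a bare int in place of the atomic refcount, not the kernel code itself:

/*
 * Illustration only: one cached object per key, enforced by an insert
 * that hands back any existing entry at the same start offset.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	uint64_t start;              /* key: byte offset of the tree block */
	int refs;
	struct buffer *left, *right;
};

/* Insert @eb; if a buffer with the same start exists, return it instead. */
static struct buffer *buffer_insert(struct buffer **root, struct buffer *eb)
{
	struct buffer **p = root;

	while (*p) {
		if (eb->start < (*p)->start)
			p = &(*p)->left;
		else if (eb->start > (*p)->start)
			p = &(*p)->right;
		else
			return *p;   /* collision: caller reuses this one */
	}
	*p = eb;
	return NULL;
}

static struct buffer *buffer_search(struct buffer *n, uint64_t start)
{
	while (n) {
		if (start < n->start)
			n = n->left;
		else if (start > n->start)
			n = n->right;
		else
			return n;
	}
	return NULL;
}

int main(void)
{
	struct buffer *root = NULL;
	struct buffer a = { .start = 4096, .refs = 1 };
	struct buffer b = { .start = 4096, .refs = 1 };
	struct buffer *exists;

	buffer_insert(&root, &a);
	exists = buffer_insert(&root, &b);   /* duplicate start offset */
	if (exists) {
		exists->refs++;              /* mimics atomic_inc(&eb->refs) */
		printf("reused buffer at %llu, refs %d\n",
		       (unsigned long long)exists->start, exists->refs);
	}
	printf("lookup 4096 -> %p\n", (void *)buffer_search(root, 4096));
	return 0;
}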
Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 26 ++--- fs/btrfs/extent_io.c | 309 +++++++++++++++++++-------------------------------- fs/btrfs/extent_io.h | 11 +- fs/btrfs/inode.c | 3 - 4 files changed, 129 insertions(+), 220 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 99bb385c298..86e84a8579e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -381,7 +381,6 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, end = min_t(u64, eb->len, PAGE_CACHE_SIZE); end = eb->start + end - 1; - release_extent_buffer_tail_pages(eb); err: free_extent_buffer(eb); out: @@ -563,21 +562,21 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) struct extent_map_tree *map; int ret; - if (page_count(page) > 3) { - /* once for page->private, once for the caller, once - * once for the page cache - */ - return 0; - } tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; + ret = try_release_extent_state(map, tree, page, gfp_flags); + if (!ret) { + return 0; + } + + ret = try_release_extent_buffer(tree, page); if (ret == 1) { - invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); } + return ret; } @@ -588,7 +587,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset) extent_invalidatepage(tree, page, offset); btree_releasepage(page, GFP_NOFS); if (PagePrivate(page)) { - invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); + printk("warning page private not zero on page %Lu\n", + page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -1456,7 +1456,6 @@ fail_tree_root: free_extent_buffer(tree_root->node); fail_sys_array: fail_sb_buffer: - extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); btrfs_stop_workers(&fs_info->fixup_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); @@ -1705,13 +1704,6 @@ int close_ctree(struct btrfs_root *root) filemap_write_and_wait(fs_info->btree_inode->i_mapping); - extent_io_tree_empty_lru(&fs_info->free_space_cache); - extent_io_tree_empty_lru(&fs_info->block_group_cache); - extent_io_tree_empty_lru(&fs_info->pinned_extents); - extent_io_tree_empty_lru(&fs_info->pending_del); - extent_io_tree_empty_lru(&fs_info->extent_ins); - extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree); - truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); btrfs_stop_workers(&fs_info->fixup_workers); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d4a63ae7ed1..32bb4ed3723 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -91,29 +91,16 @@ void extent_io_tree_init(struct extent_io_tree *tree, struct address_space *mapping, gfp_t mask) { tree->state.rb_node = NULL; + tree->buffer.rb_node = NULL; tree->ops = NULL; tree->dirty_bytes = 0; spin_lock_init(&tree->lock); - spin_lock_init(&tree->lru_lock); + spin_lock_init(&tree->buffer_lock); tree->mapping = mapping; - INIT_LIST_HEAD(&tree->buffer_lru); - tree->lru_size = 0; tree->last = NULL; } EXPORT_SYMBOL(extent_io_tree_init); -void extent_io_tree_empty_lru(struct extent_io_tree *tree) -{ - struct extent_buffer *eb; - while(!list_empty(&tree->buffer_lru)) { - eb = list_entry(tree->buffer_lru.next, struct extent_buffer, - lru); - list_del_init(&eb->lru); - free_extent_buffer(eb); - } -} -EXPORT_SYMBOL(extent_io_tree_empty_lru); - struct 
extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; @@ -245,6 +232,50 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree, return ret; } +static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree, + u64 offset, struct rb_node *node) +{ + struct rb_root *root = &tree->buffer; + struct rb_node ** p = &root->rb_node; + struct rb_node * parent = NULL; + struct extent_buffer *eb; + + while(*p) { + parent = *p; + eb = rb_entry(parent, struct extent_buffer, rb_node); + + if (offset < eb->start) + p = &(*p)->rb_left; + else if (offset > eb->start) + p = &(*p)->rb_right; + else + return eb; + } + + rb_link_node(node, parent, p); + rb_insert_color(node, root); + return NULL; +} + +static struct extent_buffer *buffer_search(struct extent_io_tree *tree, + u64 offset) +{ + struct rb_root *root = &tree->buffer; + struct rb_node * n = root->rb_node; + struct extent_buffer *eb; + + while(n) { + eb = rb_entry(n, struct extent_buffer, rb_node); + if (offset < eb->start) + n = n->rb_left; + else if (offset > eb->start) + n = n->rb_right; + else + return eb; + } + return NULL; +} + /* * utility function to look for merge candidates inside a given range. * Any extents with matching state are merged together into a single @@ -1817,9 +1848,8 @@ void set_page_extent_mapped(struct page *page) { if (!PagePrivate(page)) { SetPagePrivate(page); - WARN_ON(!page->mapping->a_ops->invalidatepage); - set_page_private(page, EXTENT_PAGE_PRIVATE); page_cache_get(page); + set_page_private(page, EXTENT_PAGE_PRIVATE); } } @@ -2627,51 +2657,6 @@ out: return sector; } -static int add_lru(struct extent_io_tree *tree, struct extent_buffer *eb) -{ - if (list_empty(&eb->lru)) { - extent_buffer_get(eb); - list_add(&eb->lru, &tree->buffer_lru); - tree->lru_size++; - if (tree->lru_size >= BUFFER_LRU_MAX) { - struct extent_buffer *rm; - rm = list_entry(tree->buffer_lru.prev, - struct extent_buffer, lru); - tree->lru_size--; - list_del_init(&rm->lru); - free_extent_buffer(rm); - } - } else - list_move(&eb->lru, &tree->buffer_lru); - return 0; -} -static struct extent_buffer *find_lru(struct extent_io_tree *tree, - u64 start, unsigned long len) -{ - struct list_head *lru = &tree->buffer_lru; - struct list_head *cur = lru->next; - struct extent_buffer *eb; - - if (list_empty(lru)) - return NULL; - - do { - eb = list_entry(cur, struct extent_buffer, lru); - if (eb->start == start && eb->len == len) { - extent_buffer_get(eb); - return eb; - } - cur = cur->next; - } while (cur != lru); - return NULL; -} - -static inline unsigned long num_extent_pages(u64 start, u64 len) -{ - return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - - (start >> PAGE_CACHE_SHIFT); -} - static inline struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i) { @@ -2688,44 +2673,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, return p; } -int release_extent_buffer_tail_pages(struct extent_buffer *eb) -{ - unsigned long num_pages = num_extent_pages(eb->start, eb->len); - struct page *page; - unsigned long i; - - if (num_pages == 1) - return 0; - for (i = 1; i < num_pages; i++) { - page = extent_buffer_page(eb, i); - page_cache_release(page); - } - return 0; -} - - -int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, - unsigned long len) +static inline unsigned long num_extent_pages(u64 start, u64 len) { - struct list_head *lru = &tree->buffer_lru; - struct list_head *cur = lru->next; - struct extent_buffer *eb; - int found = 0; - - 
spin_lock(&tree->lru_lock); - if (list_empty(lru)) - goto out; - - do { - eb = list_entry(cur, struct extent_buffer, lru); - if (eb->start <= start && eb->start + eb->len > start) { - eb->flags &= ~EXTENT_UPTODATE; - } - cur = cur->next; - } while (cur != lru); -out: - spin_unlock(&tree->lru_lock); - return found; + return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - + (start >> PAGE_CACHE_SHIFT); } static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, @@ -2736,15 +2687,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, struct extent_buffer *eb = NULL; unsigned long flags; - spin_lock(&tree->lru_lock); - eb = find_lru(tree, start, len); - spin_unlock(&tree->lru_lock); - if (eb) { - return eb; - } - eb = kmem_cache_zalloc(extent_buffer_cache, mask); - INIT_LIST_HEAD(&eb->lru); eb->start = start; eb->len = len; spin_lock_irqsave(&leak_lock, flags); @@ -2773,17 +2716,24 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, unsigned long i; unsigned long index = start >> PAGE_CACHE_SHIFT; struct extent_buffer *eb; + struct extent_buffer *exists = NULL; struct page *p; struct address_space *mapping = tree->mapping; int uptodate = 1; + spin_lock(&tree->buffer_lock); + eb = buffer_search(tree, start); + if (eb) { + atomic_inc(&eb->refs); + spin_unlock(&tree->buffer_lock); + return eb; + } + spin_unlock(&tree->buffer_lock); + eb = __alloc_extent_buffer(tree, start, len, mask); if (!eb) return NULL; - if (eb->flags & EXTENT_BUFFER_FILLED) - goto lru_add; - if (page0) { eb->first_page = page0; i = 1; @@ -2800,7 +2750,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); if (!p) { WARN_ON(1); - goto fail; + goto free_eb; } set_page_extent_mapped(p); mark_page_accessed(p); @@ -2818,25 +2768,28 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, eb->flags |= EXTENT_UPTODATE; eb->flags |= EXTENT_BUFFER_FILLED; -lru_add: - spin_lock(&tree->lru_lock); - add_lru(tree, eb); - spin_unlock(&tree->lru_lock); + spin_lock(&tree->buffer_lock); + exists = buffer_tree_insert(tree, start, &eb->rb_node); + if (exists) { + /* add one reference for the caller */ + atomic_inc(&exists->refs); + spin_unlock(&tree->buffer_lock); + goto free_eb; + } + spin_unlock(&tree->buffer_lock); + + /* add one reference for the tree */ + atomic_inc(&eb->refs); return eb; -fail: - spin_lock(&tree->lru_lock); - list_del_init(&eb->lru); - spin_unlock(&tree->lru_lock); +free_eb: if (!atomic_dec_and_test(&eb->refs)) - return NULL; - for (index = 1; index < i; index++) { + return exists; + for (index = 1; index < i; index++) page_cache_release(extent_buffer_page(eb, index)); - } - if (i > 0) - page_cache_release(extent_buffer_page(eb, 0)); + page_cache_release(extent_buffer_page(eb, 0)); __free_extent_buffer(eb); - return NULL; + return exists; } EXPORT_SYMBOL(alloc_extent_buffer); @@ -2844,89 +2797,27 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, u64 start, unsigned long len, gfp_t mask) { - unsigned long num_pages = num_extent_pages(start, len); - unsigned long i; - unsigned long index = start >> PAGE_CACHE_SHIFT; struct extent_buffer *eb; - struct page *p; - struct address_space *mapping = tree->mapping; - int uptodate = 1; - eb = __alloc_extent_buffer(tree, start, len, mask); - if (!eb) - return NULL; - - if (eb->flags & EXTENT_BUFFER_FILLED) - goto lru_add; - - for (i = 0; i < num_pages; i++, index++) { - p = 
find_get_page(mapping, index); - if (!p) { - goto fail; - } - if (TestSetPageLocked(p)) { - page_cache_release(p); - goto fail; - } - - set_page_extent_mapped(p); - mark_page_accessed(p); - - if (i == 0) { - eb->first_page = p; - set_page_extent_head(p, len); - } else { - set_page_private(p, EXTENT_PAGE_PRIVATE); - } - - if (!PageUptodate(p)) - uptodate = 0; - unlock_page(p); - } - if (uptodate) - eb->flags |= EXTENT_UPTODATE; - eb->flags |= EXTENT_BUFFER_FILLED; + spin_lock(&tree->buffer_lock); + eb = buffer_search(tree, start); + if (eb) + atomic_inc(&eb->refs); + spin_unlock(&tree->buffer_lock); -lru_add: - spin_lock(&tree->lru_lock); - add_lru(tree, eb); - spin_unlock(&tree->lru_lock); return eb; -fail: - spin_lock(&tree->lru_lock); - list_del_init(&eb->lru); - spin_unlock(&tree->lru_lock); - if (!atomic_dec_and_test(&eb->refs)) - return NULL; - for (index = 1; index < i; index++) { - page_cache_release(extent_buffer_page(eb, index)); - } - if (i > 0) - page_cache_release(extent_buffer_page(eb, 0)); - __free_extent_buffer(eb); - return NULL; } EXPORT_SYMBOL(find_extent_buffer); void free_extent_buffer(struct extent_buffer *eb) { - unsigned long i; - unsigned long num_pages; - if (!eb) return; if (!atomic_dec_and_test(&eb->refs)) return; - WARN_ON(!list_empty(&eb->lru)); - num_pages = num_extent_pages(eb->start, eb->len); - - for (i = 1; i < num_pages; i++) { - page_cache_release(extent_buffer_page(eb, i)); - } - page_cache_release(extent_buffer_page(eb, 0)); - __free_extent_buffer(eb); + WARN_ON(1); } EXPORT_SYMBOL(free_extent_buffer); @@ -3583,3 +3474,35 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, } } EXPORT_SYMBOL(memmove_extent_buffer); + +int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) +{ + u64 start = page_offset(page); + struct extent_buffer *eb; + int ret = 1; + unsigned long i; + unsigned long num_pages; + + spin_lock(&tree->buffer_lock); + eb = buffer_search(tree, start); + if (!eb) + goto out; + + if (atomic_read(&eb->refs) > 1) { + ret = 0; + goto out; + } + /* at this point we can safely release the extent buffer */ + num_pages = num_extent_pages(eb->start, eb->len); + for (i = 0; i < num_pages; i++) { + struct page *page = extent_buffer_page(eb, i); + page_cache_release(page); + } + rb_erase(&eb->rb_node, &tree->buffer); + __free_extent_buffer(eb); +out: + spin_unlock(&tree->buffer_lock); + return ret; +} +EXPORT_SYMBOL(try_release_extent_buffer); + diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 23affd27af5..dd367617d78 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -54,13 +54,12 @@ struct extent_io_ops { struct extent_io_tree { struct rb_root state; + struct rb_root buffer; struct address_space *mapping; u64 dirty_bytes; spinlock_t lock; + spinlock_t buffer_lock; struct extent_io_ops *ops; - spinlock_t lru_lock; - struct list_head buffer_lru; - int lru_size; struct extent_state *last; }; @@ -87,10 +86,10 @@ struct extent_buffer { unsigned long map_start; unsigned long map_len; struct page *first_page; - struct list_head lru; atomic_t refs; int flags; struct list_head leak_list; + struct rb_node rb_node; }; struct extent_map_tree; @@ -112,10 +111,10 @@ typedef struct extent_map *(get_extent_t)(struct inode *inode, void extent_io_tree_init(struct extent_io_tree *tree, struct address_space *mapping, gfp_t mask); -void extent_io_tree_empty_lru(struct extent_io_tree *tree); int try_release_extent_mapping(struct extent_map_tree *map, struct extent_io_tree *tree, struct page 
*page, gfp_t mask); +int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page); int try_release_extent_state(struct extent_map_tree *map, struct extent_io_tree *tree, struct page *page, gfp_t mask); @@ -241,8 +240,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, unsigned long *map_start, unsigned long *map_len, int km); void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); -int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, - unsigned long len); int release_extent_buffer_tail_pages(struct extent_buffer *eb); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8fb6dc25e7a..60852ada658 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2670,7 +2670,6 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) map = &BTRFS_I(page->mapping->host)->extent_tree; ret = try_release_extent_mapping(map, tree, page, gfp_flags); if (ret == 1) { - invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -2721,8 +2720,6 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset) ClearPageChecked(page); if (PagePrivate(page)) { - invalidate_extent_lru(tree, page_offset(page), - PAGE_CACHE_SIZE); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); -- cgit v1.2.3-70-g09d2 From a61e6f29dc7c9d56a776a518eed92bbc61848263 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 22 Jul 2008 11:18:08 -0400 Subject: Btrfs: Use a mutex in the extent buffer for tree block locking This replaces the use of the page cache lock bit for locking, which wasn't suitable for block size < page size and couldn't be used recursively. The mutexes alone don't fix either problem, but they are the first step. 
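Note the polarity flip this forces at call sites: TestSetPageLocked() returns nonzero when the page was already locked, while mutex_trylock() returns nonzero on success, which is why the extent-tree.c hunk below inverts its btrfs_try_tree_lock() test. A user-space sketch of the spin-then-sleep pattern the new btrfs_tree_lock() uses follows; pthreads stand in for the kernel mutex API and the names are invented for the example:

/*
 * Illustration only: try the lock optimistically for a short spin, on
 * the bet that tree locks are held briefly, then fall back to a
 * blocking acquire. Mirrors the 512-iteration loop in the patch.
 */
#include <pthread.h>
#include <sched.h>

struct tree_buf {
	pthread_mutex_t mutex;
};

static int buf_trylock(struct tree_buf *eb)
{
	return pthread_mutex_trylock(&eb->mutex) == 0;
}

static void buf_lock(struct tree_buf *eb)
{
	int i;

	for (i = 0; i < 512; i++) {
		if (buf_trylock(eb))
			return;
		sched_yield();               /* stand-in for cpu_relax() */
	}
	pthread_mutex_lock(&eb->mutex);      /* give up and sleep */
}

static void buf_unlock(struct tree_buf *eb)
{
	pthread_mutex_unlock(&eb->mutex);
}

int main(void)
{
	struct tree_buf eb = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	buf_lock(&eb);
	buf_unlock(&eb);
	return 0;
}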
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 7 +------ fs/btrfs/extent_io.c | 9 +++++++++ fs/btrfs/extent_io.h | 1 + fs/btrfs/locking.c | 13 ++++++------- 4 files changed, 17 insertions(+), 13 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ccd49322f79..c51cd11de20 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1451,7 +1451,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, struct extent_buffer *buf; buf = btrfs_find_tree_block(root, bytenr, num_bytes); if (buf) { - if (!btrfs_try_tree_lock(buf) && + if (btrfs_try_tree_lock(buf) && btrfs_buffer_uptodate(buf, 0)) { u64 transid = root->fs_info->running_transaction->transid; @@ -3345,11 +3345,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) set_state_private(block_group_cache, found_key.objectid, (unsigned long)cache); - /* hack for now */ - if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) { - cache_block_group(root->fs_info->extent_root, - cache); - } if (key.objectid >= btrfs_super_total_bytes(&info->super_copy)) break; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 32bb4ed3723..7380449cb5b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2690,6 +2690,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, eb = kmem_cache_zalloc(extent_buffer_cache, mask); eb->start = start; eb->len = len; + mutex_init(&eb->mutex); spin_lock_irqsave(&leak_lock, flags); list_add(&eb->leak_list, &buffers); spin_unlock_irqrestore(&leak_lock, flags); @@ -2837,6 +2838,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); + lock_page(page); if (i == 0) set_page_extent_head(page, eb->len); else @@ -2854,6 +2856,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, end = start + PAGE_CACHE_SIZE - 1; if (test_range_bit(tree, start, end, EXTENT_DIRTY, 0)) { + unlock_page(page); continue; } } @@ -2865,6 +2868,7 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, PAGECACHE_TAG_DIRTY); } read_unlock_irq(&page->mapping->tree_lock); + unlock_page(page); } return 0; } @@ -2893,12 +2897,17 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, * on us if the page isn't already dirty. 
*/ if (i == 0) { + lock_page(page); set_page_extent_head(page, eb->len); } else if (PagePrivate(page) && page->private != EXTENT_PAGE_PRIVATE) { + lock_page(page); set_page_extent_mapped(page); + unlock_page(page); } __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); + if (i == 0) + unlock_page(page); } return set_extent_dirty(tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index dd367617d78..6c03e6a1993 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -90,6 +90,7 @@ struct extent_buffer { int flags; struct list_head leak_list; struct rb_node rb_node; + struct mutex mutex; }; struct extent_map_tree; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 058a506a0dd..01a9ac2be3f 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -29,32 +29,31 @@ int btrfs_tree_lock(struct extent_buffer *eb) { int i; - if (!TestSetPageLocked(eb->first_page)) + if (mutex_trylock(&eb->mutex)) return 0; for (i = 0; i < 512; i++) { cpu_relax(); - if (!TestSetPageLocked(eb->first_page)) + if (mutex_trylock(&eb->mutex)) return 0; } cpu_relax(); - lock_page(eb->first_page); + mutex_lock(&eb->mutex); return 0; } int btrfs_try_tree_lock(struct extent_buffer *eb) { - return TestSetPageLocked(eb->first_page); + return mutex_trylock(&eb->mutex); } int btrfs_tree_unlock(struct extent_buffer *eb) { - WARN_ON(!PageLocked(eb->first_page)); - unlock_page(eb->first_page); + mutex_unlock(&eb->mutex); return 0; } int btrfs_tree_locked(struct extent_buffer *eb) { - return PageLocked(eb->first_page); + return mutex_is_locked(&eb->mutex); } -- cgit v1.2.3-70-g09d2 From f421950f86bf96a11fef932e167ab2e70d4c43a0 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 22 Jul 2008 11:18:09 -0400 Subject: Btrfs: Fix some data=ordered related data corruptions Stress testing was showing data checksum errors, most of which were caused by a lookup bug in the extent_map tree. The tree was caching the last pointer returned, and searches would check the last pointer first. But, search callers also expect the search to return the very first matching extent in the range, which wasn't always true with the last pointer usage. For now, the code to cache the last return value is just removed. It is easy to fix, but I think lookups are rare enough that it isn't required anymore. This commit also replaces do_sync_mapping_range with a local copy of the related functions. 
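The first-match contract matters because callers walk a range starting from the lowest overlapping extent; a cached "last" hit that merely overlaps the query can silently skip an earlier extent. A stand-alone illustration of the failure mode, with invented data rather than the btrfs structures:

/*
 * Illustration only: a last-result cache checked before the real
 * search satisfies the overlap test with a later extent, while the
 * caller expected the first extent overlapping the range.
 */
#include <stdint.h>
#include <stdio.h>

struct ext { uint64_t start, len; };

static int overlaps(const struct ext *e, uint64_t start, uint64_t len)
{
	return start < e->start + e->len && e->start < start + len;
}

int main(void)
{
	/* Two extents, both overlapping the query range [0, 200). */
	struct ext map[] = { { 0, 100 }, { 100, 100 } };
	struct ext *cached = &map[1];        /* previous lookup's hit */

	/* Buggy fast path: the cached entry overlaps, so return it... */
	if (overlaps(cached, 0, 200))
		printf("cache returns extent at %llu\n",
		       (unsigned long long)cached->start);

	/* ...but a full search finds the earlier extent first. */
	for (unsigned i = 0; i < 2; i++) {
		if (overlaps(&map[i], 0, 200)) {
			printf("first match is extent at %llu\n",
			       (unsigned long long)map[i].start);
			break;
		}
	}
	return 0;
}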
Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 + fs/btrfs/extent_io.c | 20 --------- fs/btrfs/extent_io.h | 1 - fs/btrfs/extent_map.c | 9 ---- fs/btrfs/extent_map.h | 1 - fs/btrfs/file.c | 15 +++---- fs/btrfs/inode.c | 54 +++++++++++++---------- fs/btrfs/ordered-data.c | 115 ++++++++++++++++++++++++++++++++++++++++-------- fs/btrfs/ordered-data.h | 4 ++ fs/btrfs/transaction.c | 2 - 10 files changed, 140 insertions(+), 83 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 96ab2797c09..f8fccdac305 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1590,6 +1590,8 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); /* inode.c */ +int btrfs_writepages(struct address_space *mapping, + struct writeback_control *wbc); int btrfs_create_subvol_root(struct btrfs_root *new_root, struct btrfs_trans_handle *trans, u64 new_dirid, struct btrfs_block_group_cache *block_group); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7380449cb5b..9965993748d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -97,7 +97,6 @@ void extent_io_tree_init(struct extent_io_tree *tree, spin_lock_init(&tree->lock); spin_lock_init(&tree->buffer_lock); tree->mapping = mapping; - tree->last = NULL; } EXPORT_SYMBOL(extent_io_tree_init); @@ -173,12 +172,6 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, struct tree_entry *entry; struct tree_entry *prev_entry = NULL; - if (tree->last) { - struct extent_state *state; - state = tree->last; - if (state->start <= offset && offset <= state->end) - return &tree->last->rb_node; - } while(n) { entry = rb_entry(n, struct tree_entry, rb_node); prev = n; @@ -189,7 +182,6 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset, else if (offset > entry->end) n = n->rb_right; else { - tree->last = rb_entry(n, struct extent_state, rb_node); return n; } } @@ -223,10 +215,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree, ret = __etree_search(tree, offset, &prev, NULL); if (!ret) { - if (prev) { - tree->last = rb_entry(prev, struct extent_state, - rb_node); - } return prev; } return ret; @@ -301,8 +289,6 @@ static int merge_state(struct extent_io_tree *tree, other->state == state->state) { state->start = other->start; other->tree = NULL; - if (tree->last == other) - tree->last = state; rb_erase(&other->rb_node, &tree->state); free_extent_state(other); } @@ -314,8 +300,6 @@ static int merge_state(struct extent_io_tree *tree, other->state == state->state) { other->start = state->start; state->tree = NULL; - if (tree->last == state) - tree->last = other; rb_erase(&state->rb_node, &tree->state); free_extent_state(state); } @@ -378,7 +362,6 @@ static int insert_state(struct extent_io_tree *tree, return -EEXIST; } state->tree = tree; - tree->last = state; merge_state(tree, state); return 0; } @@ -444,9 +427,6 @@ static int clear_state_bit(struct extent_io_tree *tree, if (delete || state->state == 0) { if (state->tree) { clear_state_cb(tree, state, state->state); - if (tree->last == state) { - tree->last = extent_state_next(state); - } rb_erase(&state->rb_node, &tree->state); state->tree = NULL; free_extent_state(state); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6c03e6a1993..315cfceae31 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -60,7 +60,6 @@ struct extent_io_tree { spinlock_t lock; spinlock_t buffer_lock; struct 
extent_io_ops *ops; - struct extent_state *last; }; struct extent_state { diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 71b1ac15535..8a502ee2f23 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -42,7 +42,6 @@ void extent_map_exit(void) void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) { tree->map.rb_node = NULL; - tree->last = NULL; spin_lock_init(&tree->lock); } EXPORT_SYMBOL(extent_map_tree_init); @@ -239,7 +238,6 @@ int add_extent_mapping(struct extent_map_tree *tree, merge->in_tree = 0; free_extent_map(merge); } - tree->last = em; out: return ret; } @@ -273,10 +271,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 end = range_end(start, len); BUG_ON(spin_trylock(&tree->lock)); - em = tree->last; - if (em && end > em->start && start < extent_map_end(em)) - goto found; - rb_node = __tree_search(&tree->map, start, &prev, &next); if (!rb_node && prev) { em = rb_entry(prev, struct extent_map, rb_node); @@ -305,7 +299,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, found: atomic_inc(&em->refs); - tree->last = em; out: return em; } @@ -327,8 +320,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) BUG_ON(spin_trylock(&tree->lock)); rb_erase(&em->rb_node, &tree->map); em->in_tree = 0; - if (tree->last == em) - tree->last = NULL; return ret; } EXPORT_SYMBOL(remove_extent_mapping); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index a3978ec2784..26ac6fe0b26 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -26,7 +26,6 @@ struct extent_map { struct extent_map_tree { struct rb_root map; - struct extent_map *last; spinlock_t lock; }; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 591a30208ac..e5ffb66ad32 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -381,14 +381,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) break; } if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) { - start = em->start + em->len; - free_extent_map(em); - spin_unlock(&em_tree->lock); - if (start < end) { - len = end - start + 1; - continue; - } - break; + printk(KERN_CRIT "inode %lu trying to drop pinned " + "extent start %llu end %llu, em [%llu %llu]\n", + inode->i_ino, + (unsigned long long)start, + (unsigned long long)end, + (unsigned long long)em->start, + (unsigned long long)em->len); } remove_extent_mapping(em_tree, em); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 60852ada658..3da12a4d913 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -485,7 +485,7 @@ int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) fixup = kzalloc(sizeof(*fixup), GFP_NOFS); if (!fixup) return -EAGAIN; -printk("queueing worker to fixup page %lu %Lu\n", inode->i_ino, page_offset(page)); + SetPageChecked(page); page_cache_get(page); fixup->work.func = btrfs_writepage_fixup_worker; @@ -502,11 +502,13 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; + struct extent_map *em_orig; u64 alloc_hint = 0; u64 clear_start; u64 clear_end; struct list_head list; struct btrfs_key ins; + struct rb_node *rb; int ret; ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); @@ -535,6 +537,22 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) mutex_lock(&BTRFS_I(inode)->extent_mutex); + spin_lock(&em_tree->lock); + 
clear_start = ordered_extent->file_offset; + clear_end = ordered_extent->file_offset + ordered_extent->len; + em = lookup_extent_mapping(em_tree, clear_start, + ordered_extent->len); + em_orig = em; + while(em && clear_start < extent_map_end(em) && clear_end > em->start) { + clear_bit(EXTENT_FLAG_PINNED, &em->flags); + rb = rb_next(&em->rb_node); + if (!rb) + break; + em = rb_entry(rb, struct extent_map, rb_node); + } + free_extent_map(em_orig); + spin_unlock(&em_tree->lock); + ret = btrfs_drop_extents(trans, root, inode, ordered_extent->file_offset, ordered_extent->file_offset + @@ -548,22 +566,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len, 0); BUG_ON(ret); - spin_lock(&em_tree->lock); - clear_start = ordered_extent->file_offset; - clear_end = ordered_extent->file_offset + ordered_extent->len; - while(clear_start < clear_end) { - em = lookup_extent_mapping(em_tree, clear_start, - clear_end - clear_start); - if (em) { - clear_bit(EXTENT_FLAG_PINNED, &em->flags); - clear_start = em->start + em->len; - free_extent_map(em); - } else { - break; - } - } - spin_unlock(&em_tree->lock); - btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1); @@ -2318,7 +2320,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, u64 extent_end = 0; u64 objectid = inode->i_ino; u32 found_type; - struct btrfs_path *path; + struct btrfs_path *path = NULL; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_file_extent_item *item; struct extent_buffer *leaf; @@ -2328,9 +2330,6 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_trans_handle *trans = NULL; - path = btrfs_alloc_path(); - BUG_ON(!path); - again: spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); @@ -2354,6 +2353,12 @@ again: em->bdev = root->fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->len = (u64)-1; + + if (!path) { + path = btrfs_alloc_path(); + BUG_ON(!path); + } + ret = btrfs_lookup_file_extent(trans, root, path, objectid, start, trans != NULL); if (ret < 0) { @@ -2530,7 +2535,8 @@ insert: } spin_unlock(&em_tree->lock); out: - btrfs_free_path(path); + if (path) + btrfs_free_path(path); if (trans) { ret = btrfs_end_transaction(trans, root); if (!err) { @@ -2643,8 +2649,8 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc) return extent_write_full_page(tree, page, btrfs_get_extent, wbc); } -static int btrfs_writepages(struct address_space *mapping, - struct writeback_control *wbc) +int btrfs_writepages(struct address_space *mapping, + struct writeback_control *wbc) { struct extent_io_tree *tree; tree = &BTRFS_I(mapping->host)->io_tree; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 0d87795fdd8..830dbaea685 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include "ctree.h" #include "transaction.h" #include "btrfs_inode.h" @@ -307,12 +309,7 @@ void btrfs_start_ordered_extent(struct inode *inode, * start IO on any dirty ones so the wait doesn't stall waiting * for pdflush to find them */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) - do_sync_file_range(file, start, end, SYNC_FILE_RANGE_WRITE); -#else - do_sync_mapping_range(inode->i_mapping, start, end, - SYNC_FILE_RANGE_WRITE); -#endif + 
btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); if (wait) wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); @@ -327,28 +324,26 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len) u64 orig_end; u64 wait_end; struct btrfs_ordered_extent *ordered; - u64 mask = BTRFS_I(inode)->root->sectorsize - 1; if (start + len < start) { - wait_end = (inode->i_size + mask) & ~mask; - orig_end = (u64)-1; + orig_end = INT_LIMIT(loff_t); } else { orig_end = start + len - 1; - wait_end = orig_end; + if (orig_end > INT_LIMIT(loff_t)) + orig_end = INT_LIMIT(loff_t); } + wait_end = orig_end; again: /* start IO across the range first to instantiate any delalloc * extents */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) - do_sync_file_range(file, start, wait_end, SYNC_FILE_RANGE_WRITE); -#else - do_sync_mapping_range(inode->i_mapping, start, wait_end, - SYNC_FILE_RANGE_WRITE); -#endif - end = orig_end; - wait_on_extent_writeback(&BTRFS_I(inode)->io_tree, start, orig_end); + btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE); + + btrfs_wait_on_page_writeback_range(inode->i_mapping, + start >> PAGE_CACHE_SHIFT, + orig_end >> PAGE_CACHE_SHIFT); + end = orig_end; while(1) { ordered = btrfs_lookup_first_ordered_extent(inode, end); if (!ordered) { @@ -565,3 +560,87 @@ out: return ret; } + +/** + * taken from mm/filemap.c because it isn't exported + * + * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range + * @mapping: address space structure to write + * @start: offset in bytes where the range starts + * @end: offset in bytes where the range ends (inclusive) + * @sync_mode: enable synchronous operation + * + * Start writeback against all of a mapping's dirty pages that lie + * within the byte offsets inclusive. + * + * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as + * opposed to a regular memory cleansing writeback. The difference between + * these two operations is that if a dirty page/buffer is encountered, it must + * be waited upon, and not just skipped over. 
+ */ +int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, + loff_t end, int sync_mode) +{ + struct writeback_control wbc = { + .sync_mode = sync_mode, + .nr_to_write = mapping->nrpages * 2, + .range_start = start, + .range_end = end, + .for_writepages = 1, + }; + return btrfs_writepages(mapping, &wbc); +} + +/** + * taken from mm/filemap.c because it isn't exported + * + * wait_on_page_writeback_range - wait for writeback to complete + * @mapping: target address_space + * @start: beginning page index + * @end: ending page index + * + * Wait for writeback to complete against pages indexed by start->end + * inclusive + */ +int btrfs_wait_on_page_writeback_range(struct address_space *mapping, + pgoff_t start, pgoff_t end) +{ + struct pagevec pvec; + int nr_pages; + int ret = 0; + pgoff_t index; + + if (end < start) + return 0; + + pagevec_init(&pvec, 0); + index = start; + while ((index <= end) && + (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, + PAGECACHE_TAG_WRITEBACK, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) { + unsigned i; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + /* until radix tree lookup accepts end_index */ + if (page->index > end) + continue; + + wait_on_page_writeback(page); + if (PageError(page)) + ret = -EIO; + } + pagevec_release(&pvec); + cond_resched(); + } + + /* Check for outstanding write errors */ + if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) + ret = -ENOSPC; + if (test_and_clear_bit(AS_EIO, &mapping->flags)) + ret = -EIO; + + return ret; +} diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 1794efd13ca..8e8e3c0404f 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -132,4 +132,8 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered); int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum); +int btrfs_wait_on_page_writeback_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); +int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, + loff_t end, int sync_mode); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 05823904ecb..38c75a0256c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -649,7 +649,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, extent_io_tree_init(pinned_copy, root->fs_info->btree_inode->i_mapping, GFP_NOFS); -printk("commit trans %Lu\n", trans->transid); trans->transaction->in_commit = 1; trans->transaction->blocked = 1; cur_trans = trans->transaction; @@ -745,7 +744,6 @@ printk("commit trans %Lu\n", trans->transid); list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots); mutex_unlock(&root->fs_info->trans_mutex); -printk("done commit trans %Lu\n", trans->transid); kmem_cache_free(btrfs_trans_handle_cachep, trans); if (root->fs_info->closing) { -- cgit v1.2.3-70-g09d2 From 89642229a582a5c2b6d2ed8ec16986387d9a9047 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 24 Jul 2008 09:41:53 -0400 Subject: Btrfs: Search data ordered extents first for checksums on read Checksum items are not inserted into the tree until all of the io from a given extent is complete. This means one dirty page from an extent may be written, freed, and then read again before the entire extent is on disk and the checksum item is inserted. 
The checksums themselves are stored in the ordered extent so they can be inserted in bulk when IO is complete. On read, if a checksum item isn't found, the ordered extents were being searched for a checksum record. This all worked most of the time, but the checksum insertion code tries to reduce the number of tree operations by pre-inserting checksum items based on i_size and a few other factors. This means the read code might find a checksum item that hasn't yet really been filled in. This commit changes things to check the ordered extents first and only dive into the btree if nothing was found. This removes the need for extra locking and is more reliable. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 9 ++++++++- fs/btrfs/extent_io.c | 8 ++++---- fs/btrfs/inode.c | 33 ++++++++++++++++++--------------- fs/btrfs/ordered-data.c | 1 + 4 files changed, 31 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 86e84a8579e..7ce3f83c5dd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1011,9 +1011,16 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); spin_unlock(&em_tree->lock); - if (!em) + if (!em) { + __unplug_io_fn(bdi, page); return; + } + if (em->block_start >= EXTENT_MAP_LAST_BYTE) { + free_extent_map(em); + __unplug_io_fn(bdi, page); + return; + } offset = offset - em->start; btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree, em->block_start + offset, page); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9965993748d..e3547a992d5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1949,18 +1949,18 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); cur + iosize - 1); } if (!ret) { - unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1; - nr -= page->index; + unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; + pnr -= page->index; ret = submit_extent_page(READ, tree, page, sector, iosize, page_offset, - bdev, bio, nr, + bdev, bio, pnr, end_bio_extent_readpage, mirror_num); + nr++; } if (ret) SetPageError(page); cur = cur + iosize; page_offset += iosize; - nr++; } if (!nr) { if (!PageError(page)) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 28e667052ec..0e90315ea80 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -611,22 +611,25 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) btrfs_test_flag(inode, NODATASUM)) return 0; + /* + * It is possible there is an ordered extent that has + * not yet finished for this range in the file. If so, + * that extent will have a csum cached, and it will insert + * the sum after all the blocks in the extent are fully + * on disk. So, look for an ordered extent and use the + * sum if found. We have to do this before looking in the + * btree because csum items are pre-inserted based on + * the file size. btrfs_lookup_csum might find an item + * that still hasn't been fully filled. + */ + ret = btrfs_find_ordered_sum(inode, start, &csum); + if (ret == 0) + goto found; + + ret = 0; path = btrfs_alloc_path(); - mutex_lock(&BTRFS_I(inode)->csum_mutex); item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0); if (IS_ERR(item)) { - /* - * It is possible there is an ordered extent that has - * not yet finished for this range in the file. 
If so, - * that extent will have a csum cached, and it will insert - * the sum after all the blocks in the extent are fully - * on disk. So, look for an ordered extent and use the - * sum if found. - */ - ret = btrfs_find_ordered_sum(inode, start, &csum); - if (ret == 0) - goto found; - ret = PTR_ERR(item); /* a csum that isn't present is a preallocated region. */ if (ret == -ENOENT || ret == -EFBIG) @@ -641,7 +644,6 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end) found: set_state_private(io_tree, start, csum); out: - mutex_unlock(&BTRFS_I(inode)->csum_mutex); if (path) btrfs_free_path(path); return ret; @@ -1375,7 +1377,7 @@ again: } if (!PageUptodate(page)) { ret = -EIO; - goto out; + goto out_unlock; } } wait_on_page_writeback(page); @@ -1406,6 +1408,7 @@ again: set_page_dirty(page); unlock_extent(io_tree, page_start, page_end, GFP_NOFS); +out_unlock: unlock_page(page); page_cache_release(page); out: diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index b695f5b29c4..e42fd233e04 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -557,6 +557,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) } out: mutex_unlock(&tree->mutex); + btrfs_put_ordered_extent(ordered); return ret; } -- cgit v1.2.3-70-g09d2 From 33958dc6d38fb4ca7e62273855fcb2db7e616263 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 30 Jul 2008 10:29:12 -0400 Subject: Btrfs: Fix verify_parent_transid It was incorrectly clearing the up to date flag on the buffer even when the buffer properly verified. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/extent_io.c | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 27ffa9b7ddc..ec1ba8ddb35 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -235,8 +235,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, (unsigned long long)parent_transid, (unsigned long long)btrfs_header_generation(eb)); ret = 1; -out: clear_extent_buffer_uptodate(io_tree, eb); +out: unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); return ret; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e3547a992d5..964ec1622d6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2647,6 +2647,8 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, return eb->first_page; i += eb->start >> PAGE_CACHE_SHIFT; mapping = eb->first_page->mapping; + if (!mapping) + return NULL; read_lock_irq(&mapping->tree_lock); p = radix_tree_lookup(&mapping->page_tree, i); read_unlock_irq(&mapping->tree_lock); @@ -2908,7 +2910,8 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree, GFP_NOFS); for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); - ClearPageUptodate(page); + if (page) + ClearPageUptodate(page); } return 0; } -- cgit v1.2.3-70-g09d2 From bcc63abbf3e9bf948a1b0129b3e6120ec7d7f698 Mon Sep 17 00:00:00 2001 From: Yan Date: Wed, 30 Jul 2008 16:29:20 -0400 Subject: Btrfs: implement memory reclaim for leaf reference cache The memory reclaiming issue happens when snapshot exists. In that case, some cache entries may not be used during old snapshot dropping, so they will remain in the cache until umount. The patch adds a field to struct btrfs_leaf_ref to record create time. Besides, the patch makes all dead roots of a given snapshot linked together in order of create time. 
After an old snapshot has been completely dropped, we check the dead root list and remove all cache entries created before the oldest dead root in the list. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 1 - fs/btrfs/ctree.h | 3 ++- fs/btrfs/dir-item.c | 1 - fs/btrfs/disk-io.c | 5 +++-- fs/btrfs/extent-tree.c | 18 +++++++++--------- fs/btrfs/extent_io.c | 1 - fs/btrfs/file-item.c | 1 - fs/btrfs/file.c | 1 - fs/btrfs/inode.c | 22 +++++++++++----------- fs/btrfs/locking.c | 1 - fs/btrfs/print-tree.c | 1 - fs/btrfs/ref-cache.c | 48 +++++++++++++++++++++++++----------------------- fs/btrfs/ref-cache.h | 11 ++++++----- fs/btrfs/transaction.c | 40 ++++++++++++++++++++++++++++++---------- fs/btrfs/volumes.c | 1 - 15 files changed, 86 insertions(+), 69 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 245eb00435d..c4792062dd5 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3275,4 +3275,3 @@ int btrfs_previous_item(struct btrfs_root *root, } return 1; } - diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 83422088c62..be16cd49ef6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -666,7 +666,8 @@ struct btrfs_root { /* the dirty list is only used by non-reference counted roots */ struct list_head dirty_list; - spinlock_t orphan_lock; + spinlock_t list_lock; + struct list_head dead_list; struct list_head orphan_list; }; diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index eb4dd3d75cf..125094617fe 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -340,4 +340,3 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, } return 0; } - diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index ec1ba8ddb35..e826730d750 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -735,8 +735,9 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->orphan_list); + INIT_LIST_HEAD(&root->dead_list); spin_lock_init(&root->node_lock); - spin_lock_init(&root->orphan_lock); + spin_lock_init(&root->list_lock); mutex_init(&root->objectid_mutex); btrfs_leaf_ref_tree_init(&root->ref_tree_struct); @@ -1717,7 +1718,7 @@ int close_ctree(struct btrfs_root *root) printk("btrfs: at umount reference cache size %Lu\n", fs_info->total_ref_cache_size); } - + if (fs_info->extent_root->node) free_extent_buffer(fs_info->extent_root->node); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index fe1ddbd2bfd..37ca8df30c3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -867,8 +867,8 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, /* * For (parent_gen > 0 && parent_gen > ref_gen): * - * we reach here through the oldest root, therefore - * all other reference from same snapshot should have + * we reach here through the oldest root, therefore + * all other reference from same snapshot should have * a larger generation.
*/ if ((root_objectid != btrfs_ref_root(leaf, ref_item)) || @@ -954,7 +954,7 @@ int btrfs_cross_ref_exists(struct btrfs_root *root, if (!eb) continue; extent_start = eb->start; - } else + } else extent_start = bytenr; ret = get_reference_status(root, extent_start, ref_generation, @@ -1048,7 +1048,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_leaf_ref *ref; struct btrfs_extent_info *info; - ref = btrfs_alloc_leaf_ref(nr_file_extents); + ref = btrfs_alloc_leaf_ref(root, nr_file_extents); if (!ref) { WARN_ON(1); goto out; @@ -1059,7 +1059,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, ref->generation = btrfs_header_generation(buf); ref->nritems = nr_file_extents; info = ref->extents; - + for (i = 0; nr_file_extents > 0 && i < nritems; i++) { u64 disk_bytenr; btrfs_item_key_to_cpu(buf, &key, i); @@ -1085,7 +1085,7 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, BUG_ON(!root->ref_tree); ret = btrfs_add_leaf_ref(root, ref); WARN_ON(ret); - btrfs_free_leaf_ref(ref); + btrfs_free_leaf_ref(root, ref); } out: return 0; @@ -2316,7 +2316,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, } static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, struct extent_buffer *leaf) { u64 leaf_owner; @@ -2367,7 +2367,7 @@ static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans, } static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, struct btrfs_leaf_ref *ref) { int i; @@ -2521,7 +2521,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, ret = drop_leaf_ref(trans, root, ref); BUG_ON(ret); btrfs_remove_leaf_ref(root, ref); - btrfs_free_leaf_ref(ref); + btrfs_free_leaf_ref(root, ref); *level = 0; break; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 964ec1622d6..5368e3b6eb9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3497,4 +3497,3 @@ out: return ret; } EXPORT_SYMBOL(try_release_extent_buffer); - diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index afe42d00b5a..2311061f070 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -422,4 +422,3 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans, BUG_ON(ret); return ret; } - diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ded5281f846..412ab4a2638 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1095,4 +1095,3 @@ struct file_operations btrfs_file_operations = { .compat_ioctl = btrfs_ioctl, #endif }; - diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3aa82cec6bf..7af8be076ee 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -835,17 +835,17 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - spin_lock(&root->orphan_lock); + spin_lock(&root->list_lock); /* already on the orphan list, we're good */ if (!list_empty(&BTRFS_I(inode)->i_orphan)) { - spin_unlock(&root->orphan_lock); + spin_unlock(&root->list_lock); return 0; } list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); - spin_unlock(&root->orphan_lock); + spin_unlock(&root->list_lock); /* * insert an orphan item to track this unlinked/truncated file @@ -864,20 +864,20 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - 
spin_lock(&root->orphan_lock); + spin_lock(&root->list_lock); if (list_empty(&BTRFS_I(inode)->i_orphan)) { - spin_unlock(&root->orphan_lock); + spin_unlock(&root->list_lock); return 0; } list_del_init(&BTRFS_I(inode)->i_orphan); if (!trans) { - spin_unlock(&root->orphan_lock); + spin_unlock(&root->list_lock); return 0; } - spin_unlock(&root->orphan_lock); + spin_unlock(&root->list_lock); ret = btrfs_del_orphan_item(trans, root, inode->i_ino); @@ -973,9 +973,9 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * add this inode to the orphan list so btrfs_orphan_del does * the proper thing when we hit it */ - spin_lock(&root->orphan_lock); + spin_lock(&root->list_lock); list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); - spin_unlock(&root->orphan_lock); + spin_unlock(&root->list_lock); /* * if this is a bad inode, means we actually succeeded in @@ -3269,13 +3269,13 @@ void btrfs_destroy_inode(struct inode *inode) BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED) posix_acl_release(BTRFS_I(inode)->i_default_acl); - spin_lock(&BTRFS_I(inode)->root->orphan_lock); + spin_lock(&BTRFS_I(inode)->root->list_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan" " list\n", inode->i_ino); dump_stack(); } - spin_unlock(&BTRFS_I(inode)->root->orphan_lock); + spin_unlock(&BTRFS_I(inode)->root->list_lock); while(1) { ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index d617c29787f..d43e14c7471 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -56,4 +56,3 @@ int btrfs_tree_locked(struct extent_buffer *eb) { return mutex_is_locked(&eb->mutex); } - diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 14d86372030..f1374d597a1 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -198,4 +198,3 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) free_extent_buffer(next); } } - diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index ec9587784a3..272b9890c98 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -21,12 +21,18 @@ #include "ref-cache.h" #include "transaction.h" -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents) +struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, + int nr_extents) { struct btrfs_leaf_ref *ref; + size_t size = btrfs_leaf_ref_size(nr_extents); - ref = kmalloc(btrfs_leaf_ref_size(nr_extents), GFP_NOFS); + ref = kmalloc(size, GFP_NOFS); if (ref) { + spin_lock(&root->fs_info->ref_cache_lock); + root->fs_info->total_ref_cache_size += size; + spin_unlock(&root->fs_info->ref_cache_lock); + memset(ref, 0, sizeof(*ref)); atomic_set(&ref->usage, 1); INIT_LIST_HEAD(&ref->list); @@ -34,14 +40,20 @@ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents) return ref; } -void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref) +void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { if (!ref) return; WARN_ON(atomic_read(&ref->usage) == 0); if (atomic_dec_and_test(&ref->usage)) { + size_t size = btrfs_leaf_ref_size(ref->nritems); + BUG_ON(ref->in_tree); kfree(ref); + + spin_lock(&root->fs_info->ref_cache_lock); + root->fs_info->total_ref_cache_size -= size; + spin_unlock(&root->fs_info->ref_cache_lock); } } @@ -64,7 +76,7 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, else return parent; } - + entry = rb_entry(node, struct btrfs_leaf_ref, rb_node); entry->in_tree = 1; rb_link_node(node, parent, p); @@ -91,9 +103,8 
@@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) return NULL; } -int btrfs_remove_leaf_refs(struct btrfs_root *root) +int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen) { - struct rb_node *rb; struct btrfs_leaf_ref *ref = NULL; struct btrfs_leaf_ref_tree *tree = root->ref_tree; @@ -101,17 +112,18 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root) return 0; spin_lock(&tree->lock); - while(!btrfs_leaf_ref_tree_empty(tree)) { - rb = rb_first(&tree->root); - ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node); + while(!list_empty(&tree->list)) { + ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); + BUG_ON(!ref->in_tree); + if (ref->root_gen > max_root_gen) + break; + rb_erase(&ref->rb_node, &tree->root); ref->in_tree = 0; list_del_init(&ref->list); spin_unlock(&tree->lock); - - btrfs_free_leaf_ref(ref); - + btrfs_free_leaf_ref(root, ref); cond_resched(); spin_lock(&tree->lock); } @@ -143,7 +155,6 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { int ret = 0; struct rb_node *rb; - size_t size = btrfs_leaf_ref_size(ref->nritems); struct btrfs_leaf_ref_tree *tree = root->ref_tree; spin_lock(&tree->lock); @@ -151,9 +162,6 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) if (rb) { ret = -EEXIST; } else { - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size += size; - spin_unlock(&root->fs_info->ref_cache_lock); atomic_inc(&ref->usage); list_add_tail(&ref->list, &tree->list); } @@ -163,15 +171,10 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { - size_t size = btrfs_leaf_ref_size(ref->nritems); struct btrfs_leaf_ref_tree *tree = root->ref_tree; BUG_ON(!ref->in_tree); spin_lock(&tree->lock); - - spin_lock(&root->fs_info->ref_cache_lock); - root->fs_info->total_ref_cache_size -= size; - spin_unlock(&root->fs_info->ref_cache_lock); rb_erase(&ref->rb_node, &tree->root); ref->in_tree = 0; @@ -179,7 +182,6 @@ int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) spin_unlock(&tree->lock); - btrfs_free_leaf_ref(ref); + btrfs_free_leaf_ref(root, ref); return 0; } - diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index 823c049f72f..c361b321c0c 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h @@ -30,6 +30,7 @@ struct btrfs_leaf_ref { int in_tree; atomic_t usage; + u64 root_gen; u64 bytenr; u64 owner; u64 generation; @@ -41,14 +42,13 @@ struct btrfs_leaf_ref { static inline size_t btrfs_leaf_ref_size(int nr_extents) { - return sizeof(struct btrfs_leaf_ref) + + return sizeof(struct btrfs_leaf_ref) + sizeof(struct btrfs_extent_info) * nr_extents; } static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree) { tree->root.rb_node = NULL; - tree->last = NULL; INIT_LIST_HEAD(&tree->list); spin_lock_init(&tree->lock); } @@ -59,12 +59,13 @@ static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree) } void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree); -struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(int nr_extents); -void btrfs_free_leaf_ref(struct btrfs_leaf_ref *ref); +struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, + int nr_extents); +void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, u64 bytenr); int btrfs_add_leaf_ref(struct btrfs_root *root, struct 
btrfs_leaf_ref *ref); -int btrfs_remove_leaf_refs(struct btrfs_root *root); +int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen); int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 216f3157162..52c5524896a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -98,20 +98,24 @@ static noinline int record_root_in_trans(struct btrfs_root *root) BUG_ON(!dirty); dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS); BUG_ON(!dirty->root); - dirty->latest_root = root; INIT_LIST_HEAD(&dirty->list); root->commit_root = btrfs_root_node(root); - root->dirty_root = dirty; memcpy(dirty->root, root, sizeof(*root)); - dirty->root->ref_tree = &root->ref_tree_struct; - spin_lock_init(&dirty->root->node_lock); + spin_lock_init(&dirty->root->list_lock); mutex_init(&dirty->root->objectid_mutex); + INIT_LIST_HEAD(&dirty->root->dead_list); dirty->root->node = root->commit_root; dirty->root->commit_root = NULL; + + spin_lock(&root->list_lock); + list_add(&dirty->root->dead_list, &root->dead_list); + spin_unlock(&root->list_lock); + + root->dirty_root = dirty; } else { WARN_ON(1); } @@ -356,8 +360,6 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, list_del_init(next); root = list_entry(next, struct btrfs_root, dirty_list); update_cowonly_root(trans, root); - if (root->fs_info->closing) - btrfs_remove_leaf_refs(root); } return 0; } @@ -410,7 +412,11 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, free_extent_buffer(root->commit_root); root->commit_root = NULL; - + + spin_lock(&root->list_lock); + list_del_init(&dirty->root->dead_list); + spin_unlock(&root->list_lock); + kfree(dirty->root); kfree(dirty); @@ -497,6 +503,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, unsigned long nr; u64 num_bytes; u64 bytes_used; + u64 max_useless; int ret = 0; int err; @@ -554,10 +561,25 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, } mutex_unlock(&root->fs_info->drop_mutex); + spin_lock(&root->list_lock); + list_del_init(&dirty->root->dead_list); + if (!list_empty(&root->dead_list)) { + struct btrfs_root *oldest; + oldest = list_entry(root->dead_list.prev, + struct btrfs_root, dead_list); + max_useless = oldest->root_key.offset - 1; + } else { + max_useless = root->root_key.offset - 1; + } + spin_unlock(&root->list_lock); + nr = trans->blocks_used; ret = btrfs_end_transaction(trans, tree_root); BUG_ON(ret); + ret = btrfs_remove_leaf_refs(root, max_useless); + BUG_ON(ret); + free_extent_buffer(dirty->root->node); kfree(dirty->root); kfree(dirty); @@ -785,10 +807,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, put_transaction(cur_trans); put_transaction(cur_trans); + list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots); if (root->fs_info->closing) list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots); - else - list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots); mutex_unlock(&root->fs_info->trans_mutex); kmem_cache_free(btrfs_trans_handle_cachep, trans); @@ -814,4 +835,3 @@ again: } return 0; } - diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5e6ee7a6f73..18db4cbe279 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2527,4 +2527,3 @@ again: error: return ret; } - -- cgit v1.2.3-70-g09d2 From 0ee0fda06b943d7ef65f3cec50ab9d427a2baf97 Mon Sep 17 00:00:00 2001 From: Sven Wegener Date: Wed, 30 Jul 2008 16:54:26 -0400 Subject: Btrfs: Add 
compatibility for kernels >= 2.6.27-rc1 Add a couple of #if's to follow API changes. Signed-off-by: Sven Wegener Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 23 +++++++++++++++++++++++ fs/btrfs/file.c | 4 ++++ fs/btrfs/inode.c | 12 ++++++++++-- 3 files changed, 37 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5368e3b6eb9..1cf4baba434 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2649,9 +2649,24 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, mapping = eb->first_page->mapping; if (!mapping) return NULL; + + /* + * extent_buffer_page is only called after pinning the page + * by increasing the reference count. So we know the page must + * be in the radix tree. + */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + rcu_read_lock(); +#else read_lock_irq(&mapping->tree_lock); +#endif p = radix_tree_lookup(&mapping->page_tree, i); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + rcu_read_unlock(); +#else read_unlock_irq(&mapping->tree_lock); +#endif return p; } @@ -2843,13 +2858,21 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, } } clear_page_dirty_for_io(page); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + spin_lock_irq(&page->mapping->tree_lock); +#else read_lock_irq(&page->mapping->tree_lock); +#endif if (!PageDirty(page)) { radix_tree_tag_clear(&page->mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + spin_unlock_irq(&page->mapping->tree_lock); +#else read_unlock_irq(&page->mapping->tree_lock); +#endif unlock_page(page); } return 0; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 412ab4a2638..388ac397c2c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -886,7 +886,11 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, #ifdef REMOVE_SUID_PATH err = remove_suid(&file->f_path); #else +# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + err = file_remove_suid(file); +# else err = remove_suid(fdentry(file)); +# endif #endif if (err) goto out_nolock; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7af8be076ee..c4afa9d78da 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3293,7 +3293,9 @@ void btrfs_destroy_inode(struct inode *inode) kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) +static void init_once(void *foo) +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static void init_once(struct kmem_cache * cachep, void *foo) #else static void init_once(void * foo, struct kmem_cache * cachep, @@ -3321,7 +3323,9 @@ void btrfs_destroy_cachep(void) struct kmem_cache *btrfs_cache_create(const char *name, size_t size, unsigned long extra_flags, -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + void (*ctor)(void *) +#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) void (*ctor)(struct kmem_cache *, void *) #else void (*ctor)(void *, struct kmem_cache *, @@ -3561,8 +3565,12 @@ static int btrfs_set_page_dirty(struct page *page) return __set_page_dirty_nobuffers(page); } +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) +static int btrfs_permission(struct inode *inode, int mask) +#else static int btrfs_permission(struct inode *inode, int mask, struct nameidata *nd) +#endif { if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE)) return -EACCES; -- cgit v1.2.3-70-g09d2 From 
2db04966ae9a3eeb57f28df9aac4e77d5b287cb1 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 7 Aug 2008 11:19:43 -0400 Subject: Btrfs: Change TestSetPageLocked() to trylock_page() Add backwards compatibility in compat.h Signed-off-by: David Woodhouse --- compat.h | 3 +++ extent_io.c | 3 ++- 2 files changed, 5 insertions(+), 1 deletions(-) Signed-off-by: Chris Mason --- fs/btrfs/compat.h | 3 +++ fs/btrfs/extent_io.c | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h index d39a768e65f..b3349a66999 100644 --- a/fs/btrfs/compat.h +++ b/fs/btrfs/compat.h @@ -1,6 +1,9 @@ #ifndef _COMPAT_H_ #define _COMPAT_H_ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26) +#define trylock_page(page) (!TestSetPageLocked(page)) +#endif /* * Even if AppArmor isn't enabled, it still has different prototypes. diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1cf4baba434..f46f88620c7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -14,6 +14,7 @@ #include #include "extent_io.h" #include "extent_map.h" +#include "compat.h" /* temporary define until extent_map moves out of btrfs */ struct kmem_cache *btrfs_cache_create(const char *name, size_t size, @@ -3055,7 +3056,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); if (!wait) { - if (TestSetPageLocked(page)) + if (!trylock_page(page)) goto unlock_exit; } else { lock_page(page); -- cgit v1.2.3-70-g09d2 From 902b22f341efa00be802418a0a8c57bddcd269a6 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 20 Aug 2008 08:51:49 -0400 Subject: Btrfs: Remove broken optimisations in end_bio functions. These optimisations ended up freeing objects that were still in use. Under guidance from Chris, just rip out the 'clever' bits and do things the simple way.
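The "clever" bits were a lifetime bug: the submission path stashed a single extent_state in bio->bi_private, and the completion handlers walked the state tree from that cached object, which another completion could already have freed once bios were merged. The simple way derives the per-page context inside the completion loop and caches nothing across segments. A compilable userspace sketch of that shape, using illustrative stand-in types rather than the real kernel structures:

#include <stdio.h>

/* illustrative stand-ins for the kernel structures */
struct io_tree { const char *name; };
struct vpage   { struct io_tree *tree; unsigned long index; };
struct biovec  { struct vpage *page; unsigned int len; };
struct biolike {
	struct biovec vecs[4];
	int cnt;
	void *bi_private; /* left NULL: no cached state to go stale */
};

/* the simple way: recompute the context from the page, per segment */
static void end_io(struct biolike *bio)
{
	for (int i = 0; i < bio->cnt; i++) {
		struct vpage *page = bio->vecs[i].page;
		struct io_tree *tree = page->tree; /* looked up per page */

		/* clear writeback / unlock for exactly this page's range */
		printf("completed page %lu on %s\n", page->index, tree->name);
	}
}

int main(void)
{
	struct io_tree t = { "io_tree" };
	struct vpage p0 = { &t, 0 }, p1 = { &t, 1 };
	struct biolike bio = { { { &p0, 4096 }, { &p1, 4096 } }, 2, NULL };

	end_io(&bio);
	return 0;
}

In the actual patch below, the per-page lookup is tree = &BTRFS_I(page->mapping->host)->io_tree, and bi_private is set to NULL at submit time so nothing dangling survives the bio.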
Signed-off-by: David Woodhouse Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 159 +++++++-------------------------------------------- 1 file changed, 21 insertions(+), 138 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f46f88620c7..83ba0c32872 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -15,6 +15,8 @@ #include "extent_io.h" #include "extent_map.h" #include "compat.h" +#include "ctree.h" +#include "btrfs_inode.h" /* temporary define until extent_map moves out of btrfs */ struct kmem_cache *btrfs_cache_create(const char *name, size_t size, @@ -1394,15 +1396,11 @@ static int end_bio_extent_writepage(struct bio *bio, { int uptodate = err == 0; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_state *state = bio->bi_private; - struct extent_io_tree *tree = state->tree; - struct rb_node *node; + struct extent_io_tree *tree; u64 start; u64 end; - u64 cur; int whole_page; int ret; - unsigned long flags; #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) if (bio->bi_size) @@ -1410,6 +1408,8 @@ static int end_bio_extent_writepage(struct bio *bio, #endif do { struct page *page = bvec->bv_page; + tree = &BTRFS_I(page->mapping->host)->io_tree; + start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; end = start + bvec->bv_len - 1; @@ -1423,7 +1423,7 @@ static int end_bio_extent_writepage(struct bio *bio, prefetchw(&bvec->bv_page->flags); if (tree->ops && tree->ops->writepage_end_io_hook) { ret = tree->ops->writepage_end_io_hook(page, start, - end, state, uptodate); + end, NULL, uptodate); if (ret) uptodate = 0; } @@ -1431,9 +1431,8 @@ static int end_bio_extent_writepage(struct bio *bio, if (!uptodate && tree->ops && tree->ops->writepage_io_failed_hook) { ret = tree->ops->writepage_io_failed_hook(bio, page, - start, end, state); + start, end, NULL); if (ret == 0) { - state = NULL; uptodate = (err == 0); continue; } @@ -1445,68 +1444,7 @@ static int end_bio_extent_writepage(struct bio *bio, SetPageError(page); } - /* - * bios can get merged in funny ways, and so we need to - * be careful with the state variable. We know the - * state won't be merged with others because it has - * WRITEBACK set, but we can't be sure each biovec is - * sequential in the file. So, if our cached state - * doesn't match the expected end, search the tree - * for the correct one. 
- */ - - spin_lock_irqsave(&tree->lock, flags); - if (!state || state->end != end) { - state = NULL; - node = __etree_search(tree, start, NULL, NULL); - if (node) { - state = rb_entry(node, struct extent_state, - rb_node); - if (state->end != end || - !(state->state & EXTENT_WRITEBACK)) - state = NULL; - } - if (!state) { - spin_unlock_irqrestore(&tree->lock, flags); - clear_extent_writeback(tree, start, - end, GFP_ATOMIC); - goto next_io; - } - } - cur = end; - while(1) { - struct extent_state *clear = state; - cur = state->start; - node = rb_prev(&state->rb_node); - if (node) { - state = rb_entry(node, - struct extent_state, - rb_node); - } else { - state = NULL; - } - - clear_state_bit(tree, clear, EXTENT_WRITEBACK, - 1, 0); - if (cur == start) - break; - if (cur < start) { - WARN_ON(1); - break; - } - if (!node) - break; - } - /* before releasing the lock, make sure the next state - * variable has the expected bits set and corresponds - * to the correct offsets in the file - */ - if (state && (state->end + 1 != start || - !(state->state & EXTENT_WRITEBACK))) { - state = NULL; - } - spin_unlock_irqrestore(&tree->lock, flags); -next_io: + clear_extent_writeback(tree, start, end, GFP_ATOMIC); if (whole_page) end_page_writeback(page); @@ -1539,13 +1477,9 @@ static int end_bio_extent_readpage(struct bio *bio, { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_state *state = bio->bi_private; - struct extent_io_tree *tree = state->tree; - struct rb_node *node; + struct extent_io_tree *tree; u64 start; u64 end; - u64 cur; - unsigned long flags; int whole_page; int ret; @@ -1556,6 +1490,8 @@ static int end_bio_extent_readpage(struct bio *bio, do { struct page *page = bvec->bv_page; + tree = &BTRFS_I(page->mapping->host)->io_tree; + start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; end = start + bvec->bv_len - 1; @@ -1570,80 +1506,26 @@ static int end_bio_extent_readpage(struct bio *bio, if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, - state); + NULL); if (ret) uptodate = 0; } if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { ret = tree->ops->readpage_io_failed_hook(bio, page, - start, end, state); + start, end, NULL); if (ret == 0) { - state = NULL; uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); continue; } } - spin_lock_irqsave(&tree->lock, flags); - if (!state || state->end != end) { - state = NULL; - node = __etree_search(tree, start, NULL, NULL); - if (node) { - state = rb_entry(node, struct extent_state, - rb_node); - if (state->end != end || - !(state->state & EXTENT_LOCKED)) - state = NULL; - } - if (!state) { - spin_unlock_irqrestore(&tree->lock, flags); - if (uptodate) - set_extent_uptodate(tree, start, end, - GFP_ATOMIC); - unlock_extent(tree, start, end, GFP_ATOMIC); - goto next_io; - } - } + if (uptodate) + set_extent_uptodate(tree, start, end, + GFP_ATOMIC); + unlock_extent(tree, start, end, GFP_ATOMIC); - cur = end; - while(1) { - struct extent_state *clear = state; - cur = state->start; - node = rb_prev(&state->rb_node); - if (node) { - state = rb_entry(node, - struct extent_state, - rb_node); - } else { - state = NULL; - } - if (uptodate) { - set_state_cb(tree, clear, EXTENT_UPTODATE); - clear->state |= EXTENT_UPTODATE; - } - clear_state_bit(tree, clear, EXTENT_LOCKED, - 1, 0); - if (cur == start) - break; - if (cur < start) { - WARN_ON(1); - break; - } - if (!node) - break; - } - /* before 
releasing the lock, make sure the next state - * variable has the expected bits set and corresponds - * to the correct offsets in the file - */ - if (state && (state->end + 1 != start || - !(state->state & EXTENT_LOCKED))) { - state = NULL; - } - spin_unlock_irqrestore(&tree->lock, flags); -next_io: if (whole_page) { if (uptodate) { SetPageUptodate(page); @@ -1683,8 +1565,7 @@ static int end_bio_extent_preparewrite(struct bio *bio, { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - struct extent_state *state = bio->bi_private; - struct extent_io_tree *tree = state->tree; + struct extent_io_tree *tree; u64 start; u64 end; @@ -1695,6 +1576,8 @@ static int end_bio_extent_preparewrite(struct bio *bio, do { struct page *page = bvec->bv_page; + tree = &BTRFS_I(page->mapping->host)->io_tree; + start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; end = start + bvec->bv_len - 1; @@ -1765,7 +1648,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num) BUG_ON(state->end != end); spin_unlock_irq(&tree->lock); - bio->bi_private = state; + bio->bi_private = NULL; bio_get(bio); -- cgit v1.2.3-70-g09d2 From a1b32a5932cfac7c38b442582285f3da2a09dfd8 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 5 Sep 2008 16:09:51 -0400 Subject: Btrfs: Add debugging checks to track down corrupted metadata Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 7 ++++++- fs/btrfs/extent_io.c | 21 ++++++++++++++------- fs/btrfs/file.c | 15 ++++++++------- fs/btrfs/volumes.c | 40 +++++++++++++++++++++------------------- 4 files changed, 49 insertions(+), 34 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6a218f792e5..8e7a938bfbc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) return ret; - +printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num); num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) @@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, found_start = btrfs_header_bytenr(eb); if (found_start != start) { + printk("bad tree block start %llu %llu\n", + (unsigned long long)found_start, + (unsigned long long)eb->start); ret = -EIO; goto err; } @@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, if (ret == 0) { buf->flags |= EXTENT_UPTODATE; + } else { + WARN_ON(1); } return buf; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 83ba0c32872..7ca89c45d40 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); } /* the get_extent function already copied into the page */ if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) { + check_page_uptodate(tree, page); unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); cur = cur + iosize; page_offset += iosize; @@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree, * properly set. releasepage may drop page->private * on us if the page isn't already dirty. 
*/ + lock_page(page); if (i == 0) { - lock_page(page); set_page_extent_head(page, eb->len); } else if (PagePrivate(page) && page->private != EXTENT_PAGE_PRIVATE) { - lock_page(page); set_page_extent_mapped(page); - unlock_page(page); } __set_page_dirty_nobuffers(extent_buffer_page(eb, i)); - if (i == 0) - unlock_page(page); + set_extent_dirty(tree, page_offset(page), + page_offset(page) + PAGE_CACHE_SIZE -1, + GFP_NOFS); + unlock_page(page); } - return set_extent_dirty(tree, eb->start, - eb->start + eb->len - 1, GFP_NOFS); + return 0; } EXPORT_SYMBOL(set_extent_buffer_dirty); @@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (all_uptodate) { if (start_i == 0) eb->flags |= EXTENT_UPTODATE; + if (ret) { + printk("all up to date but ret is %d\n", ret); + } goto unlock_exit; } @@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, mirror_num); if (err) { ret = err; + printk("err %d from __extent_read_full_page\n", ret); } } else { unlock_page(page); @@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, submit_one_bio(READ, bio, mirror_num); if (ret || !wait) { + if (ret) + printk("ret %d wait %d returning\n", ret, wait); return ret; } for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); wait_on_page_locked(page); if (!PageUptodate(page)) { + printk("page not uptodate after wait_on_page_locked\n"); ret = -EIO; } } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index eb8e4556fa7..e9e86fbaa24 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -39,9 +39,10 @@ #include "compat.h" -static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, - struct page **prepared_pages, - const char __user * buf) +static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, + int write_bytes, + struct page **prepared_pages, + const char __user * buf) { long page_fault = 0; int i; @@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, return page_fault ? -EFAULT : 0; } -static void btrfs_drop_pages(struct page **pages, size_t num_pages) +static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { @@ -359,7 +360,7 @@ out_unlock: return err; } -int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) +int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) { struct extent_map *em; struct extent_map *split = NULL; @@ -515,7 +516,7 @@ out: * it is either truncated or split. Anything entirely inside the range * is deleted from the tree. 
*/ -int btrfs_drop_extents(struct btrfs_trans_handle *trans, +int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, u64 inline_limit, u64 *hint_byte) { @@ -785,7 +786,7 @@ out: /* * this gets pages into the page cache and locks them down */ -static int prepare_pages(struct btrfs_root *root, struct file *file, +static int noinline prepare_pages(struct btrfs_root *root, struct file *file, struct page **pages, size_t num_pages, loff_t pos, unsigned long first_index, unsigned long last_index, size_t write_bytes) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5b1b60839d2..37a8ea23e81 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void) return 0; } -static struct btrfs_device *__find_device(struct list_head *head, u64 devid, - u8 *uuid) +static noinline struct btrfs_device *__find_device(struct list_head *head, + u64 devid, u8 *uuid) { struct btrfs_device *dev; struct list_head *cur; @@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid, return NULL; } -static struct btrfs_fs_devices *find_fsid(u8 *fsid) +static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid) { struct list_head *cur; struct btrfs_fs_devices *fs_devices; @@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid) * the list if the block device is congested. This way, multiple devices * can make progress from a single worker thread. */ -int run_scheduled_bios(struct btrfs_device *device) +static int noinline run_scheduled_bios(struct btrfs_device *device) { struct bio *pending; struct backing_dev_info *bdi; @@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work) run_scheduled_bios(device); } -static int device_list_add(const char *path, +static noinline int device_list_add(const char *path, struct btrfs_super_block *disk_super, u64 devid, struct btrfs_fs_devices **fs_devices_ret) { @@ -480,10 +480,10 @@ error: * called very infrequently and that a given device has a small number * of extents */ -static int find_free_dev_extent(struct btrfs_trans_handle *trans, - struct btrfs_device *device, - struct btrfs_path *path, - u64 num_bytes, u64 *start) +static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans, + struct btrfs_device *device, + struct btrfs_path *path, + u64 num_bytes, u64 *start) { struct btrfs_key key; struct btrfs_root *root = device->dev_root; @@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, return ret; } -int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, +int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, @@ -693,7 +693,8 @@ err: return ret; } -static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset) +static noinline int find_next_chunk(struct btrfs_root *root, + u64 objectid, u64 *offset) { struct btrfs_path *path; int ret; @@ -735,8 +736,8 @@ error: return ret; } -static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path, - u64 *objectid) +static noinline int find_next_devid(struct btrfs_root *root, + struct btrfs_path *path, u64 *objectid) { int ret; struct btrfs_key key; @@ -1103,8 +1104,8 @@ out_close_bdev: goto out; } -int btrfs_update_device(struct btrfs_trans_handle *trans, - struct btrfs_device *device) +int noinline btrfs_update_device(struct btrfs_trans_handle *trans, + struct btrfs_device *device) 
{ int ret; struct btrfs_path *path; @@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, return 0; } -static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes, - int sub_stripes) +static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size, + int num_stripes, int sub_stripes) { if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) return calc_size; @@ -2141,8 +2142,9 @@ struct async_sched { * This will add one bio to the pending list for a device and make sure * the work struct is scheduled. */ -int schedule_bio(struct btrfs_root *root, struct btrfs_device *device, - int rw, struct bio *bio) +static int noinline schedule_bio(struct btrfs_root *root, + struct btrfs_device *device, + int rw, struct bio *bio) { int should_queue = 1; -- cgit v1.2.3-70-g09d2 From b214107eda845f9a5851ae198f5b972e0dc30c45 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 5 Sep 2008 16:43:31 -0400 Subject: Btrfs: trivial sparse fixes Fix a bunch of trivial sparse complaints. Signed-off-by: Christoph Hellwig Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 2 -- fs/btrfs/extent_io.c | 6 ++---- fs/btrfs/ioctl.c | 2 +- fs/btrfs/sysfs.c | 8 ++++---- 4 files changed, 7 insertions(+), 11 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6cd4e929bd4..ab07557bac0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2749,7 +2749,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, BUG_ON(slot < 0); if (slot != nritems) { - int i; unsigned int old_data = btrfs_item_end_nr(leaf, slot); if (old_data < data_end) { @@ -2913,7 +2912,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, nritems = btrfs_header_nritems(leaf); if (slot + nr != nritems) { - int i; int data_end = leaf_data_end(root, leaf); memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) + diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7ca89c45d40..239e7c908ab 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3400,10 +3400,8 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) } /* at this point we can safely release the extent buffer */ num_pages = num_extent_pages(eb->start, eb->len); - for (i = 0; i < num_pages; i++) { - struct page *page = extent_buffer_page(eb, i); - page_cache_release(page); - } + for (i = 0; i < num_pages; i++) + page_cache_release(extent_buffer_page(eb, i)); rb_erase(&eb->rb_node, &tree->buffer); __free_extent_buffer(eb); out: diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 59b64c738fd..f84b5f6991c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -739,7 +739,7 @@ long btrfs_ioctl_trans_end(struct file *file) goto out; } btrfs_end_transaction(trans, root); - file->private_data = 0; + file->private_data = NULL; mutex_lock(&root->fs_info->trans_mutex); root->fs_info->open_ioctl_trans--; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 1cf4f4f8d2c..61af5d8446e 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -254,7 +254,7 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) wait_for_completion(&fs->kobj_unregister); } -int btrfs_init_sysfs() +int btrfs_init_sysfs(void) { btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); if (!btrfs_kset) @@ -262,7 +262,7 @@ int btrfs_init_sysfs() return 0; } -void btrfs_exit_sysfs() +void btrfs_exit_sysfs(void) { kset_unregister(btrfs_kset); } @@ -289,12 +289,12 @@ void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) return; } -int btrfs_init_sysfs() 
+int btrfs_init_sysfs(void) { return 0; } -void btrfs_exit_sysfs() +void btrfs_exit_sysfs(void) { return; } -- cgit v1.2.3-70-g09d2 From 4bef084857ab8fe71cf49eae349c25e440a49150 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 8 Sep 2008 11:18:08 -0400 Subject: Btrfs: Tree logging fixes * Pin down data blocks to prevent them from being reallocated like so: trans 1: allocate file extent trans 2: free file extent trans 3: free file extent during old snapshot deletion trans 3: allocate file extent to new file trans 3: fsync new file Before the tree logging code, this was legal because the fsync would commit the transaction that did the final data extent free and the transaction that allocated the extent to the new file at the same time. With the tree logging code, the tree log subtransaction can commit before the transaction that freed the extent. If we crash, we're left with two different files using the extent. * Don't wait in start_transaction if log replay is going on. This avoids deadlocks from iput while we're cleaning up link counts in the replay code. * Don't deadlock in replay_one_name by trying to read an inode off the disk while holding paths for the directory. * Hold the buffer lock while we mark a buffer as written. This closes a race where someone is changing a buffer while we write it. They are supposed to mark it dirty again after they change it, but this violates the cow rules. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 33 ++++++++++++++++++++++++++++++--- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 20 +++++++++++++++++--- fs/btrfs/extent_io.c | 47 ++++++++++++++++++++++++++++++++++++----------- fs/btrfs/extent_io.h | 1 + fs/btrfs/transaction.c | 16 +++++++++++++--- fs/btrfs/tree-log.c | 13 +++++++++---- 7 files changed, 107 insertions(+), 24 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a4373db5967..42bf9916805 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -307,9 +307,7 @@ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) goto err; } found_level = btrfs_header_level(eb); - spin_lock(&root->fs_info->hash_lock); - btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); - spin_unlock(&root->fs_info->hash_lock); + csum_tree_block(root, eb, 0); err: free_extent_buffer(eb); @@ -1998,7 +1996,36 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid) return ret; } +int btree_lock_page_hook(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_buffer *eb; + unsigned long len; + u64 bytenr = page_offset(page); + + if (page->private == EXTENT_PAGE_PRIVATE) + goto out; + + len = page->private >> 2; + eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS); + if (!eb) + goto out; + + btrfs_tree_lock(eb); + spin_lock(&root->fs_info->hash_lock); + btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); + spin_unlock(&root->fs_info->hash_lock); + btrfs_tree_unlock(eb); + free_extent_buffer(eb); +out: + lock_page(page); + return 0; +} + static struct extent_io_ops btree_extent_io_ops = { + .write_cache_pages_lock_hook = btree_lock_page_hook, .writepage_io_hook = btree_writepage_io_hook, .readpage_end_io_hook = btree_readpage_end_io_hook, .submit_bio_hook = btree_submit_bio_hook, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 6b6fdc697f3..f84f5058dbb 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -80,4 +80,5 @@
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); +int btree_lock_page_hook(struct page *page); #endif diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 646b9148ca2..3181759da1c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1590,13 +1590,17 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, } static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, - int pending) + int is_data, int pending) { int err = 0; WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); if (!pending) { struct extent_buffer *buf; + + if (is_data) + goto pinit; + buf = btrfs_find_tree_block(root, bytenr, num_bytes); if (buf) { /* we can reuse a block if it hasn't been written @@ -1624,6 +1628,7 @@ static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, } free_extent_buffer(buf); } +pinit: btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); } else { set_extent_bits(&root->fs_info->pending_del, @@ -1744,7 +1749,8 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root #endif if (pin) { - ret = pin_down_bytes(root, bytenr, num_bytes, 0); + ret = pin_down_bytes(root, bytenr, num_bytes, + owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0); if (ret > 0) mark_free = 1; BUG_ON(ret < 0); @@ -1862,9 +1868,17 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, ref_generation = 0; if (root == extent_root) { - pin_down_bytes(root, bytenr, num_bytes, 1); + pin_down_bytes(root, bytenr, num_bytes, 0, 1); return 0; } + /* if metadata always pin */ + if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) + pin = 1; + + /* if data pin when any transaction has committed this */ + if (ref_generation != trans->transid) + pin = 1; + ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid, ref_generation, owner_objectid, owner_offset, pin, pin == 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 239e7c908ab..319a0c7a4a5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -29,7 +29,10 @@ static struct kmem_cache *extent_buffer_cache; static LIST_HEAD(buffers); static LIST_HEAD(states); + +#ifdef LEAK_DEBUG static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; +#endif #define BUFFER_LRU_MAX 64 @@ -106,7 +109,9 @@ EXPORT_SYMBOL(extent_io_tree_init); struct extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; +#ifdef LEAK_DEBUG unsigned long flags; +#endif state = kmem_cache_alloc(extent_state_cache, mask); if (!state) @@ -114,10 +119,11 @@ struct extent_state *alloc_extent_state(gfp_t mask) state->state = 0; state->private = 0; state->tree = NULL; +#ifdef LEAK_DEBUG spin_lock_irqsave(&leak_lock, flags); list_add(&state->leak_list, &states); spin_unlock_irqrestore(&leak_lock, flags); - +#endif atomic_set(&state->refs, 1); init_waitqueue_head(&state->wq); return state; @@ -129,11 +135,15 @@ void free_extent_state(struct extent_state *state) if (!state) return; if (atomic_dec_and_test(&state->refs)) { +#ifdef LEAK_DEBUG unsigned long flags; +#endif WARN_ON(state->tree); +#ifdef LEAK_DEBUG spin_lock_irqsave(&leak_lock, flags); list_del(&state->leak_list); spin_unlock_irqrestore(&leak_lock, flags); +#endif kmem_cache_free(extent_state_cache, state); } } @@ -2070,13 +2080,13 @@ done: } #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) -/* Taken directly from 2.6.23 for 2.6.18 back port */ +/* Taken directly from 2.6.23 with 
a mod for a lockpage hook */ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, void *data); +#endif /** - * write_cache_pages - walk the list of dirty pages of the given address space - * and write all of them. + * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * @writepage: function called for each page @@ -2090,9 +2100,10 @@ typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. */ -static int write_cache_pages(struct address_space *mapping, - struct writeback_control *wbc, writepage_t writepage, - void *data) +int extent_write_cache_pages(struct extent_io_tree *tree, + struct address_space *mapping, + struct writeback_control *wbc, + writepage_t writepage, void *data) { struct backing_dev_info *bdi = mapping->backing_dev_info; int ret = 0; @@ -2138,7 +2149,10 @@ retry: * swizzled back from swapper_space to tmpfs file * mapping */ - lock_page(page); + if (tree->ops && tree->ops->write_cache_pages_lock_hook) + tree->ops->write_cache_pages_lock_hook(page); + else + lock_page(page); if (unlikely(page->mapping != mapping)) { unlock_page(page); @@ -2187,9 +2201,12 @@ retry: } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; + + if (wbc->range_cont) + wbc->range_start = index << PAGE_CACHE_SHIFT; return ret; } -#endif +EXPORT_SYMBOL(extent_write_cache_pages); int extent_write_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, @@ -2214,7 +2231,8 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_writepage(page, wbc, &epd); - write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd); + extent_write_cache_pages(tree, mapping, &wbc_writepages, + __extent_writepage, &epd); if (epd.bio) { submit_one_bio(WRITE, epd.bio, 0); } @@ -2235,7 +2253,8 @@ int extent_writepages(struct extent_io_tree *tree, .get_extent = get_extent, }; - ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd); + ret = extent_write_cache_pages(tree, mapping, wbc, + __extent_writepage, &epd); if (epd.bio) { submit_one_bio(WRITE, epd.bio, 0); } @@ -2567,15 +2586,19 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, gfp_t mask) { struct extent_buffer *eb = NULL; +#ifdef LEAK_DEBUG unsigned long flags; +#endif eb = kmem_cache_zalloc(extent_buffer_cache, mask); eb->start = start; eb->len = len; mutex_init(&eb->mutex); +#ifdef LEAK_DEBUG spin_lock_irqsave(&leak_lock, flags); list_add(&eb->leak_list, &buffers); spin_unlock_irqrestore(&leak_lock, flags); +#endif atomic_set(&eb->refs, 1); return eb; @@ -2583,10 +2606,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, static void __free_extent_buffer(struct extent_buffer *eb) { +#ifdef LEAK_DEBUG unsigned long flags; spin_lock_irqsave(&leak_lock, flags); list_del(&eb->leak_list); spin_unlock_irqrestore(&leak_lock, flags); +#endif kmem_cache_free(extent_buffer_cache, eb); } diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 315cfceae31..3cb411a5f4d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -50,6 +50,7 @@ struct extent_io_ops { unsigned long old, unsigned long bits); int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end, unsigned 
long old, unsigned long bits); + int (*write_cache_pages_lock_hook)(struct page *page); }; struct extent_io_tree { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 49c4f5b40ed..61a377bcb2f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -161,7 +161,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, int ret; mutex_lock(&root->fs_info->trans_mutex); - if ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2) + if (!root->fs_info->log_root_recovering && + ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2)) wait_current_trans(root); ret = join_transaction(root); BUG_ON(ret); @@ -328,9 +329,17 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; - page = find_lock_page(btree_inode->i_mapping, index); + page = find_get_page(btree_inode->i_mapping, index); if (!page) continue; + + btree_lock_page_hook(page); + if (!page->mapping) { + unlock_page(page); + page_cache_release(page); + continue; + } + if (PageWriteback(page)) { if (PageDirty(page)) wait_on_page_writeback(page); @@ -360,7 +369,8 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, if (!page) continue; if (PageDirty(page)) { - lock_page(page); + btree_lock_page_hook(page); + wait_on_page_writeback(page); err = write_one_page(page, 0); if (err) werr = err; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d1ce8314b94..13d7ee8e0c5 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1176,8 +1176,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, struct btrfs_key found_key; struct btrfs_key log_key; struct inode *dir; - struct inode *inode; u8 log_type; + int exists; int ret; dir = read_one_inode(root, key->objectid); @@ -1190,6 +1190,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, name_len); btrfs_dir_item_key_to_cpu(eb, di, &log_key); + exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); + if (exists == 0) + exists = 1; + else + exists = 0; + btrfs_release_path(root, path); + if (key->type == BTRFS_DIR_ITEM_KEY) { dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, name, name_len, 1); @@ -1224,11 +1231,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, * don't drop the conflicting directory entry if the inode * for the new entry doesn't exist */ - inode = read_one_inode(root, log_key.objectid); - if (!inode) + if (!exists) goto out; - iput(inode); ret = drop_one_dir_item(trans, root, path, dir, dst_di); BUG_ON(ret); -- cgit v1.2.3-70-g09d2 From 0f9dd46cda36b8de3b9f48bc42bd09d20b9c3b52 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 23 Sep 2008 13:14:11 -0400 Subject: Btrfs: free space accounting redo 1) replace the per fs_info extent_io_tree that tracked free space with two rb-trees per block group to track free space areas via offset and size. The reason to do this is because most allocations come with a hint byte where to start, so we can usually find a chunk of free space at that hint byte to satisfy the allocation and get good space packing. If we cannot find free space at or after the given offset we fall back on looking for a chunk of the given size as close to that given offset as possible. When we fall back on the size search we also try to find a slot as close to the size we want as possible, to avoid breaking small chunks off of huge areas if possible. 
2) remove the extent_io_tree that tracked the block group cache from fs_info and replaced it with an rb-tree that tracks the block group cache via offset. Also added a per space_info list that tracks the block group cache for the particular space so we can look up related block groups easily. 3) cleaned up the allocation code to make it a little easier to read and a little less complicated. Basically there are 3 steps: first, look from our provided hint. If we can't find anything from that given hint, start back at our original search start and look for space from there. If that fails, try to allocate space if we can and start looking again. If not, we're screwed and need to start over again (see the sketch below). 4) small fixes. There were some issues in volumes.c where we wouldn't allocate the rest of the disk. Fixed cow_file_range to actually pass the alloc_hint, which has helped a good bit in making the fs_mark test I run have semi-normal results as we run out of space. Generally with data allocations we don't track where we last allocated from, so every time we did a data allocation we'd search through every block group that we have looking for free space. Now searching a block group with no free space isn't terribly time consuming, but it was causing a slight degradation as we got more data block groups. The alloc_hint has fixed this slight degradation and made things semi-normal. There is still one nagging problem I'm working on where we will get ENOSPC when there is definitely plenty of space. This only happens with metadata allocations, and only when we are almost full. So you generally hit the 85% mark first, but sometimes you'll hit the BUG before you hit the 85% wall. I'm still tracking it down, but until then this seems to be pretty stable and makes a significant performance gain.
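[Editorial sketch, not part of the patch: a minimal model of the two free space indexes described in points 1) and 3). Only the struct btrfs_free_space fields (offset_index, bytes_index, offset, bytes) come from the diff below; the demo_* names are hypothetical, and the real free-space-cache.c lookup handles more cases, e.g. walking forward with rb_next() when the first extent past the hint is too small.]

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_free_space {
	struct rb_node offset_index;	/* keyed by ->offset */
	struct rb_node bytes_index;	/* keyed by ->bytes */
	u64 offset;
	u64 bytes;
};

/*
 * hint pass: find the extent containing @offset, or the first extent
 * starting after it, and check that it still has @bytes usable from
 * the hint onward
 */
static struct demo_free_space *demo_search_offset(struct rb_root *root,
						  u64 offset, u64 bytes)
{
	struct rb_node *n = root->rb_node;
	struct demo_free_space *entry;
	struct demo_free_space *found = NULL;
	u64 start;

	while (n) {
		entry = rb_entry(n, struct demo_free_space, offset_index);
		if (offset < entry->offset) {
			/* starts past the hint, remember it and try left */
			found = entry;
			n = n->rb_left;
		} else if (offset >= entry->offset + entry->bytes) {
			n = n->rb_right;
		} else {
			/* the hint byte lands inside this extent */
			found = entry;
			break;
		}
	}
	if (!found)
		return NULL;
	/* only the part at or after the hint is usable */
	start = max(offset, found->offset);
	if (found->offset + found->bytes - start < bytes)
		return NULL;
	return found;
}

/* size pass: smallest extent with at least @bytes free */
static struct demo_free_space *demo_search_bytes(struct rb_root *root,
						 u64 bytes)
{
	struct rb_node *n = root->rb_node;
	struct demo_free_space *entry;
	struct demo_free_space *found = NULL;

	while (n) {
		entry = rb_entry(n, struct demo_free_space, bytes_index);
		if (bytes <= entry->bytes) {
			/* big enough, look left for a tighter fit */
			found = entry;
			n = n->rb_left;
		} else {
			n = n->rb_right;
		}
	}
	return found;
}

/* the order described above: honor the hint first, then fall back on size */
static struct demo_free_space *demo_find_free_space(struct rb_root *by_offset,
						    struct rb_root *by_bytes,
						    u64 hint, u64 bytes)
{
	struct demo_free_space *info;

	info = demo_search_offset(by_offset, hint, bytes);
	if (!info)
		info = demo_search_bytes(by_bytes, bytes);
	return info;
}

Keeping the second index sorted by size is what makes the fallback cheap: when the hint byte cannot be honored, the smallest sufficient extent is found in O(log n) instead of rescanning the offset tree.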
Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/ctree.c | 3 +- fs/btrfs/ctree.h | 46 ++- fs/btrfs/disk-io.c | 7 +- fs/btrfs/extent-tree.c | 869 +++++++++++++++++++++++--------------------- fs/btrfs/extent_io.c | 4 + fs/btrfs/free-space-cache.c | 415 +++++++++++++++++++++ fs/btrfs/inode.c | 3 +- fs/btrfs/volumes.c | 11 +- 9 files changed, 925 insertions(+), 435 deletions(-) create mode 100644 fs/btrfs/free-space-cache.c (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index b7addbfd8c2..eb36ae981bd 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,7 +7,7 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ transaction.o bit-radix.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ - ref-cache.o export.o tree-log.o acl.o + ref-cache.o export.o tree-log.o acl.o free-space-cache.o else # Normal Makefile diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 18e84472abb..6f467901246 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2725,9 +2725,8 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, total_size = total_data + (nr * sizeof(struct btrfs_item)); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); - if (ret == 0) { + if (ret == 0) return -EEXIST; - } if (ret < 0) goto out; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index eb65fd80888..730aae3bc18 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -483,7 +483,6 @@ struct btrfs_csum_item { #define BTRFS_BLOCK_GROUP_DUP (1 << 5) #define BTRFS_BLOCK_GROUP_RAID10 (1 << 6) - struct btrfs_block_group_item { __le64 used; __le64 chunk_objectid; @@ -498,17 +497,40 @@ struct btrfs_space_info { int full; int force_alloc; struct list_head list; + + /* for block groups in our same type */ + struct list_head block_groups; + spinlock_t lock; +}; + +struct btrfs_free_space { + struct rb_node bytes_index; + struct rb_node offset_index; + u64 offset; + u64 bytes; }; struct btrfs_block_group_cache { struct btrfs_key key; struct btrfs_block_group_item item; - struct btrfs_space_info *space_info; spinlock_t lock; u64 pinned; u64 flags; int cached; int ro; + int dirty; + + struct btrfs_space_info *space_info; + + /* free space cache stuff */ + struct rb_root free_space_bytes; + struct rb_root free_space_offset; + + /* block group cache stuff */ + struct rb_node cache_node; + + /* for block groups in the same raid type */ + struct list_head list; }; struct btrfs_device; @@ -525,8 +547,10 @@ struct btrfs_fs_info { struct btrfs_root *log_root_tree; struct radix_tree_root fs_roots_radix; - struct extent_io_tree free_space_cache; - struct extent_io_tree block_group_cache; + /* block group cache stuff */ + spinlock_t block_group_cache_lock; + struct rb_root block_group_cache_tree; + struct extent_io_tree pinned_extents; struct extent_io_tree pending_del; struct extent_io_tree extent_ins; @@ -1814,4 +1838,18 @@ int btrfs_sync_fs(struct super_block *sb, int wait); int btrfs_check_acl(struct inode *inode, int mask); int btrfs_init_acl(struct inode *inode, struct inode *dir); int btrfs_acl_chmod(struct inode *inode); + +/* free-space-cache.c */ +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size); +int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, + u64 bytenr, u64 size); +void btrfs_remove_free_space_cache(struct btrfs_block_group_cache + *block_group); 
+struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache + *block_group, u64 offset, + u64 bytes); +void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, + u64 bytes); +u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); #endif diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f6f7821d43a..535bd0fe1a7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1410,10 +1410,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; - extent_io_tree_init(&fs_info->free_space_cache, - fs_info->btree_inode->i_mapping, GFP_NOFS); - extent_io_tree_init(&fs_info->block_group_cache, - fs_info->btree_inode->i_mapping, GFP_NOFS); + spin_lock_init(&fs_info->block_group_cache_lock); + fs_info->block_group_cache_tree.rb_node = NULL; + extent_io_tree_init(&fs_info->pinned_extents, fs_info->btree_inode->i_mapping, GFP_NOFS); extent_io_tree_init(&fs_info->pending_del, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1c10ffc837c..813566acc5d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -29,12 +29,6 @@ #include "locking.h" #include "ref-cache.h" -#define BLOCK_GROUP_DATA EXTENT_WRITEBACK -#define BLOCK_GROUP_METADATA EXTENT_UPTODATE -#define BLOCK_GROUP_SYSTEM EXTENT_NEW - -#define BLOCK_GROUP_DIRTY EXTENT_DIRTY - static int finish_current_insert(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root); static int del_pending_extents(struct btrfs_trans_handle *trans, struct @@ -62,6 +56,127 @@ void maybe_unlock_mutex(struct btrfs_root *root) } } +static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) +{ + return (cache->flags & bits) == bits; +} + +/* + * this adds the block group to the fs_info rb tree for the block group + * cache + */ +int btrfs_add_block_group_cache(struct btrfs_fs_info *info, + struct btrfs_block_group_cache *block_group) +{ + struct rb_node **p; + struct rb_node *parent = NULL; + struct btrfs_block_group_cache *cache; + + spin_lock(&info->block_group_cache_lock); + p = &info->block_group_cache_tree.rb_node; + + while (*p) { + parent = *p; + cache = rb_entry(parent, struct btrfs_block_group_cache, + cache_node); + if (block_group->key.objectid < cache->key.objectid) { + p = &(*p)->rb_left; + } else if (block_group->key.objectid > cache->key.objectid) { + p = &(*p)->rb_right; + } else { + spin_unlock(&info->block_group_cache_lock); + return -EEXIST; + } + } + + rb_link_node(&block_group->cache_node, parent, p); + rb_insert_color(&block_group->cache_node, + &info->block_group_cache_tree); + spin_unlock(&info->block_group_cache_lock); + + return 0; +} + +/* + * This will return the block group at or after bytenr if contains is 0, else + * it will return the block group that contains the bytenr + */ +static struct btrfs_block_group_cache * +block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr, + int contains) +{ + struct btrfs_block_group_cache *cache, *ret = NULL; + struct rb_node *n; + u64 end, start; + + spin_lock(&info->block_group_cache_lock); + n = info->block_group_cache_tree.rb_node; + + while (n) { + cache = rb_entry(n, struct btrfs_block_group_cache, + cache_node); + end = cache->key.objectid + cache->key.offset - 1; + start = cache->key.objectid; + + if (bytenr < start) { + if (!contains && (!ret || start < ret->key.objectid)) + ret = cache; + n = n->rb_left; + } else if (bytenr > start) { + if (contains && bytenr <= end) { + ret = cache; + break; + 
} + n = n->rb_right; + } else { + ret = cache; + break; + } + } + spin_unlock(&info->block_group_cache_lock); + + return ret; +} + +/* + * this is only called by cache_block_group, since we could have freed extents + * we need to check the pinned_extents for any extents that can't be used yet + * since their free space will be released as soon as the transaction commits. + */ +static int add_new_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_fs_info *info, u64 start, u64 end) +{ + u64 extent_start, extent_end, size; + int ret; + + while (start < end) { + ret = find_first_extent_bit(&info->pinned_extents, start, + &extent_start, &extent_end, + EXTENT_DIRTY); + if (ret) + break; + + if (extent_start == start) { + start = extent_end + 1; + } else if (extent_start > start && extent_start < end) { + size = extent_start - start; + ret = btrfs_add_free_space(block_group, start, size); + BUG_ON(ret); + start = extent_end + 1; + } else { + break; + } + } + + if (start < end) { + size = end - start; + ret = btrfs_add_free_space(block_group, start, size); + BUG_ON(ret); + } + + return 0; +} + static int cache_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *block_group) { @@ -69,10 +184,8 @@ static int cache_block_group(struct btrfs_root *root, int ret = 0; struct btrfs_key key; struct extent_buffer *leaf; - struct extent_io_tree *free_space_cache; int slot; u64 last = 0; - u64 hole_size; u64 first_free; int found = 0; @@ -80,7 +193,6 @@ static int cache_block_group(struct btrfs_root *root, return 0; root = root->fs_info->extent_root; - free_space_cache = &root->fs_info->free_space_cache; if (block_group->cached) return 0; @@ -96,7 +208,8 @@ static int cache_block_group(struct btrfs_root *root, * skip the locking here */ path->skip_locking = 1; - first_free = block_group->key.objectid; + first_free = max_t(u64, block_group->key.objectid, + BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE); key.objectid = block_group->key.objectid; key.offset = 0; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); @@ -119,32 +232,28 @@ static int cache_block_group(struct btrfs_root *root, ret = btrfs_next_leaf(root, path); if (ret < 0) goto err; - if (ret == 0) { + if (ret == 0) continue; - } else { + else break; - } } btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid < block_group->key.objectid) { + if (key.objectid < block_group->key.objectid) goto next; - } + if (key.objectid >= block_group->key.objectid + - block_group->key.offset) { + block_group->key.offset) break; - } if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) { if (!found) { last = first_free; found = 1; } - if (key.objectid > last) { - hole_size = key.objectid - last; - set_extent_dirty(free_space_cache, last, - last + hole_size - 1, - GFP_NOFS); - } + + add_new_free_space(block_group, root->fs_info, last, + key.objectid); + last = key.objectid + key.offset; } next: @@ -153,13 +262,11 @@ next: if (!found) last = first_free; - if (block_group->key.objectid + - block_group->key.offset > last) { - hole_size = block_group->key.objectid + - block_group->key.offset - last; - set_extent_dirty(free_space_cache, last, - last + hole_size - 1, GFP_NOFS); - } + + add_new_free_space(block_group, root->fs_info, last, + block_group->key.objectid + + block_group->key.offset); + block_group->cached = 1; ret = 0; err: @@ -167,166 +274,79 @@ err: return ret; } +/* + * return the block group that starts at or after bytenr + */ struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 
bytenr) { - struct extent_io_tree *block_group_cache; - struct btrfs_block_group_cache *block_group = NULL; - u64 ptr; - u64 start; - u64 end; - int ret; + struct btrfs_block_group_cache *cache; - bytenr = max_t(u64, bytenr, - BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE); - block_group_cache = &info->block_group_cache; - ret = find_first_extent_bit(block_group_cache, - bytenr, &start, &end, - BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA | - BLOCK_GROUP_SYSTEM); - if (ret) { - return NULL; - } - ret = get_state_private(block_group_cache, start, &ptr); - if (ret) - return NULL; + cache = block_group_cache_tree_search(info, bytenr, 0); - block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr; - return block_group; + return cache; } +/* + * return the block group that contains the given bytenr + */ struct btrfs_block_group_cache *btrfs_lookup_block_group(struct btrfs_fs_info *info, u64 bytenr) { - struct extent_io_tree *block_group_cache; - struct btrfs_block_group_cache *block_group = NULL; - u64 ptr; - u64 start; - u64 end; - int ret; + struct btrfs_block_group_cache *cache; - bytenr = max_t(u64, bytenr, - BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE); - block_group_cache = &info->block_group_cache; - ret = find_first_extent_bit(block_group_cache, - bytenr, &start, &end, - BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA | - BLOCK_GROUP_SYSTEM); - if (ret) { - return NULL; - } - ret = get_state_private(block_group_cache, start, &ptr); - if (ret) - return NULL; + cache = block_group_cache_tree_search(info, bytenr, 1); - block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr; - if (block_group->key.objectid <= bytenr && bytenr < - block_group->key.objectid + block_group->key.offset) - return block_group; - return NULL; + return cache; } -static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) -{ - return (cache->flags & bits) == bits; -} - -static int noinline find_search_start(struct btrfs_root *root, - struct btrfs_block_group_cache **cache_ret, - u64 *start_ret, u64 num, int data) +static int noinline find_free_space(struct btrfs_root *root, + struct btrfs_block_group_cache **cache_ret, + u64 *start_ret, u64 num, int data) { int ret; struct btrfs_block_group_cache *cache = *cache_ret; - struct extent_io_tree *free_space_cache; - struct extent_state *state; u64 last; - u64 start = 0; - u64 cache_miss = 0; u64 total_fs_bytes; u64 search_start = *start_ret; - int wrapped = 0; WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy); - free_space_cache = &root->fs_info->free_space_cache; if (!cache) goto out; + last = max(search_start, cache->key.objectid); + again: ret = cache_block_group(root, cache); - if (ret) { + if (ret) goto out; - } - last = max(search_start, cache->key.objectid); - if (!block_group_bits(cache, data) || cache->ro) + if (cache->ro || !block_group_bits(cache, data)) goto new_group; - spin_lock_irq(&free_space_cache->lock); - state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY); - while(1) { - if (!state) { - if (!cache_miss) - cache_miss = last; - spin_unlock_irq(&free_space_cache->lock); - goto new_group; - } - - start = max(last, state->start); - last = state->end + 1; - if (last - start < num) { - do { - state = extent_state_next(state); - } while(state && !(state->state & EXTENT_DIRTY)); - continue; - } - spin_unlock_irq(&free_space_cache->lock); - if (cache->ro) { - goto new_group; - } - if (start + num
> cache->key.objectid + cache->key.offset) - goto new_group; - if (!block_group_bits(cache, data)) { - printk("block group bits don't match %Lu %d\n", cache->flags, data); - } - *start_ret = start; + info = btrfs_find_free_space(cache, last, num); + if (info) { + *start_ret = info->offset; return 0; } -out: - cache = btrfs_lookup_block_group(root->fs_info, search_start); - if (!cache) { - printk("Unable to find block group for %Lu\n", search_start); - WARN_ON(1); - } - return -ENOSPC; new_group: last = cache->key.objectid + cache->key.offset; -wrapped: + cache = btrfs_lookup_first_block_group(root->fs_info, last); - if (!cache || cache->key.objectid >= total_fs_bytes) { -no_cache: - if (!wrapped) { - wrapped = 1; - last = search_start; - goto wrapped; - } + if (!cache || cache->key.objectid >= total_fs_bytes) goto out; - } - if (cache_miss && !cache->cached) { - cache_block_group(root, cache); - last = cache_miss; - cache = btrfs_lookup_first_block_group(root->fs_info, last); - } - cache_miss = 0; - cache = btrfs_find_block_group(root, cache, last, data, 0); - if (!cache) - goto no_cache; + *cache_ret = cache; goto again; + +out: + return -ENOSPC; } static u64 div_factor(u64 num, int factor) @@ -338,16 +358,19 @@ static u64 div_factor(u64 num, int factor) return num; } -static int block_group_state_bits(u64 flags) +static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, + u64 flags) { - int bits = 0; - if (flags & BTRFS_BLOCK_GROUP_DATA) - bits |= BLOCK_GROUP_DATA; - if (flags & BTRFS_BLOCK_GROUP_METADATA) - bits |= BLOCK_GROUP_METADATA; - if (flags & BTRFS_BLOCK_GROUP_SYSTEM) - bits |= BLOCK_GROUP_SYSTEM; - return bits; + struct list_head *head = &info->space_info; + struct list_head *cur; + struct btrfs_space_info *found; + list_for_each(cur, head) { + found = list_entry(cur, struct btrfs_space_info, list); + if (found->flags == flags) + return found; + } + return NULL; + } static struct btrfs_block_group_cache * @@ -356,28 +379,19 @@ __btrfs_find_block_group(struct btrfs_root *root, u64 search_start, int data, int owner) { struct btrfs_block_group_cache *cache; - struct extent_io_tree *block_group_cache; struct btrfs_block_group_cache *found_group = NULL; struct btrfs_fs_info *info = root->fs_info; + struct btrfs_space_info *sinfo; u64 used; u64 last = 0; - u64 start; - u64 end; u64 free_check; - u64 ptr; - int bit; - int ret; int full_search = 0; int factor = 10; int wrapped = 0; - block_group_cache = &info->block_group_cache; - if (data & BTRFS_BLOCK_GROUP_METADATA) factor = 9; - bit = block_group_state_bits(data); - if (search_start) { struct btrfs_block_group_cache *shint; shint = btrfs_lookup_first_block_group(info, search_start); @@ -408,20 +422,30 @@ __btrfs_find_block_group(struct btrfs_root *root, else last = search_start; } + sinfo = __find_space_info(root->fs_info, data); + if (!sinfo) + goto found; again: while(1) { - ret = find_first_extent_bit(block_group_cache, last, - &start, &end, bit); - if (ret) - break; + struct list_head *l; - ret = get_state_private(block_group_cache, start, &ptr); - if (ret) { - last = end + 1; - continue; + cache = NULL; + + spin_lock(&sinfo->lock); + list_for_each(l, &sinfo->block_groups) { + struct btrfs_block_group_cache *entry; + entry = list_entry(l, struct btrfs_block_group_cache, + list); + if ((entry->key.objectid >= last) && + (!cache || (entry->key.objectid < + cache->key.objectid))) + cache = entry; } + spin_unlock(&sinfo->lock); + + if (!cache) + break; - cache = (struct btrfs_block_group_cache *)(unsigned 
long)ptr; spin_lock(&cache->lock); last = cache->key.objectid + cache->key.offset; used = btrfs_block_group_used(&cache->item); @@ -462,6 +486,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, ret = __btrfs_find_block_group(root, hint, search_start, data, owner); return ret; } + static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset) { @@ -1175,34 +1200,37 @@ fail: int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { - struct extent_io_tree *block_group_cache; - struct btrfs_block_group_cache *cache; - int ret; + struct btrfs_block_group_cache *cache, *entry; + struct rb_node *n; int err = 0; int werr = 0; struct btrfs_path *path; u64 last = 0; - u64 start; - u64 end; - u64 ptr; - block_group_cache = &root->fs_info->block_group_cache; path = btrfs_alloc_path(); if (!path) return -ENOMEM; mutex_lock(&root->fs_info->alloc_mutex); while(1) { - ret = find_first_extent_bit(block_group_cache, last, - &start, &end, BLOCK_GROUP_DIRTY); - if (ret) - break; + cache = NULL; + spin_lock(&root->fs_info->block_group_cache_lock); + for (n = rb_first(&root->fs_info->block_group_cache_tree); + n; n = rb_next(n)) { + entry = rb_entry(n, struct btrfs_block_group_cache, + cache_node); + if (entry->dirty) { + cache = entry; + break; + } + } + spin_unlock(&root->fs_info->block_group_cache_lock); - last = end + 1; - ret = get_state_private(block_group_cache, start, &ptr); - if (ret) + if (!cache) break; - cache = (struct btrfs_block_group_cache *)(unsigned long)ptr; + + last += cache->key.offset; + err = write_one_cache_group(trans, root, path, cache); /* @@ -1214,29 +1242,14 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, werr = err; continue; } - clear_extent_bits(block_group_cache, start, end, - BLOCK_GROUP_DIRTY, GFP_NOFS); + + cache->dirty = 0; } btrfs_free_path(path); mutex_unlock(&root->fs_info->alloc_mutex); return werr; } -static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info, - u64 flags) -{ - struct list_head *head = &info->space_info; - struct list_head *cur; - struct btrfs_space_info *found; - list_for_each(cur, head) { - found = list_entry(cur, struct btrfs_space_info, list); - if (found->flags == flags) - return found; - } - return NULL; - -} - static int update_space_info(struct btrfs_fs_info *info, u64 flags, u64 total_bytes, u64 bytes_used, struct btrfs_space_info **space_info) @@ -1256,6 +1269,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, return -ENOMEM; list_add(&found->list, &info->space_info); + INIT_LIST_HEAD(&found->block_groups); + spin_lock_init(&found->lock); found->flags = flags; found->total_bytes = total_bytes; found->bytes_used = bytes_used; @@ -1318,7 +1333,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 thresh; u64 start; u64 num_bytes; - int ret; + int ret = 0; flags = reduce_alloc_profile(extent_root, flags); @@ -1355,10 +1370,11 @@ printk("space info full %Lu\n", flags); ret = btrfs_make_block_group(trans, extent_root, 0, flags, BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes); BUG_ON(ret); + out_unlock: mutex_unlock(&extent_root->fs_info->chunk_mutex); out: - return 0; + return ret; } static int update_block_group(struct btrfs_trans_handle *trans, @@ -1371,8 +1387,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, u64 total = num_bytes; u64 old_val; u64 byte_in_group; - u64 start; - u64 end; WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); 
while(total) { @@ -1382,12 +1396,9 @@ static int update_block_group(struct btrfs_trans_handle *trans, } byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); - start = cache->key.objectid; - end = start + cache->key.offset - 1; - set_extent_bits(&info->block_group_cache, start, end, - BLOCK_GROUP_DIRTY, GFP_NOFS); spin_lock(&cache->lock); + cache->dirty = 1; old_val = btrfs_block_group_used(&cache->item); num_bytes = min(total, cache->key.offset - byte_in_group); if (alloc) { @@ -1401,9 +1412,11 @@ static int update_block_group(struct btrfs_trans_handle *trans, btrfs_set_block_group_used(&cache->item, old_val); spin_unlock(&cache->lock); if (mark_free) { - set_extent_dirty(&info->free_space_cache, - bytenr, bytenr + num_bytes - 1, - GFP_NOFS); + int ret; + ret = btrfs_add_free_space(cache, bytenr, + num_bytes); + if (ret) + return -1; } } total -= num_bytes; @@ -1414,16 +1427,13 @@ static int update_block_group(struct btrfs_trans_handle *trans, static u64 first_logical_byte(struct btrfs_root *root, u64 search_start) { - u64 start; - u64 end; - int ret; - ret = find_first_extent_bit(&root->fs_info->block_group_cache, - search_start, &start, &end, - BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA | - BLOCK_GROUP_SYSTEM); - if (ret) + struct btrfs_block_group_cache *cache; + + cache = btrfs_lookup_first_block_group(root->fs_info, search_start); + if (!cache) return 0; - return start; + + return cache->key.objectid; } @@ -1501,8 +1511,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, u64 start; u64 end; int ret; - struct extent_io_tree *free_space_cache; - free_space_cache = &root->fs_info->free_space_cache; + struct btrfs_block_group_cache *cache; mutex_lock(&root->fs_info->alloc_mutex); while(1) { @@ -1512,7 +1521,9 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, break; btrfs_update_pinned_extents(root, start, end + 1 - start, 0); clear_extent_dirty(unpin, start, end, GFP_NOFS); - set_extent_dirty(free_space_cache, start, end, GFP_NOFS); + cache = btrfs_lookup_block_group(root->fs_info, start); + if (cache->cached) + btrfs_add_free_space(cache, start, end - start + 1); if (need_resched()) { mutex_unlock(&root->fs_info->alloc_mutex); cond_resched(); @@ -1875,9 +1886,12 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, /* if metadata always pin */ if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) { if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { + struct btrfs_block_group_cache *cache; + /* btrfs_free_reserved_extent */ - set_extent_dirty(&root->fs_info->free_space_cache, - bytenr, bytenr + num_bytes - 1, GFP_NOFS); + cache = btrfs_lookup_block_group(root->fs_info, bytenr); + BUG_ON(!cache); + btrfs_add_free_space(cache, bytenr, num_bytes); return 0; } pin = 1; @@ -1942,8 +1956,6 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, u64 total_needed = num_bytes; u64 *last_ptr = NULL; struct btrfs_block_group_cache *block_group; - int full_scan = 0; - int wrapped = 0; int chunk_alloc_done = 0; int empty_cluster = 2 * 1024 * 1024; int allowed_chunk_alloc = 0; @@ -1959,9 +1971,9 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, empty_cluster = 256 * 1024; } - if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) { + if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) last_ptr = &root->fs_info->last_data_alloc; - } + if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { last_ptr = &root->fs_info->last_log_alloc; if (!last_ptr == 0 
&& root->fs_info->last_alloc) { @@ -1972,9 +1984,8 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, if (last_ptr) { if (*last_ptr) hint_byte = *last_ptr; - else { + else empty_size += empty_cluster; - } } search_start = max(search_start, first_logical_byte(root, 0)); @@ -1983,145 +1994,172 @@ if (search_end == (u64)-1) search_end = btrfs_super_total_bytes(&info->super_copy); - if (hint_byte) { - block_group = btrfs_lookup_first_block_group(info, hint_byte); - if (!block_group) - hint_byte = search_start; - block_group = btrfs_find_block_group(root, block_group, - hint_byte, data, 1); - if (last_ptr && *last_ptr == 0 && block_group) - hint_byte = block_group->key.objectid; - } else { - block_group = btrfs_find_block_group(root, - trans->block_group, - search_start, data, 1); - } search_start = max(search_start, hint_byte); - total_needed += empty_size; -check_failed: - if (!block_group) { - block_group = btrfs_lookup_first_block_group(info, - search_start); - if (!block_group) - block_group = btrfs_lookup_first_block_group(info, - orig_search_start); - } - if (full_scan && !chunk_alloc_done) { - if (allowed_chunk_alloc) { - do_chunk_alloc(trans, root, - num_bytes + 2 * 1024 * 1024, data, 1); - allowed_chunk_alloc = 0; - } else if (block_group && block_group_bits(block_group, data)) { - block_group->space_info->force_alloc = 1; +new_group: + block_group = btrfs_lookup_block_group(info, search_start); + + /* + * Ok, this looks a little tricky, but it's really simple. First, if we + * didn't find a block group, obviously we want to start over. + * Secondly, if the block group we found does not match the type we + * need, and we have a last_ptr and it's not 0, chances are the last + * allocation we made was at the end of the block group, so let's go + * ahead and skip looking through the rest of the block groups and + * start at the beginning. This helps with metadata allocations, + * since you are likely to have a bunch of data block groups to search + * through first before you realize that you need to start over, so go + * ahead and start over and save the time.
+ */ + if (!block_group || (!block_group_bits(block_group, data) && + last_ptr && *last_ptr)) { + if (search_start != orig_search_start) { + if (last_ptr && *last_ptr) + *last_ptr = 0; + search_start = orig_search_start; + goto new_group; + } else if (!chunk_alloc_done && allowed_chunk_alloc) { + ret = do_chunk_alloc(trans, root, + num_bytes + 2 * 1024 * 1024, + data, 1); + if (ret < 0) { + struct btrfs_space_info *info; + + info = __find_space_info(root->fs_info, data); + goto error; + } + BUG_ON(ret); + chunk_alloc_done = 1; + search_start = orig_search_start; + goto new_group; + } else { + ret = -ENOSPC; + goto error; } - chunk_alloc_done = 1; - } - ret = find_search_start(root, &block_group, &search_start, - total_needed, data); - if (ret == -ENOSPC && last_ptr && *last_ptr) { - *last_ptr = 0; - block_group = btrfs_lookup_first_block_group(info, - orig_search_start); - search_start = orig_search_start; - ret = find_search_start(root, &block_group, &search_start, - total_needed, data); } - if (ret == -ENOSPC) - goto enospc; - if (ret) - goto error; - if (last_ptr && *last_ptr && search_start != *last_ptr) { - *last_ptr = 0; - if (!empty_size) { - empty_size += empty_cluster; - total_needed += empty_size; + /* + * this is going to search through all of the existing block groups it + * can find, so if we don't find something we need to see if we can + * allocate what we need. + */ + ret = find_free_space(root, &block_group, &search_start, + total_needed, data); + if (ret == -ENOSPC) { + /* + * instead of allocating, start at the original search start + * and see if there is something to be found; if not, then we + * allocate + */ + if (search_start != orig_search_start) { + if (last_ptr && *last_ptr) { + *last_ptr = 0; + total_needed += empty_cluster; + } + search_start = orig_search_start; + goto new_group; } - block_group = btrfs_lookup_first_block_group(info, - orig_search_start); - search_start = orig_search_start; - ret = find_search_start(root, &block_group, - &search_start, total_needed, data); - if (ret == -ENOSPC) - goto enospc; - if (ret) + + /* + * we've already allocated, we're pretty screwed + */ + if (chunk_alloc_done) { goto error; + } else if (!allowed_chunk_alloc && block_group && + block_group_bits(block_group, data)) { + block_group->space_info->force_alloc = 1; + goto error; + } else if (!allowed_chunk_alloc) { + goto error; + } + + ret = do_chunk_alloc(trans, root, num_bytes + 2 * 1024 * 1024, + data, 1); + if (ret < 0) + goto error; + + BUG_ON(ret); + chunk_alloc_done = 1; + if (block_group) + search_start = block_group->key.objectid + + block_group->key.offset; + else + search_start = orig_search_start; + goto new_group; } + if (ret) + goto error; + search_start = stripe_align(root, search_start); ins->objectid = search_start; ins->offset = num_bytes; - if (ins->objectid + num_bytes >= search_end) - goto enospc; + if (ins->objectid + num_bytes >= search_end) { + search_start = orig_search_start; + if (chunk_alloc_done) { + ret = -ENOSPC; + goto error; + } + goto new_group; + } if (ins->objectid + num_bytes > block_group->key.objectid + block_group->key.offset) { + if (search_start == orig_search_start && chunk_alloc_done) { + ret = -ENOSPC; + goto error; + } search_start = block_group->key.objectid + block_group->key.offset; goto new_group; } - if (test_range_bit(&info->extent_ins, ins->objectid, - ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) { - search_start = ins->objectid + num_bytes; - goto new_group; - } - - if (test_range_bit(&info->pinned_extents,
ins->objectid, - ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) { - search_start = ins->objectid + num_bytes; - goto new_group; - } - if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start && ins->objectid < exclude_start + exclude_nr)) { search_start = exclude_start + exclude_nr; goto new_group; } - if (!(data & BTRFS_BLOCK_GROUP_DATA)) { - block_group = btrfs_lookup_block_group(info, ins->objectid); - if (block_group) - trans->block_group = block_group; - } + if (!(data & BTRFS_BLOCK_GROUP_DATA)) + trans->block_group = block_group; + ins->offset = num_bytes; if (last_ptr) { *last_ptr = ins->objectid + ins->offset; if (*last_ptr == - btrfs_super_total_bytes(&root->fs_info->super_copy)) { + btrfs_super_total_bytes(&root->fs_info->super_copy)) *last_ptr = 0; - } - } - return 0; - -new_group: - if (search_start + num_bytes >= search_end) { -enospc: - search_start = orig_search_start; - if (full_scan) { - ret = -ENOSPC; - goto error; - } - if (wrapped) { - if (!full_scan) - total_needed -= empty_size; - full_scan = 1; - } else - wrapped = 1; } - block_group = btrfs_lookup_first_block_group(info, search_start); - cond_resched(); - block_group = btrfs_find_block_group(root, block_group, - search_start, data, 0); - goto check_failed; + ret = 0; error: return ret; } +static void dump_space_info(struct btrfs_space_info *info, u64 bytes) +{ + struct btrfs_block_group_cache *cache; + struct list_head *l; + + printk(KERN_INFO "space_info has %Lu free, is %sfull\n", + info->total_bytes - info->bytes_used - info->bytes_pinned, + (info->full) ? "" : "not "); + + spin_lock(&info->lock); + list_for_each(l, &info->block_groups) { + cache = list_entry(l, struct btrfs_block_group_cache, list); + spin_lock(&cache->lock); + printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used " + "%Lu pinned\n", + cache->key.objectid, cache->key.offset, + btrfs_block_group_used(&cache->item), cache->pinned); + btrfs_dump_free_space(cache, bytes); + spin_unlock(&cache->lock); + } + spin_unlock(&info->lock); +} static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 num_bytes, u64 min_alloc_size, @@ -2133,6 +2171,7 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, u64 search_start = 0; u64 alloc_profile; struct btrfs_fs_info *info = root->fs_info; + struct btrfs_block_group_cache *cache; if (data) { alloc_profile = info->avail_data_alloc_bits & @@ -2160,11 +2199,9 @@ again: BTRFS_BLOCK_GROUP_METADATA | (info->metadata_alloc_profile & info->avail_metadata_alloc_bits), 0); - BUG_ON(ret); } ret = do_chunk_alloc(trans, root->fs_info->extent_root, num_bytes + 2 * 1024 * 1024, data, 0); - BUG_ON(ret); } WARN_ON(num_bytes < root->sectorsize); @@ -2175,26 +2212,44 @@ again: if (ret == -ENOSPC && num_bytes > min_alloc_size) { num_bytes = num_bytes >> 1; + num_bytes = num_bytes & ~(root->sectorsize - 1); num_bytes = max(num_bytes, min_alloc_size); do_chunk_alloc(trans, root->fs_info->extent_root, num_bytes, data, 1); goto again; } if (ret) { - printk("allocation failed flags %Lu\n", data); + struct btrfs_space_info *sinfo; + + sinfo = __find_space_info(root->fs_info, data); + printk("allocation failed flags %Lu, wanted %Lu\n", + data, num_bytes); + dump_space_info(sinfo, num_bytes); BUG(); } - clear_extent_dirty(&root->fs_info->free_space_cache, - ins->objectid, ins->objectid + ins->offset - 1, - GFP_NOFS); - return 0; + cache = btrfs_lookup_block_group(root->fs_info, ins->objectid); + if (!cache) { + printk(KERN_ERR "Unable to find block group for %Lu\n", 
ins->objectid); + return -ENOSPC; + } + + ret = btrfs_remove_free_space(cache, ins->objectid, ins->offset); + + return ret; } int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) { + struct btrfs_block_group_cache *cache; + maybe_lock_mutex(root); - set_extent_dirty(&root->fs_info->free_space_cache, - start, start + len - 1, GFP_NOFS); + cache = btrfs_lookup_block_group(root->fs_info, start); + if (!cache) { + printk(KERN_ERR "Unable to find block group for %Lu\n", start); + maybe_unlock_mutex(root); + return -ENOSPC; + } + btrfs_add_free_space(cache, start, len); maybe_unlock_mutex(root); return 0; } @@ -2264,8 +2319,8 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, ret = btrfs_insert_empty_items(trans, extent_root, path, keys, sizes, 2); - BUG_ON(ret); + extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); btrfs_set_extent_refs(path->nodes[0], extent_item, 1); @@ -2336,9 +2391,9 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); cache_block_group(root, block_group); - clear_extent_dirty(&root->fs_info->free_space_cache, - ins->objectid, ins->objectid + ins->offset - 1, - GFP_NOFS); + ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset); + BUG_ON(ret); + ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, ref_generation, owner, owner_offset, ins); @@ -2843,31 +2898,24 @@ out: int btrfs_free_block_groups(struct btrfs_fs_info *info) { - u64 start; - u64 end; - u64 ptr; - int ret; + struct btrfs_block_group_cache *block_group; + struct rb_node *n; mutex_lock(&info->alloc_mutex); - while(1) { - ret = find_first_extent_bit(&info->block_group_cache, 0, - &start, &end, (unsigned int)-1); - if (ret) - break; - ret = get_state_private(&info->block_group_cache, start, &ptr); - if (!ret) - kfree((void *)(unsigned long)ptr); - clear_extent_bits(&info->block_group_cache, start, - end, (unsigned int)-1, GFP_NOFS); - } - while(1) { - ret = find_first_extent_bit(&info->free_space_cache, 0, - &start, &end, EXTENT_DIRTY); - if (ret) - break; - clear_extent_dirty(&info->free_space_cache, start, - end, GFP_NOFS); - } + spin_lock(&info->block_group_cache_lock); + while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { + block_group = rb_entry(n, struct btrfs_block_group_cache, + cache_node); + + btrfs_remove_free_space_cache(block_group); + rb_erase(&block_group->cache_node, + &info->block_group_cache_tree); + spin_lock(&block_group->space_info->lock); + list_del(&block_group->list); + spin_unlock(&block_group->space_info->lock); + kfree(block_group); + } + spin_unlock(&info->block_group_cache_lock); mutex_unlock(&info->alloc_mutex); return 0; } @@ -3386,7 +3434,6 @@ int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start) u64 total_found; u64 shrink_last_byte; struct btrfs_block_group_cache *shrink_block_group; - struct btrfs_fs_info *info = root->fs_info; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf; @@ -3542,15 +3589,17 @@ next: goto out; } - clear_extent_bits(&info->block_group_cache, key.objectid, - key.objectid + key.offset - 1, - (unsigned int)-1, GFP_NOFS); - - - clear_extent_bits(&info->free_space_cache, - key.objectid, key.objectid + key.offset - 1, - (unsigned int)-1, GFP_NOFS); + spin_lock(&root->fs_info->block_group_cache_lock); + rb_erase(&shrink_block_group->cache_node, + &root->fs_info->block_group_cache_tree); + 
spin_unlock(&root->fs_info->block_group_cache_lock); + ret = btrfs_remove_free_space(shrink_block_group, key.objectid, + key.offset); + if (ret) { + btrfs_end_transaction(trans, root); + goto out; + } /* memset(shrink_block_group, 0, sizeof(*shrink_block_group)); kfree(shrink_block_group); @@ -3566,9 +3615,9 @@ next: /* the code to unpin extents might set a few bits in the free * space cache for this range again */ - clear_extent_bits(&info->free_space_cache, - key.objectid, key.objectid + key.offset - 1, - (unsigned int)-1, GFP_NOFS); + /* XXX? */ + ret = btrfs_remove_free_space(shrink_block_group, key.objectid, + key.offset); out: btrfs_free_path(path); mutex_unlock(&root->fs_info->alloc_mutex); @@ -3616,16 +3665,13 @@ int btrfs_read_block_groups(struct btrfs_root *root) { struct btrfs_path *path; int ret; - int bit; struct btrfs_block_group_cache *cache; struct btrfs_fs_info *info = root->fs_info; struct btrfs_space_info *space_info; - struct extent_io_tree *block_group_cache; struct btrfs_key key; struct btrfs_key found_key; struct extent_buffer *leaf; - block_group_cache = &info->block_group_cache; root = info->extent_root; key.objectid = 0; key.offset = 0; @@ -3653,6 +3699,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) } spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->list); read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), sizeof(cache->item)); @@ -3661,31 +3708,19 @@ int btrfs_read_block_groups(struct btrfs_root *root) key.objectid = found_key.objectid + found_key.offset; btrfs_release_path(root, path); cache->flags = btrfs_block_group_flags(&cache->item); - bit = 0; - if (cache->flags & BTRFS_BLOCK_GROUP_DATA) { - bit = BLOCK_GROUP_DATA; - } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { - bit = BLOCK_GROUP_SYSTEM; - } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) { - bit = BLOCK_GROUP_METADATA; - } - set_avail_alloc_bits(info, cache->flags); ret = update_space_info(info, cache->flags, found_key.offset, btrfs_block_group_used(&cache->item), &space_info); BUG_ON(ret); cache->space_info = space_info; + spin_lock(&space_info->lock); + list_add(&cache->list, &space_info->block_groups); + spin_unlock(&space_info->lock); + + ret = btrfs_add_block_group_cache(root->fs_info, cache); + BUG_ON(ret); - /* use EXTENT_LOCKED to prevent merging */ - set_extent_bits(block_group_cache, found_key.objectid, - found_key.objectid + found_key.offset - 1, - EXTENT_LOCKED, GFP_NOFS); - set_state_private(block_group_cache, found_key.objectid, - (unsigned long)cache); - set_extent_bits(block_group_cache, found_key.objectid, - found_key.objectid + found_key.offset - 1, - bit | EXTENT_LOCKED, GFP_NOFS); if (key.objectid >= btrfs_super_total_bytes(&info->super_copy)) break; @@ -3703,22 +3738,22 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 size) { int ret; - int bit = 0; struct btrfs_root *extent_root; struct btrfs_block_group_cache *cache; - struct extent_io_tree *block_group_cache; WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); extent_root = root->fs_info->extent_root; - block_group_cache = &root->fs_info->block_group_cache; root->fs_info->last_trans_new_blockgroup = trans->transid; cache = kzalloc(sizeof(*cache), GFP_NOFS); - BUG_ON(!cache); + if (!cache) + return -ENOMEM; + cache->key.objectid = chunk_offset; cache->key.offset = size; spin_lock_init(&cache->lock); + INIT_LIST_HEAD(&cache->list); btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY); btrfs_set_block_group_used(&cache->item, bytes_used); @@ 
-3729,16 +3764,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, ret = update_space_info(root->fs_info, cache->flags, size, bytes_used, &cache->space_info); BUG_ON(ret); + spin_lock(&cache->space_info->lock); + list_add(&cache->list, &cache->space_info->block_groups); + spin_unlock(&cache->space_info->lock); - bit = block_group_state_bits(type); - set_extent_bits(block_group_cache, chunk_offset, - chunk_offset + size - 1, - EXTENT_LOCKED, GFP_NOFS); - set_state_private(block_group_cache, chunk_offset, - (unsigned long)cache); - set_extent_bits(block_group_cache, chunk_offset, - chunk_offset + size - 1, - bit | EXTENT_LOCKED, GFP_NOFS); + ret = btrfs_add_block_group_cache(root->fs_info, cache); + BUG_ON(ret); ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item, sizeof(cache->item)); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 319a0c7a4a5..8624f3e8803 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2634,6 +2634,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, if (eb) { atomic_inc(&eb->refs); spin_unlock(&tree->buffer_lock); + mark_page_accessed(eb->first_page); return eb; } spin_unlock(&tree->buffer_lock); @@ -2713,6 +2714,9 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, atomic_inc(&eb->refs); spin_unlock(&tree->buffer_lock); + if (eb) + mark_page_accessed(eb->first_page); + return eb; } EXPORT_SYMBOL(find_extent_buffer); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c new file mode 100644 index 00000000000..01c26e8ae55 --- /dev/null +++ b/fs/btrfs/free-space-cache.c @@ -0,0 +1,415 @@ +/* + * Copyright (C) 2008 Red Hat. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include +#include "ctree.h" + +static int tree_insert_offset(struct rb_root *root, u64 offset, + struct rb_node *node) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct btrfs_free_space *info; + + while (*p) { + parent = *p; + info = rb_entry(parent, struct btrfs_free_space, offset_index); + + if (offset < info->offset) + p = &(*p)->rb_left; + else if (offset > info->offset) + p = &(*p)->rb_right; + else + return -EEXIST; + } + + rb_link_node(node, parent, p); + rb_insert_color(node, root); + + return 0; +} + +static int tree_insert_bytes(struct rb_root *root, u64 bytes, + struct rb_node *node) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct btrfs_free_space *info; + + while (*p) { + parent = *p; + info = rb_entry(parent, struct btrfs_free_space, bytes_index); + + if (bytes < info->bytes) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + rb_link_node(node, parent, p); + rb_insert_color(node, root); + + return 0; +} + +/* + * searches the tree for the given offset. If contains is set we will return + * the free space that contains the given offset. 
If contains is not set we + * will return the free space that starts at or after the given offset and is + * at least bytes long. + */ +static struct btrfs_free_space *tree_search_offset(struct rb_root *root, + u64 offset, u64 bytes, + int contains) +{ + struct rb_node *n = root->rb_node; + struct btrfs_free_space *entry, *ret = NULL; + + while (n) { + entry = rb_entry(n, struct btrfs_free_space, offset_index); + + if (offset < entry->offset) { + if (!contains && + (!ret || entry->offset < ret->offset) && + (bytes <= entry->bytes)) + ret = entry; + n = n->rb_left; + } else if (offset > entry->offset) { + if (contains && + (entry->offset + entry->bytes - 1) >= offset) { + ret = entry; + break; + } + n = n->rb_right; + } else { + if (bytes > entry->bytes) { + n = n->rb_right; + continue; + } + ret = entry; + break; + } + } + + return ret; +} + +/* + * return a chunk at least bytes size, as close to offset as we can get. + */ +static struct btrfs_free_space *tree_search_bytes(struct rb_root *root, + u64 offset, u64 bytes) +{ + struct rb_node *n = root->rb_node; + struct btrfs_free_space *entry, *ret = NULL; + + while (n) { + entry = rb_entry(n, struct btrfs_free_space, bytes_index); + + if (bytes < entry->bytes) { + /* + * We prefer to get a hole size as close to the size we + * are asking for so we don't take small slivers out of + * huge holes, but we also want to get as close to the + * offset as possible so we don't have a whole lot of + * fragmentation. + */ + if (offset <= entry->offset) { + if (!ret) + ret = entry; + else if (entry->bytes < ret->bytes) + ret = entry; + else if (entry->offset < ret->offset) + ret = entry; + } + n = n->rb_left; + } else if (bytes > entry->bytes) { + n = n->rb_right; + } else { + /* + * OK, we may have multiple chunks of the wanted size, + * so we don't want to take the first one we find, we + * want to take the one closest to our given offset, so + * keep searching just in case there's a better match. 
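+ * e.g. with equal-sized 8K chunks at offsets 0 and 64K, a search + * for 8K near offset 60K walks past the chunk at 0 and returns + * the one at 64K. 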
+ */ + n = n->rb_right; + if (offset > entry->offset) + continue; + else if (!ret || entry->offset < ret->offset) + ret = entry; + } + } + + return ret; +} + +static void unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + rb_erase(&info->offset_index, &block_group->free_space_offset); + rb_erase(&info->bytes_index, &block_group->free_space_bytes); +} + +static int link_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + int ret = 0; + + ret = tree_insert_offset(&block_group->free_space_offset, info->offset, + &info->offset_index); + if (ret) + return ret; + + ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes, + &info->bytes_index); + if (ret) + return ret; + + return ret; +} + +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + struct btrfs_free_space *right_info; + struct btrfs_free_space *left_info; + struct btrfs_free_space *info = NULL; + struct btrfs_free_space *alloc_info; + int ret = 0; + + alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!alloc_info) + return -ENOMEM; + + /* + * first we want to see if there is free space adjacent to the range we + * are adding; if there is, remove that struct and add a new one to + * cover the entire range + */ 
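+ /* e.g. when [0,4K) and [8K,12K) are already free, adding [4K,8K) + * collapses all three into a single [0,12K) entry */ 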
+ spin_lock(&block_group->lock); + + right_info = tree_search_offset(&block_group->free_space_offset, + offset+bytes, 0, 1); + left_info = tree_search_offset(&block_group->free_space_offset, + offset-1, 0, 1); + + if (right_info && right_info->offset == offset+bytes) { + unlink_free_space(block_group, right_info); + info = right_info; + info->offset = offset; + info->bytes += bytes; + } else if (right_info && right_info->offset != offset+bytes) { + printk(KERN_ERR "adding space in the middle of an existing " + "free space area. existing: offset=%Lu, bytes=%Lu. " + "new: offset=%Lu, bytes=%Lu\n", right_info->offset, + right_info->bytes, offset, bytes); + BUG(); + } + + if (left_info) { + unlink_free_space(block_group, left_info); + + if (unlikely((left_info->offset + left_info->bytes) != + offset)) { + printk(KERN_ERR "free space to the left of new free " + "space isn't quite right. existing: offset=%Lu," + " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n", + left_info->offset, left_info->bytes, offset, + bytes); + BUG(); + } + + if (info) { + info->offset = left_info->offset; + info->bytes += left_info->bytes; + kfree(left_info); + } else { + info = left_info; + info->bytes += bytes; + } + } + + if (info) { + ret = link_free_space(block_group, info); + if (!ret) + info = NULL; + goto out; + } + + info = alloc_info; + alloc_info = NULL; + info->offset = offset; + info->bytes = bytes; + + ret = link_free_space(block_group, info); + if (ret) + kfree(info); +out: + spin_unlock(&block_group->lock); + if (ret) { + printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret); + if (ret == -EEXIST) + BUG(); + } + + kfree(alloc_info); + + return ret; +} + +int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + struct btrfs_free_space *info; + int ret = 0; + + spin_lock(&block_group->lock); + info = tree_search_offset(&block_group->free_space_offset, offset, 0, + 1); + + if (info && info->offset == offset) { + if (info->bytes < bytes) { + printk(KERN_ERR "Found free space at %Lu, size %Lu, " + "trying to use %Lu\n", + info->offset, info->bytes, bytes); + WARN_ON(1); + ret = -EINVAL; + goto out; + } + + unlink_free_space(block_group, info); + + if (info->bytes == bytes) { + kfree(info); + goto out; + } + + info->offset += bytes; + info->bytes -= bytes; + + ret = link_free_space(block_group, info); + BUG_ON(ret); + } else { + WARN_ON(1); + } +out: + spin_unlock(&block_group->lock); + return ret; +} + +void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, + u64 bytes) +{ + struct btrfs_free_space *info; + struct rb_node *n; + int count = 0; + + for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { + info = rb_entry(n, struct btrfs_free_space, offset_index); + if (info->bytes >= bytes) + count++; + /* printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset, + * info->bytes); */ + } + printk(KERN_INFO "%d blocks of free space at or bigger than %Lu bytes\n", + count, bytes); +} + +u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space *info; + struct rb_node *n; + u64 ret = 0; + + for (n = rb_first(&block_group->free_space_offset); n; + n = rb_next(n)) { + info = rb_entry(n, struct btrfs_free_space, offset_index); + ret += info->bytes; + } + + return ret; +} + +void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) +{ + struct btrfs_free_space *info; + struct rb_node *node; + + spin_lock(&block_group->lock); + while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { + info = rb_entry(node, struct btrfs_free_space, bytes_index); + unlink_free_space(block_group, info); + kfree(info); + if (need_resched()) { + spin_unlock(&block_group->lock); + cond_resched(); + spin_lock(&block_group->lock); + } + } + spin_unlock(&block_group->lock); +} + +struct btrfs_free_space *btrfs_find_free_space_offset(struct + btrfs_block_group_cache + *block_group, u64 offset, + u64 bytes) +{ + struct btrfs_free_space *ret; + + spin_lock(&block_group->lock); + ret = tree_search_offset(&block_group->free_space_offset, offset, + bytes, 0); + spin_unlock(&block_group->lock); + + return ret; +} + +struct btrfs_free_space *btrfs_find_free_space_bytes(struct + btrfs_block_group_cache + *block_group, u64 offset, + u64 bytes) +{ + struct btrfs_free_space *ret; + + spin_lock(&block_group->lock); + + ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes); +
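/* tree_search_bytes() picks the smallest chunk that is still big + * enough, preferring chunks close to the given offset; see the + * comments in that helper above */ +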
spin_unlock(&block_group->lock); + + return ret; +} + +struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache + *block_group, u64 offset, + u64 bytes) +{ + struct btrfs_free_space *ret; + + spin_lock(&block_group->lock); + ret = tree_search_offset(&block_group->free_space_offset, offset, + bytes, 0); + if (!ret) + ret = tree_search_bytes(&block_group->free_space_bytes, + offset, bytes); + + spin_unlock(&block_group->lock); + + return ret; +} diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 12c1c0530f3..65b4f864b0d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -141,7 +141,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) while(num_bytes > 0) { cur_alloc_size = min(num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, - root->sectorsize, 0, 0, + root->sectorsize, 0, alloc_hint, (u64)-1, &ins, 1); if (ret) { WARN_ON(1); @@ -558,7 +558,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans->transid, inode->i_ino, ordered_extent->file_offset, &ins); BUG_ON(ret); - mutex_lock(&BTRFS_I(inode)->extent_mutex); ret = btrfs_drop_extents(trans, root, inode, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 1546fa6f4f7..b9e5c2d82dd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -64,8 +64,8 @@ static void lock_chunks(struct btrfs_root *root) static void unlock_chunks(struct btrfs_root *root) { - mutex_unlock(&root->fs_info->alloc_mutex); mutex_unlock(&root->fs_info->chunk_mutex); + mutex_unlock(&root->fs_info->alloc_mutex); } int btrfs_cleanup_fs_uuids(void) @@ -1668,8 +1668,13 @@ again: else min_free = calc_size; - /* we add 1MB because we never use the first 1MB of the device */ - min_free += 1024 * 1024; + /* + * we add 1MB because we never use the first 1MB of the device, unless + * we've looped, in which case we are likely allocating the maximum + * amount of space left already + */ + if (!looped) + min_free += 1024 * 1024; /* build a private list of devices we will allocate from */ while(index < num_stripes) { -- cgit v1.2.3-70-g09d2 From 31840ae1a6b433ca0e6a8d341756ff478bbf959e Mon Sep 17 00:00:00 2001 From: Zheng Yan Date: Tue, 23 Sep 2008 13:14:14 -0400 Subject: Btrfs: Full back reference support This patch makes the back reference system explicitly record the location of the parent node for all types of extents. The location of the parent node is placed in the offset field of the backref key. Every time a tree block is balanced, the back references for the affected lower level extents are updated. 
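For example, when items are pushed from one leaf to another during balancing, the back references for the extents those items point to are updated so that their key offset records the new leaf's block number. 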
Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 228 ++++++---- fs/btrfs/ctree.h | 62 ++- fs/btrfs/disk-io.c | 4 +- fs/btrfs/extent-tree.c | 1143 +++++++++++++++++++++++++++++------------------- fs/btrfs/extent_io.c | 3 +- fs/btrfs/file.c | 121 +++-- fs/btrfs/inode.c | 57 ++- fs/btrfs/ioctl.c | 57 +-- fs/btrfs/print-tree.c | 5 +- fs/btrfs/tree-log.c | 108 ++--- 10 files changed, 1066 insertions(+), 722 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6f467901246..50aea8cb653 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -125,7 +125,6 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, u32 nritems; int ret = 0; int level; - struct btrfs_key first_key; struct btrfs_root *new_root; new_root = kmalloc(sizeof(*new_root), GFP_NOFS); @@ -141,18 +140,10 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, level = btrfs_header_level(buf); nritems = btrfs_header_nritems(buf); - if (nritems) { - if (level == 0) - btrfs_item_key_to_cpu(buf, &first_key, 0); - else - btrfs_node_key_to_cpu(buf, &first_key, 0); - } else { - first_key.objectid = 0; - } - cow = btrfs_alloc_free_block(trans, new_root, buf->len, - new_root_objectid, - trans->transid, first_key.objectid, - level, buf->start, 0); + + cow = btrfs_alloc_free_block(trans, new_root, buf->len, 0, + new_root_objectid, trans->transid, + level, buf->start, 0); if (IS_ERR(cow)) { kfree(new_root); return PTR_ERR(cow); @@ -165,7 +156,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN); WARN_ON(btrfs_header_generation(buf) > trans->transid); - ret = btrfs_inc_ref(trans, new_root, buf, 0); + ret = btrfs_inc_ref(trans, new_root, buf, cow, NULL); kfree(new_root); if (ret) @@ -184,39 +175,31 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, u64 search_start, u64 empty_size, u64 prealloc_dest) { - u64 root_gen; + u64 parent_start; struct extent_buffer *cow; u32 nritems; int ret = 0; int different_trans = 0; int level; int unlock_orig = 0; - struct btrfs_key first_key; if (*cow_ret == buf) unlock_orig = 1; WARN_ON(!btrfs_tree_locked(buf)); - if (root->ref_cows) { - root_gen = trans->transid; - } else { - root_gen = 0; - } + if (parent) + parent_start = parent->start; + else + parent_start = 0; + WARN_ON(root->ref_cows && trans->transid != root->fs_info->running_transaction->transid); WARN_ON(root->ref_cows && trans->transid != root->last_trans); level = btrfs_header_level(buf); nritems = btrfs_header_nritems(buf); - if (nritems) { - if (level == 0) - btrfs_item_key_to_cpu(buf, &first_key, 0); - else - btrfs_node_key_to_cpu(buf, &first_key, 0); - } else { - first_key.objectid = 0; - } + if (prealloc_dest) { struct btrfs_key ins; @@ -224,19 +207,19 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, ins.offset = buf->len; ins.type = BTRFS_EXTENT_ITEM_KEY; - ret = btrfs_alloc_reserved_extent(trans, root, + ret = btrfs_alloc_reserved_extent(trans, root, parent_start, root->root_key.objectid, - root_gen, level, - first_key.objectid, + trans->transid, level, 0, &ins); BUG_ON(ret); cow = btrfs_init_new_buffer(trans, root, prealloc_dest, buf->len); } else { cow = btrfs_alloc_free_block(trans, root, buf->len, + parent_start, root->root_key.objectid, - root_gen, first_key.objectid, - level, search_start, empty_size); + trans->transid, level, + search_start, empty_size); } if (IS_ERR(cow)) return PTR_ERR(cow); @@ -249,17 +232,23 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, 
WARN_ON(btrfs_header_generation(buf) > trans->transid); if (btrfs_header_generation(buf) != trans->transid) { + u32 nr_extents; different_trans = 1; - ret = btrfs_inc_ref(trans, root, buf, 1); + ret = btrfs_inc_ref(trans, root, buf, cow, &nr_extents); if (ret) return ret; + + ret = btrfs_cache_ref(trans, root, buf, nr_extents); + WARN_ON(ret); } else { + ret = btrfs_update_ref(trans, root, buf, cow, 0, nritems); + if (ret) + return ret; clean_tree_block(trans, root, buf); } if (buf == root->node) { WARN_ON(parent && parent != buf); - root_gen = btrfs_header_generation(buf); spin_lock(&root->node_lock); root->node = cow; @@ -268,13 +257,14 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, if (buf != root->commit_root) { btrfs_free_extent(trans, root, buf->start, - buf->len, root->root_key.objectid, - root_gen, 0, 0, 1); + buf->len, buf->start, + root->root_key.objectid, + btrfs_header_generation(buf), + 0, 0, 1); } free_extent_buffer(buf); add_root_to_dirty_list(root); } else { - root_gen = btrfs_header_generation(parent); btrfs_set_node_blockptr(parent, parent_slot, cow->start); WARN_ON(trans->transid == 0); @@ -283,8 +273,8 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(parent); WARN_ON(btrfs_header_generation(parent) != trans->transid); btrfs_free_extent(trans, root, buf->start, buf->len, - btrfs_header_owner(parent), root_gen, - 0, 0, 1); + parent_start, btrfs_header_owner(parent), + btrfs_header_generation(parent), 0, 0, 1); } if (unlock_orig) btrfs_tree_unlock(buf); @@ -831,6 +821,12 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, root->node = child; spin_unlock(&root->node_lock); + ret = btrfs_update_extent_ref(trans, root, child->start, + mid->start, child->start, + root->root_key.objectid, + trans->transid, level - 1, 0); + BUG_ON(ret); + add_root_to_dirty_list(root); btrfs_tree_unlock(child); path->locks[level] = 0; @@ -840,7 +836,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, /* once for the path */ free_extent_buffer(mid); ret = btrfs_free_extent(trans, root, mid->start, mid->len, - root->root_key.objectid, + mid->start, root->root_key.objectid, btrfs_header_generation(mid), 0, 0, 1); /* once for the root ptr */ free_extent_buffer(mid); @@ -905,7 +901,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, if (wret) ret = wret; wret = btrfs_free_extent(trans, root, bytenr, - blocksize, + blocksize, parent->start, btrfs_header_owner(parent), generation, 0, 0, 1); if (wret) @@ -954,6 +950,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, if (wret) ret = wret; wret = btrfs_free_extent(trans, root, bytenr, blocksize, + parent->start, btrfs_header_owner(parent), root_gen, 0, 0, 1); if (wret) @@ -1499,6 +1496,41 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans, return ret; } +/* + * update item key. + * + * This function isn't completely safe. 
It is the caller's responsibility + * to ensure that the new key won't break the order + */ +int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *new_key) +{ + struct btrfs_disk_key disk_key; + struct extent_buffer *eb; + int slot; + + eb = path->nodes[0]; + slot = path->slots[0]; + if (slot > 0) { + btrfs_item_key(eb, &disk_key, slot - 1); + if (comp_keys(&disk_key, new_key) >= 0) + return -1; + } + if (slot < btrfs_header_nritems(eb) - 1) { + btrfs_item_key(eb, &disk_key, slot + 1); + if (comp_keys(&disk_key, new_key) <= 0) + return -1; + } + + btrfs_cpu_key_to_disk(&disk_key, new_key); + btrfs_set_item_key(eb, &disk_key, slot); + btrfs_mark_buffer_dirty(eb); + if (slot == 0) + fixup_low_keys(trans, root, path, &disk_key, 1); + return 0; +} + /* * try to push data from one node into the next node left in the * tree. @@ -1558,6 +1590,10 @@ static int push_node_left(struct btrfs_trans_handle *trans, btrfs_set_header_nritems(dst, dst_nritems + push_items); btrfs_mark_buffer_dirty(src); btrfs_mark_buffer_dirty(dst); + + ret = btrfs_update_ref(trans, root, src, dst, dst_nritems, push_items); + BUG_ON(ret); + return ret; } @@ -1619,6 +1655,10 @@ static int balance_node_right(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(src); btrfs_mark_buffer_dirty(dst); + + ret = btrfs_update_ref(trans, root, src, dst, 0, push_items); + BUG_ON(ret); + return ret; } @@ -1633,30 +1673,24 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { - u64 root_gen; u64 lower_gen; struct extent_buffer *lower; struct extent_buffer *c; struct extent_buffer *old; struct btrfs_disk_key lower_key; + int ret; BUG_ON(path->nodes[level]); BUG_ON(path->nodes[level-1] != root->node); - if (root->ref_cows) - root_gen = trans->transid; - else - root_gen = 0; - lower = path->nodes[level-1]; if (level == 1) btrfs_item_key(lower, &lower_key, 0); else btrfs_node_key(lower, &lower_key, 0); - c = btrfs_alloc_free_block(trans, root, root->nodesize, - root->root_key.objectid, - root_gen, le64_to_cpu(lower_key.objectid), + c = btrfs_alloc_free_block(trans, root, root->nodesize, 0, + root->root_key.objectid, trans->transid, level, root->node->start, 0); if (IS_ERR(c)) return PTR_ERR(c); @@ -1679,7 +1713,7 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, btrfs_set_node_key(c, &lower_key, 0); btrfs_set_node_blockptr(c, 0, lower->start); lower_gen = btrfs_header_generation(lower); - WARN_ON(lower_gen == 0); + WARN_ON(lower_gen != trans->transid); btrfs_set_node_ptr_generation(c, 0, lower_gen); @@ -1690,6 +1724,12 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, root->node = c; spin_unlock(&root->node_lock); + ret = btrfs_update_extent_ref(trans, root, lower->start, + lower->start, c->start, + root->root_key.objectid, + trans->transid, level - 1, 0); + BUG_ON(ret); + /* the super has an extra ref to root->node */ free_extent_buffer(old); @@ -1698,20 +1738,6 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, path->nodes[level] = c; path->locks[level] = 1; path->slots[level] = 0; - - if (root->ref_cows && lower_gen != trans->transid) { - struct btrfs_path *back_path = btrfs_alloc_path(); - int ret; - mutex_lock(&root->fs_info->alloc_mutex); - ret = btrfs_insert_extent_backref(trans, - root->fs_info->extent_root, - path, lower->start, - root->root_key.objectid, - trans->transid, 0, 0); - BUG_ON(ret); - 
mutex_unlock(&root->fs_info->alloc_mutex); - btrfs_free_path(back_path); - } return 0; } @@ -1766,7 +1792,6 @@ static noinline int split_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { - u64 root_gen; struct extent_buffer *c; struct extent_buffer *split; struct btrfs_disk_key disk_key; @@ -1793,17 +1818,11 @@ static noinline int split_node(struct btrfs_trans_handle *trans, } c_nritems = btrfs_header_nritems(c); - if (root->ref_cows) - root_gen = trans->transid; - else - root_gen = 0; - btrfs_node_key(c, &disk_key, 0); split = btrfs_alloc_free_block(trans, root, root->nodesize, - root->root_key.objectid, - root_gen, - btrfs_disk_key_objectid(&disk_key), - level, c->start, 0); + path->nodes[level + 1]->start, + root->root_key.objectid, + trans->transid, level, c->start, 0); if (IS_ERR(split)) return PTR_ERR(split); @@ -1840,6 +1859,9 @@ static noinline int split_node(struct btrfs_trans_handle *trans, if (wret) ret = wret; + ret = btrfs_update_ref(trans, root, c, split, 0, c_nritems - mid); + BUG_ON(ret); + if (path->slots[level] >= mid) { path->slots[level] -= mid; btrfs_tree_unlock(c); @@ -1955,10 +1977,23 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root else nr = 1; + if (path->slots[0] >= left_nritems) + push_space += data_size + sizeof(*item); + i = left_nritems - 1; while (i >= nr) { item = btrfs_item_nr(left, i); + if (!empty && push_items > 0) { + if (path->slots[0] > i) + break; + if (path->slots[0] == i) { + int space = btrfs_leaf_free_space(root, left); + if (space + push_space * 2 > free_space) + break; + } + } + if (path->slots[0] == i) push_space += data_size + sizeof(*item); @@ -1973,6 +2008,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root this_item_size = btrfs_item_size(left, item); if (this_item_size + sizeof(*item) + push_space > free_space) break; + push_items++; push_space += this_item_size + sizeof(*item); if (i == 0) @@ -2046,6 +2082,9 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_mark_buffer_dirty(left); btrfs_mark_buffer_dirty(right); + ret = btrfs_update_ref(trans, root, left, right, 0, push_items); + BUG_ON(ret); + btrfs_item_key(right, &disk_key, 0); btrfs_set_node_key(upper, &disk_key, slot + 1); btrfs_mark_buffer_dirty(upper); @@ -2147,6 +2186,16 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root KM_USER1); } + if (!empty && push_items > 0) { + if (path->slots[0] < i) + break; + if (path->slots[0] == i) { + int space = btrfs_leaf_free_space(root, right); + if (space + push_space * 2 > free_space) + break; + } + } + if (path->slots[0] == i) push_space += data_size + sizeof(*item); @@ -2255,6 +2304,10 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root if (right_nritems) btrfs_mark_buffer_dirty(right); + ret = btrfs_update_ref(trans, root, right, left, + old_left_nritems, push_items); + BUG_ON(ret); + btrfs_item_key(right, &disk_key, 0); wret = fixup_low_keys(trans, root, path, &disk_key, 1); if (wret) @@ -2294,7 +2347,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_path *path, int data_size, int extend) { - u64 root_gen; struct extent_buffer *l; u32 nritems; int mid; @@ -2313,11 +2365,6 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, if (extend) space_needed = data_size; - if (root->ref_cows) - root_gen = trans->transid; - else - root_gen = 0; - /* first try to make some room by pushing left 
and right */ if (ins_key->type != BTRFS_DIR_ITEM_KEY) { wret = push_leaf_right(trans, root, path, data_size, 0); @@ -2348,13 +2395,10 @@ again: nritems = btrfs_header_nritems(l); mid = (nritems + 1)/ 2; - btrfs_item_key(l, &disk_key, 0); - right = btrfs_alloc_free_block(trans, root, root->leafsize, - root->root_key.objectid, - root_gen, - le64_to_cpu(disk_key.objectid), - 0, l->start, 0); + path->nodes[1]->start, + root->root_key.objectid, + trans->transid, 0, l->start, 0); if (IS_ERR(right)) { BUG_ON(1); return PTR_ERR(right); @@ -2485,6 +2529,9 @@ again: btrfs_mark_buffer_dirty(l); BUG_ON(path->slots[0] != slot); + ret = btrfs_update_ref(trans, root, l, right, 0, nritems); + BUG_ON(ret); + if (mid <= slot) { btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); @@ -2956,6 +3003,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, ret = wret; wret = btrfs_free_extent(trans, root, leaf->start, leaf->len, + path->nodes[1]->start, btrfs_header_owner(path->nodes[1]), root_gen, 0, 0, 1); if (wret) @@ -3007,7 +3055,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, free_extent_buffer(leaf); wret = btrfs_free_extent(trans, root, bytenr, - blocksize, + blocksize, path->nodes[1]->start, btrfs_header_owner(path->nodes[1]), root_gen, 0, 0, 1); if (wret) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 730aae3bc18..138c157bbc4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -40,7 +40,7 @@ extern struct kmem_cache *btrfs_bit_radix_cachep; extern struct kmem_cache *btrfs_path_cachep; struct btrfs_ordered_sum; -#define BTRFS_MAGIC "_B8RfS_M" +#define BTRFS_MAGIC "_B9RfS_M" #define BTRFS_ACL_NOT_CACHED ((void *)-1) @@ -81,6 +81,9 @@ struct btrfs_ordered_sum; #define BTRFS_TREE_LOG_OBJECTID -6ULL #define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL +/* dummy objectid represents multiple objectids */ +#define BTRFS_MULTIPLE_OBJECTIDS -255ULL + /* * All files have objectids in this range. */ @@ -369,6 +372,7 @@ struct btrfs_extent_ref { __le64 generation; __le64 objectid; __le64 offset; + __le32 num_refs; } __attribute__ ((__packed__)); /* dev extents record free space on individual devices. 
The owner @@ -1047,9 +1051,6 @@ btrfs_inode_otime(struct btrfs_inode_item *inode_item) BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); -/* struct btrfs_extent_item */ -BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 32); - /* struct btrfs_dev_extent */ BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, chunk_tree, 64); @@ -1070,14 +1071,20 @@ BTRFS_SETGET_FUNCS(ref_root, struct btrfs_extent_ref, root, 64); BTRFS_SETGET_FUNCS(ref_generation, struct btrfs_extent_ref, generation, 64); BTRFS_SETGET_FUNCS(ref_objectid, struct btrfs_extent_ref, objectid, 64); BTRFS_SETGET_FUNCS(ref_offset, struct btrfs_extent_ref, offset, 64); +BTRFS_SETGET_FUNCS(ref_num_refs, struct btrfs_extent_ref, num_refs, 32); BTRFS_SETGET_STACK_FUNCS(stack_ref_root, struct btrfs_extent_ref, root, 64); BTRFS_SETGET_STACK_FUNCS(stack_ref_generation, struct btrfs_extent_ref, generation, 64); BTRFS_SETGET_STACK_FUNCS(stack_ref_objectid, struct btrfs_extent_ref, objectid, 64); -BTRFS_SETGET_STACK_FUNCS(stack_ref_offset, struct btrfs_extent_ref, offset, 64); +BTRFS_SETGET_STACK_FUNCS(stack_ref_offset, struct btrfs_extent_ref, + offset, 64); +BTRFS_SETGET_STACK_FUNCS(stack_ref_num_refs, struct btrfs_extent_ref, + num_refs, 32); +/* struct btrfs_extent_item */ +BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 32); BTRFS_SETGET_STACK_FUNCS(stack_extent_refs, struct btrfs_extent_item, refs, 32); @@ -1474,8 +1481,7 @@ static inline struct dentry *fdentry(struct file *file) { } /* extent-tree.c */ -int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path, - u64 start, u64 len); +int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); int btrfs_update_pinned_extents(struct btrfs_root *root, u64 bytenr, u64 num, int pin); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, @@ -1495,10 +1501,9 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root, int data, int owner); struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u32 blocksize, + u32 blocksize, u64 parent, u64 root_objectid, u64 ref_generation, - u64 first_objectid, int level, u64 hint, u64 empty_size); @@ -1508,23 +1513,24 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 new_size); int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct btrfs_path *path, u64 bytenr, + struct btrfs_path *path, + u64 bytenr, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset); int btrfs_alloc_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u64 num_bytes, u64 min_bytes, + u64 num_bytes, u64 parent, u64 min_bytes, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset, u64 empty_size, u64 hint_byte, u64 search_end, struct btrfs_key *ins, u64 data); int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset, struct btrfs_key *ins); int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset, struct btrfs_key *ins); @@ -1535,9 +1541,16 @@ int btrfs_reserve_extent(struct btrfs_trans_handle 
*trans, u64 search_end, struct btrfs_key *ins, u64 data); int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct extent_buffer *buf, int cache_ref); -int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root - *root, u64 bytenr, u64 num_bytes, + struct extent_buffer *orig_buf, struct extent_buffer *buf, + u32 *nr_extents); +int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, + struct extent_buffer *buf, u32 nr_extents); +int btrfs_update_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct extent_buffer *orig_buf, + struct extent_buffer *buf, int start_slot, int nr); +int btrfs_free_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner_objectid, u64 owner_offset, int pin); int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len); @@ -1545,10 +1558,15 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_io_tree *unpin); int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 bytenr, u64 num_bytes, - u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset); + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, u64 parent, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset); +int btrfs_update_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 orig_parent, u64 parent, + u64 root_objectid, u64 ref_generation, + u64 owner, u64 owner_offset); int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_free_block_groups(struct btrfs_fs_info *info); @@ -1561,7 +1579,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, int btrfs_previous_item(struct btrfs_root *root, struct btrfs_path *path, u64 min_objectid, int type); - +int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct btrfs_path *path, + struct btrfs_key *new_key); struct extent_buffer *btrfs_root_node(struct btrfs_root *root); struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 25be96946a2..d35ca6a3f51 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -882,8 +882,8 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, root->ref_cows = 0; root->node = btrfs_alloc_free_block(trans, root, root->leafsize, - BTRFS_TREE_LOG_OBJECTID, - 0, 0, 0, 0, 0); + 0, BTRFS_TREE_LOG_OBJECTID, + trans->transid, 0, 0, 0); btrfs_set_header_nritems(root->node, 0); btrfs_set_header_level(root->node, 0); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 558fbe40736..5258923d621 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -29,6 +29,21 @@ #include "locking.h" #include "ref-cache.h" +#define PENDING_EXTENT_INSERT 0 +#define PENDING_EXTENT_DELETE 1 +#define PENDING_BACKREF_UPDATE 2 + +struct pending_extent_op { + int type; + u64 bytenr; + u64 num_bytes; + u64 parent; + u64 orig_parent; + u64 generation; + u64 orig_generation; + int level; +}; + static int finish_current_insert(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root); static int del_pending_extents(struct btrfs_trans_handle *trans, struct @@ -487,48 +502,15 @@ struct btrfs_block_group_cache 
*btrfs_find_block_group(struct btrfs_root *root, return ret; } -static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset) -{ - u32 high_crc = ~(u32)0; - u32 low_crc = ~(u32)0; - __le64 lenum; - lenum = cpu_to_le64(root_objectid); - high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum)); - lenum = cpu_to_le64(ref_generation); - low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); - if (owner >= BTRFS_FIRST_FREE_OBJECTID) { - lenum = cpu_to_le64(owner); - low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); - lenum = cpu_to_le64(owner_offset); - low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum)); - } - return ((u64)high_crc << 32) | (u64)low_crc; -} - -static int match_extent_ref(struct extent_buffer *leaf, - struct btrfs_extent_ref *disk_ref, - struct btrfs_extent_ref *cpu_ref) -{ - int ret; - int len; - - if (cpu_ref->objectid) - len = sizeof(*cpu_ref); - else - len = 2 * sizeof(u64); - ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref, - len); - return ret == 0; -} - /* simple helper to search for an existing extent at a given offset */ -int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path, - u64 start, u64 len) +int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) { int ret; struct btrfs_key key; + struct btrfs_path *path; + path = btrfs_alloc_path(); + BUG_ON(!path); maybe_lock_mutex(root); key.objectid = start; key.offset = len; @@ -536,72 +518,7 @@ int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path, ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 0, 0); maybe_unlock_mutex(root); - return ret; -} - -static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, u64 bytenr, - u64 root_objectid, - u64 ref_generation, u64 owner, - u64 owner_offset, int del) -{ - u64 hash; - struct btrfs_key key; - struct btrfs_key found_key; - struct btrfs_extent_ref ref; - struct extent_buffer *leaf; - struct btrfs_extent_ref *disk_ref; - int ret; - int ret2; - - btrfs_set_stack_ref_root(&ref, root_objectid); - btrfs_set_stack_ref_generation(&ref, ref_generation); - btrfs_set_stack_ref_objectid(&ref, owner); - btrfs_set_stack_ref_offset(&ref, owner_offset); - - hash = hash_extent_ref(root_objectid, ref_generation, owner, - owner_offset); - key.offset = hash; - key.objectid = bytenr; - key.type = BTRFS_EXTENT_REF_KEY; - - while (1) { - ret = btrfs_search_slot(trans, root, &key, path, - del ? 
-1 : 0, del); - if (ret < 0) - goto out; - leaf = path->nodes[0]; - if (ret != 0) { - u32 nritems = btrfs_header_nritems(leaf); - if (path->slots[0] >= nritems) { - ret2 = btrfs_next_leaf(root, path); - if (ret2) - goto out; - leaf = path->nodes[0]; - } - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != bytenr || - found_key.type != BTRFS_EXTENT_REF_KEY) - goto out; - key.offset = found_key.offset; - if (del) { - btrfs_release_path(root, path); - continue; - } - } - disk_ref = btrfs_item_ptr(path->nodes[0], - path->slots[0], - struct btrfs_extent_ref); - if (match_extent_ref(path->nodes[0], disk_ref, &ref)) { - ret = 0; - goto out; - } - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - key.offset = found_key.offset + 1; - btrfs_release_path(root, path); - } -out: + btrfs_free_path(path); return ret; } @@ -622,7 +539,7 @@ out: * File extents can be referenced by: * * - multiple snapshots, subvolumes, or different generations in one subvol - * - different files inside a single subvolume (in theory, not implemented yet) + * - different files inside a single subvolume * - different offsets inside a file (bookend extents in file.c) * * The extent ref structure has fields for: @@ -631,119 +548,284 @@ out: * - Generation number of the tree holding the reference * - objectid of the file holding the reference * - offset in the file corresponding to the key holding the reference + * - number of references held by the parent node (always 1 for tree blocks) + * + * A btree leaf may hold multiple references to a file extent. In most cases, + * these references are from the same file and the corresponding offsets inside + * the file are close together. So the inode objectid and offset in file are + * just hints; they indicate where in the btree the references + * can be found and when we can stop searching. * * When a file extent is allocated the fields are filled in: - * (root_key.objectid, trans->transid, inode objectid, offset in file) + * (root_key.objectid, trans->transid, inode objectid, offset in file, 1) * * When a leaf is cow'd new references are added for every file extent found - * in the leaf. It looks the same as the create case, but trans->transid - * will be different when the block is cow'd. + * in the leaf. It looks similar to the create case, but trans->transid will + * be different when the block is cow'd. * - * (root_key.objectid, trans->transid, inode objectid, offset in file) + * (root_key.objectid, trans->transid, inode objectid, offset in file, + * number of references in the leaf) * - * When a file extent is removed either during snapshot deletion or file - * truncation, the corresponding back reference is found - * by searching for: + * Because the inode objectid and offset in file are just hints, they are not + * used when backrefs are deleted. When a file extent is removed either + * during snapshot deletion or file truncation, we find the corresponding + * back reference and check the following fields. * - * (btrfs_header_owner(leaf), btrfs_header_generation(leaf), - * inode objectid, offset in file) + * (btrfs_header_owner(leaf), btrfs_header_generation(leaf)) * * Btree extents can be referenced by: * * - Different subvolumes * - Different generations of the same subvolume * - * Storing sufficient information for a full reverse mapping of a btree - * block would require storing the lowest key of the block in the backref, - * and it would require updating that lowest key either before write out or - * every time it changed. Instead, the objectid of the lowest key is stored - * along with the level of the tree block. This provides a hint - * about where in the btree the block can be found. Searches through the - * btree only need to look for a pointer to that block, so they stop one - * level higher than the level recorded in the backref. - * - * Some btrees do not do reference counting on their extents. These - * include the extent tree and the tree of tree roots. Backrefs for these - * trees always have a generation of zero. - * * When a tree block is created, back references are inserted: * - * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid) + * (root->root_key.objectid, trans->transid, level, 0, 1) * - * When a tree block is cow'd in a reference counted root, - * new back references are added for all the blocks it points to. - * These are of the form (trans->transid will have increased since creation): + * When a tree block is cow'd, new back references are added for all the + * blocks it points to. If the tree block isn't in a reference counted root, + * the old back references are removed. These new back references are of + * the form (trans->transid will have increased since creation): * - * (root->root_key.objectid, trans->transid, level, lowest_key_objectid) + * (root->root_key.objectid, trans->transid, level, 0, 1) * - * Because the lowest_key_objectid and the level are just hints - * they are not used when backrefs are deleted. When a backref is deleted: + * When a backref is being deleted, the following fields are checked: * * if backref was for a tree root: - * root_objectid = root->root_key.objectid + * (btrfs_header_owner(itself), btrfs_header_generation(itself)) * else - * root_objectid = btrfs_header_owner(parent) + * (btrfs_header_owner(parent), btrfs_header_generation(parent)) * - * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0) + * Back Reference Key composition: * - * Back Reference Key hashing: - * - * Back references have four fields, each 64 bits long. Unfortunately, - * This is hashed into a single 64 bit number and placed into the key offset. - * The key objectid corresponds to the first byte in the extent, and the - * key type is set to BTRFS_EXTENT_REF_KEY + * The key objectid corresponds to the first byte in the extent, the key + * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first + * byte of the parent extent. If an extent is a tree root, the key offset + * is set to the key objectid. 
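+ * + * e.g. an extent at bytenr B referenced by a parent block at bytenr P + * has backref key (B, BTRFS_EXTENT_REF_KEY, P); a tree root at bytenr B + * uses (B, BTRFS_EXTENT_REF_KEY, B). 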
 */ -int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct btrfs_path *path, u64 bytenr, - u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset) + +static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, u64 bytenr, + u64 parent, u64 ref_root, + u64 ref_generation, int del) { - u64 hash; struct btrfs_key key; - struct btrfs_extent_ref ref; - struct btrfs_extent_ref *disk_ref; + struct btrfs_extent_ref *ref; + struct extent_buffer *leaf; int ret; - btrfs_set_stack_ref_root(&ref, root_objectid); - btrfs_set_stack_ref_generation(&ref, ref_generation); - btrfs_set_stack_ref_objectid(&ref, owner); - btrfs_set_stack_ref_offset(&ref, owner_offset); + key.objectid = bytenr; + key.type = BTRFS_EXTENT_REF_KEY; + key.offset = parent; + + ret = btrfs_search_slot(trans, root, &key, path, del ? 
-1 : 0, 1); + if (ret < 0) + goto out; + if (ret > 0) { + ret = -ENOENT; + goto out; + } + + leaf = path->nodes[0]; + ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref); + if (btrfs_ref_root(leaf, ref) != ref_root || + btrfs_ref_generation(leaf, ref) != ref_generation) { + ret = -EIO; + WARN_ON(1); + goto out; + } + ret = 0; +out: + return ret; +} + +static int noinline insert_extent_backref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path, + u64 bytenr, u64 parent, + u64 ref_root, u64 ref_generation, + u64 owner_objectid, u64 owner_offset) +{ + struct btrfs_key key; + struct extent_buffer *leaf; + struct btrfs_extent_ref *ref; + u32 num_refs; + int ret; - hash = hash_extent_ref(root_objectid, ref_generation, owner, - owner_offset); - key.offset = hash; key.objectid = bytenr; key.type = BTRFS_EXTENT_REF_KEY; + key.offset = parent; - ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref)); - while (ret == -EEXIST) { - disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_extent_ref); - if (match_extent_ref(path->nodes[0], disk_ref, &ref)) + ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref)); + if (ret == 0) { + leaf = path->nodes[0]; + ref = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_extent_ref); + btrfs_set_ref_root(leaf, ref, ref_root); + btrfs_set_ref_generation(leaf, ref, ref_generation); + btrfs_set_ref_objectid(leaf, ref, owner_objectid); + btrfs_set_ref_offset(leaf, ref, owner_offset); + btrfs_set_ref_num_refs(leaf, ref, 1); + } else if (ret == -EEXIST) { + u64 existing_owner; + BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID); + leaf = path->nodes[0]; + ref = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_extent_ref); + if (btrfs_ref_root(leaf, ref) != ref_root || + btrfs_ref_generation(leaf, ref) != ref_generation) { + ret = -EIO; + WARN_ON(1); goto out; - key.offset++; - btrfs_release_path(root, path); - ret = btrfs_insert_empty_item(trans, root, path, &key, - sizeof(ref)); - } - if (ret) + } + + num_refs = btrfs_ref_num_refs(leaf, ref); + BUG_ON(num_refs == 0); + btrfs_set_ref_num_refs(leaf, ref, num_refs + 1); + + existing_owner = btrfs_ref_objectid(leaf, ref); + if (existing_owner == owner_objectid && + btrfs_ref_offset(leaf, ref) > owner_offset) { + btrfs_set_ref_offset(leaf, ref, owner_offset); + } else if (existing_owner != owner_objectid && + existing_owner != BTRFS_MULTIPLE_OBJECTIDS) { + btrfs_set_ref_objectid(leaf, ref, + BTRFS_MULTIPLE_OBJECTIDS); + btrfs_set_ref_offset(leaf, ref, 0); + } + ret = 0; + } else { goto out; - disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], - struct btrfs_extent_ref); - write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref, - sizeof(ref)); + } btrfs_mark_buffer_dirty(path->nodes[0]); out: btrfs_release_path(root, path); return ret; } +static int noinline remove_extent_backref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_path *path) +{ + struct extent_buffer *leaf; + struct btrfs_extent_ref *ref; + u32 num_refs; + int ret = 0; + + leaf = path->nodes[0]; + ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref); + num_refs = btrfs_ref_num_refs(leaf, ref); + BUG_ON(num_refs == 0); + num_refs -= 1; + if (num_refs == 0) { + ret = btrfs_del_item(trans, root, path); + } else { + btrfs_set_ref_num_refs(leaf, ref, num_refs); + btrfs_mark_buffer_dirty(leaf); + } + btrfs_release_path(root, path); + return ret; +} + +static int __btrfs_update_extent_ref(struct 
btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 orig_parent, u64 parent, + u64 orig_root, u64 ref_root, + u64 orig_generation, u64 ref_generation, + u64 owner_objectid, u64 owner_offset) +{ + int ret; + struct btrfs_root *extent_root = root->fs_info->extent_root; + struct btrfs_path *path; + + if (root == root->fs_info->extent_root) { + struct pending_extent_op *extent_op; + u64 num_bytes; + + BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL); + num_bytes = btrfs_level_size(root, (int)owner_objectid); + if (test_range_bit(&root->fs_info->extent_ins, bytenr, + bytenr + num_bytes - 1, EXTENT_LOCKED, 0)) { + u64 priv; + ret = get_state_private(&root->fs_info->extent_ins, + bytenr, &priv); + BUG_ON(ret); + extent_op = (struct pending_extent_op *) + (unsigned long)priv; + BUG_ON(extent_op->parent != orig_parent); + BUG_ON(extent_op->generation != orig_generation); + extent_op->parent = parent; + extent_op->generation = ref_generation; + } else { + extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); + BUG_ON(!extent_op); + + extent_op->type = PENDING_BACKREF_UPDATE; + extent_op->bytenr = bytenr; + extent_op->num_bytes = num_bytes; + extent_op->parent = parent; + extent_op->orig_parent = orig_parent; + extent_op->generation = ref_generation; + extent_op->orig_generation = orig_generation; + extent_op->level = (int)owner_objectid; + + set_extent_bits(&root->fs_info->extent_ins, + bytenr, bytenr + num_bytes - 1, + EXTENT_LOCKED, GFP_NOFS); + set_state_private(&root->fs_info->extent_ins, + bytenr, (unsigned long)extent_op); + } + return 0; + } + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + ret = lookup_extent_backref(trans, extent_root, path, + bytenr, orig_parent, orig_root, + orig_generation, 1); + if (ret) + goto out; + ret = remove_extent_backref(trans, extent_root, path); + if (ret) + goto out; + ret = insert_extent_backref(trans, extent_root, path, bytenr, + parent, ref_root, ref_generation, + owner_objectid, owner_offset); + BUG_ON(ret); + finish_current_insert(trans, extent_root); + del_pending_extents(trans, extent_root); +out: + btrfs_free_path(path); + return ret; +} + +int btrfs_update_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 orig_parent, u64 parent, + u64 ref_root, u64 ref_generation, + u64 owner_objectid, u64 owner_offset) +{ + int ret; + if (ref_root == BTRFS_TREE_LOG_OBJECTID && + owner_objectid < BTRFS_FIRST_FREE_OBJECTID) + return 0; + maybe_lock_mutex(root); + ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent, + parent, ref_root, ref_root, + ref_generation, ref_generation, + owner_objectid, owner_offset); + maybe_unlock_mutex(root); + return ret; +} + static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 bytenr, u64 num_bytes, - u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset) + struct btrfs_root *root, u64 bytenr, + u64 orig_parent, u64 parent, + u64 orig_root, u64 ref_root, + u64 orig_generation, u64 ref_generation, + u64 owner_objectid, u64 owner_offset) { struct btrfs_path *path; int ret; @@ -752,24 +834,28 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_extent_item *item; u32 refs; - WARN_ON(num_bytes < root->sectorsize); path = btrfs_alloc_path(); if (!path) return -ENOMEM; path->reada = 1; key.objectid = bytenr; - btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); - key.offset = num_bytes; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)-1; + ret = 
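/* key.offset is (u64)-1: the search lands just past the last item for
	 * this bytenr, and the slot is walked back one step below */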
btrfs_search_slot(trans, root->fs_info->extent_root, &key, path, 0, 1); if (ret < 0) return ret; - if (ret != 0) { - BUG(); - } - BUG_ON(ret != 0); + BUG_ON(ret == 0 || path->slots[0] == 0); + + path->slots[0]--; l = path->nodes[0]; + + btrfs_item_key_to_cpu(l, &key, path->slots[0]); + BUG_ON(key.objectid != bytenr); + BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY); + item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item); refs = btrfs_extent_refs(l, item); btrfs_set_extent_refs(l, item, refs + 1); @@ -778,9 +864,10 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, btrfs_release_path(root->fs_info->extent_root, path); path->reada = 1; - ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root, - path, bytenr, root_objectid, - ref_generation, owner, owner_offset); + ret = insert_extent_backref(trans, root->fs_info->extent_root, + path, bytenr, parent, + ref_root, ref_generation, + owner_objectid, owner_offset); BUG_ON(ret); finish_current_insert(trans, root->fs_info->extent_root); del_pending_extents(trans, root->fs_info->extent_root); @@ -790,18 +877,20 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, } int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 bytenr, u64 num_bytes, - u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset) + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, u64 parent, + u64 ref_root, u64 ref_generation, + u64 owner_objectid, u64 owner_offset) { int ret; - - mutex_lock(&root->fs_info->alloc_mutex); - ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, - root_objectid, ref_generation, - owner, owner_offset); - mutex_unlock(&root->fs_info->alloc_mutex); + if (ref_root == BTRFS_TREE_LOG_OBJECTID && + owner_objectid < BTRFS_FIRST_FREE_OBJECTID) + return 0; + maybe_lock_mutex(root); + ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent, + 0, ref_root, 0, ref_generation, + owner_objectid, owner_offset); + maybe_unlock_mutex(root); return ret; } @@ -813,9 +902,9 @@ int btrfs_extent_post_op(struct btrfs_trans_handle *trans, return 0; } -static int lookup_extent_ref(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 bytenr, - u64 num_bytes, u32 *refs) +int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u32 *refs) { struct btrfs_path *path; int ret; @@ -846,7 +935,6 @@ out: return 0; } - static int get_reference_status(struct btrfs_root *root, u64 bytenr, u64 parent_gen, u64 ref_objectid, u64 *min_generation, u32 *ref_count) @@ -863,7 +951,7 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, int ret; key.objectid = bytenr; - key.offset = 0; + key.offset = (u64)-1; key.type = BTRFS_EXTENT_ITEM_KEY; path = btrfs_alloc_path(); @@ -872,7 +960,10 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, if (ret < 0) goto out; BUG_ON(ret == 0); + if (ret < 0 || path->slots[0] == 0) + goto out; + path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); @@ -909,7 +1000,7 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, struct btrfs_extent_ref); ref_generation = btrfs_ref_generation(leaf, ref_item); /* - * For (parent_gen > 0 && parent_gen > ref_gen): + * For (parent_gen > 0 && parent_gen > ref_generation): * * we reach here through the oldest root, therefore * all other reference from same snapshot should have @@ -919,8 +1010,7 @@ static int get_reference_status(struct 
btrfs_root *root, u64 bytenr, (parent_gen > 0 && parent_gen > ref_generation) || (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID && ref_objectid != btrfs_ref_objectid(leaf, ref_item))) { - if (ref_count) - *ref_count = 2; + *ref_count = 2; break; } @@ -1020,80 +1110,29 @@ out: return ret; } -int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, - struct extent_buffer *buf, int cache_ref) +int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, + struct extent_buffer *buf, u32 nr_extents) { - u64 bytenr; u32 nritems; struct btrfs_key key; struct btrfs_file_extent_item *fi; int i; int level; - int ret; - int faili; - int nr_file_extents = 0; + int ret = 0; if (!root->ref_cows) return 0; level = btrfs_header_level(buf); nritems = btrfs_header_nritems(buf); - for (i = 0; i < nritems; i++) { - cond_resched(); - if (level == 0) { - u64 disk_bytenr; - btrfs_item_key_to_cpu(buf, &key, i); - if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) - continue; - fi = btrfs_item_ptr(buf, i, - struct btrfs_file_extent_item); - if (btrfs_file_extent_type(buf, fi) == - BTRFS_FILE_EXTENT_INLINE) - continue; - disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); - if (disk_bytenr == 0) - continue; - - if (buf != root->commit_root) - nr_file_extents++; - - mutex_lock(&root->fs_info->alloc_mutex); - ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr, - btrfs_file_extent_disk_num_bytes(buf, fi), - root->root_key.objectid, trans->transid, - key.objectid, key.offset); - mutex_unlock(&root->fs_info->alloc_mutex); - if (ret) { - faili = i; - WARN_ON(1); - goto fail; - } - } else { - bytenr = btrfs_node_blockptr(buf, i); - btrfs_node_key_to_cpu(buf, &key, i); - mutex_lock(&root->fs_info->alloc_mutex); - ret = __btrfs_inc_extent_ref(trans, root, bytenr, - btrfs_level_size(root, level - 1), - root->root_key.objectid, - trans->transid, - level - 1, key.objectid); - mutex_unlock(&root->fs_info->alloc_mutex); - if (ret) { - faili = i; - WARN_ON(1); - goto fail; - } - } - } - /* cache orignal leaf block's references */ - if (level == 0 && cache_ref && buf != root->commit_root) { + if (level == 0) { struct btrfs_leaf_ref *ref; struct btrfs_extent_info *info; - ref = btrfs_alloc_leaf_ref(root, nr_file_extents); + ref = btrfs_alloc_leaf_ref(root, nr_extents); if (!ref) { - WARN_ON(1); + ret = -ENOMEM; goto out; } @@ -1101,10 +1140,10 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, ref->bytenr = buf->start; ref->owner = btrfs_header_owner(buf); ref->generation = btrfs_header_generation(buf); - ref->nritems = nr_file_extents; + ref->nritems = nr_extents; info = ref->extents; - for (i = 0; nr_file_extents > 0 && i < nritems; i++) { + for (i = 0; nr_extents > 0 && i < nritems; i++) { u64 disk_bytenr; btrfs_item_key_to_cpu(buf, &key, i); if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) @@ -1132,13 +1171,52 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, btrfs_free_leaf_ref(root, ref); } out: - return 0; -fail: - WARN_ON(1); -#if 0 - for (i =0; i < faili; i++) { + return ret; +} + +int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, + struct extent_buffer *orig_buf, struct extent_buffer *buf, + u32 *nr_extents) +{ + u64 bytenr; + u64 ref_root; + u64 orig_root; + u64 ref_generation; + u64 orig_generation; + u32 nritems; + u32 nr_file_extents = 0; + struct btrfs_key key; + struct btrfs_file_extent_item *fi; + int i; + int level; + int ret = 0; + int faili = 0; + int (*process_func)(struct 
btrfs_trans_handle *, struct btrfs_root *, + u64, u64, u64, u64, u64, u64, u64, u64, u64); + + ref_root = btrfs_header_owner(buf); + ref_generation = btrfs_header_generation(buf); + orig_root = btrfs_header_owner(orig_buf); + orig_generation = btrfs_header_generation(orig_buf); + + nritems = btrfs_header_nritems(buf); + level = btrfs_header_level(buf); + + if (root->ref_cows) { + process_func = __btrfs_inc_extent_ref; + } else { + if (level == 0 && + root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + goto out; + if (level != 0 && + root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) + goto out; + process_func = __btrfs_update_extent_ref; + } + + for (i = 0; i < nritems; i++) { + cond_resched(); if (level == 0) { - u64 disk_bytenr; btrfs_item_key_to_cpu(buf, &key, i); if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) continue; @@ -1147,24 +1225,131 @@ fail: if (btrfs_file_extent_type(buf, fi) == BTRFS_FILE_EXTENT_INLINE) continue; - disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); - if (disk_bytenr == 0) + bytenr = btrfs_file_extent_disk_bytenr(buf, fi); + if (bytenr == 0) continue; - err = btrfs_free_extent(trans, root, disk_bytenr, - btrfs_file_extent_disk_num_bytes(buf, - fi), 0); - BUG_ON(err); + + nr_file_extents++; + + maybe_lock_mutex(root); + ret = process_func(trans, root, bytenr, + orig_buf->start, buf->start, + orig_root, ref_root, + orig_generation, ref_generation, + key.objectid, key.offset); + maybe_unlock_mutex(root); + + if (ret) { + faili = i; + WARN_ON(1); + goto fail; + } } else { bytenr = btrfs_node_blockptr(buf, i); - err = btrfs_free_extent(trans, root, bytenr, - btrfs_level_size(root, level - 1), 0); - BUG_ON(err); + maybe_lock_mutex(root); + ret = process_func(trans, root, bytenr, + orig_buf->start, buf->start, + orig_root, ref_root, + orig_generation, ref_generation, + level - 1, 0); + maybe_unlock_mutex(root); + if (ret) { + faili = i; + WARN_ON(1); + goto fail; + } } } -#endif +out: + if (nr_extents) { + if (level == 0) + *nr_extents = nr_file_extents; + else + *nr_extents = nritems; + } + return 0; +fail: + WARN_ON(1); return ret; } +int btrfs_update_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct extent_buffer *orig_buf, + struct extent_buffer *buf, int start_slot, int nr) + +{ + u64 bytenr; + u64 ref_root; + u64 orig_root; + u64 ref_generation; + u64 orig_generation; + struct btrfs_key key; + struct btrfs_file_extent_item *fi; + int i; + int ret; + int slot; + int level; + + BUG_ON(start_slot < 0); + BUG_ON(start_slot + nr > btrfs_header_nritems(buf)); + + ref_root = btrfs_header_owner(buf); + ref_generation = btrfs_header_generation(buf); + orig_root = btrfs_header_owner(orig_buf); + orig_generation = btrfs_header_generation(orig_buf); + level = btrfs_header_level(buf); + + if (!root->ref_cows) { + if (level == 0 && + root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + return 0; + if (level != 0 && + root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) + return 0; + } + + for (i = 0, slot = start_slot; i < nr; i++, slot++) { + cond_resched(); + if (level == 0) { + btrfs_item_key_to_cpu(buf, &key, slot); + if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) + continue; + fi = btrfs_item_ptr(buf, slot, + struct btrfs_file_extent_item); + if (btrfs_file_extent_type(buf, fi) == + BTRFS_FILE_EXTENT_INLINE) + continue; + bytenr = btrfs_file_extent_disk_bytenr(buf, fi); + if (bytenr == 0) + continue; + maybe_lock_mutex(root); + ret = __btrfs_update_extent_ref(trans, root, bytenr, + orig_buf->start, buf->start, + orig_root, ref_root, + 
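/* the orig_* values key the reference under the old parent block,
			 * the remaining arguments describe the new copy */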
orig_generation, ref_generation, + key.objectid, key.offset); + maybe_unlock_mutex(root); + if (ret) + goto fail; + } else { + bytenr = btrfs_node_blockptr(buf, slot); + maybe_lock_mutex(root); + ret = __btrfs_update_extent_ref(trans, root, bytenr, + orig_buf->start, buf->start, + orig_root, ref_root, + orig_generation, ref_generation, + level - 1, 0); + maybe_unlock_mutex(root); + if (ret) + goto fail; + } + } + return 0; +fail: + WARN_ON(1); + return -1; +} + static int write_one_cache_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -1539,19 +1724,18 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, { u64 start; u64 end; + u64 priv; struct btrfs_fs_info *info = extent_root->fs_info; - struct extent_buffer *eb; struct btrfs_path *path; - struct btrfs_key ins; - struct btrfs_disk_key first; + struct btrfs_extent_ref *ref; + struct pending_extent_op *extent_op; + struct btrfs_key key; struct btrfs_extent_item extent_item; int ret; - int level; int err = 0; WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex)); btrfs_set_stack_extent_refs(&extent_item, 1); - btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY); path = btrfs_alloc_path(); while(1) { @@ -1560,37 +1744,54 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, if (ret) break; - ins.objectid = start; - ins.offset = end + 1 - start; - err = btrfs_insert_item(trans, extent_root, &ins, + ret = get_state_private(&info->extent_ins, start, &priv); + BUG_ON(ret); + extent_op = (struct pending_extent_op *)(unsigned long)priv; + + if (extent_op->type == PENDING_EXTENT_INSERT) { + key.objectid = start; + key.offset = end + 1 - start; + key.type = BTRFS_EXTENT_ITEM_KEY; + err = btrfs_insert_item(trans, extent_root, &key, &extent_item, sizeof(extent_item)); - clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED, - GFP_NOFS); + BUG_ON(err); - eb = btrfs_find_create_tree_block(extent_root, ins.objectid, - ins.offset); + clear_extent_bits(&info->extent_ins, start, end, + EXTENT_LOCKED, GFP_NOFS); - if (!btrfs_buffer_uptodate(eb, trans->transid)) - btrfs_read_buffer(eb, trans->transid); + err = insert_extent_backref(trans, extent_root, path, + start, extent_op->parent, + extent_root->root_key.objectid, + extent_op->generation, + extent_op->level, 0); + BUG_ON(err); + } else if (extent_op->type == PENDING_BACKREF_UPDATE) { + err = lookup_extent_backref(trans, extent_root, path, + start, extent_op->orig_parent, + extent_root->root_key.objectid, + extent_op->orig_generation, 0); + BUG_ON(err); - btrfs_tree_lock(eb); - level = btrfs_header_level(eb); - if (level == 0) { - btrfs_item_key(eb, &first, 0); + clear_extent_bits(&info->extent_ins, start, end, + EXTENT_LOCKED, GFP_NOFS); + + key.objectid = start; + key.offset = extent_op->parent; + key.type = BTRFS_EXTENT_REF_KEY; + err = btrfs_set_item_key_safe(trans, extent_root, path, + &key); + BUG_ON(err); + ref = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_extent_ref); + btrfs_set_ref_generation(path->nodes[0], ref, + extent_op->generation); + btrfs_mark_buffer_dirty(path->nodes[0]); + btrfs_release_path(extent_root, path); } else { - btrfs_node_key(eb, &first, 0); + BUG_ON(1); } - btrfs_tree_unlock(eb); - free_extent_buffer(eb); - /* - * the first key is just a hint, so the race we've created - * against reading it is fine - */ - err = btrfs_insert_extent_backref(trans, extent_root, path, - start, extent_root->root_key.objectid, - 0, level, - btrfs_disk_key_objectid(&first)); - BUG_ON(err); 
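+		/* the pending op was consumed by one of the branches above */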
+ kfree(extent_op); + if (need_resched()) { mutex_unlock(&extent_root->fs_info->alloc_mutex); cond_resched(); @@ -1601,52 +1802,44 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, return 0; } -static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes, - int is_data, int pending) +static int pin_down_bytes(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, int is_data) { int err = 0; + struct extent_buffer *buf; WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); - if (!pending) { - struct extent_buffer *buf; - - if (is_data) - goto pinit; - - buf = btrfs_find_tree_block(root, bytenr, num_bytes); - if (buf) { - /* we can reuse a block if it hasn't been written - * and it is from this transaction. We can't - * reuse anything from the tree log root because - * it has tiny sub-transactions. - */ - if (btrfs_buffer_uptodate(buf, 0) && - btrfs_try_tree_lock(buf)) { - u64 transid = - root->fs_info->running_transaction->transid; - u64 header_transid = - btrfs_header_generation(buf); - if (btrfs_header_owner(buf) != - BTRFS_TREE_LOG_OBJECTID && - header_transid == transid && - !btrfs_header_flag(buf, - BTRFS_HEADER_FLAG_WRITTEN)) { - clean_tree_block(NULL, root, buf); - btrfs_tree_unlock(buf); - free_extent_buffer(buf); - return 1; - } - btrfs_tree_unlock(buf); - } + if (is_data) + goto pinit; + + buf = btrfs_find_tree_block(root, bytenr, num_bytes); + if (!buf) + goto pinit; + + /* we can reuse a block if it hasn't been written + * and it is from this transaction. We can't + * reuse anything from the tree log root because + * it has tiny sub-transactions. + */ + if (btrfs_buffer_uptodate(buf, 0) && + btrfs_try_tree_lock(buf)) { + u64 header_owner = btrfs_header_owner(buf); + u64 header_transid = btrfs_header_generation(buf); + if (header_owner != BTRFS_TREE_LOG_OBJECTID && + header_transid == trans->transid && + !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { + clean_tree_block(NULL, root, buf); + btrfs_tree_unlock(buf); free_extent_buffer(buf); + return 1; } -pinit: - btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); - } else { - set_extent_bits(&root->fs_info->pending_del, - bytenr, bytenr + num_bytes - 1, - EXTENT_LOCKED, GFP_NOFS); + btrfs_tree_unlock(buf); } + free_extent_buffer(buf); +pinit: + btrfs_update_pinned_extents(root, bytenr, num_bytes, 1); + BUG_ON(err < 0); return 0; } @@ -1654,11 +1847,12 @@ pinit: /* * remove an extent from the root, returns 0 on success */ -static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root - *root, u64 bytenr, u64 num_bytes, +static int __free_extent(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, u64 ref_generation, - u64 owner_objectid, u64 owner_offset, int pin, - int mark_free) + u64 owner_objectid, u64 owner_offset, + int pin, int mark_free) { struct btrfs_path *path; struct btrfs_key key; @@ -1681,10 +1875,8 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root return -ENOMEM; path->reada = 1; - ret = lookup_extent_backref(trans, extent_root, path, - bytenr, root_objectid, - ref_generation, - owner_objectid, owner_offset, 1); + ret = lookup_extent_backref(trans, extent_root, path, bytenr, parent, + root_objectid, ref_generation, 1); if (ret == 0) { struct btrfs_key found_key; extent_slot = path->slots[0]; @@ -1702,8 +1894,15 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root if (path->slots[0] - extent_slot > 
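/* the extent item should sit within a few slots of its backref */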
5) break; } - if (!found_extent) - ret = btrfs_del_item(trans, extent_root, path); + if (!found_extent) { + ret = remove_extent_backref(trans, extent_root, path); + BUG_ON(ret); + btrfs_release_path(extent_root, path); + ret = btrfs_search_slot(trans, extent_root, + &key, path, -1, 1); + BUG_ON(ret); + extent_slot = path->slots[0]; + } } else { btrfs_print_leaf(extent_root, path->nodes[0]); WARN_ON(1); @@ -1712,14 +1911,6 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root root_objectid, ref_generation, owner_objectid, owner_offset); } - if (!found_extent) { - btrfs_release_path(extent_root, path); - ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1); - if (ret < 0) - return ret; - BUG_ON(ret); - extent_slot = path->slots[0]; - } leaf = path->nodes[0]; ei = btrfs_item_ptr(leaf, extent_slot, @@ -1732,6 +1923,10 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_mark_buffer_dirty(leaf); if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) { + struct btrfs_extent_ref *ref; + ref = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_extent_ref); + BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1); /* if the back ref and the extent are next to each other * they get deleted below in one shot */ @@ -1739,15 +1934,13 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root num_to_del = 2; } else if (found_extent) { /* otherwise delete the extent back ref */ - ret = btrfs_del_item(trans, extent_root, path); + ret = remove_extent_backref(trans, extent_root, path); BUG_ON(ret); /* if refs are 0, we need to setup the path for deletion */ if (refs == 0) { btrfs_release_path(extent_root, path); ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1); - if (ret < 0) - return ret; BUG_ON(ret); } } @@ -1761,8 +1954,8 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root #endif if (pin) { - ret = pin_down_bytes(root, bytenr, num_bytes, - owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0); + ret = pin_down_bytes(trans, root, bytenr, num_bytes, + owner_objectid >= BTRFS_FIRST_FREE_OBJECTID); if (ret > 0) mark_free = 1; BUG_ON(ret < 0); @@ -1781,9 +1974,7 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root root_used - num_bytes); ret = btrfs_del_items(trans, extent_root, path, path->slots[0], num_to_del); - if (ret) { - return ret; - } + BUG_ON(ret); ret = update_block_group(trans, root, bytenr, num_bytes, 0, mark_free); BUG_ON(ret); @@ -1822,33 +2013,61 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct { int ret; int err = 0; + int mark_free = 0; u64 start; u64 end; + u64 priv; struct extent_io_tree *pending_del; - struct extent_io_tree *pinned_extents; + struct extent_io_tree *extent_ins; + struct pending_extent_op *extent_op; WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex)); + extent_ins = &extent_root->fs_info->extent_ins; pending_del = &extent_root->fs_info->pending_del; - pinned_extents = &extent_root->fs_info->pinned_extents; while(1) { ret = find_first_extent_bit(pending_del, 0, &start, &end, EXTENT_LOCKED); if (ret) break; + + ret = get_state_private(pending_del, start, &priv); + BUG_ON(ret); + extent_op = (struct pending_extent_op *)(unsigned long)priv; + clear_extent_bits(pending_del, start, end, EXTENT_LOCKED, GFP_NOFS); - if (!test_range_bit(&extent_root->fs_info->extent_ins, - start, end, EXTENT_LOCKED, 0)) { - btrfs_update_pinned_extents(extent_root, start, - end + 1 - start, 1); + + ret = 
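/* a return of 1 means the block was never written out,
		 * so it can be freed without pinning */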
pin_down_bytes(trans, extent_root, start, + end + 1 - start, 0); + mark_free = ret > 0; + if (!test_range_bit(extent_ins, start, end, + EXTENT_LOCKED, 0)) { +free_extent: ret = __free_extent(trans, extent_root, - start, end + 1 - start, - extent_root->root_key.objectid, - 0, 0, 0, 0, 0); + start, end + 1 - start, + extent_op->orig_parent, + extent_root->root_key.objectid, + extent_op->orig_generation, + extent_op->level, 0, 0, mark_free); + kfree(extent_op); } else { - clear_extent_bits(&extent_root->fs_info->extent_ins, - start, end, EXTENT_LOCKED, GFP_NOFS); + kfree(extent_op); + ret = get_state_private(extent_ins, start, &priv); + BUG_ON(ret); + extent_op = (struct pending_extent_op *) + (unsigned long)priv; + + clear_extent_bits(extent_ins, start, end, + EXTENT_LOCKED, GFP_NOFS); + + if (extent_op->type == PENDING_BACKREF_UPDATE) + goto free_extent; + + ret = update_block_group(trans, extent_root, start, + end + 1 - start, 0, mark_free); + BUG_ON(ret); + kfree(extent_op); } if (ret) err = ret; @@ -1866,21 +2085,36 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct * remove an extent from the root, returns 0 on success */ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 bytenr, - u64 num_bytes, u64 root_objectid, - u64 ref_generation, u64 owner_objectid, - u64 owner_offset, int pin) + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, u64 parent, + u64 root_objectid, u64 ref_generation, + u64 owner_objectid, u64 owner_offset, int pin) { struct btrfs_root *extent_root = root->fs_info->extent_root; int pending_ret; int ret; WARN_ON(num_bytes < root->sectorsize); - if (!root->ref_cows) - ref_generation = 0; - if (root == extent_root) { - pin_down_bytes(root, bytenr, num_bytes, 0, 1); + struct pending_extent_op *extent_op; + + extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); + BUG_ON(!extent_op); + + extent_op->type = PENDING_EXTENT_DELETE; + extent_op->bytenr = bytenr; + extent_op->num_bytes = num_bytes; + extent_op->parent = parent; + extent_op->orig_parent = parent; + extent_op->generation = ref_generation; + extent_op->orig_generation = ref_generation; + extent_op->level = (int)owner_objectid; + + set_extent_bits(&root->fs_info->pending_del, + bytenr, bytenr + num_bytes - 1, + EXTENT_LOCKED, GFP_NOFS); + set_state_private(&root->fs_info->pending_del, + bytenr, (unsigned long)extent_op); return 0; } /* if metadata always pin */ @@ -1901,9 +2135,9 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, if (ref_generation != trans->transid) pin = 1; - ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid, - ref_generation, owner_objectid, owner_offset, - pin, pin == 0); + ret = __free_extent(trans, root, bytenr, num_bytes, parent, + root_objectid, ref_generation, owner_objectid, + owner_offset, pin, pin == 0); finish_current_insert(trans, root->fs_info->extent_root); pending_ret = del_pending_extents(trans, root->fs_info->extent_root); @@ -1911,15 +2145,15 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, } int btrfs_free_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 bytenr, - u64 num_bytes, u64 root_objectid, - u64 ref_generation, u64 owner_objectid, - u64 owner_offset, int pin) + struct btrfs_root *root, + u64 bytenr, u64 num_bytes, u64 parent, + u64 root_objectid, u64 ref_generation, + u64 owner_objectid, u64 owner_offset, int pin) { int ret; maybe_lock_mutex(root); - ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, + ret = 
__btrfs_free_extent(trans, root, bytenr, num_bytes, parent, root_objectid, ref_generation, owner_objectid, owner_offset, pin); maybe_unlock_mutex(root); @@ -2271,7 +2505,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, } static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset, struct btrfs_key *ins) @@ -2289,6 +2523,9 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path; struct btrfs_key keys[2]; + if (parent == 0) + parent = ins->objectid; + /* block accounting for super block */ spin_lock_irq(&info->delalloc_lock); super_used = btrfs_super_bytes_used(&info->super_copy); @@ -2300,17 +2537,32 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, btrfs_set_root_used(&root->root_item, root_used + num_bytes); if (root == extent_root) { + struct pending_extent_op *extent_op; + + extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS); + BUG_ON(!extent_op); + + extent_op->type = PENDING_EXTENT_INSERT; + extent_op->bytenr = ins->objectid; + extent_op->num_bytes = ins->offset; + extent_op->parent = parent; + extent_op->orig_parent = 0; + extent_op->generation = ref_generation; + extent_op->orig_generation = 0; + extent_op->level = (int)owner; + set_extent_bits(&root->fs_info->extent_ins, ins->objectid, ins->objectid + ins->offset - 1, EXTENT_LOCKED, GFP_NOFS); + set_state_private(&root->fs_info->extent_ins, + ins->objectid, (unsigned long)extent_op); goto update_block; } memcpy(&keys[0], ins, sizeof(*ins)); - keys[1].offset = hash_extent_ref(root_objectid, ref_generation, - owner, owner_offset); keys[1].objectid = ins->objectid; keys[1].type = BTRFS_EXTENT_REF_KEY; + keys[1].offset = parent; sizes[0] = sizeof(*extent_item); sizes[1] = sizeof(*ref); @@ -2331,6 +2583,7 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, btrfs_set_ref_generation(path->nodes[0], ref, ref_generation); btrfs_set_ref_objectid(path->nodes[0], ref, owner); btrfs_set_ref_offset(path->nodes[0], ref, owner_offset); + btrfs_set_ref_num_refs(path->nodes[0], ref, 1); btrfs_mark_buffer_dirty(path->nodes[0]); @@ -2359,7 +2612,7 @@ out: } int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset, struct btrfs_key *ins) @@ -2369,9 +2622,9 @@ int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, if (root_objectid == BTRFS_TREE_LOG_OBJECTID) return 0; maybe_lock_mutex(root); - ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, - ref_generation, owner, - owner_offset, ins); + ret = __btrfs_alloc_reserved_extent(trans, root, parent, + root_objectid, ref_generation, + owner, owner_offset, ins); maybe_unlock_mutex(root); return ret; } @@ -2382,7 +2635,7 @@ int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, * space cache bits as well */ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, + struct btrfs_root *root, u64 parent, u64 root_objectid, u64 ref_generation, u64 owner, u64 owner_offset, struct btrfs_key *ins) @@ -2396,10 +2649,9 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset); BUG_ON(ret); - - ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, - ref_generation, 
owner, - owner_offset, ins); + ret = __btrfs_alloc_reserved_extent(trans, root, parent, + root_objectid, ref_generation, + owner, owner_offset, ins); maybe_unlock_mutex(root); return ret; } @@ -2413,9 +2665,9 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, */ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u64 num_bytes, u64 min_alloc_size, + u64 num_bytes, u64 parent, u64 min_alloc_size, u64 root_objectid, u64 ref_generation, - u64 owner, u64 owner_offset, + u64 owner_objectid, u64 owner_offset, u64 empty_size, u64 hint_byte, u64 search_end, struct btrfs_key *ins, u64 data) { @@ -2428,9 +2680,9 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, search_end, ins, data); BUG_ON(ret); if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { - ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid, - ref_generation, owner, - owner_offset, ins); + ret = __btrfs_alloc_reserved_extent(trans, root, parent, + root_objectid, ref_generation, + owner_objectid, owner_offset, ins); BUG_ON(ret); } @@ -2468,10 +2720,9 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, */ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u32 blocksize, + u32 blocksize, u64 parent, u64 root_objectid, u64 ref_generation, - u64 first_objectid, int level, u64 hint, u64 empty_size) @@ -2480,10 +2731,9 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, int ret; struct extent_buffer *buf; - ret = btrfs_alloc_extent(trans, root, blocksize, blocksize, - root_objectid, ref_generation, - level, first_objectid, empty_size, hint, - (u64)-1, &ins, 0); + ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize, + root_objectid, ref_generation, level, 0, + empty_size, hint, (u64)-1, &ins, 0); if (ret) { BUG_ON(ret > 0); return ERR_PTR(ret); @@ -2531,15 +2781,14 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, disk_bytenr, btrfs_file_extent_disk_num_bytes(leaf, fi), - leaf_owner, leaf_generation, + leaf->start, leaf_owner, leaf_generation, key.objectid, key.offset, 0); mutex_unlock(&root->fs_info->alloc_mutex); + BUG_ON(ret); atomic_inc(&root->fs_info->throttle_gen); wake_up(&root->fs_info->transaction_throttle); cond_resched(); - - BUG_ON(ret); } return 0; } @@ -2554,10 +2803,10 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans, for (i = 0; i < ref->nritems; i++) { mutex_lock(&root->fs_info->alloc_mutex); - ret = __btrfs_free_extent(trans, root, - info->bytenr, info->num_bytes, - ref->owner, ref->generation, - info->objectid, info->offset, 0); + ret = __btrfs_free_extent(trans, root, info->bytenr, + info->num_bytes, ref->bytenr, + ref->owner, ref->generation, + info->objectid, info->offset, 0); mutex_unlock(&root->fs_info->alloc_mutex); atomic_inc(&root->fs_info->throttle_gen); @@ -2576,7 +2825,7 @@ int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len, { int ret; - ret = lookup_extent_ref(NULL, root, start, len, refs); + ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs); BUG_ON(ret); #if 0 // some debugging code in case we see problems here @@ -2672,8 +2921,8 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, bytenr, - blocksize, root_owner, - root_gen, 0, 0, 1); + blocksize, parent->start, + root_owner, 
root_gen, 0, 0, 1); BUG_ON(ret); mutex_unlock(&root->fs_info->alloc_mutex); @@ -2690,8 +2939,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, * So, we don't need to check it again */ if (*level == 1) { - struct btrfs_key key; - btrfs_node_key_to_cpu(cur, &key, path->slots[*level]); ref = btrfs_lookup_leaf_ref(root, bytenr); if (ref) { ret = cache_drop_leaf_ref(trans, root, ref); @@ -2750,12 +2997,13 @@ out: mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, bytenr, blocksize, - root_owner, root_gen, 0, 0, 1); + parent->start, root_owner, root_gen, + 0, 0, 1); + mutex_unlock(&root->fs_info->alloc_mutex); free_extent_buffer(path->nodes[*level]); path->nodes[*level] = NULL; *level += 1; BUG_ON(ret); - mutex_unlock(&root->fs_info->alloc_mutex); cond_resched(); return 0; @@ -2792,19 +3040,18 @@ static int noinline walk_up_tree(struct btrfs_trans_handle *trans, root_item->drop_level = i; return 0; } else { - if (path->nodes[*level] == root->node) { - root_owner = root->root_key.objectid; - root_gen = - btrfs_header_generation(path->nodes[*level]); - } else { - struct extent_buffer *node; - node = path->nodes[*level + 1]; - root_owner = btrfs_header_owner(node); - root_gen = btrfs_header_generation(node); - } + struct extent_buffer *parent; + if (path->nodes[*level] == root->node) + parent = path->nodes[*level]; + else + parent = path->nodes[*level + 1]; + + root_owner = btrfs_header_owner(parent); + root_gen = btrfs_header_generation(parent); ret = btrfs_free_extent(trans, root, path->nodes[*level]->start, path->nodes[*level]->len, + parent->start, root_owner, root_gen, 0, 0, 1); BUG_ON(ret); free_extent_buffer(path->nodes[*level]); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8624f3e8803..58ad25838a4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2201,9 +2201,10 @@ retry: } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; - +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) if (wbc->range_cont) wbc->range_start = index << PAGE_CACHE_SHIFT; +#endif return ret; } EXPORT_SYMBOL(extent_write_cache_pages); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 58b329ddb42..48a702d41c8 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -524,6 +524,9 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, { u64 extent_end = 0; u64 search_start = start; + u64 leaf_start; + u64 root_gen; + u64 root_owner; struct extent_buffer *leaf; struct btrfs_file_extent_item *extent; struct btrfs_path *path; @@ -562,6 +565,9 @@ next_slot: bookend = 0; found_extent = 0; found_inline = 0; + leaf_start = 0; + root_gen = 0; + root_owner = 0; extent = NULL; leaf = path->nodes[0]; slot = path->slots[0]; @@ -628,27 +634,18 @@ next_slot: search_start = extent_end; if (end <= extent_end && start >= key.offset && found_inline) { *hint_byte = EXTENT_MAP_INLINE; - continue; + goto out; + } + + if (found_extent) { + read_extent_buffer(leaf, &old, (unsigned long)extent, + sizeof(old)); + root_gen = btrfs_header_generation(leaf); + root_owner = btrfs_header_owner(leaf); + leaf_start = leaf->start; } + if (end < extent_end && end >= key.offset) { - if (found_extent) { - u64 disk_bytenr = - btrfs_file_extent_disk_bytenr(leaf, extent); - u64 disk_num_bytes = - btrfs_file_extent_disk_num_bytes(leaf, - extent); - read_extent_buffer(leaf, &old, - (unsigned long)extent, - sizeof(old)); - if (disk_bytenr != 0) { - ret = btrfs_inc_extent_ref(trans, root, - disk_bytenr, disk_num_bytes, - 
root->root_key.objectid, - trans->transid, - key.objectid, end); - BUG_ON(ret); - } - } bookend = 1; if (found_inline && start <= key.offset) keep = 1; @@ -687,49 +684,12 @@ next_slot: } /* delete the entire extent */ if (!keep) { - u64 disk_bytenr = 0; - u64 disk_num_bytes = 0; - u64 extent_num_bytes = 0; - u64 root_gen; - u64 root_owner; - - root_gen = btrfs_header_generation(leaf); - root_owner = btrfs_header_owner(leaf); - if (found_extent) { - disk_bytenr = - btrfs_file_extent_disk_bytenr(leaf, - extent); - disk_num_bytes = - btrfs_file_extent_disk_num_bytes(leaf, - extent); - extent_num_bytes = - btrfs_file_extent_num_bytes(leaf, extent); - *hint_byte = - btrfs_file_extent_disk_bytenr(leaf, - extent); - } ret = btrfs_del_item(trans, root, path); /* TODO update progress marker and return */ BUG_ON(ret); - btrfs_release_path(root, path); extent = NULL; - if (found_extent && disk_bytenr != 0) { - dec_i_blocks(inode, extent_num_bytes); - ret = btrfs_free_extent(trans, root, - disk_bytenr, - disk_num_bytes, - root_owner, - root_gen, inode->i_ino, - key.offset, 0); - } - - BUG_ON(ret); - if (!bookend && search_start >= end) { - ret = 0; - goto out; - } - if (!bookend) - continue; + btrfs_release_path(root, path); + /* the extent will be freed later */ } if (bookend && found_inline && start <= key.offset) { u32 new_size; @@ -737,10 +697,13 @@ next_slot: extent_end - end); dec_i_blocks(inode, (extent_end - key.offset) - (extent_end - end)); - btrfs_truncate_item(trans, root, path, new_size, 0); + ret = btrfs_truncate_item(trans, root, path, + new_size, 0); + BUG_ON(ret); } /* create bookend, splitting the extent in two */ if (bookend && found_extent) { + u64 disk_bytenr; struct btrfs_key ins; ins.objectid = inode->i_ino; ins.offset = end; @@ -748,13 +711,9 @@ next_slot: btrfs_release_path(root, path); ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*extent)); + BUG_ON(ret); leaf = path->nodes[0]; - if (ret) { - btrfs_print_leaf(root, leaf); - printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret , ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep); - } - BUG_ON(ret); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); write_extent_buffer(leaf, &old, @@ -770,11 +729,43 @@ next_slot: BTRFS_FILE_EXTENT_REG); btrfs_mark_buffer_dirty(path->nodes[0]); - if (le64_to_cpu(old.disk_bytenr) != 0) { + + disk_bytenr = le64_to_cpu(old.disk_bytenr); + if (disk_bytenr != 0) { + ret = btrfs_inc_extent_ref(trans, root, + disk_bytenr, + le64_to_cpu(old.disk_num_bytes), + leaf->start, + root->root_key.objectid, + trans->transid, + ins.objectid, ins.offset); + BUG_ON(ret); + } + btrfs_release_path(root, path); + if (disk_bytenr != 0) { inode->i_blocks += btrfs_file_extent_num_bytes(leaf, extent) >> 9; } + } + + if (found_extent && !keep) { + u64 disk_bytenr = le64_to_cpu(old.disk_bytenr); + + if (disk_bytenr != 0) { + dec_i_blocks(inode, le64_to_cpu(old.num_bytes)); + ret = btrfs_free_extent(trans, root, + disk_bytenr, + le64_to_cpu(old.disk_num_bytes), + leaf_start, root_owner, + root_gen, key.objectid, + key.offset, 0); + BUG_ON(ret); + *hint_byte = disk_bytenr; + } + } + + if (search_start >= end) { ret = 0; goto out; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 65b4f864b0d..2e7d82ec5d1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -528,6 +528,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent 
*ordered_extent; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct btrfs_file_extent_item *extent_item; + struct btrfs_path *path = NULL; + struct extent_buffer *leaf; u64 alloc_hint = 0; struct list_head list; struct btrfs_key ins; @@ -544,20 +547,15 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) goto nocow; + path = btrfs_alloc_path(); + BUG_ON(!path); + lock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, GFP_NOFS); INIT_LIST_HEAD(&list); - ins.objectid = ordered_extent->start; - ins.offset = ordered_extent->len; - ins.type = BTRFS_EXTENT_ITEM_KEY; - - ret = btrfs_alloc_reserved_extent(trans, root, root->root_key.objectid, - trans->transid, inode->i_ino, - ordered_extent->file_offset, &ins); - BUG_ON(ret); mutex_lock(&BTRFS_I(inode)->extent_mutex); ret = btrfs_drop_extents(trans, root, inode, @@ -566,18 +564,42 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->len, ordered_extent->file_offset, &alloc_hint); BUG_ON(ret); - ret = btrfs_insert_file_extent(trans, root, inode->i_ino, - ordered_extent->file_offset, - ordered_extent->start, - ordered_extent->len, - ordered_extent->len, 0); + + ins.objectid = inode->i_ino; + ins.offset = ordered_extent->file_offset; + ins.type = BTRFS_EXTENT_DATA_KEY; + ret = btrfs_insert_empty_item(trans, root, path, &ins, + sizeof(*extent_item)); BUG_ON(ret); + leaf = path->nodes[0]; + extent_item = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_generation(leaf, extent_item, trans->transid); + btrfs_set_file_extent_type(leaf, extent_item, BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_disk_bytenr(leaf, extent_item, + ordered_extent->start); + btrfs_set_file_extent_disk_num_bytes(leaf, extent_item, + ordered_extent->len); + btrfs_set_file_extent_offset(leaf, extent_item, 0); + btrfs_set_file_extent_num_bytes(leaf, extent_item, + ordered_extent->len); + btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1); mutex_unlock(&BTRFS_I(inode)->extent_mutex); + ins.objectid = ordered_extent->start; + ins.offset = ordered_extent->len; + ins.type = BTRFS_EXTENT_ITEM_KEY; + ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, + root->root_key.objectid, + trans->transid, inode->i_ino, + ordered_extent->file_offset, &ins); + BUG_ON(ret); + btrfs_release_path(root, path); + inode->i_blocks += ordered_extent->len >> 9; unlock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, @@ -596,6 +618,8 @@ nocow: btrfs_put_ordered_extent(ordered_extent); btrfs_end_transaction(trans, root); + if (path) + btrfs_free_path(path); return 0; } @@ -1433,10 +1457,7 @@ search_again: if (root->ref_cows) dec_i_blocks(inode, num_dec); } - if (root->ref_cows) { - root_gen = - btrfs_header_generation(leaf); - } + root_gen = btrfs_header_generation(leaf); root_owner = btrfs_header_owner(leaf); } } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { @@ -1477,7 +1498,7 @@ delete: if (found_extent) { ret = btrfs_free_extent(trans, root, extent_start, extent_num_bytes, - root_owner, + leaf->start, root_owner, root_gen, inode->i_ino, found_key.offset, 0); BUG_ON(ret); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f84b5f6991c..4c6e0c15754 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -76,9 
+76,8 @@ static noinline int create_subvol(struct btrfs_root *root, char *name, if (ret) goto fail; - leaf = btrfs_alloc_free_block(trans, root, root->leafsize, - objectid, trans->transid, 0, 0, - 0, 0); + leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, + objectid, trans->transid, 0, 0, 0); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); goto fail; @@ -525,13 +524,10 @@ long btrfs_ioctl_clone(struct file *file, unsigned long src_fd) struct file *src_file; struct inode *src; struct btrfs_trans_handle *trans; - struct btrfs_ordered_extent *ordered; struct btrfs_path *path; struct extent_buffer *leaf; char *buf; struct btrfs_key key; - struct btrfs_key new_key; - u32 size; u32 nritems; int slot; int ret; @@ -576,6 +572,7 @@ long btrfs_ioctl_clone(struct file *file, unsigned long src_fd) /* do any pending delalloc/csum calc on src, one way or another, and lock file content */ while (1) { + struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS); ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); if (BTRFS_I(src)->delalloc_bytes == 0 && !ordered) @@ -619,6 +616,32 @@ long btrfs_ioctl_clone(struct file *file, unsigned long src_fd) key.objectid != src->i_ino) break; + if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY || + btrfs_key_type(&key) == BTRFS_CSUM_ITEM_KEY) { + u32 size; + struct btrfs_key new_key; + + size = btrfs_item_size_nr(leaf, slot); + read_extent_buffer(leaf, buf, + btrfs_item_ptr_offset(leaf, slot), + size); + btrfs_release_path(root, path); + + memcpy(&new_key, &key, sizeof(new_key)); + new_key.objectid = inode->i_ino; + ret = btrfs_insert_empty_item(trans, root, path, + &new_key, size); + if (ret) + goto out; + + leaf = path->nodes[0]; + slot = path->slots[0]; + write_extent_buffer(leaf, buf, + btrfs_item_ptr_offset(leaf, slot), + size); + btrfs_mark_buffer_dirty(leaf); + } + if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { struct btrfs_file_extent_item *extent; int found_type; @@ -634,31 +657,15 @@ long btrfs_ioctl_clone(struct file *file, unsigned long src_fd) /* ds == 0 means there's a hole */ if (ds != 0) { ret = btrfs_inc_extent_ref(trans, root, - ds, dl, + ds, dl, leaf->start, root->root_key.objectid, trans->transid, inode->i_ino, key.offset); - if (ret) - goto out; + BUG_ON(ret); } } } - - if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY || - btrfs_key_type(&key) == BTRFS_CSUM_ITEM_KEY) { - size = btrfs_item_size_nr(leaf, slot); - read_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - btrfs_release_path(root, path); - memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = inode->i_ino; - ret = btrfs_insert_item(trans, root, &new_key, - buf, size); - BUG_ON(ret); - } else { - btrfs_release_path(root, path); - } + btrfs_release_path(root, path); key.offset++; } ret = 0; diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index f1374d597a1..3577badfa5b 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -102,11 +102,12 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) case BTRFS_EXTENT_REF_KEY: ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref); printk("\t\textent back ref root %llu gen %llu " - "owner %llu offset %llu\n", + "owner %llu offset %llu num_refs %lu\n", (unsigned long long)btrfs_ref_root(l, ref), (unsigned long long)btrfs_ref_generation(l, ref), (unsigned long long)btrfs_ref_objectid(l, ref), - (unsigned long long)btrfs_ref_offset(l, ref)); + (unsigned long long)btrfs_ref_offset(l, ref), + (unsigned 
long)btrfs_ref_num_refs(l, ref)); break; case BTRFS_EXTENT_DATA_KEY: diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 56de3fb2d8d..88bbfd959f1 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -89,9 +89,9 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans, int ret; u64 objectid = root->root_key.objectid; - leaf = btrfs_alloc_free_block(trans, root, root->leafsize, + leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, BTRFS_TREE_LOG_OBJECTID, - 0, 0, 0, 0, 0); + trans->transid, 0, 0, 0); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); return ret; @@ -433,6 +433,49 @@ insert: trans->transid); } } + + if (overwrite_root && + key->type == BTRFS_EXTENT_DATA_KEY) { + int extent_type; + struct btrfs_file_extent_item *fi; + + fi = (struct btrfs_file_extent_item *)dst_ptr; + extent_type = btrfs_file_extent_type(path->nodes[0], fi); + if (extent_type == BTRFS_FILE_EXTENT_REG) { + struct btrfs_key ins; + ins.objectid = btrfs_file_extent_disk_bytenr( + path->nodes[0], fi); + ins.offset = btrfs_file_extent_disk_num_bytes( + path->nodes[0], fi); + ins.type = BTRFS_EXTENT_ITEM_KEY; + + /* + * is this extent already allocated in the extent + * allocation tree? If so, just add a reference + */ + ret = btrfs_lookup_extent(root, ins.objectid, + ins.offset); + if (ret == 0) { + ret = btrfs_inc_extent_ref(trans, root, + ins.objectid, ins.offset, + path->nodes[0]->start, + root->root_key.objectid, + trans->transid, + key->objectid, key->offset); + } else { + /* + * insert the extent pointer in the extent + * allocation tree + */ + ret = btrfs_alloc_logged_extent(trans, root, + path->nodes[0]->start, + root->root_key.objectid, + trans->transid, key->objectid, + key->offset, &ins); + BUG_ON(ret); + } + } + } no_copy: btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_release_path(root, path); @@ -551,45 +594,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, start, extent_end, start, &alloc_hint); BUG_ON(ret); + /* insert the extent */ + ret = overwrite_item(trans, root, path, eb, slot, key); BUG_ON(ret); - if (found_type == BTRFS_FILE_EXTENT_REG) { - struct btrfs_key ins; - - ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); - ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); - ins.type = BTRFS_EXTENT_ITEM_KEY; - - /* insert the extent pointer in the file */ - ret = overwrite_item(trans, root, path, eb, slot, key); - BUG_ON(ret); - /* - * is this extent already allocated in the extent - * allocation tree? 
If so, just add a reference - */ - ret = btrfs_lookup_extent(root, path, ins.objectid, ins.offset); - btrfs_release_path(root, path); - if (ret == 0) { - ret = btrfs_inc_extent_ref(trans, root, - ins.objectid, ins.offset, - root->root_key.objectid, - trans->transid, key->objectid, start); - } else { - /* - * insert the extent pointer in the extent - * allocation tree - */ - ret = btrfs_alloc_logged_extent(trans, root, - root->root_key.objectid, - trans->transid, key->objectid, - start, &ins); - BUG_ON(ret); - } - } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { - /* inline extents are easy, we just overwrite them */ - ret = overwrite_item(trans, root, path, eb, slot, key); - BUG_ON(ret); - } /* btrfs_drop_extents changes i_blocks, update it here */ inode->i_blocks += (extent_end - start) >> 9; btrfs_update_inode(trans, root, inode); @@ -1806,16 +1814,14 @@ static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans, WARN_ON(*level == 0); return 0; } else { - if (path->nodes[*level] == root->node) { - root_owner = root->root_key.objectid; - root_gen = - btrfs_header_generation(path->nodes[*level]); - } else { - struct extent_buffer *node; - node = path->nodes[*level + 1]; - root_owner = btrfs_header_owner(node); - root_gen = btrfs_header_generation(node); - } + struct extent_buffer *parent; + if (path->nodes[*level] == root->node) + parent = path->nodes[*level]; + else + parent = path->nodes[*level + 1]; + + root_owner = btrfs_header_owner(parent); + root_gen = btrfs_header_generation(parent); wc->process_func(root, path->nodes[*level], wc, btrfs_header_generation(path->nodes[*level])); if (wc->free) { @@ -2525,8 +2531,10 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, if (ds != 0) { ret = btrfs_inc_extent_ref(trans, log, ds, dl, + dst_path->nodes[0]->start, BTRFS_TREE_LOG_OBJECTID, - 0, ins_keys[i].objectid, + trans->transid, + ins_keys[i].objectid, ins_keys[i].offset); BUG_ON(ret); } -- cgit v1.2.3-70-g09d2 From 2b1f55b0f0d0d1a66470ef4ea2696cd5dd741a12 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 24 Sep 2008 11:48:04 -0400 Subject: Remove Btrfs compat code for older kernels Btrfs had compatibility code for kernels back to 2.6.18. These have been removed, and will be maintained in a separate backport git tree from now on. 
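All of the removed shims follow the same version-gate shape. As a rough
illustration of the pattern being dropped (a sketch only: example_end_io()
and do_completion() are stand-ins, not functions from this tree):

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
/* old bio completion API: may be called on partial progress, returns int */
static int example_end_io(struct bio *bio, unsigned int bytes_done, int err)
{
	if (bio->bi_size)
		return 1;	/* part of the bio is still in flight */
	do_completion(bio, err);
	return 0;
}
#else
/* current API: called exactly once when the whole bio completes */
static void example_end_io(struct bio *bio, int err)
{
	do_completion(bio, err);
}
#endif

Only the second form is kept in mainline; older kernels will get the gated
version from the backport tree.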
Signed-off-by: Chris Mason --- fs/btrfs/async-thread.c | 6 --- fs/btrfs/compat.h | 40 +------------- fs/btrfs/crc32c.h | 6 --- fs/btrfs/ctree.h | 7 +-- fs/btrfs/disk-io.c | 28 ---------- fs/btrfs/export.c | 6 --- fs/btrfs/extent_io.c | 66 ++--------------------- fs/btrfs/file.c | 24 +-------- fs/btrfs/inode.c | 135 +----------------------------------------------- fs/btrfs/super.c | 7 --- fs/btrfs/sysfs.c | 33 ------------ fs/btrfs/volumes.c | 20 ------- 12 files changed, 10 insertions(+), 368 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 2ee30174019..4e780b279de 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -20,13 +20,7 @@ #include #include #include - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) # include -#else -# include -#endif - #include "async-thread.h" /* diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h index b0ed1887d9b..cd6598b169d 100644 --- a/fs/btrfs/compat.h +++ b/fs/btrfs/compat.h @@ -1,9 +1,8 @@ #ifndef _COMPAT_H_ #define _COMPAT_H_ -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26) -#define trylock_page(page) (!TestSetPageLocked(page)) -#endif +#define btrfs_drop_nlink(inode) drop_nlink(inode) +#define btrfs_inc_nlink(inode) inc_nlink(inode) #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27) static inline struct dentry *d_obtain_alias(struct inode *inode) @@ -22,39 +21,4 @@ static inline struct dentry *d_obtain_alias(struct inode *inode) } #endif -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) -static inline void btrfs_drop_nlink(struct inode *inode) -{ - inode->i_nlink--; -} - -static inline void btrfs_inc_nlink(struct inode *inode) -{ - inode->i_nlink++; -} -#else -# define btrfs_drop_nlink(inode) drop_nlink(inode) -# define btrfs_inc_nlink(inode) inc_nlink(inode) -#endif - -/* - * Even if AppArmor isn't enabled, it still has different prototypes. - * Add more distro/version pairs here to declare which has AppArmor applied. - */ -#if defined(CONFIG_SUSE_KERNEL) -# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) -# define REMOVE_SUID_PATH 1 -# endif -#endif - -/* - * catch any other distros that have patched in apparmor. This isn't - * 100% reliable because it won't catch people that hand compile their - * own distro kernels without apparmor compiled in. But, it is better - * than nothing. - */ -#ifdef CONFIG_SECURITY_APPARMOR -# define REMOVE_SUID_PATH 1 -#endif - #endif /* _COMPAT_H_ */ diff --git a/fs/btrfs/crc32c.h b/fs/btrfs/crc32c.h index bf6c12e8573..4f0fefed132 100644 --- a/fs/btrfs/crc32c.h +++ b/fs/btrfs/crc32c.h @@ -96,13 +96,7 @@ static inline u32 __btrfs_crc32c(u32 crc, unsigned char const *address, * We must workaround older implementations of crc32c_le() * found on older kernel versions. 
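 * (Pre-2.6.23 kernels needed the seed and the result byte-swapped with
 * __le32_to_cpu()/__cpu_to_le32(), which is what the #if branch removed
 * below did; current kernels can call __btrfs_crc32c() directly.)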
*/ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) -#define btrfs_crc32c(seed, data, length) \ - __cpu_to_le32( __btrfs_crc32c( __le32_to_cpu(seed), \ - (unsigned char const *)data, length) ) -#else #define btrfs_crc32c(seed, data, length) \ __btrfs_crc32c(seed, (unsigned char const *)data, length) #endif -#endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 138c157bbc4..3b3c1ca50c5 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1472,12 +1472,9 @@ static inline u32 btrfs_level_size(struct btrfs_root *root, int level) { ((unsigned long)(btrfs_leaf_data(leaf) + \ btrfs_item_offset_nr(leaf, slot))) -static inline struct dentry *fdentry(struct file *file) { -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - return file->f_dentry; -#else +static inline struct dentry *fdentry(struct file *file) +{ return file->f_path.dentry; -#endif } /* extent-tree.c */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d35ca6a3f51..dffb8dabd53 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -26,11 +26,7 @@ #include // for block_sync_page #include #include -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) # include -#else -# include -#endif #include "crc32c.h" #include "ctree.h" #include "disk-io.h" @@ -373,21 +369,11 @@ out: return ret; } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static void end_workqueue_bio(struct bio *bio, int err) -#else -static int end_workqueue_bio(struct bio *bio, - unsigned int bytes_done, int err) -#endif { struct end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - if (bio->bi_size) - return 1; -#endif - fs_info = end_io_wq->info; end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; @@ -397,10 +383,6 @@ static int end_workqueue_bio(struct bio *bio, &end_io_wq->work); else btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); - -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - return 0; -#endif } int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, @@ -1161,9 +1143,7 @@ void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) { -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) bdi_init(bdi); -#endif bdi->ra_pages = default_backing_dev_info.ra_pages; bdi->state = 0; bdi->capabilities = default_backing_dev_info.capabilities; @@ -1242,11 +1222,7 @@ static void end_workqueue_fn(struct btrfs_work *work) bio->bi_private = end_io_wq->private; bio->bi_end_io = end_io_wq->end_io; kfree(end_io_wq); -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - bio_endio(bio, bio->bi_size, error); -#else bio_endio(bio, error); -#endif } static int cleaner_kthread(void *arg) @@ -1673,9 +1649,7 @@ fail: kfree(extent_root); kfree(tree_root); -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) bdi_destroy(&fs_info->bdi); -#endif kfree(fs_info); return ERR_PTR(err); } @@ -1936,9 +1910,7 @@ int close_ctree(struct btrfs_root *root) btrfs_close_devices(fs_info->fs_devices); btrfs_mapping_tree_free(&fs_info->mapping_tree); -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) bdi_destroy(&fs_info->bdi); -#endif kfree(fs_info->extent_root); kfree(fs_info->tree_root); diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 2b357a6d240..48b82cd7583 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -7,12 +7,6 @@ #include "export.h" #include "compat.h" -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) -#define FILEID_BTRFS_WITHOUT_PARENT 0x4d -#define 
FILEID_BTRFS_WITH_PARENT 0x4e -#define FILEID_BTRFS_WITH_PARENT_ROOT 0x4f -#endif - #define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4) #define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4) #define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 58ad25838a4..e3a25be5c66 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1397,12 +1397,7 @@ static int check_page_writeback(struct extent_io_tree *tree, * Scheduling is not allowed, so the extent state tree is expected * to have one and only one object corresponding to this IO. */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static void end_bio_extent_writepage(struct bio *bio, int err) -#else -static int end_bio_extent_writepage(struct bio *bio, - unsigned int bytes_done, int err) -#endif { int uptodate = err == 0; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; @@ -1412,10 +1407,6 @@ static int end_bio_extent_writepage(struct bio *bio, int whole_page; int ret; -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - if (bio->bi_size) - return 1; -#endif do { struct page *page = bvec->bv_page; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -1461,10 +1452,8 @@ static int end_bio_extent_writepage(struct bio *bio, else check_page_writeback(tree, page); } while (bvec >= bio->bi_io_vec); + bio_put(bio); -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - return 0; -#endif } /* @@ -1478,12 +1467,7 @@ static int end_bio_extent_writepage(struct bio *bio, * Scheduling is not allowed, so the extent state tree is expected * to have one and only one object corresponding to this IO. */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static void end_bio_extent_readpage(struct bio *bio, int err) -#else -static int end_bio_extent_readpage(struct bio *bio, - unsigned int bytes_done, int err) -#endif { int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; @@ -1493,11 +1477,6 @@ static int end_bio_extent_readpage(struct bio *bio, int whole_page; int ret; -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - if (bio->bi_size) - return 1; -#endif - do { struct page *page = bvec->bv_page; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -1556,9 +1535,6 @@ static int end_bio_extent_readpage(struct bio *bio, } while (bvec >= bio->bi_io_vec); bio_put(bio); -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - return 0; -#endif } /* @@ -1566,12 +1542,7 @@ static int end_bio_extent_readpage(struct bio *bio, * the structs in the extent tree when done, and set the uptodate bits * as appropriate. 
*/ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static void end_bio_extent_preparewrite(struct bio *bio, int err) -#else -static int end_bio_extent_preparewrite(struct bio *bio, - unsigned int bytes_done, int err) -#endif { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; @@ -1579,11 +1550,6 @@ static int end_bio_extent_preparewrite(struct bio *bio, u64 start; u64 end; -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - if (bio->bi_size) - return 1; -#endif - do { struct page *page = bvec->bv_page; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -1607,9 +1573,6 @@ static int end_bio_extent_preparewrite(struct bio *bio, } while (bvec >= bio->bi_io_vec); bio_put(bio); -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - return 0; -#endif } static struct bio * @@ -2079,12 +2042,6 @@ done: return 0; } -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) -/* Taken directly from 2.6.23 with a mod for a lockpage hook */ -typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, - void *data); -#endif - /** * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. * @mapping: address space structure to write @@ -2201,10 +2158,9 @@ retry: } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + if (wbc->range_cont) wbc->range_start = index << PAGE_CACHE_SHIFT; -#endif return ret; } EXPORT_SYMBOL(extent_write_cache_pages); @@ -2560,18 +2516,10 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb, * by increasing the reference count. So we know the page must * be in the radix tree. */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) rcu_read_lock(); -#else - read_lock_irq(&mapping->tree_lock); -#endif p = radix_tree_lookup(&mapping->page_tree, i); - -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) rcu_read_unlock(); -#else - read_unlock_irq(&mapping->tree_lock); -#endif + return p; } @@ -2773,21 +2721,13 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, } } clear_page_dirty_for_io(page); -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) spin_lock_irq(&page->mapping->tree_lock); -#else - read_lock_irq(&page->mapping->tree_lock); -#endif if (!PageDirty(page)) { radix_tree_tag_clear(&page->mapping->page_tree, page_index(page), PAGECACHE_TAG_DIRTY); } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) spin_unlock_irq(&page->mapping->tree_lock); -#else - read_unlock_irq(&page->mapping->tree_lock); -#endif unlock_page(page); } return 0; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 48a702d41c8..8856570a0eb 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -871,15 +871,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf, goto out_nolock; if (count == 0) goto out_nolock; -#ifdef REMOVE_SUID_PATH - err = remove_suid(&file->f_path); -#else -# if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) + err = file_remove_suid(file); -# else - err = remove_suid(fdentry(file)); -# endif -#endif if (err) goto out_nolock; file_update_time(file); @@ -1003,17 +996,10 @@ out_nolock: btrfs_commit_transaction(trans, root); } } else if (num_written > 0 && (file->f_flags & O_DIRECT)) { -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) - do_sync_file_range(file, start_pos, - start_pos + num_written - 1, - SYNC_FILE_RANGE_WRITE | - SYNC_FILE_RANGE_WAIT_AFTER); -#else do_sync_mapping_range(inode->i_mapping, start_pos, start_pos + num_written - 1, 
SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER); -#endif invalidate_mapping_pages(inode->i_mapping, start_pos >> PAGE_CACHE_SHIFT, (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); @@ -1097,12 +1083,7 @@ out: } static struct vm_operations_struct btrfs_file_vm_ops = { -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) - .nopage = filemap_nopage, - .populate = filemap_populate, -#else .fault = filemap_fault, -#endif .page_mkwrite = btrfs_page_mkwrite, }; @@ -1118,9 +1099,6 @@ struct file_operations btrfs_file_operations = { .read = do_sync_read, .aio_read = generic_file_aio_read, .splice_read = generic_file_splice_read, -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) - .sendfile = generic_file_sendfile, -#endif .write = btrfs_file_write, .mmap = btrfs_file_mmap, .open = generic_file_open, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index adb169d739c..48a3dc03080 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2073,104 +2073,6 @@ err: return ret; } -/* Kernels earlier than 2.6.28 still have the NFS deadlock where nfsd - will call the file system's ->lookup() method from within its - filldir callback, which in turn was called from the file system's - ->readdir() method. And will deadlock for many file systems. */ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) - -struct nfshack_dirent { - u64 ino; - loff_t offset; - int namlen; - unsigned int d_type; - char name[]; -}; - -struct nfshack_readdir { - char *dirent; - size_t used; - int full; -}; - - - -static int btrfs_nfshack_filldir(void *__buf, const char *name, int namlen, - loff_t offset, u64 ino, unsigned int d_type) -{ - struct nfshack_readdir *buf = __buf; - struct nfshack_dirent *de = (void *)(buf->dirent + buf->used); - unsigned int reclen; - - reclen = ALIGN(sizeof(struct nfshack_dirent) + namlen, sizeof(u64)); - if (buf->used + reclen > PAGE_SIZE) { - buf->full = 1; - return -EINVAL; - } - - de->namlen = namlen; - de->offset = offset; - de->ino = ino; - de->d_type = d_type; - memcpy(de->name, name, namlen); - buf->used += reclen; - - return 0; -} - -static int btrfs_nfshack_readdir(struct file *file, void *dirent, - filldir_t filldir) -{ - struct nfshack_readdir buf; - struct nfshack_dirent *de; - int err; - int size; - loff_t offset; - - buf.dirent = (void *)__get_free_page(GFP_KERNEL); - if (!buf.dirent) - return -ENOMEM; - - offset = file->f_pos; - - do { - unsigned int reclen; - - buf.used = 0; - buf.full = 0; - err = btrfs_real_readdir(file, &buf, btrfs_nfshack_filldir); - if (err) - break; - - size = buf.used; - - if (!size) - break; - - de = (struct nfshack_dirent *)buf.dirent; - while (size > 0) { - offset = de->offset; - - if (filldir(dirent, de->name, de->namlen, de->offset, - de->ino, de->d_type)) - goto done; - offset = file->f_pos; - - reclen = ALIGN(sizeof(*de) + de->namlen, - sizeof(u64)); - size -= reclen; - de = (struct nfshack_dirent *)((char *)de + reclen); - } - } while (buf.full); - - done: - free_page((unsigned long)buf.dirent); - file->f_pos = offset; - - return err; -} -#endif - int btrfs_write_inode(struct inode *inode, int wait) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -3311,13 +3213,8 @@ unsigned long btrfs_force_ra(struct address_space *mapping, { pgoff_t req_size = last_index - offset + 1; -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) - offset = page_cache_readahead(mapping, ra, file, offset, req_size); - return offset; -#else page_cache_sync_readahead(mapping, ra, file, offset, req_size); return offset + req_size; -#endif } struct inode *btrfs_alloc_inode(struct 
super_block *sb) @@ -3373,14 +3270,7 @@ void btrfs_destroy_inode(struct inode *inode) kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) static void init_once(void *foo) -#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) -static void init_once(struct kmem_cache * cachep, void *foo) -#else -static void init_once(void * foo, struct kmem_cache * cachep, - unsigned long flags) -#endif { struct btrfs_inode *ei = (struct btrfs_inode *) foo; @@ -3403,22 +3293,10 @@ void btrfs_destroy_cachep(void) struct kmem_cache *btrfs_cache_create(const char *name, size_t size, unsigned long extra_flags, -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) - void (*ctor)(void *) -#elif LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) - void (*ctor)(struct kmem_cache *, void *) -#else - void (*ctor)(void *, struct kmem_cache *, - unsigned long) -#endif - ) + void (*ctor)(void *)) { return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT | - SLAB_MEM_SPREAD | extra_flags), ctor -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) - ,NULL -#endif - ); + SLAB_MEM_SPREAD | extra_flags), ctor); } int btrfs_init_cachep(void) @@ -3666,12 +3544,7 @@ static int btrfs_set_page_dirty(struct page *page) return __set_page_dirty_nobuffers(page); } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) static int btrfs_permission(struct inode *inode, int mask) -#else -static int btrfs_permission(struct inode *inode, int mask, - struct nameidata *nd) -#endif { if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE)) return -EACCES; @@ -3702,11 +3575,7 @@ static struct inode_operations btrfs_dir_ro_inode_operations = { static struct file_operations btrfs_dir_file_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) - .readdir = btrfs_nfshack_readdir, -#else /* NFSd readdir/lookup deadlock is fixed */ .readdir = btrfs_real_readdir, -#endif .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = btrfs_ioctl, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f7b3eac7ac6..8399d6d05d6 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -349,10 +349,7 @@ static int btrfs_fill_super(struct super_block * sb, sb->s_root = root_dentry; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25) save_mount_options(sb, data); -#endif - return 0; fail_close: @@ -566,11 +563,7 @@ static struct super_operations btrfs_super_ops = { .put_super = btrfs_put_super, .write_super = btrfs_write_super, .sync_fs = btrfs_sync_fs, -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) - .read_inode = btrfs_read_locked_inode, -#else .show_options = generic_show_options, -#endif .write_inode = btrfs_write_inode, .dirty_inode = btrfs_dirty_inode, .alloc_inode = btrfs_alloc_inode, diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 61af5d8446e..300076e6676 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -28,7 +28,6 @@ #include "disk-io.h" #include "transaction.h" -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25) static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf) { return snprintf(buf, PAGE_SIZE, "%llu\n", @@ -267,35 +266,3 @@ void btrfs_exit_sysfs(void) kset_unregister(btrfs_kset); } -#else - -int btrfs_sysfs_add_super(struct btrfs_fs_info *fs) -{ - return 0; -} - -int btrfs_sysfs_add_root(struct btrfs_root *root) -{ - return 0; -} - -void btrfs_sysfs_del_root(struct btrfs_root *root) -{ - return; -} - -void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) -{ - return; -} - -int btrfs_init_sysfs(void) -{ - return 
0; -} - -void btrfs_exit_sysfs(void) -{ - return; -} -#endif diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b9e5c2d82dd..ddf89626498 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2080,20 +2080,11 @@ int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23) static void end_bio_multi_stripe(struct bio *bio, int err) -#else -static int end_bio_multi_stripe(struct bio *bio, - unsigned int bytes_done, int err) -#endif { struct btrfs_multi_bio *multi = bio->bi_private; int is_orig_bio = 0; -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - if (bio->bi_size) - return 1; -#endif if (err) atomic_inc(&multi->error); @@ -2122,17 +2113,10 @@ static int end_bio_multi_stripe(struct bio *bio, } kfree(multi); -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - bio_endio(bio, bio->bi_size, err); -#else bio_endio(bio, err); -#endif } else if (!is_orig_bio) { bio_put(bio); } -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - return 0; -#endif } struct async_sched { @@ -2248,11 +2232,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, } else { bio->bi_bdev = root->fs_info->fs_devices->latest_bdev; bio->bi_sector = logical >> 9; -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23) - bio_endio(bio, bio->bi_size, -EIO); -#else bio_endio(bio, -EIO); -#endif } dev_nr++; } -- cgit v1.2.3-70-g09d2 From 5b21f2ed3f2947b5195b65c9fdbdd9e52904cc03 Mon Sep 17 00:00:00 2001 From: Zheng Yan Date: Fri, 26 Sep 2008 10:05:38 -0400 Subject: Btrfs: extent_map and data=ordered fixes for space balancing * Add an EXTENT_BOUNDARY state bit to keep the writepage code from merging data extents that are in the process of being relocated. This allows us to do accounting for them properly. * The balancing code relocates data extents independently of the underlying inode. The extent_map code was modified to properly account for things moving around (invalidating extent_map caches in the inode). * Don't take the drop_mutex in the create_subvol ioctl. It isn't required. * Fix walking of the ordered extent list to avoid races with sys_unlink. * Change the lock ordering rules. Transaction start goes outside the drop_mutex. This allows btrfs_commit_transaction to directly drop the relocation trees.
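One detail of the sys_unlink fix is worth spelling out: the old code took a bare reference with atomic_inc(&inode->i_count) on inodes reached through the ordered extent list, which can revive an inode that is already being torn down. The replacement, visible in both btrfs_wait_ordered_extents() and btrfs_start_delalloc_inodes() below, is igrab(), which returns NULL once eviction has begun. A minimal sketch of the idiom (struct work_entry is a hypothetical stand-in for the btrfs list entries):

#include <linux/fs.h>
#include <linux/list.h>

struct work_entry {		/* hypothetical list entry */
	struct list_head list;
	struct inode *inode;
};

static void process_entry(struct work_entry *entry)
{
	/*
	 * igrab() takes a reference only while the inode is still live;
	 * it returns NULL once the inode has started being freed, so a
	 * racing unlink simply makes us skip the entry.
	 */
	struct inode *inode = igrab(entry->inode);

	if (!inode)
		return;
	filemap_write_and_wait(inode->i_mapping);
	iput(inode);
}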
Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 9 +++++---- fs/btrfs/ctree.h | 11 ++++++++++- fs/btrfs/extent_io.c | 13 +++++++++---- fs/btrfs/extent_io.h | 1 + fs/btrfs/file.c | 31 +++++++++++++++++++++++------ fs/btrfs/inode-map.c | 4 ++++ fs/btrfs/inode.c | 52 +++++++++++++++++++++++++++++++++++-------------- fs/btrfs/ioctl.c | 2 -- fs/btrfs/ordered-data.c | 26 ++++++++++++------------- fs/btrfs/transaction.c | 8 ++++---- 10 files changed, 108 insertions(+), 49 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 50aea8cb653..f9cd40967d0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -290,7 +290,6 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, struct extent_buffer **cow_ret, u64 prealloc_dest) { u64 search_start; - u64 header_trans; int ret; if (trans->transaction != root->fs_info->running_transaction) { @@ -304,9 +303,9 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, WARN_ON(1); } - header_trans = btrfs_header_generation(buf); spin_lock(&root->fs_info->hash_lock); - if (header_trans == trans->transid && + if (btrfs_header_generation(buf) == trans->transid && + btrfs_header_owner(buf) == root->root_key.objectid && !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { *cow_ret = buf; spin_unlock(&root->fs_info->hash_lock); @@ -1300,6 +1299,7 @@ again: /* is a cow on this block not required */ spin_lock(&root->fs_info->hash_lock); if (btrfs_header_generation(b) == trans->transid && + btrfs_header_owner(b) == root->root_key.objectid && !btrfs_header_flag(b, BTRFS_HEADER_FLAG_WRITTEN)) { spin_unlock(&root->fs_info->hash_lock); goto cow_done; @@ -1396,7 +1396,8 @@ cow_done: /* this is only true while dropping a snapshot */ if (level == lowest_level) { - break; + ret = 0; + goto done; } blocknr = btrfs_node_blockptr(b, slot); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b9f9f815ed0..3e62a1b0a1f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1486,6 +1486,9 @@ static inline struct dentry *fdentry(struct file *file) /* extent-tree.c */ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); +int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, u32 *refs); int btrfs_update_pinned_extents(struct btrfs_root *root, u64 bytenr, u64 num, int pin); int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, @@ -1812,6 +1815,8 @@ void btrfs_destroy_inode(struct inode *inode); int btrfs_init_cachep(void); void btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); +struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, + struct btrfs_root *root, int wait); struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid, struct btrfs_root *root); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, @@ -1824,13 +1829,17 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); +int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); +int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode); +void btrfs_orphan_cleanup(struct btrfs_root *root); /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* file.c */ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync); -int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end); +int 
btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, + int skip_pinned); int btrfs_check_file(struct btrfs_root *root, struct inode *inode); extern struct file_operations btrfs_file_operations; int btrfs_drop_extents(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e3a25be5c66..8bd1b402f3f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -292,7 +292,7 @@ static int merge_state(struct extent_io_tree *tree, struct extent_state *other; struct rb_node *other_node; - if (state->state & EXTENT_IOBITS) + if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) return 0; other_node = rb_prev(&state->rb_node); @@ -1070,7 +1070,8 @@ search_again: while(1) { state = rb_entry(node, struct extent_state, rb_node); - if (found && state->start != cur_start) { + if (found && (state->start != cur_start || + (state->state & EXTENT_BOUNDARY))) { goto out; } if (!(state->state & EXTENT_DELALLOC)) { @@ -1078,7 +1079,7 @@ search_again: *end = state->end; goto out; } - if (!found) { + if (!found && !(state->state & EXTENT_BOUNDARY)) { struct extent_state *prev_state; struct rb_node *prev_node = node; while(1) { @@ -1088,7 +1089,11 @@ search_again: prev_state = rb_entry(prev_node, struct extent_state, rb_node); - if (!(prev_state->state & EXTENT_DELALLOC)) + if ((prev_state->end + 1 != state->start) || + !(prev_state->state & EXTENT_DELALLOC)) + break; + if ((cur_start - prev_state->start) * 2 > + max_bytes) break; state = prev_state; node = prev_node; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 3cb411a5f4d..c9d1908a1ae 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -15,6 +15,7 @@ #define EXTENT_BUFFER_FILLED (1 << 8) #define EXTENT_ORDERED (1 << 9) #define EXTENT_ORDERED_METADATA (1 << 10) +#define EXTENT_BOUNDARY (1 << 11) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) /* diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8856570a0eb..1b7e51a9db0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -294,7 +294,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, last_pos_in_file, 0, 0, hole_size, 0); btrfs_drop_extent_cache(inode, last_pos_in_file, - last_pos_in_file + hole_size -1); + last_pos_in_file + hole_size - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); btrfs_check_file(root, inode); } @@ -337,7 +337,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, inline_size -= start_pos; err = insert_inline_extent(trans, root, inode, start_pos, inline_size, pages, 0, num_pages); - btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1); + btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1, 0); BUG_ON(err); mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -362,7 +362,8 @@ out_unlock: return err; } -int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) +int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, + int skip_pinned) { struct extent_map *em; struct extent_map *split = NULL; @@ -371,6 +372,7 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) u64 len = end - start + 1; int ret; int testend = 1; + unsigned long flags; WARN_ON(end < start); if (end == (u64)-1) { @@ -389,6 +391,23 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) spin_unlock(&em_tree->lock); break; } + flags = em->flags; + if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { + spin_unlock(&em_tree->lock); + if (em->start <= start && + (!testend 
|| em->start + em->len >= start + len)) { + free_extent_map(em); + break; + } + if (start < em->start) { + len = em->start - start; + } else { + len = start + len - (em->start + em->len); + start = em->start + em->len; + } + free_extent_map(em); + continue; + } clear_bit(EXTENT_FLAG_PINNED, &em->flags); remove_extent_mapping(em_tree, em); @@ -398,7 +417,7 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) split->len = start - em->start; split->block_start = em->block_start; split->bdev = em->bdev; - split->flags = em->flags; + split->flags = flags; ret = add_extent_mapping(em_tree, split); BUG_ON(ret); free_extent_map(split); @@ -412,7 +431,7 @@ int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end) split->start = start + len; split->len = em->start + em->len - (start + len); split->bdev = em->bdev; - split->flags = em->flags; + split->flags = flags; split->block_start = em->block_start + diff; @@ -541,7 +560,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, int recow; int ret; - btrfs_drop_extent_cache(inode, start, end - 1); + btrfs_drop_extent_cache(inode, start, end - 1, 0); path = btrfs_alloc_path(); if (!path) diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index cd6171c2da4..80038c5ef7c 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -117,10 +117,14 @@ int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, *objectid = last_ino; goto found; } + } else if (key.objectid > search_start) { + *objectid = search_start; + goto found; } } if (key.objectid >= BTRFS_LAST_FREE_OBJECTID) break; + start_found = 1; last_ino = key.objectid + 1; path->slots[0]++; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 48a3dc03080..4516fbf0167 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -135,7 +135,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); mutex_lock(&BTRFS_I(inode)->extent_mutex); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1); + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); while(num_bytes > 0) { @@ -163,7 +163,7 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) break; } btrfs_drop_extent_cache(inode, start, - start + ins.offset - 1); + start + ins.offset - 1, 0); } mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -587,7 +587,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + - ordered_extent->len - 1); + ordered_extent->len - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); ins.objectid = ordered_extent->start; @@ -880,7 +880,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) int ret = 0, nr_unlink = 0, nr_truncate = 0; /* don't do orphan cleanup if the fs is readonly. */ - if (root->inode->i_sb->s_flags & MS_RDONLY) + if (root->fs_info->sb->s_flags & MS_RDONLY) return; path = btrfs_alloc_path(); @@ -892,8 +892,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); key.offset = (u64)-1; - trans = btrfs_start_transaction(root, 1); - btrfs_set_trans_block_group(trans, root->inode); while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -933,7 +931,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * crossing root thing. we store the inode number in the * offset of the orphan item. 
*/ - inode = btrfs_iget_locked(root->inode->i_sb, + inode = btrfs_iget_locked(root->fs_info->sb, found_key.offset, root); if (!inode) break; @@ -965,7 +963,9 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) * do a destroy_inode */ if (is_bad_inode(inode)) { + trans = btrfs_start_transaction(root, 1); btrfs_orphan_del(trans, inode); + btrfs_end_transaction(trans, root); iput(inode); continue; } @@ -988,7 +988,6 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate); btrfs_free_path(path); - btrfs_end_transaction(trans, root); } void btrfs_read_locked_inode(struct inode *inode) @@ -1343,8 +1342,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, u64 mask = root->sectorsize - 1; if (root->ref_cows) - btrfs_drop_extent_cache(inode, - new_size & (~mask), (u64)-1); + btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); path = btrfs_alloc_path(); path->reada = -1; BUG_ON(!path); @@ -1677,7 +1675,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) hole_start, 0, 0, hole_size, 0); btrfs_drop_extent_cache(inode, hole_start, - (u64)-1); + (u64)-1, 0); btrfs_check_file(root, inode); } mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -1843,6 +1841,24 @@ static int btrfs_find_actor(struct inode *inode, void *opaque) args->root == BTRFS_I(inode)->root); } +struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, + struct btrfs_root *root, int wait) +{ + struct inode *inode; + struct btrfs_iget_args args; + args.ino = objectid; + args.root = root; + + if (wait) { + inode = ilookup5(s, objectid, btrfs_find_actor, + (void *)&args); + } else { + inode = ilookup5_nowait(s, objectid, btrfs_find_actor, + (void *)&args); + } + return inode; +} + struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid, struct btrfs_root *root) { @@ -3266,7 +3282,7 @@ void btrfs_destroy_inode(struct inode *inode) btrfs_put_ordered_extent(ordered); } } - btrfs_drop_extent_cache(inode, 0, (u64)-1); + btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } @@ -3412,16 +3428,22 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) { struct list_head *head = &root->fs_info->delalloc_inodes; struct btrfs_inode *binode; + struct inode *inode; unsigned long flags; spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); while(!list_empty(head)) { binode = list_entry(head->next, struct btrfs_inode, delalloc_inodes); - atomic_inc(&binode->vfs_inode.i_count); + inode = igrab(&binode->vfs_inode); + if (!inode) + list_del_init(&binode->delalloc_inodes); spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); - filemap_write_and_wait(binode->vfs_inode.i_mapping); - iput(&binode->vfs_inode); + if (inode) { + filemap_write_and_wait(inode->i_mapping); + iput(inode); + } + cond_resched(); spin_lock_irqsave(&root->fs_info->delalloc_lock, flags); } spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4c6e0c15754..04de767a8db 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -444,12 +444,10 @@ static noinline int btrfs_ioctl_snap_create(struct btrfs_root *root, goto out; } - mutex_lock(&root->fs_info->drop_mutex); if (root == root->fs_info->tree_root) ret = create_subvol(root, vol_args->name, namelen); else ret = create_snapshot(root, vol_args->name, namelen); - mutex_unlock(&root->fs_info->drop_mutex); out: kfree(vol_args); return ret; diff --git a/fs/btrfs/ordered-data.c 
b/fs/btrfs/ordered-data.c index da6d43eb41d..951eacff242 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -309,7 +309,6 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) { struct list_head splice; struct list_head *cur; - struct list_head *tmp; struct btrfs_ordered_extent *ordered; struct inode *inode; @@ -317,37 +316,38 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) spin_lock(&root->fs_info->ordered_extent_lock); list_splice_init(&root->fs_info->ordered_extents, &splice); - list_for_each_safe(cur, tmp, &splice) { + while (!list_empty(&splice)) { cur = splice.next; ordered = list_entry(cur, struct btrfs_ordered_extent, root_extent_list); if (nocow_only && !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { + list_move(&ordered->root_extent_list, + &root->fs_info->ordered_extents); cond_resched_lock(&root->fs_info->ordered_extent_lock); continue; } list_del_init(&ordered->root_extent_list); atomic_inc(&ordered->refs); - inode = ordered->inode; /* - * the inode can't go away until all the pages are gone - * and the pages won't go away while there is still - * an ordered extent and the ordered extent won't go - * away until it is off this list. So, we can safely - * increment i_count here and call iput later + * the inode may be getting freed (in sys_unlink path). */ - atomic_inc(&inode->i_count); + inode = igrab(ordered->inode); + spin_unlock(&root->fs_info->ordered_extent_lock); - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - iput(inode); + if (inode) { + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + iput(inode); + } else { + btrfs_put_ordered_extent(ordered); + } spin_lock(&root->fs_info->ordered_extent_lock); } - list_splice_init(&splice, &root->fs_info->ordered_extents); spin_unlock(&root->fs_info->ordered_extent_lock); return 0; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 656baefa525..8c83cf464c8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -109,6 +109,7 @@ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) spin_lock_init(&dirty->root->node_lock); spin_lock_init(&dirty->root->list_lock); mutex_init(&dirty->root->objectid_mutex); + mutex_init(&dirty->root->log_mutex); INIT_LIST_HEAD(&dirty->root->dead_list); dirty->root->node = root->commit_root; dirty->root->commit_root = NULL; @@ -590,13 +591,14 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, root = dirty->latest_root; atomic_inc(&root->fs_info->throttles); - mutex_lock(&root->fs_info->drop_mutex); while(1) { trans = btrfs_start_transaction(tree_root, 1); + mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, dirty->root); if (ret != -EAGAIN) { break; } + mutex_unlock(&root->fs_info->drop_mutex); err = btrfs_update_root(trans, tree_root, @@ -608,10 +610,8 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, ret = btrfs_end_transaction(trans, tree_root); BUG_ON(ret); - mutex_unlock(&root->fs_info->drop_mutex); btrfs_btree_balance_dirty(tree_root, nr); cond_resched(); - mutex_lock(&root->fs_info->drop_mutex); } BUG_ON(ret); atomic_dec(&root->fs_info->throttles); @@ -689,7 +689,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); key.objectid = objectid; - key.offset = 1; + key.offset = trans->transid; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); old = 
btrfs_lock_root_node(root); -- cgit v1.2.3-70-g09d2 From d352ac68148b69937d39ca5d48bcc4478e118dbf Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 29 Sep 2008 15:18:18 -0400 Subject: Btrfs: add and improve comments This improves the comments at the top of many functions. It didn't dive into the guts of functions because I was trying to avoid merging problems with the new allocator and back reference work. extent-tree.c and volumes.c were both skipped, and there is definitely more work to do in cleaning and commenting the code. Signed-off-by: Chris Mason --- fs/btrfs/Makefile | 2 +- fs/btrfs/TODO | 20 ----- fs/btrfs/async-thread.c | 10 ++- fs/btrfs/async-thread.h | 7 +- fs/btrfs/bit-radix.c | 130 --------------------------------- fs/btrfs/bit-radix.h | 33 --------- fs/btrfs/btrfs_inode.h | 54 +++++++++++++- fs/btrfs/crc32c.h | 18 +++++ fs/btrfs/ctree.c | 127 +++++++++++++++++++++++++++++--- fs/btrfs/ctree.h | 1 - fs/btrfs/dir-item.c | 41 +++++++++++ fs/btrfs/disk-io.c | 33 ++++++++- fs/btrfs/extent_io.c | 34 ++++++++- fs/btrfs/extent_map.c | 10 +++ fs/btrfs/file.c | 44 ++++++++++- fs/btrfs/inode.c | 189 +++++++++++++++++++++++++++++++----------------- fs/btrfs/locking.c | 13 ++++ fs/btrfs/ordered-data.c | 19 ++++- fs/btrfs/ref-cache.c | 26 +++++++ fs/btrfs/ref-cache.h | 3 + fs/btrfs/root-tree.c | 21 +++++- fs/btrfs/struct-funcs.c | 21 ++++++ fs/btrfs/super.c | 3 + fs/btrfs/transaction.c | 67 ++++++++++++++++- fs/btrfs/tree-defrag.c | 4 + 25 files changed, 653 insertions(+), 277 deletions(-) delete mode 100644 fs/btrfs/TODO delete mode 100644 fs/btrfs/bit-radix.c delete mode 100644 fs/btrfs/bit-radix.h (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index d5c28557fba..48b7909ca8d 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -4,7 +4,7 @@ ifneq ($(KERNELRELEASE),) obj-m := btrfs.o btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ file-item.o inode-item.o inode-map.o disk-io.o \ - transaction.o bit-radix.o inode.o file.o tree-defrag.o \ + transaction.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ ref-cache.o export.o tree-log.o acl.o free-space-cache.o diff --git a/fs/btrfs/TODO b/fs/btrfs/TODO deleted file mode 100644 index d9b6d38c603..00000000000 --- a/fs/btrfs/TODO +++ /dev/null @@ -1,20 +0,0 @@ -* cleanup, add more error checking, get rid of BUG_ONs -* Fix ENOSPC handling -* Make allocator smarter -* add a block group to struct inode -* Do actual block accounting -* Check compat and incompat flags on the inode -* Get rid of struct ctree_path, limiting tree levels held at one time -* Add generation number to key pointer in nodes -* Add generation number to inode -* forbid cross subvolume renames and hardlinks -* Release -* Do real tree locking -* Add extent mirroring (backup copies of blocks) -* Add fancy interface to get access to incremental backups -* Add fancy striped extents to make big reads faster -* Use relocation to try and fix write errors -* Make allocator much smarter -* xattrs (directory streams for regular files) -* Scrub & defrag - diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 4e780b279de..04fb9702d14 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -231,17 +231,25 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers) /* * if we pick a busy task, move the task to the end of the list.
- * hopefully this will keep things somewhat evenly balanced + * hopefully this will keep things somewhat evenly balanced. + * Do the move in batches based on the sequence number. This groups + * requests submitted at roughly the same time onto the same worker. */ next = workers->worker_list.next; worker = list_entry(next, struct btrfs_worker_thread, worker_list); atomic_inc(&worker->num_pending); worker->sequence++; + if (worker->sequence % workers->idle_thresh == 0) list_move_tail(next, &workers->worker_list); return worker; } +/* + * selects a worker thread to take the next job. This will either find + * an idle worker, start a new worker up to the max count, or just return + * one of the existing busy workers. + */ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers) { struct btrfs_worker_thread *worker; diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h index 43e44d115dd..4ec9a2ee0f9 100644 --- a/fs/btrfs/async-thread.h +++ b/fs/btrfs/async-thread.h @@ -63,14 +63,17 @@ struct btrfs_workers { /* once a worker has this many requests or fewer, it is idle */ int idle_thresh; - /* list with all the work threads */ + /* list with all the work threads. The workers on the idle thread + * may be actively servicing jobs, but they haven't yet hit the + * idle thresh limit above. + */ struct list_head worker_list; struct list_head idle_list; /* lock for finding the next worker thread to queue on */ spinlock_t lock; - /* extra name for this worker */ + /* extra name for this worker, used for current->name */ char *name; }; diff --git a/fs/btrfs/bit-radix.c b/fs/btrfs/bit-radix.c deleted file mode 100644 index e8bf876db39..00000000000 --- a/fs/btrfs/bit-radix.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2007 Oracle. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. 
- */ - -#include "bit-radix.h" - -#define BIT_ARRAY_BYTES 256 -#define BIT_RADIX_BITS_PER_ARRAY ((BIT_ARRAY_BYTES - sizeof(unsigned long)) * 8) - -extern struct kmem_cache *btrfs_bit_radix_cachep; -int set_radix_bit(struct radix_tree_root *radix, unsigned long bit) -{ - unsigned long *bits; - unsigned long slot; - int bit_slot; - int ret; - - slot = bit / BIT_RADIX_BITS_PER_ARRAY; - bit_slot = bit % BIT_RADIX_BITS_PER_ARRAY; - - bits = radix_tree_lookup(radix, slot); - if (!bits) { - bits = kmem_cache_alloc(btrfs_bit_radix_cachep, GFP_NOFS); - if (!bits) - return -ENOMEM; - memset(bits + 1, 0, BIT_ARRAY_BYTES - sizeof(unsigned long)); - bits[0] = slot; - ret = radix_tree_insert(radix, slot, bits); - if (ret) - return ret; - } - ret = test_and_set_bit(bit_slot, bits + 1); - if (ret < 0) - ret = 1; - return ret; -} - -int test_radix_bit(struct radix_tree_root *radix, unsigned long bit) -{ - unsigned long *bits; - unsigned long slot; - int bit_slot; - - slot = bit / BIT_RADIX_BITS_PER_ARRAY; - bit_slot = bit % BIT_RADIX_BITS_PER_ARRAY; - - bits = radix_tree_lookup(radix, slot); - if (!bits) - return 0; - return test_bit(bit_slot, bits + 1); -} - -int clear_radix_bit(struct radix_tree_root *radix, unsigned long bit) -{ - unsigned long *bits; - unsigned long slot; - int bit_slot; - int i; - int empty = 1; - - slot = bit / BIT_RADIX_BITS_PER_ARRAY; - bit_slot = bit % BIT_RADIX_BITS_PER_ARRAY; - - bits = radix_tree_lookup(radix, slot); - if (!bits) - return 0; - clear_bit(bit_slot, bits + 1); - for (i = 1; i < BIT_ARRAY_BYTES / sizeof(unsigned long); i++) { - if (bits[i]) { - empty = 0; - break; - } - } - if (empty) { - bits = radix_tree_delete(radix, slot); - BUG_ON(!bits); - kmem_cache_free(btrfs_bit_radix_cachep, bits); - } - return 0; -} - -int find_first_radix_bit(struct radix_tree_root *radix, unsigned long *retbits, - unsigned long start, int nr) -{ - unsigned long *bits; - unsigned long *gang[4]; - int found; - int ret; - int i; - int total_found = 0; - unsigned long slot; - - slot = start / BIT_RADIX_BITS_PER_ARRAY; - ret = radix_tree_gang_lookup(radix, (void **)gang, slot, - ARRAY_SIZE(gang)); - found = start % BIT_RADIX_BITS_PER_ARRAY; - for (i = 0; i < ret && nr > 0; i++) { - bits = gang[i]; - while(nr > 0) { - found = find_next_bit(bits + 1, - BIT_RADIX_BITS_PER_ARRAY, - found); - if (found < BIT_RADIX_BITS_PER_ARRAY) { - *retbits = bits[0] * - BIT_RADIX_BITS_PER_ARRAY + found; - retbits++; - nr--; - total_found++; - found++; - } else - break; - } - found = 0; - } - return total_found; -} diff --git a/fs/btrfs/bit-radix.h b/fs/btrfs/bit-radix.h deleted file mode 100644 index c100f54d5c3..00000000000 --- a/fs/btrfs/bit-radix.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright (C) 2007 Oracle. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public - * License v2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this program; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 021110-1307, USA. 
- */ - -#ifndef __BIT_RADIX__ -#define __BIT_RADIX__ -#include - -int set_radix_bit(struct radix_tree_root *radix, unsigned long bit); -int test_radix_bit(struct radix_tree_root *radix, unsigned long bit); -int clear_radix_bit(struct radix_tree_root *radix, unsigned long bit); -int find_first_radix_bit(struct radix_tree_root *radix, unsigned long *retbits, - unsigned long start, int nr); - -static inline void init_bit_radix(struct radix_tree_root *radix) -{ - INIT_RADIX_TREE(radix, GFP_NOFS); -} -#endif diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 0577fda2168..0b2e623cf42 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -25,27 +25,58 @@ /* in memory btrfs inode */ struct btrfs_inode { + /* which subvolume this inode belongs to */ struct btrfs_root *root; + + /* the block group preferred for allocations. This pointer is buggy + * and needs to be replaced with a bytenr instead + */ struct btrfs_block_group_cache *block_group; + + /* key used to find this inode on disk. This is used by the code + * to read in roots of subvolumes + */ struct btrfs_key location; + + /* the extent_tree has caches of all the extent mappings to disk */ struct extent_map_tree extent_tree; + + /* the io_tree does range state (DIRTY, LOCKED etc) */ struct extent_io_tree io_tree; + + /* special utility tree used to record which mirrors have already been + * tried when checksums fail for a given block + */ struct extent_io_tree io_failure_tree; + + /* held while inserting checksums to avoid races */ struct mutex csum_mutex; + + /* held while inserting or deleting extents from files */ struct mutex extent_mutex; + + /* held while logging the inode in tree-log.c */ struct mutex log_mutex; - struct inode vfs_inode; + + /* used to order data wrt metadata */ struct btrfs_ordered_inode_tree ordered_tree; /* standard acl pointers */ struct posix_acl *i_acl; struct posix_acl *i_default_acl; /* for keeping track of orphaned inodes */ struct list_head i_orphan; + /* list of all the delalloc inodes in the FS. There are times we need + * to write all the delalloc pages to disk, and this list is used + * to walk them all. + */ struct list_head delalloc_inodes; - /* full 64 bit generation number */ + /* full 64 bit generation number, struct vfs_inode doesn't have a big + * enough field for this. + */ u64 generation; /* @@ -57,10 +88,25 @@ struct btrfs_inode { */ u64 logged_trans; - /* trans that last made a change that should be fully fsync'd */ + /* + * trans that last made a change that should be fully fsync'd. This + * gets reset to zero each time the inode is logged + */ u64 log_dirty_trans; + + /* total number of bytes pending delalloc, used by stat to calc the + * real block usage of the file + */ u64 delalloc_bytes; + + /* + * the size of the file stored in the metadata on disk. data=ordered + * means the in-memory i_size might be larger than the size on disk + * because not all the blocks are written yet. + */ u64 disk_i_size; + + /* flags field from the on disk inode */ u32 flags; /* @@ -68,6 +114,8 @@ struct btrfs_inode { * number for new files that are created */ u64 index_cnt; + + struct inode vfs_inode; }; static inline struct btrfs_inode *BTRFS_I(struct inode *inode) diff --git a/fs/btrfs/crc32c.h b/fs/btrfs/crc32c.h index 4f0fefed132..1eaf11d334f 100644 --- a/fs/btrfs/crc32c.h +++ b/fs/btrfs/crc32c.h @@ -1,3 +1,21 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved.
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + #ifndef __BTRFS_CRC32C__ #define __BTRFS_CRC32C__ #include diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 50e81f43e6d..ff3261ff2e1 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Oracle. All rights reserved. + * Copyright (C) 2007,2008 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public @@ -54,12 +54,19 @@ struct btrfs_path *btrfs_alloc_path(void) return path; } +/* this also releases the path */ void btrfs_free_path(struct btrfs_path *p) { btrfs_release_path(NULL, p); kmem_cache_free(btrfs_path_cachep, p); } +/* + * path release drops references on the extent buffers in the path + * and it drops any locks held by this path + * + * It is safe to call this on paths that have no locks or extent buffers held. + */ void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) { int i; @@ -77,6 +84,16 @@ void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) } } +/* + * safely gets a reference on the root node of a tree. A lock + * is not taken, so a concurrent writer may put a different node + * at the root of the tree. See btrfs_lock_root_node for the + * looping required. + * + * The extent buffer returned by this has a reference taken, so + * it won't disappear. It may stop being the root of the tree + * at any time because there are no locks held. + */ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) { struct extent_buffer *eb; @@ -87,6 +104,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root) return eb; } +/* loop around taking references on and locking the root node of the + * tree until you end up with a lock on the root. A locked buffer + * is returned, with a reference held. + */ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; @@ -108,6 +129,10 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) return eb; } +/* cowonly root (everything not a reference counted cow subvolume), just get + * put onto a simple dirty list. transaction.c walks this to make sure they + * get properly updated on disk. + */ static void add_root_to_dirty_list(struct btrfs_root *root) { if (root->track_dirty && list_empty(&root->dirty_list)) { @@ -116,6 +141,11 @@ static void add_root_to_dirty_list(struct btrfs_root *root) } } +/* + * used by snapshot creation to make a copy of a root for a tree with + * a given objectid. The buffer with the new root node is returned in + * cow_ret, and this func returns zero on success or a negative error code.
+ */ int btrfs_copy_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -167,6 +197,22 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, return 0; } +/* + * does the dirty work in cow of a single block. The parent block + * (if supplied) is updated to point to the new cow copy. The new + * buffer is marked dirty and returned locked. If you modify the block + * it needs to be marked dirty again. + * + * search_start -- an allocation hint for the new block + * + * empty_size -- a hint that you plan on doing more cow. This is the size in bytes + * the allocator should try to find free next to the block it returns. This is + * just a hint and may be ignored by the allocator. + * + * prealloc_dest -- if you have already reserved a destination for the cow, + * this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent + * is used to finish the allocation. + */ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, @@ -311,6 +357,11 @@ int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, return 0; } +/* + * cows a single block, see __btrfs_cow_block for the real work. + * This version of it has extra checks so that a block isn't cow'd more than + * once per transaction, as long as it hasn't been written yet + */ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, @@ -347,6 +398,10 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, return ret; } +/* + * helper function for defrag to decide if two blocks pointed to by a + * node are actually close by + */ static int close_blocks(u64 blocknr, u64 other, u32 blocksize) { if (blocknr < other && other - (blocknr + blocksize) < 32768) @@ -381,6 +436,11 @@ static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2) } +/* + * this is used by the defrag code to go through all the + * leaves pointed to by a node and reallocate them so that + * disk order is close to key order + */ int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *parent, int start_slot, int cache_only, u64 *last_ret, @@ -521,6 +581,10 @@ static inline unsigned int leaf_data_end(struct btrfs_root *root, return btrfs_item_offset_nr(leaf, nr - 1); } +/* + * extra debugging checks to make sure all the items in a key are + * well formed and in the proper order + */ static int check_node(struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -561,6 +625,10 @@ static int check_node(struct btrfs_root *root, struct btrfs_path *path, return 0; } +/* + * extra checking to make sure all the items in a leaf are + * well formed and in the proper order + */ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -782,6 +850,10 @@ static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, return -1; } +/* given a node and slot number, this reads the blocks it points to. The + * extent buffer is returned with a reference taken (but unlocked). + * NULL is returned on error. 
+ */ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, struct extent_buffer *parent, int slot) { @@ -798,6 +870,11 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root, btrfs_node_ptr_generation(parent, slot)); } +/* + * node level balancing, used to make sure nodes are in proper order for + * item deletion. We balance from the top down, so we have to make sure + * that a deletion won't leave a node completely empty later on. + */ static noinline int balance_level(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) @@ -1024,7 +1101,10 @@ enospc: return ret; } -/* returns zero if the push worked, non-zero otherwise */ +/* Node balancing for insertion. Here we only split or push nodes around + * when they are completely full. This is also done top down, so we + * have to be pessimistic. + */ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) @@ -1150,7 +1230,8 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, } /* - * readahead one full node of leaves + * readahead one full node of leaves, finding things that are close + * to the block in 'slot', and triggering ra on them. */ static noinline void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, @@ -1226,6 +1307,19 @@ static noinline void reada_for_search(struct btrfs_root *root, } } +/* + * when we walk down the tree, it is usually safe to unlock the higher layers in + * the tree. The exceptions are when our path goes through slot 0, because operations + * on the tree might require changing key pointers higher up in the tree. + * + * callers might also have set path->keep_locks, which tells this code to + * keep the lock if the path points to the last slot in the block. This is + * part of walking through the tree, and selecting the next slot in the higher + * block. + * + * lowest_unlock sets the lowest level in the tree we're allowed to unlock. + * so if lowest_unlock is 1, level 0 won't be unlocked + */ static noinline void unlock_up(struct btrfs_path *path, int level, int lowest_unlock) { @@ -2705,6 +2799,12 @@ again: return ret; } +/* + * make the item pointed to by the path smaller. new_size indicates + * how small to make it, and from_end tells us if we just chop bytes + * off the end of the item or if we shift the item to chop bytes off + * the front. + */ int btrfs_truncate_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -2818,6 +2918,9 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, return ret; } +/* + * make the item pointed to by the path bigger, data_size is the new size. + */ int btrfs_extend_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u32 data_size) @@ -2897,7 +3000,7 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, } /* - * Given a key and some data, insert an item into the tree. + * Given a key and some data, insert items into the tree. * This does all the path init required, making room in the tree if needed. */ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, @@ -3046,9 +3149,8 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root /* * delete the pointer from a given node. * - * If the delete empties a node, the node is removed from the tree, - * continuing all the way the root if required.
The root is converted into - a leaf if all the nodes are emptied. + the tree should have been previously balanced so the deletion does not + empty a node. */ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level, int slot) @@ -3233,6 +3335,9 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, * search the tree again to find a leaf with lesser keys * returns 0 if it found something or 1 if there are no lesser leaves. * returns < 0 on io errors. + * + * This may release the path, and so you may lose any locks held at the + * time you call it. */ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) { @@ -3265,9 +3370,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) /* * A helper function to walk down the tree starting at min_key, and looking * for nodes or leaves that are either in cache or have a minimum - * transaction id. This is used by the btree defrag code, but could - * also be used to search for blocks that have changed since a given - * transaction id. + * transaction id. This is used by the btree defrag code and tree logging. * * This does not cow, but it does stuff the starting key it finds back * into min_key, so you can call btrfs_search_slot with cow=1 on the @@ -3279,6 +3382,10 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) * This honors path->lowest_level to prevent descent past a given level * of the tree. * + * min_trans indicates the oldest transaction that you are interested + * in walking through. Any nodes or leaves older than min_trans are + * skipped over (without reading them). + * * returns zero if something useful was found, < 0 on error and 1 if there * was nothing in the tree that matched the search criteria. */ diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0079b60b18f..ded1643c027 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -27,7 +27,6 @@ #include #include #include -#include "bit-radix.h" #include "extent_io.h" #include "extent_map.h" #include "async-thread.h" diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index e4f30090d64..5040b71f190 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -21,6 +21,14 @@ #include "hash.h" #include "transaction.h" +/* + * insert a name into a directory, doing overflow properly if there is a hash + * collision. data_size indicates how big the item inserted should be. On + * success a struct btrfs_dir_item pointer is returned, otherwise it is + * an ERR_PTR. + * + * The name is not copied into the dir item, you have to do that yourself. + */ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -55,6 +63,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle return (struct btrfs_dir_item *)ptr; } +/* + * xattrs work a lot like directories, this inserts an xattr item + * into the tree + */ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, u16 name_len, const void *data, u16 data_len, @@ -109,6 +121,13 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, return ret; } +/* + * insert a directory item in the tree, doing all the magic for + * both indexes.
'dir' indicates which objectid to insert it into, + * 'location' is the key to stuff into the directory item, 'type' is the + * type of the inode we're pointing to, and 'index' is the sequence number + * to use for the second index (if one is created). + */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, u64 dir, struct btrfs_key *location, u8 type, u64 index) @@ -184,6 +203,11 @@ out: return 0; } +/* + * lookup a directory item based on name. 'dir' is the objectid + * we're searching in, and 'mod' tells us if you plan on deleting the + * item (use mod < 0) or changing the options (use mod > 0) + */ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, @@ -222,6 +246,14 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, return btrfs_match_dir_item_name(root, path, name, name_len); } +/* + * lookup a directory item based on index. 'dir' is the objectid + * we're searching in, and 'mod' tells us if you plan on deleting the + * item (use mod < 0) or changing the options (use mod > 0) + * + * The name is used to make sure the index really points to the name you were + * looking for. + */ struct btrfs_dir_item * btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -282,6 +314,11 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, return btrfs_match_dir_item_name(root, path, name, name_len); } +/* + * helper function to look at the directory item pointed to by 'path'. + * This walks through all the entries in a dir item and finds one + * for a specific name. + */ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, struct btrfs_path *path, const char *name, int name_len) @@ -313,6 +350,10 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, return NULL; } +/* + * given a pointer into a directory item, delete it. This + * handles items that have more than one entry in them. + */ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 45b4f728527..5ee10d3136f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -55,6 +55,11 @@ static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf) static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); +/* + * end_io_wq structs are used to do processing in task context when an IO is + * complete. This is used during reads to verify checksums, and it is used + * by writes to insert metadata for new file extents after IO is complete. + */ struct end_io_wq { struct bio *bio; bio_end_io_t *end_io; @@ -66,6 +71,11 @@ struct end_io_wq { struct btrfs_work work; }; +/* + * async submit bios are used to offload expensive checksumming + * onto the worker threads. They checksum file and metadata bios + * just before they are sent down the IO stack.
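+ *
+ * The rough shape of the hand-off, sketched under the assumption that
+ * the submit path simply packages its arguments and queues work (names
+ * approximate, not the exact code):
+ *
+ *	async = kmalloc(sizeof(*async), GFP_NOFS);
+ *	async->inode = inode;
+ *	async->bio = bio;
+ *	async->work.func = run_one_async_submit;
+ *	btrfs_queue_worker(&fs_info->workers, &async->work);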
+ */ struct async_submit_bio { struct inode *inode; struct bio *bio; @@ -76,6 +86,10 @@ struct async_submit_bio { struct btrfs_work work; }; +/* + * extents on the btree inode are pretty simple, there's one extent + * that covers the entire device + */ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, size_t page_offset, u64 start, u64 len, int create) @@ -151,6 +165,10 @@ void btrfs_csum_final(u32 crc, char *result) *(__le32 *)result = ~cpu_to_le32(crc); } +/* + * compute the csum for a btree block, and either verify it or write it + * into the csum field of the block. + */ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, int verify) { @@ -204,6 +222,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, return 0; } +/* + * we can't consider a given block up to date unless the transid of the + * block matches the transid in the parent node's pointer. This is how we + * detect blocks that either didn't get written at all or got written + * in the wrong place. + */ static int verify_parent_transid(struct extent_io_tree *io_tree, struct extent_buffer *eb, u64 parent_transid) { @@ -228,9 +252,12 @@ out: unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS); return ret; - } +/* + * helper to read a given tree block, doing retries as required when + * the checksums don't match and we have alternate mirrors to try. + */ static int btree_read_extent_buffer_pages(struct btrfs_root *root, struct extent_buffer *eb, u64 start, u64 parent_transid) @@ -260,6 +287,10 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror return -EIO; } +/* + * checksum a dirty tree block before IO. This has extra checks to make + * sure we only fill in the checksum field in the first page of a multi-page block. + */ int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8bd1b402f3f..563b2d12f4f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -914,6 +914,10 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(wait_on_extent_writeback); +/* + * either insert or lock the state struct between start and end. Use mask + * to tell us if waiting is desired. + */ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { int err; @@ -982,6 +986,13 @@ int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(set_range_writeback); +/* + * find the first offset in the io tree with 'bits' set. zero is + * returned if we find something, and *start_ret and *end_ret are + * set to reflect the state struct that was found. + * + * If nothing was found, 1 is returned, < 0 on error + */ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, int bits) { @@ -1017,6 +1028,10 @@ out: } EXPORT_SYMBOL(find_first_extent_bit); +/* find the first state struct with 'bits' set after 'start', and + * return it. tree->lock must be held. NULL will be returned if + * nothing was found after 'start' + */ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, int bits) { @@ -1046,8 +1061,14 @@ out: } EXPORT_SYMBOL(find_first_extent_bit_state); -u64 find_lock_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes) +/* + * find a contiguous range of bytes in the file marked as delalloc, not + * more than 'max_bytes'.
start and end are used to return the range. + * + * 1 is returned if we find something, 0 if nothing was in the tree + */ +static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree, + u64 *start, u64 *end, u64 max_bytes) { struct rb_node *node; struct extent_state *state; @@ -1130,6 +1151,11 @@ out: return found; } +/* + * count the number of bytes in the tree that have a given bit(s) + * set. This can be fairly slow, except for EXTENT_DIRTY which is + * cached. The total number found is returned. + */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, unsigned long bits) @@ -1245,6 +1271,10 @@ int unlock_range(struct extent_io_tree *tree, u64 start, u64 end) } EXPORT_SYMBOL(unlock_range); +/* + * set the private field for a given byte offset in the tree. If there isn't + * an extent_state there already, this does nothing. + */ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) { struct rb_node *node; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 78ced11d18c..74b2a29880d 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -114,6 +114,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset, return NULL; } +/* + * search through the tree for an extent_map with a given offset. If + * it can't be found, try to find some neighboring extents + */ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, struct rb_node **prev_ret, struct rb_node **next_ret) @@ -160,6 +164,10 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, return NULL; } +/* + * look for an offset in the tree, and if it can't be found, return + * the first offset we can find smaller than 'offset'. + */ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) { struct rb_node *prev; @@ -170,6 +178,7 @@ static inline struct rb_node *tree_search(struct rb_root *root, u64 offset) return ret; } +/* check to see if two extent_map structs are adjacent and safe to merge */ static int mergable_maps(struct extent_map *prev, struct extent_map *next) { if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) @@ -250,6 +259,7 @@ out: } EXPORT_SYMBOL(add_extent_mapping); +/* simple helper to do math around the end of an extent, handling wrap */ static u64 range_end(u64 start, u64 len) { if (start + len < start) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 1b7e51a9db0..3088a118448 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -41,6 +41,9 @@ #include "compat.h" +/* simple helper to fault in pages and copy. This should go away + * and be replaced with calls into generic code. + */ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes, struct page **prepared_pages, @@ -72,12 +75,19 @@ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages, return page_fault ? -EFAULT : 0; } +/* + * unlocks pages after btrfs_file_write is done with them + */ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { if (!pages[i]) break; + /* page checked is some magic around finding pages that + * have been modified without going through btrfs_set_page_dirty. + * Clear it here + */ ClearPageChecked(pages[i]); unlock_page(pages[i]); mark_page_accessed(pages[i]); @@ -85,6 +95,10 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages) } } +/* this does all the hard work for inserting an inline extent into + * the btree.
Any existing inline extent is extended as required to make room; + * otherwise new items are inserted as required into the btree + */ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 offset, size_t size, @@ -228,6 +242,14 @@ fail: return err; } +/* + * after copy_from_user, pages need to be dirtied and we need to make + * sure holes are created between the current EOF and the start of + * any next extents (if required). + * + * this also makes the decision about creating an inline extent vs + * doing real data extents, marking pages dirty and delalloc as required. + */ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct file *file, @@ -362,6 +384,10 @@ out_unlock: return err; } +/* + * this drops all the extents in the cache that intersect the range + * [start, end]. Existing extents are split as required. + */ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned) { @@ -536,6 +562,9 @@ out: * If an extent intersects the range but is not entirely inside the range * it is either truncated or split. Anything entirely inside the range * is deleted from the tree. + * + * inline_limit is used to tell this code which offsets in the file to keep + * if they contain inline extents. */ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, @@ -796,7 +825,9 @@ out: } /* - * this gets pages into the page cache and locks them down + * this gets pages into the page cache and locks them down; it also properly + * waits for data=ordered extents to finish before allowing the pages to be + * modified. */ static int noinline prepare_pages(struct btrfs_root *root, struct file *file, struct page **pages, size_t num_pages, @@ -1034,6 +1065,17 @@ int btrfs_release_file(struct inode * inode, struct file * filp) return 0; } +/* + * fsync call for both files and directories. This logs the inode into + * the tree log instead of forcing full commits whenever possible. + * + * It needs to call filemap_fdatawait so that all ordered extent updates + * in the metadata btree are up to date for copying to the log. + * + * It drops the inode mutex before doing the tree log commit. This is an + * important optimization for directories because holding the mutex prevents + * new operations on the dir while we write to disk. + */ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 404704d2682..f3abecc2d14 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -83,6 +83,10 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { static void btrfs_truncate(struct inode *inode); +/* + * a very lame attempt at stopping writes when the FS is 85% full. There + * are countless ways this is incorrect, but it is better than nothing. + */ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, int for_del) { @@ -108,6 +112,12 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required, return ret; } +/* + * when extent_io.c finds a delayed allocation range in the file, + * the callbacks end up in this code. The basic idea is to + * allocate extents on disk for the range, and create ordered data structs + * in ram to track those extents.
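+ *
+ * As a hedged sketch, the loop below amounts to roughly this (names
+ * approximate, details elided):
+ *
+ *	while (num_bytes > 0) {
+ *		allocate part of the range with btrfs_reserve_extent();
+ *		record it with btrfs_add_ordered_extent() so the endio
+ *		hooks can find it once the data writes finish;
+ *		num_bytes -= cur_alloc_size;
+ *	}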
+ */ static int cow_file_range(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -185,6 +195,13 @@ out: return ret; } +/* + * the nocow writeback callback. This checks for snapshots or COW copies + * of the extents that exist in the file, and COWs the file as required. + * + * If no cow copies or snapshots exist, we write directly to the existing + * blocks on disk + */ static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end) { u64 extent_start; @@ -291,6 +308,9 @@ out: return err; } +/* + * extent_io.c call back to do delayed allocation processing + */ static int run_delalloc_range(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -305,6 +325,11 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end) return ret; } +/* + * extent_io.c set_bit_hook, used to track delayed allocation + * bytes in this file, and to maintain the list of inodes that + * have pending delalloc work to be done. + */ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { @@ -323,6 +348,9 @@ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } +/* + * extent_io.c clear_bit_hook, see set_bit_hook for why + */ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { @@ -349,6 +377,10 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, return 0; } +/* + * extent_io.c merge_bio_hook, this must check the chunk tree to make sure + * we don't create bios that span stripes or chunks + */ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, size_t size, struct bio *bio) { @@ -371,6 +403,14 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, return 0; } +/* + * in order to insert checksums into the metadata in large chunks, + * we wait until bio submission time. All the pages in the bio are + * checksummed and sums are attached onto the ordered extent record. + * + * At IO completion time the sums attached on the ordered extent record + * are inserted into the btree + */ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num) { @@ -383,6 +423,10 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, return btrfs_map_bio(root, rw, bio, mirror_num, 1); } +/* + * extent_io.c submission hook. This does the right thing for csum calculation on write, + * or reading the csums from the tree before a read + */ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num) { @@ -408,6 +452,10 @@ mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } +/* + * given a list of ordered sums, record them in the inode. This happens + * at IO completion time based on sums calculated at bio submission time.
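+ *
+ * Conceptually, it is just a walk of the list (a sketch; the exact
+ * helper signature may differ in this tree):
+ *
+ *	list_for_each_entry(sum, list, list)
+ *		btrfs_csum_file_blocks(trans, root, inode, sum);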
+ */ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, struct inode *inode, u64 file_offset, struct list_head *list) @@ -430,12 +478,12 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) GFP_NOFS); } +/* see btrfs_writepage_start_hook for details on why this is required */ struct btrfs_writepage_fixup { struct page *page; struct btrfs_work work; }; -/* see btrfs_writepage_start_hook for details on why this is required */ void btrfs_writepage_fixup_worker(struct btrfs_work *work) { struct btrfs_writepage_fixup *fixup; @@ -522,6 +570,10 @@ int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) return -EAGAIN; } +/* as ordered data IO finishes, this gets called so we can finish + * an ordered extent if the range of bytes in the file it covers is + * fully written. + */ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -631,6 +683,14 @@ int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, return btrfs_finish_ordered_io(page->mapping->host, start, end); } +/* + * When IO fails, either with EIO or because csum verification fails, we + * try other mirrors that might have a good copy of the data. This + * io_failure_record is used to record state as we go through all the + * mirrors. If another mirror has good data, the page is set up to date + * and things continue. If a good mirror can't be found, the original + * bio end_io callback is called to indicate things have failed. + */ struct io_failure_record { struct page *page; u64 start; @@ -725,6 +785,10 @@ int btrfs_io_failed_hook(struct bio *failed_bio, return 0; } +/* + * each time an IO finishes, we do a fast check in the IO failure tree + * to see if we need to process or clean up an io_failure_record + */ int btrfs_clean_io_failures(struct inode *inode, u64 start) { u64 private; @@ -753,6 +817,11 @@ int btrfs_clean_io_failures(struct inode *inode, u64 start) return 0; } +/* + * when reads are done, we need to check csums to verify the data is correct. + * If there's a match, we allow the bio to finish. If not, we go through + * the io_failure_record routines to find good copies + */ int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { @@ -990,6 +1059,9 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) btrfs_free_path(path); } +/* + * read an inode from the btree into the in-memory inode + */ void btrfs_read_locked_inode(struct inode *inode) { struct btrfs_path *path; @@ -1083,6 +1155,9 @@ make_bad: make_bad_inode(inode); } +/* + * given a leaf and an inode, copy the inode fields into the leaf + */ static void fill_inode_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf, struct btrfs_inode_item *item, @@ -1118,6 +1193,9 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, BTRFS_I(inode)->block_group->key.objectid); } +/* + * copy everything in the in-memory inode into the btree. + */ int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) @@ -1151,6 +1229,11 @@ failed: } +/* + * unlink helper that gets used here in inode.c and in the tree logging + * recovery code.
It removes a link in a directory with a given name, and + * also drops the back refs in the inode to the directory + */ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *dir, struct inode *inode, @@ -1309,7 +1392,7 @@ fail: /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find - * any higher than i_size. + * any higher than new_size. * * csum items that cross the new i_size are truncated to the new size * as well. @@ -2123,6 +2206,11 @@ void btrfs_dirty_inode(struct inode *inode) btrfs_end_transaction(trans, root); } +/* + * find the highest existing sequence number in a directory + * and then set the in-memory index_cnt variable to reflect + * free sequence numbers + */ static int btrfs_set_inode_index_count(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -2175,6 +2263,10 @@ out: return ret; } +/* + * helper to find a free sequence number in a given directory. The current + * code is very simple, later versions will do smarter things in the btree + */ static int btrfs_set_inode_index(struct inode *dir, struct inode *inode, u64 *index) { @@ -2305,6 +2397,12 @@ static inline u8 btrfs_inode_type(struct inode *inode) return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT]; } +/* + * utility function to add 'inode' into 'parent_inode' with + * a given name and a given sequence number. + * If 'add_backref' is true, also insert a backref from the + * inode to the parent directory. + */ int btrfs_add_link(struct btrfs_trans_handle *trans, struct inode *parent_inode, struct inode *inode, const char *name, int name_len, int add_backref, u64 index) @@ -2611,6 +2709,10 @@ out_unlock: return err; } +/* helper for btrfs_get_extent. Given an existing extent in the tree, + * and an extent that you want to insert, deal with overlap and insert + * the new extent into the tree. + */ static int merge_extent_mapping(struct extent_map_tree *em_tree, struct extent_map *existing, struct extent_map *em, @@ -2627,6 +2729,14 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree, return add_extent_mapping(em_tree, em); } +/* + * a bit scary, this does extent mapping from logical file offset to the disk. + * The ugly parts come from merging extents from the disk with the + * in-ram representation. This gets more complex because of the data=ordered code, + * where the in-ram extents might be locked pending data=ordered completion. + * + * This also copies inline extents directly into the page.
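+ *
+ * Callers follow the pattern visible in the (deleted) O_DIRECT code
+ * below, roughly:
+ *
+ *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
+ *	if (!em || IS_ERR(em))
+ *		goto out;
+ *	... consult em->start, em->len, em->block_start ...
+ *	free_extent_map(em);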
+ */ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) @@ -2869,76 +2979,11 @@ out: return em; } -#if 0 /* waiting for O_DIRECT reads */ -static int btrfs_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) -{ - struct extent_map *em; - u64 start = (u64)iblock << inode->i_blkbits; - struct btrfs_multi_bio *multi = NULL; - struct btrfs_root *root = BTRFS_I(inode)->root; - u64 len; - u64 logical; - u64 map_length; - int ret = 0; - - em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0); - - if (!em || IS_ERR(em)) - goto out; - - if (em->start > start || em->start + em->len <= start) { - goto out; - } - - if (em->block_start == EXTENT_MAP_INLINE) { - ret = -EINVAL; - goto out; - } - - len = em->start + em->len - start; - len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size))); - - if (em->block_start == EXTENT_MAP_HOLE || - em->block_start == EXTENT_MAP_DELALLOC) { - bh_result->b_size = len; - goto out; - } - - logical = start - em->start; - logical = em->block_start + logical; - - map_length = len; - ret = btrfs_map_block(&root->fs_info->mapping_tree, READ, - logical, &map_length, &multi, 0); - BUG_ON(ret); - bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits; - bh_result->b_size = min(map_length, len); - - bh_result->b_bdev = multi->stripes[0].dev->bdev; - set_buffer_mapped(bh_result); - kfree(multi); -out: - free_extent_map(em); - return ret; -} -#endif - static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { return -EINVAL; -#if 0 - struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; - - if (rw == WRITE) - return -EINVAL; - - return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, btrfs_get_block, NULL); -#endif } static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock) @@ -3202,6 +3247,9 @@ void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name, } } +/* + * create a new subvolume directory/inode (helper for the ioctl). + */ int btrfs_create_subvol_root(struct btrfs_root *new_root, struct btrfs_trans_handle *trans, u64 new_dirid, struct btrfs_block_group_cache *block_group) @@ -3223,6 +3271,9 @@ int btrfs_create_subvol_root(struct btrfs_root *new_root, return btrfs_update_inode(trans, new_root, inode); } +/* helper function for file defrag and space balancing. This + * forces readahead on a given range of bytes in an inode + */ unsigned long btrfs_force_ra(struct address_space *mapping, struct file_ra_state *ra, struct file *file, pgoff_t offset, pgoff_t last_index) @@ -3424,6 +3475,10 @@ out_unlock: return ret; } +/* + * some fairly slow code that needs optimization. This walks the list + * of all the inodes with pending delalloc and forces them to disk. + */ int btrfs_start_delalloc_inodes(struct btrfs_root *root) { struct list_head *head = &root->fs_info->delalloc_inodes; diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 0cc314c10d6..e30aa6e2958 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -25,6 +25,15 @@ #include "extent_io.h" #include "locking.h" +/* + * locks the per buffer mutex in an extent buffer. This uses adaptive locks + * and the spin is not tuned very extensively. The spinning does make a big + * difference in almost every workload, but spinning for the right amount of + * time needs some help. 
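+ *
+ * The adaptive shape is roughly the classic spin-then-sleep pattern
+ * (an illustrative sketch only; the spin count here is made up):
+ *
+ *	for (i = 0; i < 512; i++) {
+ *		if (mutex_trylock(&eb->mutex))
+ *			return 0;
+ *		cpu_relax();
+ *	}
+ *	mutex_lock(&eb->mutex);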
+ * + * In general, we want to spin as long as the lock holder is doing btree searches, + * and we should give up if they are in more expensive code. + */ int btrfs_tree_lock(struct extent_buffer *eb) { int i; @@ -57,6 +66,10 @@ int btrfs_tree_locked(struct extent_buffer *eb) return mutex_is_locked(&eb->mutex); } +/* + * btrfs_search_slot uses this to decide if it should drop its locks + * before doing something expensive like allocating free blocks for cow. + */ int btrfs_path_lock_waiting(struct btrfs_path *path, int level) { int i; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 951eacff242..dcc1730dd83 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -26,7 +26,6 @@ #include "btrfs_inode.h" #include "extent_io.h" - static u64 entry_end(struct btrfs_ordered_extent *entry) { if (entry->file_offset + entry->len < entry->file_offset) @@ -34,6 +33,9 @@ static u64 entry_end(struct btrfs_ordered_extent *entry) return entry->file_offset + entry->len; } +/* returns NULL if the insertion worked, or it returns the node it found + * in the tree + */ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, struct rb_node *node) { @@ -58,6 +60,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, return NULL; } +/* + * look for a given offset in the tree, and if it can't be found, return the + * first lesser offset + */ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, struct rb_node **prev_ret) { @@ -108,6 +114,9 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, return NULL; } +/* + * helper to check if a given offset is inside a given entry + */ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) { if (file_offset < entry->file_offset || @@ -116,6 +125,10 @@ static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset) return 1; } +/* + * find the first ordered struct that has this offset, otherwise + * the first one less than this offset + */ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { @@ -305,6 +318,10 @@ int btrfs_remove_ordered_extent(struct inode *inode, return 0; } +/* + * wait for all the ordered extents in a root. This is done when balancing + * space between drives. + */ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) { struct list_head splice; diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index 30fcb7aea5b..a50ebb67055 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -21,6 +21,16 @@ #include "ref-cache.h" #include "transaction.h" +/* + * leaf refs are used to cache the information about which extents + * a given leaf has references on. This allows us to process that leaf + * in btrfs_drop_snapshot without needing to read it back from disk. + */ + +/* + * kmalloc a leaf reference struct and update the counters for the + * total ref cache size + */ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, int nr_extents) { @@ -40,6 +50,10 @@ struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, return ref; } +/* + * free a leaf reference struct and update the counters for the + * total ref cache size + */ void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { if (!ref) @@ -135,6 +149,10 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, return 0; } +/* + * find the leaf ref for a given extent.
This returns the ref struct with + * a usage reference incremented + */ struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, u64 bytenr) { @@ -160,6 +178,10 @@ again: return NULL; } +/* + * add a fully filled in leaf ref struct + * remove all the refs older than a given root generation + */ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, int shared) { @@ -184,6 +206,10 @@ int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, return ret; } +/* + * remove a single leaf ref from the tree. This drops the ref held by the tree + * only + */ int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) { struct btrfs_leaf_ref_tree *tree; diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index 617564787f5..16f3183d7c5 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h @@ -19,8 +19,11 @@ #define __REFCACHE__ struct btrfs_extent_info { + /* bytenr and num_bytes find the extent in the extent allocation tree */ u64 bytenr; u64 num_bytes; + + /* objectid and offset find the back reference for the file */ u64 objectid; u64 offset; }; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 0091c01abb0..eb7f7655e9d 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -22,8 +22,10 @@ #include "print-tree.h" /* - * returns 0 on finding something, 1 if no more roots are there - * and < 0 on error + * search forward for a root, starting with objectid 'search_start'. + * If a root key is found, the objectid we find is filled into 'found_objectid' + * and 0 is returned. < 0 is returned on error, 1 if there is nothing + * left in the tree. */ int btrfs_search_root(struct btrfs_root *root, u64 search_start, u64 *found_objectid) @@ -66,6 +68,11 @@ out: return ret; } +/* + * lookup the root with the highest offset for a given objectid. The key we do + * find is copied into 'key'. If we find something, 0 is returned, otherwise 1, + * and < 0 on error. + */ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct btrfs_root_item *item, struct btrfs_key *key) { @@ -104,6 +111,9 @@ out: return ret; } +/* + * copy the data in 'item' into the btree + */ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_root_item *item) @@ -147,6 +157,12 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root return ret; } +/* + * at mount time we want to find all the old transaction snapshots that were in + * the process of being deleted if we crashed. This is any root item with an offset + * lower than the latest root. They need to be queued for deletion to finish + * what was happening when we crashed. + */ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid, struct btrfs_root *latest) { @@ -227,6 +243,7 @@ err: return ret; } +/* drop the root item for 'key' from 'root' */ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key) { diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index ad03a32d111..cdedbe144d4 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -17,6 +17,27 @@ */ #include + +/* this is some deeply nasty code.
ctree.h has a different + * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef + * + * The end result is that anyone who #includes ctree.h gets a + * declaration for the btrfs_set_foo functions and btrfs_foo functions + * + * This file declares the macros and then #includes ctree.h, which results + * in cpp creating the function here based on the template below. + * + * These setget functions do all the extent_buffer related mapping + * required to efficiently read and write specific fields in the extent + * buffers. Every pointer to metadata items in btrfs is really just + * an unsigned long offset into the extent buffer which has been + * cast to a specific type. This gives us all the gcc type checking. + * + * The extent buffer api is used to do all the kmapping and page + * spanning work required to get extent buffers in highmem and have + * a metadata blocksize different from the page size. + */ + #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ u##bits btrfs_##name(struct extent_buffer *eb, \ type *s) \ diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 8399d6d05d6..2e6039825b7 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -519,6 +519,9 @@ static struct file_system_type btrfs_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; +/* + * used by btrfsctl to scan devices when no FS is mounted + */ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 444abe0796a..11266d68a6c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -46,6 +46,9 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) } } +/* + * either allocate a new transaction or hop into the existing one + */ static noinline int join_transaction(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; @@ -85,6 +88,12 @@ static noinline int join_transaction(struct btrfs_root *root) return 0; } +/* + * this does all the record keeping required to make sure that a + * reference counted root is properly recorded in a given transaction. + * This is required to make sure the old root from before we joined the transaction + * is deleted when the transaction commits + */ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) { struct btrfs_dirty_root *dirty; @@ -127,6 +136,10 @@ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) return 0; } +/* wait for commit against the current transaction to become unblocked. + * When this is done, it is safe to start a new transaction, but the current + * transaction might not be fully on disk. + */ static void wait_current_trans(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; @@ -198,7 +211,7 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, return start_transaction(r, num_blocks, 2); } - +/* wait for a transaction commit to be fully complete */ static noinline int wait_for_commit(struct btrfs_root *root, struct btrfs_transaction *commit) { @@ -218,6 +231,10 @@ static noinline int wait_for_commit(struct btrfs_root *root, return 0; } +/* + * rate limit against the drop_snapshot code. This helps to slow down new operations + * if the drop_snapshot code isn't able to keep up.
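+ *
+ * In spirit (a rough sketch, not the exact fields or condition used here):
+ *
+ *	if (pending drop_snapshot work is backed up)
+ *		wait on the fs_info throttle waitqueue until the
+ *		cleaner catches up;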
+ */ static void throttle_on_drops(struct btrfs_root *root) { struct btrfs_fs_info *info = root->fs_info; @@ -302,7 +319,11 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, return __btrfs_end_transaction(trans, root, 1); } - +/* + * when btree blocks are allocated, they have some corresponding bits set for + * them in one of two extent_io trees. This is used to make sure all of + * those extents are on disk for transaction or log commit + */ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, struct extent_io_tree *dirty_pages) { @@ -393,6 +414,16 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, &trans->transaction->dirty_pages); } +/* + * this is used to update the root pointer in the tree of tree roots. + * + * But, in the case of the extent allocation tree, updating the root + * pointer may allocate blocks which may change the root of the extent + * allocation tree. + * + * So, this loops and repeats and makes sure the cowonly root didn't + * change while the root pointer was being updated in the metadata. + */ static int update_cowonly_root(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -418,6 +449,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, return 0; } +/* + * update all the cowonly tree roots on disk + */ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, struct btrfs_root *root) { @@ -433,6 +467,11 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, return 0; } +/* + * dead roots are old snapshots that need to be deleted. This allocates + * a dirty root struct and adds it into the list of dead roots that need to + * be deleted + */ int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest) { struct btrfs_dirty_root *dirty; @@ -449,6 +488,12 @@ int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest) return 0; } +/* + * at transaction commit time we need to schedule the old roots for + * deletion via btrfs_drop_snapshot. This runs through all the + * reference counted roots that were modified in the current + * transaction and puts them into the drop list + */ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, struct radix_tree_root *radix, struct list_head *list) @@ -541,6 +586,10 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, return err; } +/* + * defrag a given btree. If cacheonly == 1, this won't read from the disk, + * otherwise every leaf in the btree is read and defragged. + */ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) { struct btrfs_fs_info *info = root->fs_info; @@ -570,6 +619,10 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) return 0; } +/* + * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on + * all of them + */ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, struct list_head *list) { @@ -664,6 +717,10 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, return ret; } +/* + * new snapshots need to be created at a very specific time in the + * transaction commit. 
This does the actual creation + */ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, struct btrfs_pending_snapshot *pending) @@ -734,6 +791,9 @@ fail: return ret; } +/* + * create all the snapshots we've scheduled for creation + */ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { @@ -944,6 +1004,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, return ret; } +/* + * interface function to delete all the snapshots we have scheduled for deletion + */ int btrfs_clean_old_snapshots(struct btrfs_root *root) { struct list_head dirty_roots; diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index b3bb5bbad76..6f57d0889b1 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -23,6 +23,10 @@ #include "transaction.h" #include "locking.h" +/* defrag all the leaves in a given btree. If cache_only == 1, don't read things + * from disk, otherwise read all the leaves and try to get key order to + * better reflect disk order + */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, struct btrfs_root *root, int cache_only) { -- cgit v1.2.3-70-g09d2 From c8b978188c9a0fd3d535c13debd19d522b726f1f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 29 Oct 2008 14:49:59 -0400 Subject: Btrfs: Add zlib compression support This is a large change for adding compression on reading and writing, both for inline and regular extents. It does some fairly large surgery to the writeback paths. Compression is off by default and enabled by mount -o compress. Even when the -o compress mount option is not used, it is possible to read compressed extents off the disk. If compression for a given set of pages fails to make them smaller, the file is flagged to avoid future compression attempts. * While finding delalloc extents, the pages are locked before being sent down to the delalloc handler. This allows the delalloc handler to do complex things such as cleaning the pages, marking them writeback and starting IO on their behalf. * Inline extents are inserted at delalloc time now. This allows us to compress the data before inserting the inline extent, and it allows us to insert an inline extent that spans multiple pages. * All of the in-memory extent representations (extent_map.c, ordered-data.c etc) are changed to record both an in-memory size and an on disk size, as well as a flag for compression. From a disk format point of view, the extent pointers in the file are changed to record the on disk size of a given extent and some encoding flags. Space in the disk format is allocated for compression encoding, as well as encryption and a generic 'other' field. Neither the encryption nor the 'other' field is currently used. In order to limit the amount of data read for a single random read in the file, the size of a compressed extent is limited to 128k. This is a software only limit; the disk format supports u64 sized compressed extents. In order to limit the ram consumed while processing extents, the uncompressed size of a compressed extent is limited to 256k. This is a software only limit and will be subject to tuning later. Checksumming is still done on compressed extents, and it is done on the uncompressed version of the data. This way additional encodings can be layered on without having to figure out which encoding to checksum. Compression happens at delalloc time, which is basically single threaded because it is usually done by a single pdflush thread.
This makes it tricky to spread the compression load across all the cpus on the box. We'll have to look at parallel pdflush walks of dirty inodes at a later time. Decompression is hooked into readpages and it does spread across CPUs nicely. Signed-off-by: Chris Mason --- fs/Kconfig | 2 + fs/btrfs/Makefile | 3 +- fs/btrfs/compression.c | 454 ++++++++++++++++++++++++++++++++++ fs/btrfs/compression.h | 47 ++++ fs/btrfs/ctree.h | 99 ++++++-- fs/btrfs/disk-io.c | 18 +- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 27 +- fs/btrfs/extent_io.c | 411 ++++++++++++++++++++++++------- fs/btrfs/extent_io.h | 17 +- fs/btrfs/extent_map.c | 9 + fs/btrfs/extent_map.h | 6 +- fs/btrfs/file-item.c | 75 +++++- fs/btrfs/file.c | 263 +++++--------------- fs/btrfs/inode.c | 584 ++++++++++++++++++++++++++++++++++++++++---- fs/btrfs/ordered-data.c | 9 +- fs/btrfs/ordered-data.h | 10 +- fs/btrfs/print-tree.c | 7 +- fs/btrfs/super.c | 10 +- fs/btrfs/tree-log.c | 3 +- fs/btrfs/volumes.c | 2 + fs/btrfs/zlib.c | 637 ++++++++++++++++++++++++++++++++++++++++++++++++ 22 files changed, 2315 insertions(+), 379 deletions(-) create mode 100644 fs/btrfs/compression.c create mode 100644 fs/btrfs/compression.h create mode 100644 fs/btrfs/zlib.c (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/Kconfig b/fs/Kconfig index 18f5a85b47c..31cce5d88b1 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -501,6 +501,8 @@ config BTRFS_FS tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format" depends on EXPERIMENTAL select LIBCRC32C + select ZLIB_INFLATE + select ZLIB_DEFLATE help Btrfs is a new filesystem with extents, writable snapshotting, support for multiple devices and many more features. diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 7125716e142..d2cf5a54a4b 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -7,7 +7,8 @@ btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ transaction.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ - ref-cache.o export.o tree-log.o acl.o free-space-cache.o + ref-cache.o export.o tree-log.o acl.o free-space-cache.o zlib.o \ + compression.o else # Normal Makefile diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c new file mode 100644 index 00000000000..c5470367ca5 --- /dev/null +++ b/fs/btrfs/compression.c @@ -0,0 +1,454 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ctree.h" +#include "disk-io.h" +#include "transaction.h" +#include "btrfs_inode.h" +#include "volumes.h" +#include "ordered-data.h" +#include "compat.h" +#include "compression.h" +#include "extent_io.h" +#include "extent_map.h" + +struct compressed_bio { + /* number of bios pending for this compressed extent */ + atomic_t pending_bios; + + /* the pages with the compressed data on them */ + struct page **compressed_pages; + + /* inode that owns this data */ + struct inode *inode; + + /* starting offset in the inode for our pages */ + u64 start; + + /* number of bytes in the inode we're working on */ + unsigned long len; + + /* number of bytes on disk */ + unsigned long compressed_len; + + /* number of compressed pages in the array */ + unsigned long nr_pages; + + /* IO errors */ + int errors; + + /* for reads, this is the bio we are copying the data into */ + struct bio *orig_bio; +}; + +static struct bio *compressed_bio_alloc(struct block_device *bdev, + u64 first_byte, gfp_t gfp_flags) +{ + struct bio *bio; + int nr_vecs; + + nr_vecs = bio_get_nr_vecs(bdev); + bio = bio_alloc(gfp_flags, nr_vecs); + + if (bio == NULL && (current->flags & PF_MEMALLOC)) { + while (!bio && (nr_vecs /= 2)) + bio = bio_alloc(gfp_flags, nr_vecs); + } + + if (bio) { + bio->bi_size = 0; + bio->bi_bdev = bdev; + bio->bi_sector = first_byte >> 9; + } + return bio; +} + +/* when we finish reading compressed pages from the disk, we + * decompress them and then run the bio end_io routines on the + * decompressed pages (in the inode address space). + * + * This allows the checksumming and other IO error handling routines + * to work normally + * + * The compressed pages are freed here, and it must be run + * in process context + */ +static void end_compressed_bio_read(struct bio *bio, int err) +{ + struct extent_io_tree *tree; + struct compressed_bio *cb = bio->bi_private; + struct inode *inode; + struct page *page; + unsigned long index; + int ret; + + if (err) + cb->errors = 1; + + /* if there are more bios still pending for this compressed + * extent, just exit + */ + if (!atomic_dec_and_test(&cb->pending_bios)) + goto out; + + /* ok, we're the last bio for this extent, lets start + * the decompression. 
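+ *
+ * pending_bios is incremented once per bio submitted against this
+ * extent, so the atomic_dec_and_test above only fires for the
+ * final completion.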
+ */ + inode = cb->inode; + tree = &BTRFS_I(inode)->io_tree; + ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, + cb->start, + cb->orig_bio->bi_io_vec, + cb->orig_bio->bi_vcnt, + cb->compressed_len); + if (ret) + cb->errors = 1; + + /* release the compressed pages */ + index = 0; + for (index = 0; index < cb->nr_pages; index++) { + page = cb->compressed_pages[index]; + page->mapping = NULL; + page_cache_release(page); + } + + /* do io completion on the original bio */ + if (cb->errors) + bio_io_error(cb->orig_bio); + else + bio_endio(cb->orig_bio, 0); + + /* finally free the cb struct */ + kfree(cb->compressed_pages); + kfree(cb); +out: + bio_put(bio); +} + +/* + * Clear the writeback bits on all of the file + * pages for a compressed write + */ +static noinline int end_compressed_writeback(struct inode *inode, u64 start, + unsigned long ram_size) +{ + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT; + struct page *pages[16]; + unsigned long nr_pages = end_index - index + 1; + int i; + int ret; + + while(nr_pages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nr_pages, ARRAY_SIZE(pages)), pages); + if (ret == 0) { + nr_pages -= 1; + index += 1; + continue; + } + for (i = 0; i < ret; i++) { + end_page_writeback(pages[i]); + page_cache_release(pages[i]); + } + nr_pages -= ret; + index += ret; + } + /* the inode may be gone now */ + return 0; +} + +/* + * do the cleanup once all the compressed pages hit the disk. + * This will clear writeback on the file pages and free the compressed + * pages. + * + * This also calls the writeback end hooks for the file pages so that + * metadata and checksums can be updated in the file. + */ +static void end_compressed_bio_write(struct bio *bio, int err) +{ + struct extent_io_tree *tree; + struct compressed_bio *cb = bio->bi_private; + struct inode *inode; + struct page *page; + unsigned long index; + + if (err) + cb->errors = 1; + + /* if there are more bios still pending for this compressed + * extent, just exit + */ + if (!atomic_dec_and_test(&cb->pending_bios)) + goto out; + + /* ok, we're the last bio for this extent, step one is to + * call back into the FS and do all the end_io operations + */ + inode = cb->inode; + tree = &BTRFS_I(inode)->io_tree; + tree->ops->writepage_end_io_hook(cb->compressed_pages[0], + cb->start, + cb->start + cb->len - 1, + NULL, 1); + + end_compressed_writeback(inode, cb->start, cb->len); + /* note, our inode could be gone now */ + + /* + * release the compressed pages, these came from alloc_page and + * are not attached to the inode at all + */ + index = 0; + for (index = 0; index < cb->nr_pages; index++) { + page = cb->compressed_pages[index]; + page->mapping = NULL; + page_cache_release(page); + } + + /* finally free the cb struct */ + kfree(cb->compressed_pages); + kfree(cb); +out: + bio_put(bio); +} + +/* + * worker function to build and submit bios for previously compressed pages. + * The corresponding pages in the inode should be marked for writeback + * and the compressed pages should have a reference on them for dropping + * when the IO is complete. + * + * This also checksums the file bytes and gets things ready for + * the end io hooks. 
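+ *
+ * An illustrative call from the delalloc path (the values are examples,
+ * not taken from this patch):
+ *
+ *	btrfs_submit_compressed_write(inode, start, ram_size, disk_start,
+ *				      compressed_len, pages, nr_pages);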
+ */ +int btrfs_submit_compressed_write(struct inode *inode, u64 start, + unsigned long len, u64 disk_start, + unsigned long compressed_len, + struct page **compressed_pages, + unsigned long nr_pages) +{ + struct bio *bio = NULL; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct compressed_bio *cb; + unsigned long bytes_left; + struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + int page_index = 0; + struct page *page; + u64 first_byte = disk_start; + struct block_device *bdev; + int ret; + + WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); + cb = kmalloc(sizeof(*cb), GFP_NOFS); + atomic_set(&cb->pending_bios, 0); + cb->errors = 0; + cb->inode = inode; + cb->start = start; + cb->len = len; + cb->compressed_pages = compressed_pages; + cb->compressed_len = compressed_len; + cb->orig_bio = NULL; + cb->nr_pages = nr_pages; + + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; + + ret = btrfs_csum_file_bytes(root, inode, start, len); + BUG_ON(ret); + + bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); + bio->bi_private = cb; + bio->bi_end_io = end_compressed_bio_write; + atomic_inc(&cb->pending_bios); + + /* create and submit bios for the compressed pages */ + bytes_left = compressed_len; + while(bytes_left > 0) { + page = compressed_pages[page_index]; + page->mapping = inode->i_mapping; + if (bio->bi_size) + ret = io_tree->ops->merge_bio_hook(page, 0, + PAGE_CACHE_SIZE, + bio, 0); + else + ret = 0; + + if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < + PAGE_CACHE_SIZE) { + bio_get(bio); + + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + BUG_ON(ret); + + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); + BUG_ON(ret); + + bio_put(bio); + + bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); + atomic_inc(&cb->pending_bios); + bio->bi_private = cb; + bio->bi_end_io = end_compressed_bio_write; + bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + } + page_index++; + bytes_left -= PAGE_CACHE_SIZE; + first_byte += PAGE_CACHE_SIZE; + } + bio_get(bio); + + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); + BUG_ON(ret); + + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); + BUG_ON(ret); + + bio_put(bio); + return 0; +} + +/* + * for a compressed read, the bio we get passed has all the inode pages + * in it. We don't actually do IO on those pages but allocate new ones + * to hold the compressed pages on disk. 
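+ *
+ * The number of compressed pages allocated below is just the compressed
+ * length rounded up to whole pages, i.e.
+ * DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE), which the nr_pages
+ * computation spells out by hand.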
+ *
+ * bio->bi_sector points to the compressed extent on disk
+ * bio->bi_io_vec points to all of the inode pages
+ * bio->bi_vcnt is a count of pages
+ *
+ * After the compressed pages are read, we copy the bytes into the
+ * bio we were passed and then call the bio end_io calls
+ */
+int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+				 int mirror_num, unsigned long bio_flags)
+{
+	struct extent_io_tree *tree;
+	struct extent_map_tree *em_tree;
+	struct compressed_bio *cb;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
+	unsigned long compressed_len;
+	unsigned long nr_pages;
+	unsigned long page_index;
+	struct page *page;
+	struct block_device *bdev;
+	struct bio *comp_bio;
+	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
+	struct extent_map *em;
+	int ret;
+
+	tree = &BTRFS_I(inode)->io_tree;
+	em_tree = &BTRFS_I(inode)->extent_tree;
+
+	/* we need the actual starting offset of this extent in the file */
+	spin_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree,
+				   page_offset(bio->bi_io_vec->bv_page),
+				   PAGE_CACHE_SIZE);
+	spin_unlock(&em_tree->lock);
+
+	cb = kmalloc(sizeof(*cb), GFP_NOFS);
+	atomic_set(&cb->pending_bios, 0);
+	cb->errors = 0;
+	cb->inode = inode;
+
+	cb->start = em->start;
+	compressed_len = em->block_len;
+	free_extent_map(em);
+
+	cb->len = uncompressed_len;
+	cb->compressed_len = compressed_len;
+	cb->orig_bio = bio;
+
+	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
+		   PAGE_CACHE_SIZE;
+	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+				       GFP_NOFS);
+	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+
+	for (page_index = 0; page_index < nr_pages; page_index++) {
+		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+							      __GFP_HIGHMEM);
+	}
+	cb->nr_pages = nr_pages;
+
+	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+	comp_bio->bi_private = cb;
+	comp_bio->bi_end_io = end_compressed_bio_read;
+	atomic_inc(&cb->pending_bios);
+
+	for (page_index = 0; page_index < nr_pages; page_index++) {
+		page = cb->compressed_pages[page_index];
+		page->mapping = inode->i_mapping;
+		if (comp_bio->bi_size)
+			ret = tree->ops->merge_bio_hook(page, 0,
+							PAGE_CACHE_SIZE,
+							comp_bio, 0);
+		else
+			ret = 0;
+
+		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
+		    PAGE_CACHE_SIZE) {
+			bio_get(comp_bio);
+
+			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
+			BUG_ON(ret);
+
+			ret = btrfs_map_bio(root, READ, comp_bio, 0, 0);
+			BUG_ON(ret);
+
+			bio_put(comp_bio);
+
+			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
+							GFP_NOFS);
+			atomic_inc(&cb->pending_bios);
+			comp_bio->bi_private = cb;
+			comp_bio->bi_end_io = end_compressed_bio_read;
+			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
+		}
+		cur_disk_byte += PAGE_CACHE_SIZE;
+	}
+	bio_get(comp_bio);
+
+	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
+	BUG_ON(ret);
+
+	ret = btrfs_map_bio(root, READ, comp_bio, 0, 0);
+	BUG_ON(ret);
+
+	bio_put(comp_bio);
+	return 0;
+}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
new file mode 100644
index 00000000000..421f5b4aa71
--- /dev/null
+++ b/fs/btrfs/compression.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#ifndef __BTRFS_COMPRESSION_ +#define __BTRFS_COMPRESSION_ + +int btrfs_zlib_decompress(unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen); +int btrfs_zlib_compress_pages(struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out); +int btrfs_zlib_decompress_biovec(struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen); +void btrfs_zlib_exit(void); +int btrfs_submit_compressed_write(struct inode *inode, u64 start, + unsigned long len, u64 disk_start, + unsigned long compressed_len, + struct page **compressed_pages, + unsigned long nr_pages); +int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, + int mirror_num, unsigned long bio_flags); +#endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8559f39fd47..793d8fdda24 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -400,10 +400,18 @@ struct btrfs_timespec { __le32 nsec; } __attribute__ ((__packed__)); -/* - * there is no padding here on purpose. If you want to extent the inode, - * make a new item type - */ +typedef enum { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LAST = 2, +} btrfs_compression_type; + +/* we don't understand any encryption methods right now */ +typedef enum { + BTRFS_ENCRYPTION_NONE = 0, + BTRFS_ENCRYPTION_LAST = 1, +} btrfs_encryption_type; + struct btrfs_inode_item { /* nfs style generation number */ __le64 generation; @@ -419,6 +427,7 @@ struct btrfs_inode_item { __le64 rdev; __le16 flags; __le16 compat_flags; + struct btrfs_timespec atime; struct btrfs_timespec ctime; struct btrfs_timespec mtime; @@ -454,8 +463,33 @@ struct btrfs_root_item { #define BTRFS_FILE_EXTENT_INLINE 1 struct btrfs_file_extent_item { + /* + * transaction id that created this extent + */ __le64 generation; + /* + * max number of bytes to hold this extent in ram + * when we split a compressed extent we can't know how big + * each of the resulting pieces will be. So, this is + * an upper limit on the size of the extent in ram instead of + * an exact limit. + */ + __le64 ram_bytes; + + /* + * 32 bits for the various ways we might encode the data, + * including compression and encryption. If any of these + * are set to something a given disk format doesn't understand + * it is treated like an incompat flag for reading and writing, + * but not for stat. + */ + u8 compression; + u8 encryption; + __le16 other_encoding; /* spare for later use */ + + /* are we inline data or a real extent? */ u8 type; + /* * disk space consumed by the extent, checksum blocks are included * in these numbers @@ -471,9 +505,11 @@ struct btrfs_file_extent_item { */ __le64 offset; /* - * the logical number of file blocks (no csums included) + * the logical number of file blocks (no csums included). This + * always reflects the size uncompressed and without encoding. 
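+ *
+ * worked example: a 64k extent that zlib compressed to 8k on disk
+ * has ram_bytes == num_bytes == 64k while disk_num_bytes == 8k, and
+ * after a split each piece keeps the 64k ram_bytes upper bound.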
*/ __le64 num_bytes; + } __attribute__ ((__packed__)); struct btrfs_csum_item { @@ -814,6 +850,7 @@ struct btrfs_root { #define BTRFS_MOUNT_NOBARRIER (1 << 2) #define BTRFS_MOUNT_SSD (1 << 3) #define BTRFS_MOUNT_DEGRADED (1 << 4) +#define BTRFS_MOUNT_COMPRESS (1 << 5) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) @@ -825,6 +862,7 @@ struct btrfs_root { #define BTRFS_INODE_NODATASUM (1 << 0) #define BTRFS_INODE_NODATACOW (1 << 1) #define BTRFS_INODE_READONLY (1 << 2) +#define BTRFS_INODE_NOCOMPRESS (1 << 3) #define btrfs_clear_flag(inode, flag) (BTRFS_I(inode)->flags &= \ ~BTRFS_INODE_##flag) #define btrfs_set_flag(inode, flag) (BTRFS_I(inode)->flags |= \ @@ -1424,14 +1462,6 @@ static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) return offsetof(struct btrfs_file_extent_item, disk_bytenr) + datasize; } -static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, - struct btrfs_item *e) -{ - unsigned long offset; - offset = offsetof(struct btrfs_file_extent_item, disk_bytenr); - return btrfs_item_size(eb, e) - offset; -} - BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, disk_bytenr, 64); BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, @@ -1442,6 +1472,36 @@ BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, offset, 64); BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, num_bytes, 64); +BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, + ram_bytes, 64); +BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, + compression, 8); +BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, + encryption, 8); +BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, + other_encoding, 16); + +/* this returns the number of file bytes represented by the inline item. + * If an item is compressed, this is the uncompressed size + */ +static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, + struct btrfs_file_extent_item *e) +{ + return btrfs_file_extent_ram_bytes(eb, e); +} + +/* + * this returns the number of bytes used by the item on disk, minus the + * size of any extent headers. 
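+ * (concretely, btrfs_item_size() minus
+ * offsetof(struct btrfs_file_extent_item, disk_bytenr), exactly what
+ * the body below computes).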
If a file is compressed on disk, this is + * the compressed size + */ +static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, + struct btrfs_item *e) +{ + unsigned long offset; + offset = offsetof(struct btrfs_file_extent_item, disk_bytenr); + return btrfs_item_size(eb, e) - offset; +} static inline struct btrfs_root *btrfs_sb(struct super_block *sb) { @@ -1745,10 +1805,11 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - u64 objectid, u64 pos, u64 disk_offset, - u64 disk_num_bytes, - u64 num_bytes, u64 offset); + struct btrfs_root *root, + u64 objectid, u64 pos, + u64 disk_offset, u64 disk_num_bytes, + u64 num_bytes, u64 offset, u64 ram_bytes, + u8 compression, u8 encryption, u16 other_encoding); int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, @@ -1758,6 +1819,8 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, struct bio *bio); +int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, + u64 start, unsigned long len); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, @@ -1799,7 +1862,7 @@ void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name, int namelen); int btrfs_merge_bio_hook(struct page *page, unsigned long offset, - size_t size, struct bio *bio); + size_t size, struct bio *bio, unsigned long bio_flags); unsigned long btrfs_force_ra(struct address_space *mapping, struct file_ra_state *ra, struct file *file, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0be044bb619..dc95f636a11 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -83,6 +83,7 @@ struct async_submit_bio { extent_submit_bio_hook_t *submit_bio_hook; int rw; int mirror_num; + unsigned long bio_flags; struct btrfs_work work; }; @@ -115,6 +116,7 @@ struct extent_map *btree_get_extent(struct inode *inode, struct page *page, } em->start = 0; em->len = (u64)-1; + em->block_len = (u64)-1; em->block_start = 0; em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; @@ -469,12 +471,13 @@ static void run_one_async_submit(struct btrfs_work *work) wake_up(&fs_info->async_submit_wait); async->submit_bio_hook(async->inode, async->rw, async->bio, - async->mirror_num); + async->mirror_num, async->bio_flags); kfree(async); } int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, + unsigned long bio_flags, extent_submit_bio_hook_t *submit_bio_hook) { struct async_submit_bio *async; @@ -491,6 +494,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, async->submit_bio_hook = submit_bio_hook; async->work.func = run_one_async_submit; async->work.flags = 0; + async->bio_flags = bio_flags; while(atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { @@ -530,7 +534,7 @@ static int btree_csum_one_bio(struct bio *bio) } static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; @@ -556,17 +560,17 @@ static int 
__btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, } static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { /* * kthread helpers are used to submit writes so that checksumming * can happen in parallel across all CPUs */ if (!(rw & (1 << BIO_RW))) { - return __btree_submit_bio_hook(inode, rw, bio, mirror_num); + return __btree_submit_bio_hook(inode, rw, bio, mirror_num, 0); } return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, - inode, rw, bio, mirror_num, + inode, rw, bio, mirror_num, 0, __btree_submit_bio_hook); } @@ -1407,6 +1411,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->btree_inode = new_inode(sb); fs_info->btree_inode->i_ino = 1; fs_info->btree_inode->i_nlink = 1; + fs_info->thread_pool_size = min(num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_extents); @@ -1508,6 +1513,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size)); @@ -1559,6 +1565,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, } fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); + fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, + 4 * 1024 * 1024 / PAGE_CACHE_SIZE); nodesize = btrfs_super_nodesize(disk_super); leafsize = btrfs_super_leafsize(disk_super); diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index f84f5058dbb..4eb1f1408d2 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -71,6 +71,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, int metadata); int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, int rw, struct bio *bio, int mirror_num, + unsigned long bio_flags, extent_submit_bio_hook_t *submit_bio_hook); int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 280ac1aa9b6..bbf04e80a1a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3278,6 +3278,7 @@ static int noinline relocate_data_extent(struct inode *reloc_inode, em->start = extent_key->objectid - offset; em->len = extent_key->offset; + em->block_len = extent_key->offset; em->block_start = extent_key->objectid; em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); @@ -3314,10 +3315,14 @@ struct btrfs_ref_path { }; struct disk_extent { + u64 ram_bytes; u64 disk_bytenr; u64 disk_num_bytes; u64 offset; u64 num_bytes; + u8 compression; + u8 encryption; + u16 other_encoding; }; static int is_cowonly_root(u64 root_objectid) @@ -3631,6 +3636,11 @@ static int noinline get_new_locations(struct inode *reloc_inode, btrfs_file_extent_disk_num_bytes(leaf, fi); exts[nr].offset = btrfs_file_extent_offset(leaf, fi); exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); + exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); + exts[nr].compression = btrfs_file_extent_compression(leaf, fi); + exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); + exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, + fi); WARN_ON(exts[nr].offset > 0); WARN_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); @@ -3846,6 +3856,8 @@ next: new_extents[0].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, 
new_extents[0].disk_num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, fi, + new_extents[0].ram_bytes); ext_offset += new_extents[0].offset; btrfs_set_file_extent_offset(leaf, fi, ext_offset); btrfs_mark_buffer_dirty(leaf); @@ -3911,6 +3923,16 @@ next: new_extents[i].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extents[i].disk_num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, fi, + new_extents[i].ram_bytes); + + btrfs_set_file_extent_compression(leaf, fi, + new_extents[i].compression); + btrfs_set_file_extent_encryption(leaf, fi, + new_extents[i].encryption); + btrfs_set_file_extent_other_encoding(leaf, fi, + new_extents[i].other_encoding); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_len); ext_offset += new_extents[i].offset; @@ -4169,6 +4191,8 @@ static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans, ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; btrfs_set_file_extent_generation(leaf, fi, trans->transid); + btrfs_set_file_extent_ram_bytes(leaf, fi, + new_extent->ram_bytes); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extent->disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, @@ -4847,7 +4871,8 @@ static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info, BUG_ON(err); err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0, - group->key.offset, 0); + group->key.offset, 0, group->key.offset, + 0, 0, 0); BUG_ON(err); inode = btrfs_iget_locked(root->fs_info->sb, objectid, root); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 563b2d12f4f..314041fdfa4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -30,6 +30,7 @@ static struct kmem_cache *extent_buffer_cache; static LIST_HEAD(buffers); static LIST_HEAD(states); +#define LEAK_DEBUG 1 #ifdef LEAK_DEBUG static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; #endif @@ -1067,8 +1068,8 @@ EXPORT_SYMBOL(find_first_extent_bit_state); * * 1 is returned if we find something, 0 if nothing was in the tree */ -static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree, - u64 *start, u64 *end, u64 max_bytes) +static noinline u64 find_delalloc_range(struct extent_io_tree *tree, + u64 *start, u64 *end, u64 max_bytes) { struct rb_node *node; struct extent_state *state; @@ -1077,11 +1078,11 @@ static noinline u64 find_lock_delalloc_range(struct extent_io_tree *tree, u64 total_bytes = 0; spin_lock_irq(&tree->lock); + /* * this search will find all the extents that end after * our range starts. 
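	 *
	 * (tree_search returns the entry covering the offset when one
	 * exists, and otherwise the next entry after it, so the first
	 * state visited here really is the first extent ending after
	 * our range starts)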
*/ -search_again: node = tree_search(tree, cur_start); if (!node) { if (!found) @@ -1100,40 +1101,6 @@ search_again: *end = state->end; goto out; } - if (!found && !(state->state & EXTENT_BOUNDARY)) { - struct extent_state *prev_state; - struct rb_node *prev_node = node; - while(1) { - prev_node = rb_prev(prev_node); - if (!prev_node) - break; - prev_state = rb_entry(prev_node, - struct extent_state, - rb_node); - if ((prev_state->end + 1 != state->start) || - !(prev_state->state & EXTENT_DELALLOC)) - break; - if ((cur_start - prev_state->start) * 2 > - max_bytes) - break; - state = prev_state; - node = prev_node; - } - } - if (state->state & EXTENT_LOCKED) { - DEFINE_WAIT(wait); - atomic_inc(&state->refs); - prepare_to_wait(&state->wq, &wait, - TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&tree->lock); - schedule(); - spin_lock_irq(&tree->lock); - finish_wait(&state->wq, &wait); - free_extent_state(state); - goto search_again; - } - set_state_cb(tree, state, EXTENT_LOCKED); - state->state |= EXTENT_LOCKED; if (!found) *start = state->start; found++; @@ -1151,6 +1118,208 @@ out: return found; } +static noinline int __unlock_for_delalloc(struct inode *inode, + struct page *locked_page, + u64 start, u64 end) +{ + int ret; + struct page *pages[16]; + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long nr_pages = end_index - index + 1; + int i; + + if (index == locked_page->index && end_index == index) + return 0; + + while(nr_pages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nr_pages, ARRAY_SIZE(pages)), pages); + for (i = 0; i < ret; i++) { + if (pages[i] != locked_page) + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + nr_pages -= ret; + index += ret; + cond_resched(); + } + return 0; +} + +static noinline int lock_delalloc_pages(struct inode *inode, + struct page *locked_page, + u64 delalloc_start, + u64 delalloc_end) +{ + unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; + unsigned long start_index = index; + unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; + unsigned long pages_locked = 0; + struct page *pages[16]; + unsigned long nrpages; + int ret; + int i; + + /* the caller is responsible for locking the start index */ + if (index == locked_page->index && index == end_index) + return 0; + + /* skip the page at the start index */ + nrpages = end_index - index + 1; + while(nrpages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nrpages, ARRAY_SIZE(pages)), pages); + if (ret == 0) { + ret = -EAGAIN; + goto done; + } + /* now we have an array of pages, lock them all */ + for (i = 0; i < ret; i++) { + /* + * the caller is taking responsibility for + * locked_page + */ + if (pages[i] != locked_page) + lock_page(pages[i]); + page_cache_release(pages[i]); + } + pages_locked += ret; + nrpages -= ret; + index += ret; + cond_resched(); + } + ret = 0; +done: + if (ret && pages_locked) { + __unlock_for_delalloc(inode, locked_page, + delalloc_start, + ((u64)(start_index + pages_locked - 1)) << + PAGE_CACHE_SHIFT); + } + return ret; +} + +/* + * find a contiguous range of bytes in the file marked as delalloc, not + * more than 'max_bytes'. 
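+ * the locking below is optimistic: find a candidate range, lock its
+ * pages, lock the extent state bits, then re-test that the range is
+ * still delalloc; if another thread changed it underneath us,
+ * everything is unlocked and the search restarts.
+ *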
start and end are used to return the range, + * + * 1 is returned if we find something, 0 if nothing was in the tree + */ +static noinline u64 find_lock_delalloc_range(struct inode *inode, + struct extent_io_tree *tree, + struct page *locked_page, + u64 *start, u64 *end, + u64 max_bytes) +{ + u64 delalloc_start; + u64 delalloc_end; + u64 found; + int ret; + int loops = 0; + +again: + /* step one, find a bunch of delalloc bytes starting at start */ + delalloc_start = *start; + delalloc_end = 0; + found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, + max_bytes); + if (!found) { + *start = delalloc_start; + *end = delalloc_end; + return found; + } + + /* + * make sure to limit the number of pages we try to lock down + * if we're looping. + */ + if (delalloc_end + 1 - delalloc_start > max_bytes && loops) { + delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) & + ~((u64)PAGE_CACHE_SIZE - 1); + } + /* step two, lock all the pages after the page that has start */ + ret = lock_delalloc_pages(inode, locked_page, + delalloc_start, delalloc_end); + if (ret == -EAGAIN) { + /* some of the pages are gone, lets avoid looping by + * shortening the size of the delalloc range we're searching + */ + if (!loops) { + unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); + max_bytes = PAGE_CACHE_SIZE - offset; + loops = 1; + goto again; + } else { + found = 0; + goto out_failed; + } + } + BUG_ON(ret); + + /* step three, lock the state bits for the whole range */ + lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS); + + /* then test to make sure it is all still delalloc */ + ret = test_range_bit(tree, delalloc_start, delalloc_end, + EXTENT_DELALLOC, 1); + if (!ret) { + unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS); + __unlock_for_delalloc(inode, locked_page, + delalloc_start, delalloc_end); + cond_resched(); + goto again; + } + *start = delalloc_start; + *end = delalloc_end; +out_failed: + return found; +} + +int extent_clear_unlock_delalloc(struct inode *inode, + struct extent_io_tree *tree, + u64 start, u64 end, struct page *locked_page, + int clear_dirty, int set_writeback, + int end_writeback) +{ + int ret; + struct page *pages[16]; + unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long nr_pages = end_index - index + 1; + int i; + int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC; + + if (clear_dirty) + clear_bits |= EXTENT_DIRTY; + + clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS); + + while(nr_pages > 0) { + ret = find_get_pages_contig(inode->i_mapping, index, + min(nr_pages, ARRAY_SIZE(pages)), pages); + for (i = 0; i < ret; i++) { + if (pages[i] == locked_page) { + page_cache_release(pages[i]); + continue; + } + if (clear_dirty) + clear_page_dirty_for_io(pages[i]); + if (set_writeback) + set_page_writeback(pages[i]); + if (end_writeback) + end_page_writeback(pages[i]); + unlock_page(pages[i]); + page_cache_release(pages[i]); + } + nr_pages -= ret; + index += ret; + cond_resched(); + } + return 0; +} +EXPORT_SYMBOL(extent_clear_unlock_delalloc); + /* * count the number of bytes in the tree that have a given bit(s) * set. 
This can be fairly slow, except for EXTENT_DIRTY which is @@ -1631,38 +1800,26 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, return bio; } -static int submit_one_bio(int rw, struct bio *bio, int mirror_num) +static int submit_one_bio(int rw, struct bio *bio, int mirror_num, + unsigned long bio_flags) { int ret = 0; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct page *page = bvec->bv_page; struct extent_io_tree *tree = bio->bi_private; - struct rb_node *node; - struct extent_state *state; u64 start; u64 end; start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; end = start + bvec->bv_len - 1; - spin_lock_irq(&tree->lock); - node = __etree_search(tree, start, NULL, NULL); - BUG_ON(!node); - state = rb_entry(node, struct extent_state, rb_node); - while(state->end < end) { - node = rb_next(node); - state = rb_entry(node, struct extent_state, rb_node); - } - BUG_ON(state->end != end); - spin_unlock_irq(&tree->lock); - bio->bi_private = NULL; bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) tree->ops->submit_bio_hook(page->mapping->host, rw, bio, - mirror_num); + mirror_num, bio_flags); else submit_bio(rw, bio); if (bio_flagged(bio, BIO_EOPNOTSUPP)) @@ -1678,39 +1835,56 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, struct bio **bio_ret, unsigned long max_pages, bio_end_io_t end_io_func, - int mirror_num) + int mirror_num, + unsigned long prev_bio_flags, + unsigned long bio_flags) { int ret = 0; struct bio *bio; int nr; + int contig = 0; + int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; + int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; + size_t page_size = min(size, PAGE_CACHE_SIZE); if (bio_ret && *bio_ret) { bio = *bio_ret; - if (bio->bi_sector + (bio->bi_size >> 9) != sector || + if (old_compressed) + contig = bio->bi_sector == sector; + else + contig = bio->bi_sector + (bio->bi_size >> 9) == + sector; + + if (prev_bio_flags != bio_flags || !contig || (tree->ops && tree->ops->merge_bio_hook && - tree->ops->merge_bio_hook(page, offset, size, bio)) || - bio_add_page(bio, page, size, offset) < size) { - ret = submit_one_bio(rw, bio, mirror_num); + tree->ops->merge_bio_hook(page, offset, page_size, bio, + bio_flags)) || + bio_add_page(bio, page, page_size, offset) < page_size) { + ret = submit_one_bio(rw, bio, mirror_num, + prev_bio_flags); bio = NULL; } else { return 0; } } - nr = bio_get_nr_vecs(bdev); + if (this_compressed) + nr = BIO_MAX_PAGES; + else + nr = bio_get_nr_vecs(bdev); + bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); if (!bio) { printk("failed to allocate bio nr %d\n", nr); } - - bio_add_page(bio, page, size, offset); + bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; bio->bi_private = tree; if (bio_ret) { *bio_ret = bio; } else { - ret = submit_one_bio(rw, bio, mirror_num); + ret = submit_one_bio(rw, bio, mirror_num, bio_flags); } return ret; @@ -1738,7 +1912,8 @@ void set_page_extent_head(struct page *page, unsigned long len) static int __extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, - struct bio **bio, int mirror_num) + struct bio **bio, int mirror_num, + unsigned long *bio_flags) { struct inode *inode = page->mapping->host; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; @@ -1756,13 +1931,27 @@ static int __extent_read_full_page(struct extent_io_tree *tree, int nr = 0; size_t page_offset = 0; size_t iosize; + size_t disk_io_size; size_t blocksize = 
inode->i_sb->s_blocksize; + unsigned long this_bio_flag = 0; set_page_extent_mapped(page); end = page_end; lock_extent(tree, start, end, GFP_NOFS); + if (page->index == last_byte >> PAGE_CACHE_SHIFT) { + char *userpage; + size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); + + if (zero_offset) { + iosize = PAGE_CACHE_SIZE - zero_offset; + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + zero_offset, 0, iosize); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + } + } while (cur <= end) { if (cur >= last_byte) { char *userpage; @@ -1793,10 +1982,19 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); } BUG_ON(end < cur); + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + this_bio_flag = EXTENT_BIO_COMPRESSED; + iosize = min(extent_map_end(em) - cur, end - cur + 1); cur_end = min(extent_map_end(em) - 1, end); iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1); - sector = (em->block_start + extent_offset) >> 9; + if (this_bio_flag & EXTENT_BIO_COMPRESSED) { + disk_io_size = em->block_len; + sector = em->block_start >> 9; + } else { + sector = (em->block_start + extent_offset) >> 9; + disk_io_size = iosize; + } bdev = em->bdev; block_start = em->block_start; free_extent_map(em); @@ -1845,10 +2043,13 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; pnr -= page->index; ret = submit_extent_page(READ, tree, page, - sector, iosize, page_offset, + sector, disk_io_size, page_offset, bdev, bio, pnr, - end_bio_extent_readpage, mirror_num); + end_bio_extent_readpage, mirror_num, + *bio_flags, + this_bio_flag); nr++; + *bio_flags = this_bio_flag; } if (ret) SetPageError(page); @@ -1867,11 +2068,13 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent) { struct bio *bio = NULL; + unsigned long bio_flags = 0; int ret; - ret = __extent_read_full_page(tree, page, get_extent, &bio, 0); + ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, + &bio_flags); if (bio) - submit_one_bio(READ, bio, 0); + submit_one_bio(READ, bio, 0, bio_flags); return ret; } EXPORT_SYMBOL(extent_read_full_page); @@ -1909,6 +2112,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; u64 nr_delalloc; u64 delalloc_end; + int page_started; + int compressed; WARN_ON(!PageLocked(page)); pg_offset = i_size & (PAGE_CACHE_SIZE - 1); @@ -1934,27 +2139,33 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_start = start; delalloc_end = 0; + page_started = 0; while(delalloc_end < page_end) { - nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start, + nr_delalloc = find_lock_delalloc_range(inode, tree, + page, + &delalloc_start, &delalloc_end, 128 * 1024 * 1024); if (nr_delalloc == 0) { delalloc_start = delalloc_end + 1; continue; } - tree->ops->fill_delalloc(inode, delalloc_start, - delalloc_end); - clear_extent_bit(tree, delalloc_start, - delalloc_end, - EXTENT_LOCKED | EXTENT_DELALLOC, - 1, 0, GFP_NOFS); + tree->ops->fill_delalloc(inode, page, delalloc_start, + delalloc_end, &page_started); delalloc_start = delalloc_end + 1; } + + /* did the fill delalloc function already unlock and start the IO? 
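+	 * compressed writes are the main example: the hook writes those
+	 * pages through the compressed IO path, unlocks locked_page and
+	 * sets page_started, so this writepage must not touch the page
+	 * again.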
*/ + if (page_started) { + return 0; + } + lock_extent(tree, start, page_end, GFP_NOFS); unlock_start = start; if (tree->ops && tree->ops->writepage_start_hook) { - ret = tree->ops->writepage_start_hook(page, start, page_end); + ret = tree->ops->writepage_start_hook(page, start, + page_end); if (ret == -EAGAIN) { unlock_extent(tree, start, page_end, GFP_NOFS); redirty_page_for_writepage(wbc, page); @@ -2006,10 +2217,15 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, sector = (em->block_start + extent_offset) >> 9; bdev = em->bdev; block_start = em->block_start; + compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); free_extent_map(em); em = NULL; - if (block_start == EXTENT_MAP_HOLE || + /* + * compressed and inline extents are written through other + * paths in the FS + */ + if (compressed || block_start == EXTENT_MAP_HOLE || block_start == EXTENT_MAP_INLINE) { clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); @@ -2017,16 +2233,28 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unlock_extent(tree, unlock_start, cur + iosize -1, GFP_NOFS); - if (tree->ops && tree->ops->writepage_end_io_hook) + /* + * end_io notification does not happen here for + * compressed extents + */ + if (!compressed && tree->ops && + tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, cur, cur + iosize - 1, NULL, 1); - cur = cur + iosize; + else if (compressed) { + /* we don't want to end_page_writeback on + * a compressed extent. this happens + * elsewhere + */ + nr++; + } + + cur += iosize; pg_offset += iosize; unlock_start = cur; continue; } - /* leave this out until we have a page_mkwrite call */ if (0 && !test_range_bit(tree, cur, cur + iosize - 1, EXTENT_DIRTY, 0)) { @@ -2034,6 +2262,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, pg_offset += iosize; continue; } + clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS); if (tree->ops && tree->ops->writepage_io_hook) { ret = tree->ops->writepage_io_hook(page, cur, @@ -2057,7 +2286,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ret = submit_extent_page(WRITE, tree, page, sector, iosize, pg_offset, bdev, &epd->bio, max_nr, - end_bio_extent_writepage, 0); + end_bio_extent_writepage, + 0, 0, 0); if (ret) SetPageError(page); } @@ -2226,7 +2456,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, extent_write_cache_pages(tree, mapping, &wbc_writepages, __extent_writepage, &epd); if (epd.bio) { - submit_one_bio(WRITE, epd.bio, 0); + submit_one_bio(WRITE, epd.bio, 0, 0); } return ret; } @@ -2248,7 +2478,7 @@ int extent_writepages(struct extent_io_tree *tree, ret = extent_write_cache_pages(tree, mapping, wbc, __extent_writepage, &epd); if (epd.bio) { - submit_one_bio(WRITE, epd.bio, 0); + submit_one_bio(WRITE, epd.bio, 0, 0); } return ret; } @@ -2262,6 +2492,7 @@ int extent_readpages(struct extent_io_tree *tree, struct bio *bio = NULL; unsigned page_idx; struct pagevec pvec; + unsigned long bio_flags = 0; pagevec_init(&pvec, 0); for (page_idx = 0; page_idx < nr_pages; page_idx++) { @@ -2281,7 +2512,7 @@ int extent_readpages(struct extent_io_tree *tree, if (!pagevec_add(&pvec, page)) __pagevec_lru_add(&pvec); __extent_read_full_page(tree, page, get_extent, - &bio, 0); + &bio, 0, &bio_flags); } page_cache_release(page); } @@ -2289,7 +2520,7 @@ int extent_readpages(struct extent_io_tree *tree, __pagevec_lru_add(&pvec); BUG_ON(!list_empty(pages)); if (bio) - 
submit_one_bio(READ, bio, 0); + submit_one_bio(READ, bio, 0, bio_flags); return 0; } EXPORT_SYMBOL(extent_readpages); @@ -2414,7 +2645,8 @@ int extent_prepare_write(struct extent_io_tree *tree, ret = submit_extent_page(READ, tree, page, sector, iosize, page_offset, em->bdev, NULL, 1, - end_bio_extent_preparewrite, 0); + end_bio_extent_preparewrite, 0, + 0, 0); iocount++; block_start = block_start + iosize; } else { @@ -2495,7 +2727,9 @@ int try_release_extent_mapping(struct extent_map_tree *map, } if (!test_range_bit(tree, em->start, extent_map_end(em) - 1, - EXTENT_LOCKED, 0)) { + EXTENT_LOCKED | EXTENT_WRITEBACK | + EXTENT_ORDERED, + 0)) { remove_extent_mapping(map, em); /* once for the rb tree */ free_extent_map(em); @@ -2923,6 +3157,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, int inc_all_pages = 0; unsigned long num_pages; struct bio *bio = NULL; + unsigned long bio_flags = 0; if (eb->flags & EXTENT_UPTODATE) return 0; @@ -2973,7 +3208,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, ClearPageError(page); err = __extent_read_full_page(tree, page, get_extent, &bio, - mirror_num); + mirror_num, &bio_flags); if (err) { ret = err; printk("err %d from __extent_read_full_page\n", ret); @@ -2984,7 +3219,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, } if (bio) - submit_one_bio(READ, bio, mirror_num); + submit_one_bio(READ, bio, mirror_num, bio_flags); if (ret || !wait) { if (ret) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index c9d1908a1ae..86f859b87a6 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -18,6 +18,9 @@ #define EXTENT_BOUNDARY (1 << 11) #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) +/* flags for bio submission */ +#define EXTENT_BIO_COMPRESSED 1 + /* * page->private values. Every page that is controlled by the extent * map has page->private set to one. 
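The bio building loops above (submit_extent_page(), btrfs_submit_compressed_write()
and btrfs_submit_compressed_read()) all share one batching shape: keep appending
pages to the current bio while they stay contiguous and it has room, otherwise
submit what was accumulated and start a new bio. A minimal userspace sketch of
that shape; batch, flush_batch and add_item are illustrative names, not btrfs
code:

#include <stddef.h>
#include <stdio.h>

#define BATCH_CAP 16	/* stands in for the bio's page capacity */

struct batch {
	long keys[BATCH_CAP];	/* stands in for the pages in the bio */
	size_t count;
	long next_key;		/* key required to remain contiguous */
};

/* stands in for submit_one_bio(): here we just report the batch */
static void flush_batch(struct batch *b)
{
	if (b->count)
		printf("submitting batch of %zu items\n", b->count);
	b->count = 0;
}

/*
 * stands in for the bio_add_page() dance: flush when the new item is
 * not contiguous with the last one, or when the batch is full, then
 * start a fresh batch with this item
 */
static void add_item(struct batch *b, long key)
{
	if (b->count && (key != b->next_key || b->count == BATCH_CAP))
		flush_batch(b);
	b->keys[b->count++] = key;
	b->next_key = key + 1;
}

int main(void)
{
	struct batch b = { .count = 0 };
	long i;

	/* 40 keys with a gap at 20: the gap and the cap both force flushes */
	for (i = 0; i < 40; i++)
		add_item(&b, i < 20 ? i : i + 5);
	flush_batch(&b);	/* submit whatever is left, like the tail bio */
	return 0;
}

The property mirrored from the code above is that a flush happens for either of
two independent reasons: loss of contiguity (the sector / merge_bio_hook checks)
or a full bio (bio_add_page() returning less than the page size).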
@@ -28,14 +31,17 @@ struct extent_state; typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, - struct bio *bio, int mirror_num); + struct bio *bio, int mirror_num, + unsigned long bio_flags); struct extent_io_ops { - int (*fill_delalloc)(struct inode *inode, u64 start, u64 end); + int (*fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started); int (*writepage_start_hook)(struct page *page, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); extent_submit_bio_hook_t *submit_bio_hook; int (*merge_bio_hook)(struct page *page, unsigned long offset, - size_t size, struct bio *bio); + size_t size, struct bio *bio, + unsigned long bio_flags); int (*readpage_io_hook)(struct page *page, u64 start, u64 end); int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, u64 start, u64 end, @@ -245,4 +251,9 @@ void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); int release_extent_buffer_tail_pages(struct extent_buffer *eb); int extent_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end); +int extent_clear_unlock_delalloc(struct inode *inode, + struct extent_io_tree *tree, + u64 start, u64 end, struct page *locked_page, + int clear_dirty, int set_writeback, + int clear_writeback); #endif diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 74b2a29880d..fd3ebfb8c3c 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -184,6 +184,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next) if (test_bit(EXTENT_FLAG_PINNED, &prev->flags)) return 0; + /* + * don't merge compressed extents, we need to know their + * actual size + */ + if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags)) + return 0; + if (extent_map_end(prev) == next->start && prev->flags == next->flags && prev->bdev == next->bdev && @@ -239,6 +246,7 @@ int add_extent_mapping(struct extent_map_tree *tree, if (rb && mergable_maps(merge, em)) { em->start = merge->start; em->len += merge->len; + em->block_len += merge->block_len; em->block_start = merge->block_start; merge->in_tree = 0; rb_erase(&merge->rb_node, &tree->map); @@ -250,6 +258,7 @@ int add_extent_mapping(struct extent_map_tree *tree, merge = rb_entry(rb, struct extent_map, rb_node); if (rb && mergable_maps(em, merge)) { em->len += merge->len; + em->block_len += merge->len; rb_erase(&merge->rb_node, &tree->map); merge->in_tree = 0; free_extent_map(merge); diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 26ac6fe0b26..abbcbeb28c7 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -10,6 +10,7 @@ /* bits for the flags field */ #define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */ +#define EXTENT_FLAG_COMPRESSED 1 struct extent_map { struct rb_node rb_node; @@ -18,6 +19,7 @@ struct extent_map { u64 start; u64 len; u64 block_start; + u64 block_len; unsigned long flags; struct block_device *bdev; atomic_t refs; @@ -38,9 +40,9 @@ static inline u64 extent_map_end(struct extent_map *em) static inline u64 extent_map_block_end(struct extent_map *em) { - if (em->block_start + em->len < em->block_start) + if (em->block_start + em->block_len < em->block_start) return (u64)-1; - return em->block_start + em->len; + return em->block_start + em->block_len; } void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 6dbe88b9d7d..f4d3fa71bc4 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ 
-31,7 +31,8 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     u64 objectid, u64 pos,
 			     u64 disk_offset, u64 disk_num_bytes,
-			     u64 num_bytes, u64 offset)
+			     u64 num_bytes, u64 offset, u64 ram_bytes,
+			     u8 compression, u8 encryption, u16 other_encoding)
 {
 	int ret = 0;
 	struct btrfs_file_extent_item *item;
@@ -57,8 +58,13 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
 	btrfs_set_file_extent_offset(leaf, item, offset);
 	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
+	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
 	btrfs_set_file_extent_generation(leaf, item, trans->transid);
 	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
+	btrfs_set_file_extent_compression(leaf, item, compression);
+	btrfs_set_file_extent_encryption(leaf, item, encryption);
+	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
+
 	btrfs_mark_buffer_dirty(leaf);
 out:
 	btrfs_free_path(path);
@@ -213,6 +219,73 @@ found:
 	return 0;
 }
 
+int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
+			  u64 start, unsigned long len)
+{
+	struct btrfs_ordered_sum *sums;
+	struct btrfs_sector_sum *sector_sum;
+	struct btrfs_ordered_extent *ordered;
+	char *data;
+	struct page *page;
+	unsigned long total_bytes = 0;
+	unsigned long this_sum_bytes = 0;
+
+	sums = kzalloc(btrfs_ordered_sum_size(root, len), GFP_NOFS);
+	if (!sums)
+		return -ENOMEM;
+
+	sector_sum = sums->sums;
+	sums->file_offset = start;
+	sums->len = len;
+	INIT_LIST_HEAD(&sums->list);
+	ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset);
+	BUG_ON(!ordered);
+
+	while(len > 0) {
+		if (start >= ordered->file_offset + ordered->len ||
+		    start < ordered->file_offset) {
+			sums->len = this_sum_bytes;
+			this_sum_bytes = 0;
+			btrfs_add_ordered_sum(inode, ordered, sums);
+			btrfs_put_ordered_extent(ordered);
+
+			sums = kzalloc(btrfs_ordered_sum_size(root, len),
+				       GFP_NOFS);
+			BUG_ON(!sums);
+			sector_sum = sums->sums;
+			sums->len = len;
+			sums->file_offset = start;
+			ordered = btrfs_lookup_ordered_extent(inode,
+						      sums->file_offset);
+			BUG_ON(!ordered);
+		}
+
+		page = find_get_page(inode->i_mapping,
+				     start >> PAGE_CACHE_SHIFT);
+
+		data = kmap_atomic(page, KM_USER0);
+		sector_sum->sum = ~(u32)0;
+		sector_sum->sum = btrfs_csum_data(root, data, sector_sum->sum,
+						  PAGE_CACHE_SIZE);
+		kunmap_atomic(data, KM_USER0);
+		btrfs_csum_final(sector_sum->sum,
+				 (char *)&sector_sum->sum);
+		sector_sum->offset = page_offset(page);
+		page_cache_release(page);
+
+		sector_sum++;
+		total_bytes += PAGE_CACHE_SIZE;
+		this_sum_bytes += PAGE_CACHE_SIZE;
+		start += PAGE_CACHE_SIZE;
+
+		WARN_ON(len < PAGE_CACHE_SIZE);
+		len -= PAGE_CACHE_SIZE;
+	}
+	btrfs_add_ordered_sum(inode, ordered, sums);
+	btrfs_put_ordered_extent(ordered);
+	return 0;
+}
+
 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		       struct bio *bio)
 {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 69abbe19add..0aa15436590 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -95,153 +95,6 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
 	}
 }
 
-/* this does all the hard work for inserting an inline extent into
- * the btree.
Any existing inline extent is extended as required to make room, - * otherwise things are inserted as required into the btree - */ -static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, - u64 offset, size_t size, - struct page **pages, size_t page_offset, - int num_pages) -{ - struct btrfs_key key; - struct btrfs_path *path; - struct extent_buffer *leaf; - char *kaddr; - unsigned long ptr; - struct btrfs_file_extent_item *ei; - struct page *page; - u32 datasize; - int err = 0; - int ret; - int i; - ssize_t cur_size; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - - btrfs_set_trans_block_group(trans, inode); - - key.objectid = inode->i_ino; - key.offset = offset; - btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); - - ret = btrfs_search_slot(trans, root, &key, path, 0, 1); - if (ret < 0) { - err = ret; - goto fail; - } - if (ret == 1) { - struct btrfs_key found_key; - - if (path->slots[0] == 0) - goto insert; - - path->slots[0]--; - leaf = path->nodes[0]; - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - - if (found_key.objectid != inode->i_ino) - goto insert; - - if (found_key.type != BTRFS_EXTENT_DATA_KEY) - goto insert; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - - if (btrfs_file_extent_type(leaf, ei) != - BTRFS_FILE_EXTENT_INLINE) { - goto insert; - } - btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); - ret = 0; - } - if (ret == 0) { - u32 found_size; - u64 found_end; - - leaf = path->nodes[0]; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - - if (btrfs_file_extent_type(leaf, ei) != - BTRFS_FILE_EXTENT_INLINE) { - err = ret; - btrfs_print_leaf(root, leaf); - printk("found wasn't inline offset %Lu inode %lu\n", - offset, inode->i_ino); - goto fail; - } - found_size = btrfs_file_extent_inline_len(leaf, - btrfs_item_nr(leaf, path->slots[0])); - found_end = key.offset + found_size; - - if (found_end < offset + size) { - btrfs_release_path(root, path); - ret = btrfs_search_slot(trans, root, &key, path, - offset + size - found_end, 1); - BUG_ON(ret != 0); - - ret = btrfs_extend_item(trans, root, path, - offset + size - found_end); - if (ret) { - err = ret; - goto fail; - } - leaf = path->nodes[0]; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - inode_add_bytes(inode, offset + size - found_end); - } - if (found_end < offset) { - ptr = btrfs_file_extent_inline_start(ei) + found_size; - memset_extent_buffer(leaf, 0, ptr, offset - found_end); - } - } else { -insert: - btrfs_release_path(root, path); - datasize = offset + size - key.offset; - inode_add_bytes(inode, datasize); - datasize = btrfs_file_extent_calc_inline_size(datasize); - ret = btrfs_insert_empty_item(trans, root, path, &key, - datasize); - if (ret) { - err = ret; - printk("got bad ret %d\n", ret); - goto fail; - } - leaf = path->nodes[0]; - ei = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, ei, trans->transid); - btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); - } - ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset; - - cur_size = size; - i = 0; - while (size > 0) { - page = pages[i]; - kaddr = kmap_atomic(page, KM_USER0); - cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size); - write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size); - kunmap_atomic(kaddr, KM_USER0); - page_offset = 0; - ptr += cur_size; - size -= cur_size; - if 
(i >= num_pages) { - printk("i %d num_pages %d\n", i, num_pages); - } - i++; - } - btrfs_mark_buffer_dirty(leaf); -fail: - btrfs_free_path(path); - return err; -} - /* * after copy_from_user, pages need to be dirtied and we need to make * sure holes are created between the current EOF and the start of @@ -267,8 +120,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, u64 start_pos; u64 end_of_last_block; u64 end_pos = pos + write_bytes; - u64 inline_size; - int did_inline = 0; loff_t isize = i_size_read(inode); start_pos = pos & ~((u64)root->sectorsize - 1); @@ -314,7 +165,8 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, err = btrfs_insert_file_extent(trans, root, inode->i_ino, last_pos_in_file, - 0, 0, hole_size, 0); + 0, 0, hole_size, 0, + hole_size, 0, 0, 0); btrfs_drop_extent_cache(inode, last_pos_in_file, last_pos_in_file + hole_size - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); @@ -324,57 +176,19 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans, goto failed; } - /* - * either allocate an extent for the new bytes or setup the key - * to show we are doing inline data in the extent + /* check for reserved extents on each page, we don't want + * to reset the delalloc bit on things that already have + * extents reserved. */ - inline_size = end_pos; - if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) || - inline_size > root->fs_info->max_inline || - (inline_size & (root->sectorsize -1)) == 0 || - inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) { - /* check for reserved extents on each page, we don't want - * to reset the delalloc bit on things that already have - * extents reserved. - */ - btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); - for (i = 0; i < num_pages; i++) { - struct page *p = pages[i]; - SetPageUptodate(p); - ClearPageChecked(p); - set_page_dirty(p); - } - } else { - u64 aligned_end; - /* step one, delete the existing extents in this range */ - aligned_end = (pos + write_bytes + root->sectorsize - 1) & - ~((u64)root->sectorsize - 1); - mutex_lock(&BTRFS_I(inode)->extent_mutex); - err = btrfs_drop_extents(trans, root, inode, start_pos, - aligned_end, aligned_end, &hint_byte); - if (err) - goto failed; - if (isize > inline_size) - inline_size = min_t(u64, isize, aligned_end); - inline_size -= start_pos; - err = insert_inline_extent(trans, root, inode, start_pos, - inline_size, pages, 0, num_pages); - btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1, 0); - BUG_ON(err); - mutex_unlock(&BTRFS_I(inode)->extent_mutex); - - /* - * an ugly way to do all the prop accounting around - * the page bits and mapping tags - */ - set_page_writeback(pages[0]); - end_page_writeback(pages[0]); - did_inline = 1; + btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block); + for (i = 0; i < num_pages; i++) { + struct page *p = pages[i]; + SetPageUptodate(p); + ClearPageChecked(p); + set_page_dirty(p); } if (end_pos > isize) { i_size_write(inode, end_pos); - if (did_inline) - BTRFS_I(inode)->disk_i_size = end_pos; btrfs_update_inode(trans, root, inode); } failed: @@ -399,6 +213,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int ret; int testend = 1; unsigned long flags; + int compressed = 0; WARN_ON(end < start); if (end == (u64)-1) { @@ -434,6 +249,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, free_extent_map(em); continue; } + compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); clear_bit(EXTENT_FLAG_PINNED, 
&em->flags); remove_extent_mapping(em_tree, em); @@ -442,6 +258,12 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->start = em->start; split->len = start - em->start; split->block_start = em->block_start; + + if (compressed) + split->block_len = em->block_len; + else + split->block_len = split->len; + split->bdev = em->bdev; split->flags = flags; ret = add_extent_mapping(em_tree, split); @@ -459,7 +281,13 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->bdev = em->bdev; split->flags = flags; - split->block_start = em->block_start + diff; + if (compressed) { + split->block_len = em->block_len; + split->block_start = em->block_start; + } else { + split->block_len = split->len; + split->block_start = em->block_start + diff; + } ret = add_extent_mapping(em_tree, split); BUG_ON(ret); @@ -533,7 +361,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode) struct btrfs_item *item; item = btrfs_item_nr(leaf, slot); extent_end = found_key.offset + - btrfs_file_extent_inline_len(leaf, item); + btrfs_file_extent_inline_len(leaf, extent); extent_end = (extent_end + root->sectorsize - 1) & ~((u64)root->sectorsize -1 ); } @@ -573,6 +401,10 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, u64 extent_end = 0; u64 search_start = start; u64 leaf_start; + u64 ram_bytes = 0; + u8 compression = 0; + u8 encryption = 0; + u16 other_encoding = 0; u64 root_gen; u64 root_owner; struct extent_buffer *leaf; @@ -589,6 +421,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, int recow; int ret; + inline_limit = 0; btrfs_drop_extent_cache(inode, start, end - 1, 0); path = btrfs_alloc_path(); @@ -637,6 +470,12 @@ next_slot: extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(leaf, extent); + compression = btrfs_file_extent_compression(leaf, + extent); + encryption = btrfs_file_extent_encryption(leaf, + extent); + other_encoding = btrfs_file_extent_other_encoding(leaf, + extent); if (found_type == BTRFS_FILE_EXTENT_REG) { extent_end = btrfs_file_extent_disk_bytenr(leaf, @@ -646,13 +485,13 @@ next_slot: extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, extent); + ram_bytes = btrfs_file_extent_ram_bytes(leaf, + extent); found_extent = 1; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { - struct btrfs_item *item; - item = btrfs_item_nr(leaf, slot); found_inline = 1; extent_end = key.offset + - btrfs_file_extent_inline_len(leaf, item); + btrfs_file_extent_inline_len(leaf, extent); } } else { extent_end = search_start; @@ -680,10 +519,9 @@ next_slot: search_start = (extent_end + mask) & ~mask; } else search_start = extent_end; - if (end <= extent_end && start >= key.offset && found_inline) { + + if (end <= extent_end && start >= key.offset && found_inline) *hint_byte = EXTENT_MAP_INLINE; - goto out; - } if (found_extent) { read_extent_buffer(leaf, &old, (unsigned long)extent, @@ -770,12 +608,27 @@ next_slot: write_extent_buffer(leaf, &old, (unsigned long)extent, sizeof(old)); + btrfs_set_file_extent_compression(leaf, extent, + compression); + btrfs_set_file_extent_encryption(leaf, extent, + encryption); + btrfs_set_file_extent_other_encoding(leaf, extent, + other_encoding); btrfs_set_file_extent_offset(leaf, extent, le64_to_cpu(old.offset) + end - key.offset); WARN_ON(le64_to_cpu(old.num_bytes) < (extent_end - end)); btrfs_set_file_extent_num_bytes(leaf, extent, extent_end - end); + + /* + * set the ram bytes to the size of the full extent + * before 
splitting. This is a worst case flag,
+			 * but it's the best we can do because we don't know
+			 * how splitting affects compression
+			 */
+			btrfs_set_file_extent_ram_bytes(leaf, extent,
+							ram_bytes);
 			btrfs_set_file_extent_type(leaf, extent,
 						   BTRFS_FILE_EXTENT_REG);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index bf4bed6ca4d..9797592dc86 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -49,6 +49,7 @@
 #include "compat.h"
 #include "tree-log.h"
 #include "ref-cache.h"
+#include "compression.h"
 
 struct btrfs_iget_args {
 	u64 ino;
@@ -83,6 +84,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
 };
 
 static void btrfs_truncate(struct inode *inode);
+static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
 
 /*
  * a very lame attempt at stopping writes when the FS is 85% full. There
@@ -113,58 +115,375 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
 	return ret;
 }
 
+/*
+ * this does all the hard work for inserting an inline extent into
+ * the btree. The caller should have done a btrfs_drop_extents so that
+ * no overlapping inline items exist in the btree
+ */
+static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
+				struct btrfs_root *root, struct inode *inode,
+				u64 start, size_t size, size_t compressed_size,
+				struct page **compressed_pages)
+{
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	struct page *page = NULL;
+	char *kaddr;
+	unsigned long ptr;
+	struct btrfs_file_extent_item *ei;
+	int err = 0;
+	int ret;
+	size_t cur_size = size;
+	size_t datasize;
+	unsigned long offset;
+	int use_compress = 0;
+
+	if (compressed_size && compressed_pages) {
+		use_compress = 1;
+		cur_size = compressed_size;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	btrfs_set_trans_block_group(trans, inode);
+
+	key.objectid = inode->i_ino;
+	key.offset = start;
+	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
+	inode_add_bytes(inode, size);
+	datasize = btrfs_file_extent_calc_inline_size(cur_size);
+
+	ret = btrfs_insert_empty_item(trans, root, path, &key,
+				      datasize);
+	if (ret) {
+		err = ret;
+		printk("got bad ret %d\n", ret);
+		goto fail;
+	}
+	leaf = path->nodes[0];
+	ei = btrfs_item_ptr(leaf, path->slots[0],
+			    struct btrfs_file_extent_item);
+	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
+	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
+	btrfs_set_file_extent_encryption(leaf, ei, 0);
+	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
+	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
+	ptr = btrfs_file_extent_inline_start(ei);
+
+	if (use_compress) {
+		struct page *cpage;
+		int i = 0;
+		while(compressed_size > 0) {
+			cpage = compressed_pages[i];
+			cur_size = min(compressed_size,
+				       PAGE_CACHE_SIZE);
+
+			kaddr = kmap(cpage);
+			write_extent_buffer(leaf, kaddr, ptr, cur_size);
+			kunmap(cpage);
+
+			i++;
+			ptr += cur_size;
+			compressed_size -= cur_size;
+		}
+		btrfs_set_file_extent_compression(leaf, ei,
+						  BTRFS_COMPRESS_ZLIB);
+	} else {
+		page = find_get_page(inode->i_mapping,
+				     start >> PAGE_CACHE_SHIFT);
+		btrfs_set_file_extent_compression(leaf, ei, 0);
+		kaddr = kmap_atomic(page, KM_USER0);
+		offset = start & (PAGE_CACHE_SIZE - 1);
+		write_extent_buffer(leaf, kaddr + offset, ptr, size);
+		kunmap_atomic(kaddr, KM_USER0);
+		page_cache_release(page);
+	}
+	btrfs_mark_buffer_dirty(leaf);
+	btrfs_free_path(path);
+
+	BTRFS_I(inode)->disk_i_size = inode->i_size;
+	btrfs_update_inode(trans, root,
inode); + return 0; +fail: + btrfs_free_path(path); + return err; +} + + +/* + * conditionally insert an inline extent into the file. This + * does the checks required to make sure the data is small enough + * to fit as an inline extent. + */ +static int cow_file_range_inline(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, u64 start, u64 end, + size_t compressed_size, + struct page **compressed_pages) +{ + u64 isize = i_size_read(inode); + u64 actual_end = min(end + 1, isize); + u64 inline_len = actual_end - start; + u64 aligned_end = (end + root->sectorsize - 1) & + ~((u64)root->sectorsize - 1); + u64 hint_byte; + u64 data_len = inline_len; + int ret; + + if (compressed_size) + data_len = compressed_size; + + if (start > 0 || + data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) || + (!compressed_size && + (actual_end & (root->sectorsize - 1)) == 0) || + end + 1 < isize || + data_len > root->fs_info->max_inline) { + return 1; + } + + mutex_lock(&BTRFS_I(inode)->extent_mutex); + ret = btrfs_drop_extents(trans, root, inode, start, + aligned_end, aligned_end, &hint_byte); + BUG_ON(ret); + + if (isize > actual_end) + inline_len = min_t(u64, isize, actual_end); + ret = insert_inline_extent(trans, root, inode, start, + inline_len, compressed_size, + compressed_pages); + BUG_ON(ret); + btrfs_drop_extent_cache(inode, start, aligned_end, 0); + mutex_unlock(&BTRFS_I(inode)->extent_mutex); + return 0; +} + /* * when extent_io.c finds a delayed allocation range in the file, * the call backs end up in this code. The basic idea is to * allocate extents on disk for the range, and create ordered data structs * in ram to track those extents. + * + * locked_page is the page that writepage had locked already. We use + * it to make sure we don't do extra locks or unlocks. + * + * *page_started is set to one if we unlock locked_page and do everything + * required to start IO on it. It may be clean and already done with + * IO when we return. */ -static int cow_file_range(struct inode *inode, u64 start, u64 end) +static int cow_file_range(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; u64 alloc_hint = 0; u64 num_bytes; + unsigned long ram_size; + u64 orig_start; + u64 disk_num_bytes; u64 cur_alloc_size; u64 blocksize = root->sectorsize; - u64 orig_num_bytes; + u64 actual_end; struct btrfs_key ins; struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; + struct page **pages = NULL; + unsigned long nr_pages; + unsigned long nr_pages_ret = 0; + unsigned long total_compressed = 0; + unsigned long total_in = 0; + unsigned long max_compressed = 128 * 1024; + unsigned long max_uncompressed = 256 * 1024; + int i; + int will_compress; trans = btrfs_join_transaction(root, 1); BUG_ON(!trans); btrfs_set_trans_block_group(trans, inode); + orig_start = start; + + /* + * compression made this loop a bit ugly, but the basic idea is to + * compress some pages but keep the total size of the compressed + * extent relatively small. If compression is off, this goto target + * is never used. 
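+ * (each pass through the loop below takes at most 256k of uncompressed input and produces at most 128k of compressed output; after a compressed extent is submitted we jump back to the again label for the next chunk of the range)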
+ */ +again: + will_compress = 0; + nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; + nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE); + actual_end = min_t(u64, i_size_read(inode), end + 1); + total_compressed = actual_end - start; + + /* we want to make sure that the amount of ram required to uncompress + * an extent is reasonable, so we limit the total size in ram + * of a compressed extent to 256k + */ + total_compressed = min(total_compressed, max_uncompressed); num_bytes = (end - start + blocksize) & ~(blocksize - 1); num_bytes = max(blocksize, num_bytes); - orig_num_bytes = num_bytes; + disk_num_bytes = num_bytes; + total_in = 0; + ret = 0; - if (alloc_hint == EXTENT_MAP_INLINE) - goto out; + /* we do compression for mount -o compress and when the + * inode has not been flagged as nocompress + */ + if (!btrfs_test_flag(inode, NOCOMPRESS) && + btrfs_test_opt(root, COMPRESS)) { + WARN_ON(pages); + pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + + /* we want to make sure the amount of IO required to satisfy + * a random read is reasonably small, so we limit the size + * of a compressed extent to 128k + */ + ret = btrfs_zlib_compress_pages(inode->i_mapping, start, + total_compressed, pages, + nr_pages, &nr_pages_ret, + &total_in, + &total_compressed, + max_compressed); + + if (!ret) { + unsigned long offset = total_compressed & + (PAGE_CACHE_SIZE - 1); + struct page *page = pages[nr_pages_ret - 1]; + char *kaddr; + + /* zero the tail end of the last page, we might be + * sending it down to disk + */ + if (offset) { + kaddr = kmap_atomic(page, KM_USER0); + memset(kaddr + offset, 0, + PAGE_CACHE_SIZE - offset); + kunmap_atomic(kaddr, KM_USER0); + } + will_compress = 1; + } + } + if (start == 0) { + /* let's try to make an inline extent */ + if (ret || total_in < (end - start + 1)) { + /* we didn't compress the entire range, try + * to make an uncompressed inline extent. 
This + * is almost sure to fail, but maybe inline sizes + * will get bigger later + */ + ret = cow_file_range_inline(trans, root, inode, + start, end, 0, NULL); + } else { + ret = cow_file_range_inline(trans, root, inode, + start, end, + total_compressed, pages); + } + if (ret == 0) { + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, end, NULL, + 1, 1, 1); + *page_started = 1; + ret = 0; + goto free_pages_out; + } + } + + if (will_compress) { + /* + * we aren't doing an inline extent, round the compressed size + * up to a block size boundary so the allocator does sane + * things + */ + total_compressed = (total_compressed + blocksize - 1) & + ~(blocksize - 1); + + /* + * one last check to make sure the compression is really a + * win, compare the page count read with the blocks on disk + */ + total_in = (total_in + PAGE_CACHE_SIZE - 1) & + ~(PAGE_CACHE_SIZE - 1); + if (total_compressed >= total_in) { + will_compress = 0; + } else { + disk_num_bytes = total_compressed; + num_bytes = total_in; + } + } + if (!will_compress && pages) { + /* + * the compression code ran but failed to make things smaller, + * free any pages it allocated and our page pointer array + */ + for (i = 0; i < nr_pages_ret; i++) { + page_cache_release(pages[i]); + } + kfree(pages); + pages = NULL; + total_compressed = 0; + nr_pages_ret = 0; + + /* flag the file so we don't compress in the future */ + btrfs_set_flag(inode, NOCOMPRESS); + } + + BUG_ON(disk_num_bytes > + btrfs_super_total_bytes(&root->fs_info->super_copy)); - BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); mutex_lock(&BTRFS_I(inode)->extent_mutex); btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); mutex_unlock(&BTRFS_I(inode)->extent_mutex); - while(num_bytes > 0) { - cur_alloc_size = min(num_bytes, root->fs_info->max_extent); + while(disk_num_bytes > 0) { + unsigned long min_bytes; + + /* + * the max size of a compressed extent is pretty small, + * make the code a little less complex by forcing + * the allocator to find a whole compressed extent at once + */ + if (will_compress) + min_bytes = disk_num_bytes; + else + min_bytes = root->sectorsize; + + cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, - root->sectorsize, 0, alloc_hint, + min_bytes, 0, alloc_hint, (u64)-1, &ins, 1); if (ret) { WARN_ON(1); - goto out; + goto free_pages_out_fail; } em = alloc_extent_map(GFP_NOFS); em->start = start; + + if (will_compress) { + ram_size = num_bytes; + em->len = num_bytes; + } else { + /* ramsize == disk size */ + ram_size = ins.offset; + em->len = ins.offset; + } + + em->block_start = ins.objectid; + em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + mutex_lock(&BTRFS_I(inode)->extent_mutex); set_bit(EXTENT_FLAG_PINNED, &em->flags); + + if (will_compress) + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + while(1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -174,26 +493,95 @@ static int cow_file_range(struct inode *inode, u64 start, u64 end) break; } btrfs_drop_extent_cache(inode, start, - start + ins.offset - 1, 0); + start + ram_size - 1, 0); } mutex_unlock(&BTRFS_I(inode)->extent_mutex); cur_alloc_size = ins.offset; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ins.offset, 0); + ram_size, cur_alloc_size, 0, + will_compress); BUG_ON(ret); - if (num_bytes < cur_alloc_size) { - printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes, + + if 
(disk_num_bytes < cur_alloc_size) { + printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes, cur_alloc_size); break; } + + if (will_compress) { + /* + * we're doing compression, and we need to + * submit the compressed extents down to the device. + * + * We lock down all the file pages, clearing their + * dirty bits and setting them writeback. Everyone + * that wants to modify the page will wait on the + * ordered extent above. + * + * The writeback bits on the file pages are + * cleared when the compressed pages are on disk + */ + btrfs_end_transaction(trans, root); + + if (start <= page_offset(locked_page) && + page_offset(locked_page) < start + ram_size) { + *page_started = 1; + } + + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, + start + ram_size - 1, + NULL, 1, 1, 0); + + ret = btrfs_submit_compressed_write(inode, start, + ram_size, ins.objectid, + cur_alloc_size, pages, + nr_pages_ret); + + BUG_ON(ret); + trans = btrfs_join_transaction(root, 1); + if (start + ram_size < end) { + start += ram_size; + alloc_hint = ins.objectid + ins.offset; + /* pages will be freed at end_bio time */ + pages = NULL; + goto again; + } else { + /* we've written everything, time to go */ + break; + } + } + /* we're not doing compressed IO, don't unlock the first + * page (which the caller expects to stay locked), don't + * clear any dirty bits and don't set any writeback bits + */ + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, + start, start + ram_size - 1, + locked_page, 0, 0, 0); + disk_num_bytes -= cur_alloc_size; num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } + + ret = 0; out: btrfs_end_transaction(trans, root); + return ret; + +free_pages_out_fail: + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, + start, end, locked_page, 0, 0, 0); +free_pages_out: + for (i = 0; i < nr_pages_ret; i++) + page_cache_release(pages[i]); + if (pages) + kfree(pages); + + goto out; } /* @@ -203,7 +591,8 @@ out: * If no cow copies or snapshots exist, we write directly to the existing * blocks on disk */ -static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end) +static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started) { u64 extent_start; u64 extent_end; @@ -260,6 +649,11 @@ again: extent_end = extent_start + extent_num_bytes; err = 0; + if (btrfs_file_extent_compression(leaf, item) || + btrfs_file_extent_encryption(leaf, item) || + btrfs_file_extent_other_encoding(leaf, item)) + goto not_found; + if (loops && start != extent_start) goto not_found; @@ -284,7 +678,8 @@ again: bytenr += btrfs_file_extent_offset(leaf, item); extent_num_bytes = min(end + 1, extent_end) - start; ret = btrfs_add_ordered_extent(inode, start, bytenr, - extent_num_bytes, 1); + extent_num_bytes, + extent_num_bytes, 1, 0); if (ret) { err = ret; goto out; @@ -300,7 +695,8 @@ again: not_found: btrfs_end_transaction(trans, root); btrfs_free_path(path); - return cow_file_range(inode, start, end); + return cow_file_range(inode, locked_page, start, end, + page_started); } out: WARN_ON(err); @@ -312,16 +708,19 @@ out: /* * extent_io.c callback to do delayed allocation processing */ -static int run_delalloc_range(struct inode *inode, u64 start, u64 end) +static int run_delalloc_range(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; if (btrfs_test_opt(root, NODATACOW) || 
btrfs_test_flag(inode, NODATACOW)) - ret = run_delalloc_nocow(inode, start, end); + ret = run_delalloc_nocow(inode, locked_page, start, end, + page_started); else - ret = cow_file_range(inode, start, end); + ret = cow_file_range(inode, locked_page, start, end, + page_started); return ret; } @@ -383,7 +782,8 @@ int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, * we don't create bios that span stripes or chunks */ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, - size_t size, struct bio *bio) + size_t size, struct bio *bio, + unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; struct btrfs_mapping_tree *map_tree; @@ -413,7 +813,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, * are inserted into the btree */ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -429,7 +829,7 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, * or reading the csums from the tree before a read */ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, - int mirror_num) + int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -444,11 +844,17 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, if (!(rw & (1 << BIO_RW))) { btrfs_lookup_bio_sums(root, inode, bio); + + if (bio_flags & EXTENT_BIO_COMPRESSED) { + return btrfs_submit_compressed_read(inode, bio, + mirror_num, bio_flags); + } + goto mapit; } return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, - __btrfs_submit_bio_hook); + bio_flags, __btrfs_submit_bio_hook); mapit: return btrfs_map_bio(root, rw, bio, mirror_num, 0); } @@ -539,7 +945,7 @@ out_page: * good idea. This causes problems because we want to make sure COW * properly happens and the data=ordered rules are followed. * - * In our case any range that doesn't have the EXTENT_ORDERED bit set + * In our case any range that doesn't have the ORDERED bit set * hasn't been properly setup for IO. We kick off an async process * to fix it up. The async helper will wait for ordered extents, set * the delalloc bit and make it safe to write the page. 
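The read-side dispatch added to btrfs_submit_bio_hook above is easy to lose in the hunk noise: on a read, checksums are looked up first, and bios flagged EXTENT_BIO_COMPRESSED then detour through the compressed-read path while everything else maps straight to the device. A condensed sketch of the read path follows; submit_read_sketch() is a hypothetical wrapper added here for illustration only, but the calls inside it are the ones the hunk actually makes:

static int submit_read_sketch(struct inode *inode, struct bio *bio,
			      int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/* reads fetch their csums before the bio goes down */
	btrfs_lookup_bio_sums(root, inode, bio);

	if (bio_flags & EXTENT_BIO_COMPRESSED) {
		/* the whole compressed extent is read, then inflated */
		return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
	}
	/* plain data maps directly to the underlying device */
	return btrfs_map_bio(root, READ, bio, mirror_num, 0);
}

(The write path is omitted from the sketch: writes still go through btrfs_wq_submit_bio so checksumming can happen in a helper thread.)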
@@ -632,10 +1038,21 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) btrfs_set_file_extent_disk_bytenr(leaf, extent_item, ordered_extent->start); btrfs_set_file_extent_disk_num_bytes(leaf, extent_item, - ordered_extent->len); + ordered_extent->disk_len); btrfs_set_file_extent_offset(leaf, extent_item, 0); + + if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) + btrfs_set_file_extent_compression(leaf, extent_item, 1); + else + btrfs_set_file_extent_compression(leaf, extent_item, 0); + btrfs_set_file_extent_encryption(leaf, extent_item, 0); + btrfs_set_file_extent_other_encoding(leaf, extent_item, 0); + + /* ram bytes = extent_num_bytes for now */ btrfs_set_file_extent_num_bytes(leaf, extent_item, ordered_extent->len); + btrfs_set_file_extent_ram_bytes(leaf, extent_item, + ordered_extent->len); btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, ordered_extent->file_offset, @@ -644,7 +1061,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) mutex_unlock(&BTRFS_I(inode)->extent_mutex); ins.objectid = ordered_extent->start; - ins.offset = ordered_extent->len; + ins.offset = ordered_extent->disk_len; ins.type = BTRFS_EXTENT_ITEM_KEY; ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, root->root_key.objectid, @@ -714,6 +1131,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, int ret; int rw; u64 logical; + unsigned long bio_flags = 0; ret = get_state_private(failure_tree, start, &private); if (ret) { @@ -738,6 +1156,8 @@ int btrfs_io_failed_hook(struct bio *failed_bio, } logical = start - em->start; logical = em->block_start + logical; + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + bio_flags = EXTENT_BIO_COMPRESSED; failrec->logical = logical; free_extent_map(em); set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | @@ -781,7 +1201,8 @@ int btrfs_io_failed_hook(struct bio *failed_bio, rw = READ; BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, - failrec->last_mirror); + failrec->last_mirror, + bio_flags); return 0; } @@ -1644,10 +2065,8 @@ search_again: item_end += btrfs_file_extent_num_bytes(leaf, fi); } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { - struct btrfs_item *item = btrfs_item_nr(leaf, - path->slots[0]); item_end += btrfs_file_extent_inline_len(leaf, - item); + fi); } item_end--; } @@ -1715,7 +2134,14 @@ search_again: root_owner = btrfs_header_owner(leaf); } } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { - if (!del_item) { + /* + * we can't truncate inline items that have had + * special encodings + */ + if (!del_item && + btrfs_file_extent_compression(leaf, fi) == 0 && + btrfs_file_extent_encryption(leaf, fi) == 0 && + btrfs_file_extent_other_encoding(leaf, fi) == 0) { u32 size = new_size - found_key.offset; if (root->ref_cows) { @@ -1926,7 +2352,8 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) err = btrfs_insert_file_extent(trans, root, inode->i_ino, hole_start, 0, 0, - hole_size, 0); + hole_size, 0, hole_size, + 0, 0, 0); btrfs_drop_extent_cache(inode, hole_start, (u64)-1, 0); btrfs_check_file(root, inode); @@ -2894,11 +3321,50 @@ static int merge_extent_mapping(struct extent_map_tree *em_tree, start_diff = map_start - em->start; em->start = map_start; em->len = map_len; - if (em->block_start < EXTENT_MAP_LAST_BYTE) + if (em->block_start < EXTENT_MAP_LAST_BYTE && + !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { em->block_start += start_diff; + em->block_len -= start_diff; + } return add_extent_mapping(em_tree, em); } +static 
noinline int uncompress_inline(struct btrfs_path *path, + struct inode *inode, struct page *page, + size_t pg_offset, u64 extent_offset, + struct btrfs_file_extent_item *item) +{ + int ret; + struct extent_buffer *leaf = path->nodes[0]; + char *tmp; + size_t max_size; + unsigned long inline_size; + unsigned long ptr; + + WARN_ON(pg_offset != 0); + max_size = btrfs_file_extent_ram_bytes(leaf, item); + inline_size = btrfs_file_extent_inline_item_len(leaf, + btrfs_item_nr(leaf, path->slots[0])); + tmp = kmalloc(inline_size, GFP_NOFS); + ptr = btrfs_file_extent_inline_start(item); + + read_extent_buffer(leaf, tmp, ptr, inline_size); + + max_size = min(PAGE_CACHE_SIZE, max_size); + ret = btrfs_zlib_decompress(tmp, page, extent_offset, + inline_size, max_size); + if (ret) { + char *kaddr = kmap_atomic(page, KM_USER0); + unsigned long copy_size = min_t(u64, + PAGE_CACHE_SIZE - pg_offset, + max_size - extent_offset); + memset(kaddr + pg_offset, 0, copy_size); + kunmap_atomic(kaddr, KM_USER0); + } + kfree(tmp); + return 0; +} + /* * a bit scary, this does extent mapping from logical file offset to the disk. * the ugly parts come from merging extents from the disk with the @@ -2927,6 +3393,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_trans_handle *trans = NULL; + int compressed; again: spin_lock(&em_tree->lock); @@ -2951,6 +3418,7 @@ again: em->bdev = root->fs_info->fs_devices->latest_bdev; em->start = EXTENT_MAP_HOLE; em->len = (u64)-1; + em->block_len = (u64)-1; if (!path) { path = btrfs_alloc_path(); @@ -2983,6 +3451,7 @@ again: found_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; + compressed = btrfs_file_extent_compression(leaf, item); if (found_type == BTRFS_FILE_EXTENT_REG) { extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); @@ -3005,10 +3474,18 @@ again: em->block_start = EXTENT_MAP_HOLE; goto insert; } - bytenr += btrfs_file_extent_offset(leaf, item); - em->block_start = bytenr; em->start = extent_start; em->len = extent_end - extent_start; + if (compressed) { + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->block_start = bytenr; + em->block_len = btrfs_file_extent_disk_num_bytes(leaf, + item); + } else { + bytenr += btrfs_file_extent_offset(leaf, item); + em->block_start = bytenr; + em->block_len = em->len; + } goto insert; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { u64 page_start; @@ -3018,8 +3495,7 @@ again: size_t extent_offset; size_t copy_size; - size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf, - path->slots[0])); + size = btrfs_file_extent_inline_len(leaf, item); extent_end = (extent_start + size + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); if (start < extent_start || start >= extent_end) { @@ -3035,9 +3511,10 @@ again: } em->block_start = EXTENT_MAP_INLINE; - if (!page) { + if (!page || create) { em->start = extent_start; - em->len = size; + em->len = (size + root->sectorsize - 1) & + ~((u64)root->sectorsize - 1); goto out; } @@ -3048,11 +3525,22 @@ again: em->start = extent_start + extent_offset; em->len = (copy_size + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); - map = kmap(page); + if (compressed) + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); ptr = btrfs_file_extent_inline_start(item) + extent_offset; if (create == 0 && !PageUptodate(page)) { - read_extent_buffer(leaf, map + pg_offset, ptr, - copy_size); + if 
(btrfs_file_extent_compression(leaf, item) == + BTRFS_COMPRESS_ZLIB) { + ret = uncompress_inline(path, inode, page, + pg_offset, + extent_offset, item); + BUG_ON(ret); + } else { + map = kmap(page); + read_extent_buffer(leaf, map + pg_offset, ptr, + copy_size); + kunmap(page); + } flush_dcache_page(page); } else if (create && PageUptodate(page)) { if (!trans) { @@ -3063,11 +3551,12 @@ again: trans = btrfs_join_transaction(root, 1); goto again; } + map = kmap(page); write_extent_buffer(leaf, map + pg_offset, ptr, copy_size); + kunmap(page); btrfs_mark_buffer_dirty(leaf); } - kunmap(page); set_extent_uptodate(io_tree, em->start, extent_map_end(em) - 1, GFP_NOFS); goto insert; @@ -3779,6 +4268,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, btrfs_set_file_extent_generation(leaf, ei, trans->transid); btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE); + btrfs_set_file_extent_encryption(leaf, ei, 0); + btrfs_set_file_extent_compression(leaf, ei, 0); + btrfs_set_file_extent_other_encoding(leaf, ei, 0); + btrfs_set_file_extent_ram_bytes(leaf, ei, name_len); + ptr = btrfs_file_extent_inline_start(ei); write_extent_buffer(leaf, symname, ptr, name_len); btrfs_mark_buffer_dirty(leaf); diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2eb6caba57c..b5745bb96d4 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -165,7 +165,8 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, * inserted. */ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, int nocow) + u64 start, u64 len, u64 disk_len, int nocow, + int compressed) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -180,9 +181,12 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->file_offset = file_offset; entry->start = start; entry->len = len; + entry->disk_len = disk_len; entry->inode = inode; if (nocow) set_bit(BTRFS_ORDERED_NOCOW, &entry->flags); + if (compressed) + set_bit(BTRFS_ORDERED_COMPRESSED, &entry->flags); /* one ref for the tree */ atomic_set(&entry->refs, 1); @@ -389,9 +393,10 @@ void btrfs_start_ordered_extent(struct inode *inode, * for pdflush to find them */ btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); - if (wait) + if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); + } } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index f50f8870a14..1ef464145d2 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -66,6 +66,8 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ +#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -73,9 +75,12 @@ struct btrfs_ordered_extent { /* disk byte number */ u64 start; - /* length of the extent in bytes */ + /* ram length of the extent in bytes */ u64 len; + /* extent length on disk */ + u64 disk_len; + /* flags (described above) */ unsigned long flags; @@ -127,7 +132,8 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, int nocow); + u64 start, u64 len, u64 disk_len, int nocow, + int compressed); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); 
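With compression in the picture, an ordered extent now carries two lengths: len counts the uncompressed file bytes the extent covers, while disk_len counts the compressed bytes actually allocated on disk (em->block_len tracks the same split on the extent_map side). Both values come from the round-up-to-block mask arithmetic cow_file_range uses. A small standalone sketch of that rounding, with made-up numbers purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* round bytes up to the next multiple of blocksize (a power of two),
 * the same mask trick the patch applies to num_bytes and
 * total_compressed */
static uint64_t round_up_block(uint64_t bytes, uint64_t blocksize)
{
	return (bytes + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
	uint64_t blocksize = 4096;           /* root->sectorsize */
	uint64_t ram_bytes = 100 * 1024;     /* uncompressed range */
	uint64_t compressed = 31 * 1024 + 7; /* hypothetical zlib output */

	/* len covers the logical bytes, disk_len the bytes reserved */
	printf("len      = %llu\n",
	       (unsigned long long)round_up_block(ram_bytes, blocksize));
	printf("disk_len = %llu\n",
	       (unsigned long long)round_up_block(compressed, blocksize));
	return 0;
}

Here len stays 102400 while disk_len shrinks to 32768, which is exactly the gap btrfs_finish_ordered_io records in the file extent item.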
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index bd9ab3e9a7f..64725c13aa1 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -115,15 +115,16 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) if (btrfs_file_extent_type(l, fi) == BTRFS_FILE_EXTENT_INLINE) { printk("\t\tinline extent data size %u\n", - btrfs_file_extent_inline_len(l, item)); + btrfs_file_extent_inline_len(l, fi)); break; } printk("\t\textent data disk bytenr %llu nr %llu\n", (unsigned long long)btrfs_file_extent_disk_bytenr(l, fi), (unsigned long long)btrfs_file_extent_disk_num_bytes(l, fi)); - printk("\t\textent data offset %llu nr %llu\n", + printk("\t\textent data offset %llu nr %llu ram %llu\n", (unsigned long long)btrfs_file_extent_offset(l, fi), - (unsigned long long)btrfs_file_extent_num_bytes(l, fi)); + (unsigned long long)btrfs_file_extent_num_bytes(l, fi), + (unsigned long long)btrfs_file_extent_ram_bytes(l, fi)); break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2e6039825b7..431fdf144b5 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -47,6 +47,7 @@ #include "volumes.h" #include "version.h" #include "export.h" +#include "compression.h" #define BTRFS_SUPER_MAGIC 0x9123683E @@ -69,7 +70,7 @@ static void btrfs_put_super (struct super_block * sb) enum { Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow, Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, - Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_err, + Opt_ssd, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_err, }; static match_table_t tokens = { @@ -83,6 +84,7 @@ static match_table_t tokens = { {Opt_max_inline, "max_inline=%s"}, {Opt_alloc_start, "alloc_start=%s"}, {Opt_thread_pool, "thread_pool=%d"}, + {Opt_compress, "compress"}, {Opt_ssd, "ssd"}, {Opt_noacl, "noacl"}, {Opt_err, NULL}, @@ -163,6 +165,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_opt(info->mount_opt, NODATACOW); btrfs_set_opt(info->mount_opt, NODATASUM); break; + case Opt_compress: + printk(KERN_INFO "btrfs: use compression\n"); + btrfs_set_opt(info->mount_opt, COMPRESS); + break; case Opt_ssd: printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); btrfs_set_opt(info->mount_opt, SSD); @@ -622,6 +628,7 @@ static int __init init_btrfs_fs(void) err = btrfs_interface_init(); if (err) goto free_extent_map; + err = register_filesystem(&btrfs_fs_type); if (err) goto unregister_ioctl; @@ -651,6 +658,7 @@ static void __exit exit_btrfs_fs(void) unregister_filesystem(&btrfs_fs_type); btrfs_exit_sysfs(); btrfs_cleanup_fs_uuids(); + btrfs_zlib_exit(); } module_init(init_btrfs_fs) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index cf618cc8b34..e6d579053a4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -540,8 +540,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, if (found_type == BTRFS_FILE_EXTENT_REG) extent_end = start + btrfs_file_extent_num_bytes(eb, item); else if (found_type == BTRFS_FILE_EXTENT_INLINE) { - size = btrfs_file_extent_inline_len(eb, - btrfs_item_nr(eb, slot)); + size = btrfs_file_extent_inline_len(eb, item); extent_end = (start + size + mask) & ~mask; } else { ret = 0; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2eed7f91f51..7db4cfd03a9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1816,6 +1816,7 @@ again: em->start = key.offset; em->len = *num_bytes; em->block_start = 0; + em->block_len = em->len; if (type & 
BTRFS_BLOCK_GROUP_SYSTEM) { ret = btrfs_add_system_chunk(trans, chunk_root, &key, @@ -2323,6 +2324,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, em->start = logical; em->len = length; em->block_start = 0; + em->block_len = em->len; map->num_stripes = num_stripes; map->io_width = btrfs_chunk_io_width(leaf, chunk); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c new file mode 100644 index 00000000000..e99309180a1 --- /dev/null +++ b/fs/btrfs/zlib.c @@ -0,0 +1,637 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + * + * Based on jffs2 zlib code: + * Copyright © 2001-2007 Red Hat, Inc. + * Created by David Woodhouse + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Plan: call deflate() with avail_in == *sourcelen, + avail_out = *dstlen - 12 and flush == Z_FINISH. + If it doesn't manage to finish, call it again with + avail_in == 0 and avail_out set to the remaining 12 + bytes for it to clean up. + Q: Is 12 bytes sufficient? +*/ +#define STREAM_END_SPACE 12 + +struct workspace { + z_stream inf_strm; + z_stream def_strm; + char *buf; + struct list_head list; +}; + +static LIST_HEAD(idle_workspace); +static DEFINE_SPINLOCK(workspace_lock); +static unsigned long num_workspace; +static atomic_t alloc_workspace = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(workspace_wait); + +/* + * this finds an available zlib workspace or allocates a new one + * NULL or an ERR_PTR is returned if things go bad. 
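+ * (roughly one workspace is allocated per online cpu; when all of them + * are busy, callers sleep on workspace_wait until free_workspace + * returns one to the idle list)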
+ */ +static struct workspace *find_zlib_workspace(void) +{ + struct workspace *workspace; + int ret; + int cpus = num_online_cpus(); + +again: + spin_lock(&workspace_lock); + if (!list_empty(&idle_workspace)) { + workspace = list_entry(idle_workspace.next, struct workspace, + list); + list_del(&workspace->list); + num_workspace--; + spin_unlock(&workspace_lock); + return workspace; + + } + spin_unlock(&workspace_lock); + if (atomic_read(&alloc_workspace) > cpus) { + DEFINE_WAIT(wait); + prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); + if (atomic_read(&alloc_workspace) > cpus) + schedule(); + finish_wait(&workspace_wait, &wait); + goto again; + } + atomic_inc(&alloc_workspace); + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); + if (!workspace) { + ret = -ENOMEM; + goto fail; + } + + workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); + if (!workspace->def_strm.workspace) { + ret = -ENOMEM; + goto fail; + } + workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); + if (!workspace->inf_strm.workspace) { + ret = -ENOMEM; + goto fail_inflate; + } + workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); + if (!workspace->buf) { + ret = -ENOMEM; + goto fail_kmalloc; + } + return workspace; + +fail_kmalloc: + vfree(workspace->inf_strm.workspace); +fail_inflate: + vfree(workspace->def_strm.workspace); +fail: + kfree(workspace); + atomic_dec(&alloc_workspace); + wake_up(&workspace_wait); + return ERR_PTR(ret); +} + +/* + * put a workspace struct back on the list or free it if we have enough + * idle ones sitting around + */ +static int free_workspace(struct workspace *workspace) +{ + spin_lock(&workspace_lock); + if (num_workspace < num_online_cpus()) { + list_add_tail(&workspace->list, &idle_workspace); + num_workspace++; + spin_unlock(&workspace_lock); + if (waitqueue_active(&workspace_wait)) + wake_up(&workspace_wait); + return 0; + } + spin_unlock(&workspace_lock); + vfree(workspace->def_strm.workspace); + vfree(workspace->inf_strm.workspace); + kfree(workspace->buf); + kfree(workspace); + + atomic_dec(&alloc_workspace); + if (waitqueue_active(&workspace_wait)) + wake_up(&workspace_wait); + return 0; +} + +/* + * cleanup function for module exit + */ +static void free_workspaces(void) +{ + struct workspace *workspace; + while(!list_empty(&idle_workspace)) { + workspace = list_entry(idle_workspace.next, struct workspace, + list); + list_del(&workspace->list); + vfree(workspace->def_strm.workspace); + vfree(workspace->inf_strm.workspace); + kfree(workspace->buf); + kfree(workspace); + atomic_dec(&alloc_workspace); + } +} + +/* + * given an address space and start/len, compress the bytes. + * + * pages are allocated to hold the compressed result and stored + * in 'pages' + * + * out_pages is used to return the number of pages allocated. There + * may be pages allocated even if we return an error + * + * total_in is used to return the number of bytes actually read. It + * may be smaller than len if we had to exit early because we + * ran out of room in the pages array or because we cross the + * max_out threshold. 
+ * + * total_out is used to return the total number of compressed bytes + * + * max_out tells us the max number of bytes that we're allowed to + * stuff into pages + */ +int btrfs_zlib_compress_pages(struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) +{ + int ret; + struct workspace *workspace; + char *data_in; + char *cpage_out; + int nr_pages = 0; + struct page *in_page = NULL; + struct page *out_page = NULL; + int out_written = 0; + int in_read = 0; + unsigned long bytes_left; + + *out_pages = 0; + *total_out = 0; + *total_in = 0; + + workspace = find_zlib_workspace(); + if (!workspace) + return -1; + + if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { + printk(KERN_WARNING "deflateInit failed\n"); + ret = -1; + goto out; + } + + workspace->def_strm.total_in = 0; + workspace->def_strm.total_out = 0; + + in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + cpage_out = kmap(out_page); + pages[0] = out_page; + nr_pages = 1; + + workspace->def_strm.next_in = data_in; + workspace->def_strm.next_out = cpage_out; + workspace->def_strm.avail_out = PAGE_CACHE_SIZE; + workspace->def_strm.avail_in = min(len, PAGE_CACHE_SIZE); + + out_written = 0; + in_read = 0; + + while (workspace->def_strm.total_in < len) { + ret = zlib_deflate(&workspace->def_strm, Z_SYNC_FLUSH); + if (ret != Z_OK) { + printk(KERN_DEBUG "btrfs deflate in loop returned %d\n", + ret); + zlib_deflateEnd(&workspace->def_strm); + ret = -1; + goto out; + } + + /* we're making it bigger, give up */ + if (workspace->def_strm.total_in > 8192 && + workspace->def_strm.total_in < + workspace->def_strm.total_out) { + ret = -1; + goto out; + } + /* we need another page for writing out. 
Test this + * before the total_in so we will pull in a new page for + * the stream end if required + */ + if (workspace->def_strm.avail_out == 0) { + kunmap(out_page); + if (nr_pages == nr_dest_pages) { + out_page = NULL; + ret = -1; + goto out; + } + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + cpage_out = kmap(out_page); + pages[nr_pages] = out_page; + nr_pages++; + workspace->def_strm.avail_out = PAGE_CACHE_SIZE; + workspace->def_strm.next_out = cpage_out; + } + /* we're all done */ + if (workspace->def_strm.total_in >= len) + break; + + /* we've read in a full page, get a new one */ + if (workspace->def_strm.avail_in == 0) { + if (workspace->def_strm.total_out > max_out) + break; + + bytes_left = len - workspace->def_strm.total_in; + kunmap(in_page); + page_cache_release(in_page); + + start += PAGE_CACHE_SIZE; + in_page = find_get_page(mapping, + start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + workspace->def_strm.avail_in = min(bytes_left, + PAGE_CACHE_SIZE); + workspace->def_strm.next_in = data_in; + } + } + workspace->def_strm.avail_in = 0; + ret = zlib_deflate(&workspace->def_strm, Z_FINISH); + zlib_deflateEnd(&workspace->def_strm); + + if (ret != Z_STREAM_END) { + ret = -1; + goto out; + } + + if (workspace->def_strm.total_out >= workspace->def_strm.total_in) { + ret = -1; + goto out; + } + + ret = 0; + *total_out = workspace->def_strm.total_out; + *total_in = workspace->def_strm.total_in; +out: + *out_pages = nr_pages; + if (out_page) + kunmap(out_page); + + if (in_page) { + kunmap(in_page); + page_cache_release(in_page); + } + free_workspace(workspace); + return ret; +} + +/* + * pages_in is an array of pages with compressed data. + * + * disk_start is the starting logical offset of this array in the file + * + * bvec is a bio_vec of pages from the file that we want to decompress into + * + * vcnt is the count of pages in the biovec + * + * srclen is the number of bytes in pages_in + * + * The basic idea is that we have a bio that was created by readpages. + * The pages in the bio are for the uncompressed data, and they may not + * be contiguous. They all correspond to the range of bytes covered by + * the compressed extent. + */ +int btrfs_zlib_decompress_biovec(struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen) +{ + int ret = 0; + int wbits = MAX_WBITS; + struct workspace *workspace; + char *data_in; + size_t total_out = 0; + unsigned long page_bytes_left; + unsigned long page_in_index = 0; + unsigned long page_out_index = 0; + struct page *page_out; + unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + unsigned long buf_start; + unsigned long buf_offset; + unsigned long bytes; + unsigned long working_bytes; + unsigned long pg_offset; + unsigned long start_byte; + unsigned long current_buf_start; + char *kaddr; + + workspace = find_zlib_workspace(); + if (!workspace) + return -ENOMEM; + + data_in = kmap(pages_in[page_in_index]); + workspace->inf_strm.next_in = data_in; + workspace->inf_strm.avail_in = min(srclen, PAGE_CACHE_SIZE); + workspace->inf_strm.total_in = 0; + + workspace->inf_strm.total_out = 0; + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + page_out = bvec[page_out_index].bv_page; + page_bytes_left = PAGE_CACHE_SIZE; + pg_offset = 0; + + /* If it's deflate, and it's got no preset dictionary, then + we can tell zlib to skip the adler32 check. 
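+ (The two header bytes encode the deflate method in the low nibble of byte 0 and the window size in the high nibble, and the 16-bit header value must be a multiple of 31; passing negative wbits to zlib_inflateInit2 selects a raw deflate stream, so the header is stepped over by hand.)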
*/ + if (srclen > 2 && !(data_in[1] & PRESET_DICT) && + ((data_in[0] & 0x0f) == Z_DEFLATED) && + !(((data_in[0]<<8) + data_in[1]) % 31)) { + + wbits = -((data_in[0] >> 4) + 8); + workspace->inf_strm.next_in += 2; + workspace->inf_strm.avail_in -= 2; + } + + if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { + printk(KERN_WARNING "inflateInit failed\n"); + ret = -1; + goto out; + } + while(workspace->inf_strm.total_in < srclen) { + ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); + if (ret != Z_OK && ret != Z_STREAM_END) { + break; + } + + /* + * buf_start is the byte offset, in the uncompressed data, of + * the start of our workspace buffer + */ + buf_start = total_out; + + /* total_out points just past the last byte in the workspace buffer */ + total_out = workspace->inf_strm.total_out; + + working_bytes = total_out - buf_start; + + /* + * start byte is the first byte of the page we're currently + * copying into, relative to the start of the uncompressed data. + */ + start_byte = page_offset(page_out) - disk_start; + + if (working_bytes == 0) { + /* we didn't make progress in this inflate + * call, we're done + */ + if (ret != Z_STREAM_END) + ret = -1; + break; + } + + /* we haven't yet hit data corresponding to this page */ + if (total_out <= start_byte) { + goto next; + } + + /* + * the start of the data we care about is offset into + * the middle of our working buffer + */ + if (total_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes -= buf_offset; + } else { + buf_offset = 0; + } + current_buf_start = buf_start; + + /* copy bytes from the working buffer into the pages */ + while(working_bytes > 0) { + bytes = min(PAGE_CACHE_SIZE - pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, working_bytes); + kaddr = kmap_atomic(page_out, KM_USER0); + memcpy(kaddr + pg_offset, workspace->buf + buf_offset, + bytes); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page_out); + + pg_offset += bytes; + page_bytes_left -= bytes; + buf_offset += bytes; + working_bytes -= bytes; + current_buf_start += bytes; + + /* check if we need to pick another page */ + if (page_bytes_left == 0) { + page_out_index++; + if (page_out_index >= vcnt) { + ret = 0; + goto done; + } + page_out = bvec[page_out_index].bv_page; + pg_offset = 0; + page_bytes_left = PAGE_CACHE_SIZE; + start_byte = page_offset(page_out) - disk_start; + + /* + * make sure our new page is covered by this + * working buffer + */ + if (total_out <= start_byte) { + goto next; + } + + /* the next page in the biovec might not + * be adjacent to the last page, but it + * might still be found inside this working + * buffer. 
bump our offset pointer + */ + if (total_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = total_out - start_byte; + current_buf_start = buf_start + + buf_offset; + } + } + } +next: + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + + if (workspace->inf_strm.avail_in == 0) { + unsigned long tmp; + kunmap(pages_in[page_in_index]); + page_in_index++; + if (page_in_index >= total_pages_in) { + data_in = NULL; + break; + } + data_in = kmap(pages_in[page_in_index]); + workspace->inf_strm.next_in = data_in; + tmp = srclen - workspace->inf_strm.total_in; + workspace->inf_strm.avail_in = min(tmp, + PAGE_CACHE_SIZE); + } + } + if (ret != Z_STREAM_END) { + ret = -1; + } else { + ret = 0; + } +done: + zlib_inflateEnd(&workspace->inf_strm); + if (data_in) + kunmap(pages_in[page_in_index]); +out: + free_workspace(workspace); + return ret; +} + +/* + * a less complex decompression routine. Our compressed data fits in a + * single page, and we want to read a single page out of it. + * start_byte tells us the offset into the compressed data we're interested in + */ +int btrfs_zlib_decompress(unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) +{ + int ret = 0; + int wbits = MAX_WBITS; + struct workspace *workspace; + unsigned long bytes_left = destlen; + unsigned long total_out = 0; + char *kaddr; + + if (destlen > PAGE_CACHE_SIZE) + return -ENOMEM; + + workspace = find_zlib_workspace(); + if (!workspace) + return -ENOMEM; + + workspace->inf_strm.next_in = data_in; + workspace->inf_strm.avail_in = srclen; + workspace->inf_strm.total_in = 0; + + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + workspace->inf_strm.total_out = 0; + /* If it's deflate, and it's got no preset dictionary, then + we can tell zlib to skip the adler32 check. 
*/ + if (srclen > 2 && !(data_in[1] & PRESET_DICT) && + ((data_in[0] & 0x0f) == Z_DEFLATED) && + !(((data_in[0]<<8) + data_in[1]) % 31)) { + + wbits = -((data_in[0] >> 4) + 8); + workspace->inf_strm.next_in += 2; + workspace->inf_strm.avail_in -= 2; + } + + if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { + printk(KERN_WARNING "inflateInit failed\n"); + ret = -1; + goto out; + } + + while(bytes_left > 0) { + unsigned long buf_start; + unsigned long buf_offset; + unsigned long bytes; + unsigned long pg_offset = 0; + + ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); + if (ret != Z_OK && ret != Z_STREAM_END) { + break; + } + + buf_start = total_out; + total_out = workspace->inf_strm.total_out; + + if (total_out == buf_start) { + ret = -1; + break; + } + + if (total_out <= start_byte) { + goto next; + } + + if (total_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + } else { + buf_offset = 0; + } + + bytes = min(PAGE_CACHE_SIZE - pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, bytes_left); + + kaddr = kmap_atomic(dest_page, KM_USER0); + memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes); + kunmap_atomic(kaddr, KM_USER0); + + pg_offset += bytes; + bytes_left -= bytes; +next: + workspace->inf_strm.next_out = workspace->buf; + workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; + } + if (ret != Z_STREAM_END && bytes_left != 0) { + ret = -1; + } else { + ret = 0; + } + zlib_inflateEnd(&workspace->inf_strm); +out: + free_workspace(workspace); + return ret; +} + +void btrfs_zlib_exit(void) +{ + free_workspaces(); +} -- cgit v1.2.3-70-g09d2 From 2517920135b0d29e70453e5b03d70d7b94207df3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 29 Oct 2008 14:49:05 -0400 Subject: Btrfs: nuke fs wide allocation mutex V2 This patch removes the giant fs_info->alloc_mutex and replaces it with a bunch of little locks. There is now a pinned_mutex, which is used when messing with the pinned_extents extent io tree, and the extent_ins_mutex which is used with the pending_del and extent_ins extent io trees. The locking for the extent tree stuff was inspired by a patch that Yan Zheng wrote to fix a race condition; I cleaned it up some and changed the locking around a little bit, but the idea remains the same. Basically instead of holding the extent_ins_mutex throughout the processing of an extent on the extent_ins or pending_del trees, we just hold it while we're searching and when we clear the bits on those trees, and lock the extent for the duration of the operations on the extent. Also to keep from getting hung up waiting to lock an extent, I've added a try_lock_extent so if we cannot lock the extent, move on to the next one in the tree and we'll come back to that one. I have tested this heavily and it does not appear to break anything. This has to be applied on top of my find_free_extent redo patch. I tested this patch on top of Yan's space rebalancing code and it worked fine. The only thing that has changed since the last version is I pulled out all my debugging stuff; apparently I forgot to run guilt refresh before I sent the last patch out. 
Thank you, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 3 +- fs/btrfs/ctree.h | 8 +- fs/btrfs/disk-io.c | 3 +- fs/btrfs/extent-tree.c | 333 +++++++++++++++++++++----------------------- fs/btrfs/extent_io.c | 14 ++ fs/btrfs/extent_io.h | 2 + fs/btrfs/free-space-cache.c | 92 ++++++++---- fs/btrfs/transaction.c | 2 - fs/btrfs/tree-defrag.c | 3 - fs/btrfs/tree-log.c | 4 +- fs/btrfs/volumes.c | 2 - 11 files changed, 250 insertions(+), 216 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 73899d0f9d8..f82f8db0227 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1387,8 +1387,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root lowest_level = p->lowest_level; WARN_ON(lowest_level && ins_len > 0); WARN_ON(p->nodes[0] != NULL); - WARN_ON(cow && root == root->fs_info->extent_root && - !mutex_is_locked(&root->fs_info->alloc_mutex)); + if (ins_len < 0) lowest_unlock = 2; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index caa860a1c3e..fdba4f1b634 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -558,6 +558,7 @@ struct btrfs_block_group_cache { struct btrfs_key key; struct btrfs_block_group_item item; spinlock_t lock; + struct mutex alloc_mutex; u64 pinned; u64 reserved; u64 flags; @@ -635,7 +636,8 @@ struct btrfs_fs_info { struct mutex tree_log_mutex; struct mutex transaction_kthread_mutex; struct mutex cleaner_mutex; - struct mutex alloc_mutex; + struct mutex extent_ins_mutex; + struct mutex pinned_mutex; struct mutex chunk_mutex; struct mutex drop_mutex; struct mutex volume_mutex; @@ -1941,8 +1943,12 @@ int btrfs_acl_chmod(struct inode *inode); /* free-space-cache.c */ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); +int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes); int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, u64 bytenr, u64 size); +int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes); void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group); struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 796256440df..d1137d7ea8d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1460,7 +1460,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, mutex_init(&fs_info->trans_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->drop_mutex); - mutex_init(&fs_info->alloc_mutex); + mutex_init(&fs_info->extent_ins_mutex); + mutex_init(&fs_info->pinned_mutex); mutex_init(&fs_info->chunk_mutex); mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index e3b3e13a481..564260872c7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -53,24 +53,6 @@ __btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache *hint, u64 search_start, int data, int owner); -void maybe_lock_mutex(struct btrfs_root *root) -{ - if (root != root->fs_info->extent_root && - root != root->fs_info->chunk_root && - root != root->fs_info->dev_root) { - mutex_lock(&root->fs_info->alloc_mutex); - } -} - -void maybe_unlock_mutex(struct btrfs_root *root) -{ - if (root != root->fs_info->extent_root && - root != root->fs_info->chunk_root && - root != root->fs_info->dev_root) { - mutex_unlock(&root->fs_info->alloc_mutex); - } 
-} - static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) { return (cache->flags & bits) == bits; @@ -164,6 +146,7 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, u64 extent_start, extent_end, size; int ret; + mutex_lock(&info->pinned_mutex); while (start < end) { ret = find_first_extent_bit(&info->pinned_extents, start, &extent_start, &extent_end, @@ -175,7 +158,8 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, start = extent_end + 1; } else if (extent_start > start && extent_start < end) { size = extent_start - start; - ret = btrfs_add_free_space(block_group, start, size); + ret = btrfs_add_free_space_lock(block_group, start, + size); BUG_ON(ret); start = extent_end + 1; } else { @@ -185,9 +169,10 @@ static int add_new_free_space(struct btrfs_block_group_cache *block_group, if (start < end) { size = end - start; - ret = btrfs_add_free_space(block_group, start, size); + ret = btrfs_add_free_space_lock(block_group, start, size); BUG_ON(ret); } + mutex_unlock(&info->pinned_mutex); return 0; } @@ -445,13 +430,11 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) path = btrfs_alloc_path(); BUG_ON(!path); - maybe_lock_mutex(root); key.objectid = start; key.offset = len; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 0, 0); - maybe_unlock_mutex(root); btrfs_free_path(path); return ret; } @@ -676,8 +659,9 @@ static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans, BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL); num_bytes = btrfs_level_size(root, (int)owner_objectid); + mutex_lock(&root->fs_info->extent_ins_mutex); if (test_range_bit(&root->fs_info->extent_ins, bytenr, - bytenr + num_bytes - 1, EXTENT_LOCKED, 0)) { + bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) { u64 priv; ret = get_state_private(&root->fs_info->extent_ins, bytenr, &priv); @@ -686,6 +670,7 @@ static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans, (unsigned long)priv; BUG_ON(extent_op->parent != orig_parent); BUG_ON(extent_op->generation != orig_generation); + extent_op->parent = parent; extent_op->generation = ref_generation; } else { @@ -703,10 +688,11 @@ static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans, set_extent_bits(&root->fs_info->extent_ins, bytenr, bytenr + num_bytes - 1, - EXTENT_LOCKED, GFP_NOFS); + EXTENT_WRITEBACK, GFP_NOFS); set_state_private(&root->fs_info->extent_ins, bytenr, (unsigned long)extent_op); } + mutex_unlock(&root->fs_info->extent_ins_mutex); return 0; } @@ -742,12 +728,10 @@ int btrfs_update_extent_ref(struct btrfs_trans_handle *trans, if (ref_root == BTRFS_TREE_LOG_OBJECTID && owner_objectid < BTRFS_FIRST_FREE_OBJECTID) return 0; - maybe_lock_mutex(root); ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent, parent, ref_root, ref_root, ref_generation, ref_generation, owner_objectid); - maybe_unlock_mutex(root); return ret; } @@ -817,11 +801,9 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, if (ref_root == BTRFS_TREE_LOG_OBJECTID && owner_objectid < BTRFS_FIRST_FREE_OBJECTID) return 0; - maybe_lock_mutex(root); ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent, 0, ref_root, 0, ref_generation, owner_objectid); - maybe_unlock_mutex(root); return ret; } @@ -886,7 +868,6 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, key.type = BTRFS_EXTENT_ITEM_KEY; path = btrfs_alloc_path(); - mutex_lock(&root->fs_info->alloc_mutex); ret = 
btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) goto out; @@ -953,7 +934,6 @@ static int get_reference_status(struct btrfs_root *root, u64 bytenr, } ret = 0; out: - mutex_unlock(&root->fs_info->alloc_mutex); btrfs_free_path(path); return ret; } @@ -1179,13 +1159,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, nr_file_extents++; - maybe_lock_mutex(root); ret = process_func(trans, root, bytenr, orig_buf->start, buf->start, orig_root, ref_root, orig_generation, ref_generation, key.objectid); - maybe_unlock_mutex(root); if (ret) { faili = i; @@ -1194,13 +1172,11 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, } } else { bytenr = btrfs_node_blockptr(buf, i); - maybe_lock_mutex(root); ret = process_func(trans, root, bytenr, orig_buf->start, buf->start, orig_root, ref_root, orig_generation, ref_generation, level - 1); - maybe_unlock_mutex(root); if (ret) { faili = i; WARN_ON(1); @@ -1270,24 +1246,20 @@ int btrfs_update_ref(struct btrfs_trans_handle *trans, bytenr = btrfs_file_extent_disk_bytenr(buf, fi); if (bytenr == 0) continue; - maybe_lock_mutex(root); ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_buf->start, buf->start, orig_root, ref_root, orig_generation, ref_generation, key.objectid); - maybe_unlock_mutex(root); if (ret) goto fail; } else { bytenr = btrfs_node_blockptr(buf, slot); - maybe_lock_mutex(root); ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_buf->start, buf->start, orig_root, ref_root, orig_generation, ref_generation, level - 1); - maybe_unlock_mutex(root); if (ret) goto fail; } @@ -1344,7 +1316,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - mutex_lock(&root->fs_info->alloc_mutex); while(1) { cache = NULL; spin_lock(&root->fs_info->block_group_cache_lock); @@ -1378,7 +1349,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, } } btrfs_free_path(path); - mutex_unlock(&root->fs_info->alloc_mutex); return werr; } @@ -1390,9 +1360,11 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found = __find_space_info(info, flags); if (found) { + spin_lock(&found->lock); found->total_bytes += total_bytes; found->bytes_used += bytes_used; found->full = 0; + spin_unlock(&found->lock); *space_info = found; return 0; } @@ -1479,43 +1451,53 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, } BUG_ON(!space_info); + spin_lock(&space_info->lock); if (space_info->force_alloc) { force = 1; space_info->force_alloc = 0; } - if (space_info->full) + if (space_info->full) { + spin_unlock(&space_info->lock); goto out; + } thresh = div_factor(space_info->total_bytes, 6); if (!force && (space_info->bytes_used + space_info->bytes_pinned + - space_info->bytes_reserved + alloc_bytes) < thresh) + space_info->bytes_reserved + alloc_bytes) < thresh) { + spin_unlock(&space_info->lock); goto out; + } - while (!mutex_trylock(&extent_root->fs_info->chunk_mutex)) { - if (!force) - goto out; - mutex_unlock(&extent_root->fs_info->alloc_mutex); - cond_resched(); - mutex_lock(&extent_root->fs_info->alloc_mutex); + spin_unlock(&space_info->lock); + + ret = mutex_trylock(&extent_root->fs_info->chunk_mutex); + if (!ret && !force) { + goto out; + } else if (!ret) { + mutex_lock(&extent_root->fs_info->chunk_mutex); waited = 1; } - if (waited && space_info->full) - goto out_unlock; + if (waited) { + spin_lock(&space_info->lock); + if (space_info->full) { + spin_unlock(&space_info->lock); + goto out_unlock; + } + 
spin_unlock(&space_info->lock); + } ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags); - if (ret == -ENOSPC) { + if (ret) { printk("space info full %Lu\n", flags); space_info->full = 1; goto out_unlock; } - BUG_ON(ret); ret = btrfs_make_block_group(trans, extent_root, 0, flags, BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes); BUG_ON(ret); - out_unlock: mutex_unlock(&extent_root->fs_info->chunk_mutex); out: @@ -1533,7 +1515,6 @@ static int update_block_group(struct btrfs_trans_handle *trans, u64 old_val; u64 byte_in_group; - WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); while(total) { cache = btrfs_lookup_block_group(info, bytenr); if (!cache) { @@ -1542,6 +1523,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); + spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->dirty = 1; old_val = btrfs_block_group_used(&cache->item); @@ -1551,11 +1533,13 @@ static int update_block_group(struct btrfs_trans_handle *trans, cache->space_info->bytes_used += num_bytes; btrfs_set_block_group_used(&cache->item, old_val); spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); } else { old_val -= num_bytes; cache->space_info->bytes_used -= num_bytes; btrfs_set_block_group_used(&cache->item, old_val); spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); if (mark_free) { int ret; ret = btrfs_add_free_space(cache, bytenr, @@ -1588,7 +1572,7 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, struct btrfs_block_group_cache *cache; struct btrfs_fs_info *fs_info = root->fs_info; - WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); + WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex)); if (pin) { set_extent_dirty(&fs_info->pinned_extents, bytenr, bytenr + num - 1, GFP_NOFS); @@ -1602,16 +1586,20 @@ int btrfs_update_pinned_extents(struct btrfs_root *root, len = min(num, cache->key.offset - (bytenr - cache->key.objectid)); if (pin) { + spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->pinned += len; cache->space_info->bytes_pinned += len; spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); fs_info->total_pinned += len; } else { + spin_lock(&cache->space_info->lock); spin_lock(&cache->lock); cache->pinned -= len; cache->space_info->bytes_pinned -= len; spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); fs_info->total_pinned -= len; } bytenr += len; @@ -1627,23 +1615,23 @@ static int update_reserved_extents(struct btrfs_root *root, struct btrfs_block_group_cache *cache; struct btrfs_fs_info *fs_info = root->fs_info; - WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); while (num > 0) { cache = btrfs_lookup_block_group(fs_info, bytenr); BUG_ON(!cache); len = min(num, cache->key.offset - (bytenr - cache->key.objectid)); + + spin_lock(&cache->space_info->lock); + spin_lock(&cache->lock); if (reserve) { - spin_lock(&cache->lock); cache->reserved += len; cache->space_info->bytes_reserved += len; - spin_unlock(&cache->lock); } else { - spin_lock(&cache->lock); cache->reserved -= len; cache->space_info->bytes_reserved -= len; - spin_unlock(&cache->lock); } + spin_unlock(&cache->lock); + spin_unlock(&cache->space_info->lock); bytenr += len; num -= len; } @@ -1658,6 +1646,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents; int ret; + mutex_lock(&root->fs_info->pinned_mutex); 
while(1) { ret = find_first_extent_bit(pinned_extents, last, &start, &end, EXTENT_DIRTY); @@ -1666,6 +1655,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy) set_extent_dirty(copy, start, end, GFP_NOFS); last = end + 1; } + mutex_unlock(&root->fs_info->pinned_mutex); return 0; } @@ -1678,7 +1668,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, int ret; struct btrfs_block_group_cache *cache; - mutex_lock(&root->fs_info->alloc_mutex); + mutex_lock(&root->fs_info->pinned_mutex); while(1) { ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY); @@ -1690,12 +1680,12 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, if (cache->cached) btrfs_add_free_space(cache, start, end - start + 1); if (need_resched()) { - mutex_unlock(&root->fs_info->alloc_mutex); + mutex_unlock(&root->fs_info->pinned_mutex); cond_resched(); - mutex_lock(&root->fs_info->alloc_mutex); + mutex_lock(&root->fs_info->pinned_mutex); } } - mutex_unlock(&root->fs_info->alloc_mutex); + mutex_unlock(&root->fs_info->pinned_mutex); return 0; } @@ -1705,6 +1695,7 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, u64 start; u64 end; u64 priv; + u64 search = 0; struct btrfs_fs_info *info = extent_root->fs_info; struct btrfs_path *path; struct btrfs_extent_ref *ref; @@ -1714,20 +1705,37 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, int ret; int err = 0; - WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex)); btrfs_set_stack_extent_refs(&extent_item, 1); path = btrfs_alloc_path(); while(1) { - ret = find_first_extent_bit(&info->extent_ins, 0, &start, - &end, EXTENT_LOCKED); - if (ret) + mutex_lock(&info->extent_ins_mutex); + ret = find_first_extent_bit(&info->extent_ins, search, &start, + &end, EXTENT_WRITEBACK); + if (ret) { + mutex_unlock(&info->extent_ins_mutex); + if (search) { + search = 0; + continue; + } break; + } + + ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS); + if (!ret) { + search = end+1; + mutex_unlock(&info->extent_ins_mutex); + cond_resched(); + continue; + } + BUG_ON(ret < 0); ret = get_state_private(&info->extent_ins, start, &priv); BUG_ON(ret); extent_op = (struct pending_extent_op *)(unsigned long)priv; + mutex_unlock(&info->extent_ins_mutex); + if (extent_op->type == PENDING_EXTENT_INSERT) { key.objectid = start; key.offset = end + 1 - start; @@ -1736,8 +1744,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, &extent_item, sizeof(extent_item)); BUG_ON(err); + mutex_lock(&info->extent_ins_mutex); clear_extent_bits(&info->extent_ins, start, end, - EXTENT_LOCKED, GFP_NOFS); + EXTENT_WRITEBACK, GFP_NOFS); + mutex_unlock(&info->extent_ins_mutex); err = insert_extent_backref(trans, extent_root, path, start, extent_op->parent, @@ -1753,8 +1763,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, extent_op->level, 0); BUG_ON(err); + mutex_lock(&info->extent_ins_mutex); clear_extent_bits(&info->extent_ins, start, end, - EXTENT_LOCKED, GFP_NOFS); + EXTENT_WRITEBACK, GFP_NOFS); + mutex_unlock(&info->extent_ins_mutex); key.objectid = start; key.offset = extent_op->parent; @@ -1772,12 +1784,10 @@ static int finish_current_insert(struct btrfs_trans_handle *trans, BUG_ON(1); } kfree(extent_op); + unlock_extent(&info->extent_ins, start, end, GFP_NOFS); + search = 0; - if (need_resched()) { - mutex_unlock(&extent_root->fs_info->alloc_mutex); - cond_resched(); - mutex_lock(&extent_root->fs_info->alloc_mutex); - } + cond_resched(); } 
btrfs_free_path(path); return 0; @@ -1790,7 +1800,6 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans, int err = 0; struct extent_buffer *buf; - WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); if (is_data) goto pinit; @@ -1847,7 +1856,6 @@ static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_extent_item *ei; u32 refs; - WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); key.objectid = bytenr; btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY); key.offset = num_bytes; @@ -1935,8 +1943,10 @@ static int __free_extent(struct btrfs_trans_handle *trans, #endif if (pin) { + mutex_lock(&root->fs_info->pinned_mutex); ret = pin_down_bytes(trans, root, bytenr, num_bytes, owner_objectid >= BTRFS_FIRST_FREE_OBJECTID); + mutex_unlock(&root->fs_info->pinned_mutex); if (ret > 0) mark_free = 1; BUG_ON(ret < 0); @@ -1956,6 +1966,7 @@ static int __free_extent(struct btrfs_trans_handle *trans, ret = btrfs_del_items(trans, extent_root, path, path->slots[0], num_to_del); BUG_ON(ret); + btrfs_release_path(extent_root, path); ret = update_block_group(trans, root, bytenr, num_bytes, 0, mark_free); BUG_ON(ret); @@ -1994,70 +2005,91 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct { int ret; int err = 0; - int mark_free = 0; u64 start; u64 end; u64 priv; + u64 search = 0; struct extent_io_tree *pending_del; struct extent_io_tree *extent_ins; struct pending_extent_op *extent_op; + struct btrfs_fs_info *info = extent_root->fs_info; - WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex)); extent_ins = &extent_root->fs_info->extent_ins; pending_del = &extent_root->fs_info->pending_del; while(1) { - ret = find_first_extent_bit(pending_del, 0, &start, &end, - EXTENT_LOCKED); - if (ret) + mutex_lock(&info->extent_ins_mutex); + ret = find_first_extent_bit(pending_del, search, &start, &end, + EXTENT_WRITEBACK); + if (ret) { + mutex_unlock(&info->extent_ins_mutex); + if (search) { + search = 0; + continue; + } break; + } + + ret = try_lock_extent(extent_ins, start, end, GFP_NOFS); + if (!ret) { + search = end+1; + mutex_unlock(&info->extent_ins_mutex); + cond_resched(); + continue; + } + BUG_ON(ret < 0); ret = get_state_private(pending_del, start, &priv); BUG_ON(ret); extent_op = (struct pending_extent_op *)(unsigned long)priv; - clear_extent_bits(pending_del, start, end, EXTENT_LOCKED, + clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK, GFP_NOFS); - - ret = pin_down_bytes(trans, extent_root, start, - end + 1 - start, 0); - mark_free = ret > 0; if (!test_range_bit(extent_ins, start, end, - EXTENT_LOCKED, 0)) { + EXTENT_WRITEBACK, 0)) { + mutex_unlock(&info->extent_ins_mutex); free_extent: ret = __free_extent(trans, extent_root, start, end + 1 - start, extent_op->orig_parent, extent_root->root_key.objectid, extent_op->orig_generation, - extent_op->level, 0, mark_free); + extent_op->level, 1, 0); kfree(extent_op); } else { kfree(extent_op); - ret = get_state_private(extent_ins, start, &priv); + + ret = get_state_private(&info->extent_ins, start, + &priv); BUG_ON(ret); extent_op = (struct pending_extent_op *) - (unsigned long)priv; + (unsigned long)priv; + + clear_extent_bits(&info->extent_ins, start, end, + EXTENT_WRITEBACK, GFP_NOFS); - clear_extent_bits(extent_ins, start, end, - EXTENT_LOCKED, GFP_NOFS); + mutex_unlock(&info->extent_ins_mutex); if (extent_op->type == PENDING_BACKREF_UPDATE) goto free_extent; + mutex_lock(&extent_root->fs_info->pinned_mutex); + ret = pin_down_bytes(trans, extent_root, start, + end + 1 - start, 0); + 
mutex_unlock(&extent_root->fs_info->pinned_mutex); + ret = update_block_group(trans, extent_root, start, - end + 1 - start, 0, mark_free); + end + 1 - start, 0, ret > 0); + BUG_ON(ret); kfree(extent_op); } if (ret) err = ret; + unlock_extent(extent_ins, start, end, GFP_NOFS); - if (need_resched()) { - mutex_unlock(&extent_root->fs_info->alloc_mutex); - cond_resched(); - mutex_lock(&extent_root->fs_info->alloc_mutex); - } + search = 0; + cond_resched(); } return err; } @@ -2091,11 +2123,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, extent_op->orig_generation = ref_generation; extent_op->level = (int)owner_objectid; + mutex_lock(&root->fs_info->extent_ins_mutex); set_extent_bits(&root->fs_info->pending_del, bytenr, bytenr + num_bytes - 1, - EXTENT_LOCKED, GFP_NOFS); + EXTENT_WRITEBACK, GFP_NOFS); set_state_private(&root->fs_info->pending_del, bytenr, (unsigned long)extent_op); + mutex_unlock(&root->fs_info->extent_ins_mutex); return 0; } /* if metadata always pin */ @@ -2134,11 +2168,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, { int ret; - maybe_lock_mutex(root); ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent, root_objectid, ref_generation, owner_objectid, pin); - maybe_unlock_mutex(root); return ret; } @@ -2214,12 +2246,16 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, * group thats not of the proper type, while looping this * should never happen */ + WARN_ON(!block_group); + mutex_lock(&block_group->alloc_mutex); if (unlikely(!block_group_bits(block_group, data))) goto new_group; ret = cache_block_group(root, block_group); - if (ret) + if (ret) { + mutex_unlock(&block_group->alloc_mutex); break; + } if (block_group->ro) goto new_group; @@ -2250,8 +2286,10 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, * then we just re-search this block group */ if (search_start >= start && - search_start < end) + search_start < end) { + mutex_unlock(&block_group->alloc_mutex); continue; + } /* else we go to the next block group */ goto new_group; @@ -2259,10 +2297,15 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, ins->objectid = search_start; ins->offset = num_bytes; + + btrfs_remove_free_space_lock(block_group, search_start, + num_bytes); /* we are all good, lets return */ + mutex_unlock(&block_group->alloc_mutex); break; } new_group: + mutex_unlock(&block_group->alloc_mutex); /* * Here's how this works. 
* loop == 0: we were searching a block group via a hint @@ -2363,7 +2406,6 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans, u64 search_start = 0; u64 alloc_profile; struct btrfs_fs_info *info = root->fs_info; - struct btrfs_block_group_cache *cache; if (data) { alloc_profile = info->avail_data_alloc_bits & @@ -2419,13 +2461,6 @@ again: dump_space_info(sinfo, num_bytes); BUG(); } - cache = btrfs_lookup_block_group(root->fs_info, ins->objectid); - if (!cache) { - printk(KERN_ERR "Unable to find block group for %Lu\n", ins->objectid); - return -ENOSPC; - } - - ret = btrfs_remove_free_space(cache, ins->objectid, ins->offset); return ret; } @@ -2434,16 +2469,13 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len) { struct btrfs_block_group_cache *cache; - maybe_lock_mutex(root); cache = btrfs_lookup_block_group(root->fs_info, start); if (!cache) { printk(KERN_ERR "Unable to find block group for %Lu\n", start); - maybe_unlock_mutex(root); return -ENOSPC; } btrfs_add_free_space(cache, start, len); update_reserved_extents(root, start, len, 0); - maybe_unlock_mutex(root); return 0; } @@ -2455,12 +2487,10 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans, u64 data) { int ret; - maybe_lock_mutex(root); ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, empty_size, hint_byte, search_end, ins, data); update_reserved_extents(root, ins->objectid, ins->offset, 1); - maybe_unlock_mutex(root); return ret; } @@ -2510,11 +2540,13 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, extent_op->orig_generation = 0; extent_op->level = (int)owner; + mutex_lock(&root->fs_info->extent_ins_mutex); set_extent_bits(&root->fs_info->extent_ins, ins->objectid, ins->objectid + ins->offset - 1, - EXTENT_LOCKED, GFP_NOFS); + EXTENT_WRITEBACK, GFP_NOFS); set_state_private(&root->fs_info->extent_ins, ins->objectid, (unsigned long)extent_op); + mutex_unlock(&root->fs_info->extent_ins_mutex); goto update_block; } @@ -2578,11 +2610,9 @@ int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans, if (root_objectid == BTRFS_TREE_LOG_OBJECTID) return 0; - maybe_lock_mutex(root); ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid, ref_generation, owner, ins); update_reserved_extents(root, ins->objectid, ins->offset, 0); - maybe_unlock_mutex(root); return ret; } @@ -2599,15 +2629,16 @@ int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans, int ret; struct btrfs_block_group_cache *block_group; - maybe_lock_mutex(root); block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); + mutex_lock(&block_group->alloc_mutex); cache_block_group(root, block_group); - ret = btrfs_remove_free_space(block_group, ins->objectid, ins->offset); + ret = btrfs_remove_free_space_lock(block_group, ins->objectid, + ins->offset); + mutex_unlock(&block_group->alloc_mutex); BUG_ON(ret); ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid, ref_generation, owner, ins); - maybe_unlock_mutex(root); return ret; } @@ -2627,8 +2658,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, { int ret; - maybe_lock_mutex(root); - ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size, empty_size, hint_byte, search_end, ins, data); @@ -2642,7 +2671,6 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans, } else { update_reserved_extents(root, ins->objectid, ins->offset, 1); } - maybe_unlock_mutex(root); return ret; } @@ -2734,12 +2762,10 @@ int btrfs_drop_leaf_ref(struct 
btrfs_trans_handle *trans, if (disk_bytenr == 0) continue; - mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, disk_bytenr, btrfs_file_extent_disk_num_bytes(leaf, fi), leaf->start, leaf_owner, leaf_generation, key.objectid, 0); - mutex_unlock(&root->fs_info->alloc_mutex); BUG_ON(ret); atomic_inc(&root->fs_info->throttle_gen); @@ -2758,12 +2784,10 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans, struct btrfs_extent_info *info = ref->extents; for (i = 0; i < ref->nritems; i++) { - mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, info->bytenr, info->num_bytes, ref->bytenr, ref->owner, ref->generation, info->objectid, 0); - mutex_unlock(&root->fs_info->alloc_mutex); atomic_inc(&root->fs_info->throttle_gen); wake_up(&root->fs_info->transaction_throttle); @@ -2875,13 +2899,11 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans, root_gen = btrfs_header_generation(parent); path->slots[*level]++; - mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, bytenr, blocksize, parent->start, root_owner, root_gen, *level - 1, 1); BUG_ON(ret); - mutex_unlock(&root->fs_info->alloc_mutex); atomic_inc(&root->fs_info->throttle_gen); wake_up(&root->fs_info->transaction_throttle); @@ -2957,11 +2979,9 @@ out: root_owner = btrfs_header_owner(parent); root_gen = btrfs_header_generation(parent); - mutex_lock(&root->fs_info->alloc_mutex); ret = __btrfs_free_extent(trans, root, bytenr, blocksize, parent->start, root_owner, root_gen, *level, 1); - mutex_unlock(&root->fs_info->alloc_mutex); free_extent_buffer(path->nodes[*level]); path->nodes[*level] = NULL; *level += 1; @@ -3440,8 +3460,6 @@ static int noinline __next_ref_path(struct btrfs_trans_handle *trans, if (!path) return -ENOMEM; - mutex_lock(&extent_root->fs_info->alloc_mutex); - if (first_time) { ref_path->lowest_level = -1; ref_path->current_level = -1; @@ -3498,9 +3516,7 @@ next: level--; btrfs_release_path(extent_root, path); if (need_resched()) { - mutex_unlock(&extent_root->fs_info->alloc_mutex); cond_resched(); - mutex_lock(&extent_root->fs_info->alloc_mutex); } } /* reached lowest level */ @@ -3613,15 +3629,12 @@ found: btrfs_release_path(extent_root, path); if (need_resched()) { - mutex_unlock(&extent_root->fs_info->alloc_mutex); cond_resched(); - mutex_lock(&extent_root->fs_info->alloc_mutex); } } /* reached max tree level, but no tree root found. 
*/ BUG(); out: - mutex_unlock(&extent_root->fs_info->alloc_mutex); btrfs_free_path(path); return ret; } @@ -4556,14 +4569,6 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans, struct btrfs_ref_path *ref_path) { int ret; - int needs_lock = 0; - - if (root == root->fs_info->extent_root || - root == root->fs_info->chunk_root || - root == root->fs_info->dev_root) { - needs_lock = 1; - mutex_lock(&root->fs_info->alloc_mutex); - } ret = relocate_one_path(trans, root, path, first_key, ref_path, NULL, NULL); @@ -4571,8 +4576,6 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans, if (root == root->fs_info->extent_root) btrfs_extent_post_op(trans, root); - if (needs_lock) - mutex_unlock(&root->fs_info->alloc_mutex); return 0; } @@ -4584,14 +4587,12 @@ static int noinline del_extent_zero(struct btrfs_trans_handle *trans, { int ret; - mutex_lock(&extent_root->fs_info->alloc_mutex); ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); if (ret) goto out; ret = btrfs_del_item(trans, extent_root, path); out: btrfs_release_path(extent_root, path); - mutex_unlock(&extent_root->fs_info->alloc_mutex); return ret; } @@ -4627,7 +4628,6 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root, struct btrfs_key first_key; u64 prev_block = 0; - mutex_unlock(&extent_root->fs_info->alloc_mutex); trans = btrfs_start_transaction(extent_root, 1); BUG_ON(!trans); @@ -4754,7 +4754,6 @@ out: btrfs_end_transaction(trans, extent_root); kfree(new_extents); kfree(ref_path); - mutex_lock(&extent_root->fs_info->alloc_mutex); return ret; } @@ -4807,10 +4806,8 @@ int __alloc_chunk_for_shrink(struct btrfs_root *root, spin_lock(&shrink_block_group->lock); if (btrfs_block_group_used(&shrink_block_group->item) > 0) { spin_unlock(&shrink_block_group->lock); - mutex_unlock(&root->fs_info->alloc_mutex); trans = btrfs_start_transaction(root, 1); - mutex_lock(&root->fs_info->alloc_mutex); spin_lock(&shrink_block_group->lock); new_alloc_flags = update_block_group_flags(root, @@ -4826,9 +4823,7 @@ int __alloc_chunk_for_shrink(struct btrfs_root *root, do_chunk_alloc(trans, root->fs_info->extent_root, calc + 2 * 1024 * 1024, new_alloc_flags, force); - mutex_unlock(&root->fs_info->alloc_mutex); btrfs_end_transaction(trans, root); - mutex_lock(&root->fs_info->alloc_mutex); } else spin_unlock(&shrink_block_group->lock); return 0; @@ -4952,14 +4947,10 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start) reloc_inode = create_reloc_inode(info, block_group); BUG_ON(IS_ERR(reloc_inode)); - mutex_lock(&root->fs_info->alloc_mutex); - __alloc_chunk_for_shrink(root, block_group, 1); block_group->ro = 1; block_group->space_info->total_bytes -= block_group->key.offset; - mutex_unlock(&root->fs_info->alloc_mutex); - btrfs_start_delalloc_inodes(info->tree_root); btrfs_wait_ordered_extents(info->tree_root, 0); again: @@ -4978,8 +4969,6 @@ again: btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1); mutex_unlock(&root->fs_info->cleaner_mutex); - mutex_lock(&root->fs_info->alloc_mutex); - while(1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) @@ -5007,9 +4996,7 @@ next: if (progress && need_resched()) { btrfs_release_path(root, path); - mutex_unlock(&root->fs_info->alloc_mutex); cond_resched(); - mutex_lock(&root->fs_info->alloc_mutex); progress = 0; continue; } @@ -5036,7 +5023,6 @@ next: } btrfs_release_path(root, path); - mutex_unlock(&root->fs_info->alloc_mutex); if (pass == 0) { btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1); 
@@ -5058,8 +5044,6 @@ next: trans = btrfs_start_transaction(info->tree_root, 1); btrfs_commit_transaction(trans, info->tree_root); - mutex_lock(&root->fs_info->alloc_mutex); - spin_lock(&block_group->lock); WARN_ON(block_group->pinned > 0); WARN_ON(block_group->reserved > 0); @@ -5067,7 +5051,6 @@ next: spin_unlock(&block_group->lock); ret = 0; out: - mutex_unlock(&root->fs_info->alloc_mutex); btrfs_free_path(path); return ret; } @@ -5114,7 +5097,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) struct btrfs_block_group_cache *block_group; struct rb_node *n; - mutex_lock(&info->alloc_mutex); spin_lock(&info->block_group_cache_lock); while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { block_group = rb_entry(n, struct btrfs_block_group_cache, @@ -5132,7 +5114,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) kfree(block_group); } spin_unlock(&info->block_group_cache_lock); - mutex_unlock(&info->alloc_mutex); return 0; } @@ -5155,7 +5136,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) if (!path) return -ENOMEM; - mutex_lock(&root->fs_info->alloc_mutex); while(1) { ret = find_first_block_group(root, path, &key); if (ret > 0) { @@ -5174,6 +5154,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) } spin_lock_init(&cache->lock); + mutex_init(&cache->alloc_mutex); INIT_LIST_HEAD(&cache->list); read_extent_buffer(leaf, &cache->item, btrfs_item_ptr_offset(leaf, path->slots[0]), @@ -5201,7 +5182,6 @@ int btrfs_read_block_groups(struct btrfs_root *root) ret = 0; error: btrfs_free_path(path); - mutex_unlock(&root->fs_info->alloc_mutex); return ret; } @@ -5214,7 +5194,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root; struct btrfs_block_group_cache *cache; - WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); extent_root = root->fs_info->extent_root; root->fs_info->last_trans_new_blockgroup = trans->transid; @@ -5226,6 +5205,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, cache->key.objectid = chunk_offset; cache->key.offset = size; spin_lock_init(&cache->lock); + mutex_init(&cache->alloc_mutex); INIT_LIST_HEAD(&cache->list); btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY); @@ -5264,7 +5244,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_key key; int ret; - BUG_ON(!mutex_is_locked(&root->fs_info->alloc_mutex)); root = root->fs_info->extent_root; block_group = btrfs_lookup_block_group(root->fs_info, group_start); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 314041fdfa4..7503bd46819 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -938,6 +938,20 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) } EXPORT_SYMBOL(lock_extent); +int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask) +{ + int err; + u64 failed_start; + + err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1, + &failed_start, mask); + if (err == -EEXIST) + return 0; + return 1; +} +EXPORT_SYMBOL(try_lock_extent); + int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 86f859b87a6..283110ec4ee 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -128,6 +128,8 @@ int try_release_extent_state(struct extent_map_tree *map, gfp_t mask); int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); +int 
try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, + gfp_t mask); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent); int __init extent_io_init(void); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 96241f01fa0..f4926c0f3c8 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group, return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) { struct btrfs_free_space *right_info; struct btrfs_free_space *left_info; @@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, * are adding, if there is remove that struct and add a new one to * cover the entire range */ - spin_lock(&block_group->lock); - right_info = tree_search_offset(&block_group->free_space_offset, offset+bytes, 0, 1); left_info = tree_search_offset(&block_group->free_space_offset, @@ -261,7 +259,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, if (ret) kfree(info); out: - spin_unlock(&block_group->lock); if (ret) { printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret); if (ret == -EEXIST) @@ -274,13 +271,13 @@ out: return ret; } -int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +static int +__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) { struct btrfs_free_space *info; int ret = 0; - spin_lock(&block_group->lock); info = tree_search_offset(&block_group->free_space_offset, offset, 0, 1); @@ -334,17 +331,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, /* step two, insert a new info struct to cover anything * before the hole */ - spin_unlock(&block_group->lock); - ret = btrfs_add_free_space(block_group, old_start, - offset - old_start); + ret = __btrfs_add_free_space(block_group, old_start, + offset - old_start); BUG_ON(ret); - goto out_nolock; } else { WARN_ON(1); } out: - spin_unlock(&block_group->lock); -out_nolock: + return ret; +} + +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + int ret; + struct btrfs_free_space *sp; + + mutex_lock(&block_group->alloc_mutex); + ret = __btrfs_add_free_space(block_group, offset, bytes); + sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1); + BUG_ON(!sp); + mutex_unlock(&block_group->alloc_mutex); + + return ret; +} + +int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + int ret; + struct btrfs_free_space *sp; + + ret = __btrfs_add_free_space(block_group, offset, bytes); + sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1); + BUG_ON(!sp); + + return ret; +} + +int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + int ret = 0; + + mutex_lock(&block_group->alloc_mutex); + ret = __btrfs_remove_free_space(block_group, offset, bytes); + mutex_unlock(&block_group->alloc_mutex); + + return ret; +} + +int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + int ret; + + ret = __btrfs_remove_free_space(block_group, offset, bytes); + return ret; } @@ -386,18 +429,18 @@ void btrfs_remove_free_space_cache(struct 
btrfs_block_group_cache *block_group) struct btrfs_free_space *info; struct rb_node *node; - spin_lock(&block_group->lock); + mutex_lock(&block_group->alloc_mutex); while ((node = rb_last(&block_group->free_space_bytes)) != NULL) { info = rb_entry(node, struct btrfs_free_space, bytes_index); unlink_free_space(block_group, info); kfree(info); if (need_resched()) { - spin_unlock(&block_group->lock); + mutex_unlock(&block_group->alloc_mutex); cond_resched(); - spin_lock(&block_group->lock); + mutex_lock(&block_group->alloc_mutex); } } - spin_unlock(&block_group->lock); + mutex_unlock(&block_group->alloc_mutex); } struct btrfs_free_space *btrfs_find_free_space_offset(struct @@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct { struct btrfs_free_space *ret; - spin_lock(&block_group->lock); + mutex_lock(&block_group->alloc_mutex); ret = tree_search_offset(&block_group->free_space_offset, offset, bytes, 0); - spin_unlock(&block_group->lock); + mutex_unlock(&block_group->alloc_mutex); return ret; } @@ -422,10 +465,10 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct { struct btrfs_free_space *ret; - spin_lock(&block_group->lock); + mutex_lock(&block_group->alloc_mutex); ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes); - spin_unlock(&block_group->lock); + mutex_unlock(&block_group->alloc_mutex); return ret; } @@ -434,16 +477,13 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) { - struct btrfs_free_space *ret; + struct btrfs_free_space *ret = NULL; - spin_lock(&block_group->lock); ret = tree_search_offset(&block_group->free_space_offset, offset, bytes, 0); if (!ret) ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes); - spin_unlock(&block_group->lock); - return ret; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 1df67129cc3..48b455fdaac 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -670,7 +670,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, atomic_dec(&root->fs_info->throttles); wake_up(&root->fs_info->transaction_throttle); - mutex_lock(&root->fs_info->alloc_mutex); num_bytes -= btrfs_root_used(&dirty->root->root_item); bytes_used = btrfs_root_used(&root->root_item); if (num_bytes) { @@ -678,7 +677,6 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, btrfs_set_root_used(&root->root_item, bytes_used - num_bytes); } - mutex_unlock(&root->fs_info->alloc_mutex); ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key); if (ret) { diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 6f57d0889b1..a6a3956cedf 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -125,9 +125,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, if (is_extent) btrfs_extent_post_op(trans, root); out: - if (is_extent) - mutex_unlock(&root->fs_info->alloc_mutex); - if (path) btrfs_free_path(path); if (ret == -EAGAIN) { diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index e6d579053a4..835daed5561 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -271,10 +271,10 @@ static int process_one_buffer(struct btrfs_root *log, struct walk_control *wc, u64 gen) { if (wc->pin) { - mutex_lock(&log->fs_info->alloc_mutex); + mutex_lock(&log->fs_info->pinned_mutex); btrfs_update_pinned_extents(log->fs_info->extent_root, eb->start, eb->len, 1); - mutex_unlock(&log->fs_info->alloc_mutex); + mutex_unlock(&log->fs_info->pinned_mutex); } if 
(btrfs_buffer_uptodate(eb, gen)) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7db4cfd03a9..cbb9bb31431 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -58,14 +58,12 @@ void btrfs_unlock_volumes(void) static void lock_chunks(struct btrfs_root *root) { - mutex_lock(&root->fs_info->alloc_mutex); mutex_lock(&root->fs_info->chunk_mutex); } static void unlock_chunks(struct btrfs_root *root) { mutex_unlock(&root->fs_info->chunk_mutex); - mutex_unlock(&root->fs_info->alloc_mutex); } int btrfs_cleanup_fs_uuids(void) -- cgit v1.2.3-70-g09d2 From 6643558db29006825dbb10012b3f8890aca4bcd5 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Thu, 30 Oct 2008 14:19:50 -0400 Subject: Btrfs: Fix bookend extent race v2 When dropping the middle part of an extent, btrfs_drop_extents first truncates the extent and then inserts a bookend extent. Since the truncation and insertion can't be done atomically, there is a small window during which the bookend extent isn't in the tree. This causes problems for functions that search the tree for file extent items. The fix is to lock the range of the bookend extent before the truncation. Signed-off-by: Yan Zheng --- fs/btrfs/extent-tree.c | 28 +++++++++++++--------------- fs/btrfs/extent_io.c | 6 +++++- fs/btrfs/file.c | 31 +++++++++++++++++++++++++------ fs/btrfs/inode.c | 10 ---------- 4 files changed, 43 insertions(+), 32 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index fada9c22a02..535cee47fcf 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3379,11 +3379,13 @@ static int noinline relocate_data_extent(struct inode *reloc_inode, struct btrfs_root *root = BTRFS_I(reloc_inode)->root; struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; struct extent_map *em; + u64 start = extent_key->objectid - offset; + u64 end = start + extent_key->offset - 1; em = alloc_extent_map(GFP_NOFS); BUG_ON(!em || IS_ERR(em)); - em->start = extent_key->objectid - offset; + em->start = start; em->len = extent_key->offset; em->block_len = extent_key->offset; em->block_start = extent_key->objectid; @@ -3391,7 +3393,7 @@ static int noinline relocate_data_extent(struct inode *reloc_inode, set_bit(EXTENT_FLAG_PINNED, &em->flags); /* setup extent map to cheat btrfs_readpage */ - mutex_lock(&BTRFS_I(reloc_inode)->extent_mutex); + lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); while (1) { int ret; spin_lock(&em_tree->lock); @@ -3401,13 +3403,11 @@ static int noinline relocate_data_extent(struct inode *reloc_inode, free_extent_map(em); break; } - btrfs_drop_extent_cache(reloc_inode, em->start, - em->start + em->len - 1, 0); + btrfs_drop_extent_cache(reloc_inode, start, end, 0); } - mutex_unlock(&BTRFS_I(reloc_inode)->extent_mutex); + unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); - return relocate_inode_pages(reloc_inode, extent_key->objectid - offset, - extent_key->offset); + return relocate_inode_pages(reloc_inode, start, extent_key->offset); } struct btrfs_ref_path { @@ -3831,7 +3831,6 @@ next: * the file extent item was modified by someone * before the extent got locked. 
*/ - mutex_unlock(&BTRFS_I(inode)->extent_mutex); unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); extent_locked = 0; @@ -3896,8 +3895,12 @@ next: lock_start = key.offset; lock_end = lock_start + num_bytes - 1; } else { - BUG_ON(lock_start != key.offset); - BUG_ON(lock_end - lock_start + 1 < num_bytes); + if (lock_start > key.offset || + lock_end + 1 < key.offset + num_bytes) { + unlock_extent(&BTRFS_I(inode)->io_tree, + lock_start, lock_end, GFP_NOFS); + extent_locked = 0; + } } if (!inode) { @@ -3951,7 +3954,6 @@ next: if (ordered) btrfs_put_ordered_extent(ordered); - mutex_lock(&BTRFS_I(inode)->extent_mutex); extent_locked = 1; continue; } @@ -4073,7 +4075,6 @@ next: } if (extent_locked) { - mutex_unlock(&BTRFS_I(inode)->extent_mutex); unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); extent_locked = 0; @@ -4091,7 +4092,6 @@ out: if (inode) { mutex_unlock(&inode->i_mutex); if (extent_locked) { - mutex_unlock(&BTRFS_I(inode)->extent_mutex); unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, lock_end, GFP_NOFS); } @@ -4180,10 +4180,8 @@ static int noinline invalidate_extent_cache(struct btrfs_root *root, lock_extent(&BTRFS_I(inode)->io_tree, key.offset, key.offset + num_bytes - 1, GFP_NOFS); - mutex_lock(&BTRFS_I(inode)->extent_mutex); btrfs_drop_extent_cache(inode, key.offset, key.offset + num_bytes - 1, 1); - mutex_unlock(&BTRFS_I(inode)->extent_mutex); unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, key.offset + num_bytes - 1, GFP_NOFS); cond_resched(); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7503bd46819..65a0583027e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -946,8 +946,12 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1, &failed_start, mask); - if (err == -EEXIST) + if (err == -EEXIST) { + if (failed_start > start) + clear_extent_bit(tree, start, failed_start - 1, + EXTENT_LOCKED, 1, 0, mask); return 0; + } return 1; } EXPORT_SYMBOL(try_lock_extent); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b8a7637e14a..1a0510ad030 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -364,6 +364,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, u64 start, u64 end, u64 inline_limit, u64 *hint_byte) { u64 extent_end = 0; + u64 locked_end = end; u64 search_start = start; u64 leaf_start; u64 ram_bytes = 0; @@ -479,12 +480,6 @@ next_slot: goto next_slot; } - if (found_inline) { - u64 mask = root->sectorsize - 1; - search_start = (extent_end + mask) & ~mask; - } else - search_start = extent_end; - if (end <= extent_end && start >= key.offset && found_inline) *hint_byte = EXTENT_MAP_INLINE; @@ -501,6 +496,26 @@ next_slot: if (found_inline && start <= key.offset) keep = 1; } + + if (bookend && found_extent && locked_end < extent_end) { + ret = try_lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, GFP_NOFS); + if (!ret) { + btrfs_release_path(root, path); + lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, GFP_NOFS); + locked_end = extent_end; + continue; + } + locked_end = extent_end; + } + + if (found_inline) { + u64 mask = root->sectorsize - 1; + search_start = (extent_end + mask) & ~mask; + } else + search_start = extent_end; + /* truncate existing extent */ if (start > key.offset) { u64 new_num; @@ -638,6 +653,10 @@ next_slot: } out: btrfs_free_path(path); + if (locked_end > end) { + unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1, + GFP_NOFS); + } 
btrfs_check_file(root, inode); return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8254d6fa691..e8511d14b11 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -246,7 +246,6 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, return 1; } - mutex_lock(&BTRFS_I(inode)->extent_mutex); ret = btrfs_drop_extents(trans, root, inode, start, aligned_end, aligned_end, &hint_byte); BUG_ON(ret); @@ -258,7 +257,6 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, compressed_pages); BUG_ON(ret); btrfs_drop_extent_cache(inode, start, aligned_end, 0); - mutex_unlock(&BTRFS_I(inode)->extent_mutex); return 0; } @@ -437,9 +435,7 @@ again: BUG_ON(disk_num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy)); - mutex_lock(&BTRFS_I(inode)->extent_mutex); btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); - mutex_unlock(&BTRFS_I(inode)->extent_mutex); while(disk_num_bytes > 0) { unsigned long min_bytes; @@ -477,8 +473,6 @@ again: em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; - - mutex_lock(&BTRFS_I(inode)->extent_mutex); set_bit(EXTENT_FLAG_PINNED, &em->flags); if (will_compress) @@ -495,7 +489,6 @@ again: btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); } - mutex_unlock(&BTRFS_I(inode)->extent_mutex); cur_alloc_size = ins.offset; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, @@ -1016,8 +1009,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) INIT_LIST_HEAD(&list); - mutex_lock(&BTRFS_I(inode)->extent_mutex); - ret = btrfs_drop_extents(trans, root, inode, ordered_extent->file_offset, ordered_extent->file_offset + @@ -1059,7 +1050,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) btrfs_drop_extent_cache(inode, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, 0); - mutex_unlock(&BTRFS_I(inode)->extent_mutex); ins.objectid = ordered_extent->start; ins.offset = ordered_extent->disk_len; -- cgit v1.2.3-70-g09d2 From d899e05215178fed903ad0e7fc1cb4d8e0cc0a88 Mon Sep 17 00:00:00 2001 From: Yan Zheng Date: Thu, 30 Oct 2008 14:25:28 -0400 Subject: Btrfs: Add fallocate support v2 This patch adds fallocate support to Btrfs. fallocate is a little different in Btrfs because we need to tell the COW system that a given preallocated extent doesn't need to be cow'd as long as there are no snapshots of it. This leverages the -o nodatacow checks. 
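To make that decision concrete, here is a minimal, self-contained C model of the check the nodatacow path performs: a preallocated extent may be overwritten in place only while no snapshot still references it, otherwise the write falls back to COW. Every name below (file_extent, refs, can_write_in_place) is a hypothetical stand-in for illustration, not the kernel's actual data structures or reference counting.

/*
 * Minimal userspace model of the nodatacow decision for preallocated
 * extents.  All names here are assumed stand-ins, not kernel structures.
 */
#include <stdio.h>

enum extent_type { EXTENT_INLINE, EXTENT_REG, EXTENT_PREALLOC };

struct file_extent {
	enum extent_type type;
	unsigned long long disk_bytenr;	/* 0 means a hole */
	unsigned long long refs;	/* roots/snapshots referencing it */
};

/*
 * A write may go in place (skipping COW) only when the target extent is
 * preallocated, or the file is nodatacow, and no snapshot shares it.
 */
static int can_write_in_place(const struct file_extent *fe, int nodatacow)
{
	if (fe->disk_bytenr == 0)
		return 0;		/* hole: blocks must be allocated first */
	if (fe->type == EXTENT_PREALLOC ||
	    (nodatacow && fe->type == EXTENT_REG))
		return fe->refs == 1;
	return 0;			/* inline or shared extents are COW'd */
}

int main(void)
{
	struct file_extent prealloc = { EXTENT_PREALLOC, 4096, 1 };
	struct file_extent snapshotted = { EXTENT_PREALLOC, 8192, 2 };

	printf("prealloc, unshared: %d\n", can_write_in_place(&prealloc, 0));
	printf("prealloc, snapshotted: %d\n", can_write_in_place(&snapshotted, 0));
	return 0;
}

After such an in-place write completes, the btrfs_mark_extent_written helper added in the file.c hunks below converts the extent item from BTRFS_FILE_EXTENT_PREALLOC to BTRFS_FILE_EXTENT_REG, splitting the item in two or three when only part of the range was written.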
Signed-off-by: Yan Zheng --- fs/btrfs/ctree.h | 9 +- fs/btrfs/extent-tree.c | 65 +++++----- fs/btrfs/extent_io.c | 12 +- fs/btrfs/extent_map.h | 1 + fs/btrfs/file.c | 245 +++++++++++++++++++++++++++++++++++- fs/btrfs/inode.c | 323 +++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ioctl.c | 3 +- fs/btrfs/ordered-data.c | 5 +- fs/btrfs/ordered-data.h | 4 +- fs/btrfs/tree-log.c | 13 +- 10 files changed, 563 insertions(+), 117 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8bf6a085a73..d5ba3d1aaf9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -462,8 +462,9 @@ struct btrfs_root_item { u8 level; } __attribute__ ((__packed__)); -#define BTRFS_FILE_EXTENT_REG 0 -#define BTRFS_FILE_EXTENT_INLINE 1 +#define BTRFS_FILE_EXTENT_INLINE 0 +#define BTRFS_FILE_EXTENT_REG 1 +#define BTRFS_FILE_EXTENT_PREALLOC 2 struct btrfs_file_extent_item { /* @@ -868,6 +869,7 @@ struct btrfs_root { #define BTRFS_INODE_NODATACOW (1 << 1) #define BTRFS_INODE_READONLY (1 << 2) #define BTRFS_INODE_NOCOMPRESS (1 << 3) +#define BTRFS_INODE_PREALLOC (1 << 4) #define btrfs_clear_flag(inode, flag) (BTRFS_I(inode)->flags &= \ ~BTRFS_INODE_##flag) #define btrfs_set_flag(inode, flag) (BTRFS_I(inode)->flags |= \ @@ -1924,6 +1926,9 @@ extern struct file_operations btrfs_file_operations; int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, u64 inline_limit, u64 *hint_block); +int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); /* tree-defrag.c */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1eb69a91b72..8af39521eb7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2147,6 +2147,9 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, total_needed += empty_size; block_group = btrfs_lookup_block_group(root->fs_info, search_start); + if (!block_group) + block_group = btrfs_lookup_first_block_group(root->fs_info, + search_start); space_info = __find_space_info(root->fs_info, data); down_read(&space_info->groups_sem); @@ -3426,9 +3429,7 @@ walk_down: next: level--; btrfs_release_path(extent_root, path); - if (need_resched()) { - cond_resched(); - } + cond_resched(); } /* reached lowest level */ ret = 1; @@ -3539,9 +3540,7 @@ found: } btrfs_release_path(extent_root, path); - if (need_resched()) { - cond_resched(); - } + cond_resched(); } /* reached max tree level, but no tree root found. 
*/ BUG(); @@ -3654,8 +3653,9 @@ static int noinline get_new_locations(struct inode *reloc_inode, exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, fi); - WARN_ON(exts[nr].offset > 0); - WARN_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); + BUG_ON(exts[nr].offset > 0); + BUG_ON(exts[nr].compression || exts[nr].encryption); + BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); cur_pos += exts[nr].num_bytes; nr++; @@ -3709,6 +3709,7 @@ static int noinline replace_one_extent(struct btrfs_trans_handle *trans, u32 nritems; int nr_scaned = 0; int extent_locked = 0; + int extent_type; int ret; memcpy(&key, leaf_key, sizeof(key)); @@ -3781,8 +3782,9 @@ next: } fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); - if ((btrfs_file_extent_type(leaf, fi) != - BTRFS_FILE_EXTENT_REG) || + extent_type = btrfs_file_extent_type(leaf, fi); + if ((extent_type != BTRFS_FILE_EXTENT_REG && + extent_type != BTRFS_FILE_EXTENT_PREALLOC) || (btrfs_file_extent_disk_bytenr(leaf, fi) != extent_key->objectid)) { path->slots[0]++; @@ -3865,16 +3867,10 @@ next: if (nr_extents == 1) { /* update extent pointer in place */ - btrfs_set_file_extent_generation(leaf, fi, - trans->transid); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extents[0].disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extents[0].disk_num_bytes); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extents[0].ram_bytes); - ext_offset += new_extents[0].offset; - btrfs_set_file_extent_offset(leaf, fi, ext_offset); btrfs_mark_buffer_dirty(leaf); btrfs_drop_extent_cache(inode, key.offset, @@ -3901,6 +3897,8 @@ next: btrfs_release_path(root, path); key.offset += num_bytes; } else { + BUG_ON(1); +#if 0 u64 alloc_hint; u64 extent_len; int i; @@ -3977,6 +3975,7 @@ next: break; } BUG_ON(i >= nr_extents); +#endif } if (extent_locked) { @@ -4156,15 +4155,10 @@ static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans, ref->extents[ext_index].bytenr = new_extent->disk_bytenr; ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; - btrfs_set_file_extent_generation(leaf, fi, trans->transid); - btrfs_set_file_extent_ram_bytes(leaf, fi, - new_extent->ram_bytes); btrfs_set_file_extent_disk_bytenr(leaf, fi, new_extent->disk_bytenr); btrfs_set_file_extent_disk_num_bytes(leaf, fi, new_extent->disk_num_bytes); - new_extent->offset += btrfs_file_extent_offset(leaf, fi); - btrfs_set_file_extent_offset(leaf, fi, new_extent->offset); btrfs_mark_buffer_dirty(leaf); ret = btrfs_inc_extent_ref(trans, root, @@ -4625,12 +4619,15 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root, */ if (!new_extents) { u64 group_start = group->key.objectid; + new_extents = kmalloc(sizeof(*new_extents), + GFP_NOFS); + nr_extents = 1; ret = get_new_locations(reloc_inode, extent_key, - group_start, 0, + group_start, 1, &new_extents, &nr_extents); - if (ret < 0) + if (ret) goto out; } btrfs_record_root_in_trans(found_root); @@ -4762,7 +4759,8 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans, btrfs_set_inode_generation(leaf, item, 1); btrfs_set_inode_size(leaf, item, size); btrfs_set_inode_mode(leaf, item, S_IFREG | 0600); - btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM); + btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM | + BTRFS_INODE_NOCOMPRESS); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(root, path); out: @@ -4835,6 +4833,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 
group_start) struct inode *reloc_inode; struct btrfs_block_group_cache *block_group; struct btrfs_key key; + u64 skipped; u64 cur_byte; u64 total_found; u32 nritems; @@ -4864,6 +4863,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start) btrfs_start_delalloc_inodes(info->tree_root); btrfs_wait_ordered_extents(info->tree_root, 0); again: + skipped = 0; total_found = 0; progress = 0; key.objectid = block_group->key.objectid; @@ -4926,6 +4926,8 @@ next: ret = relocate_one_extent(root, path, &key, block_group, reloc_inode, pass); BUG_ON(ret < 0); + if (ret > 0) + skipped++; key.objectid = cur_byte; key.type = 0; @@ -4944,6 +4946,11 @@ next: printk("btrfs found %llu extents in pass %d\n", (unsigned long long)total_found, pass); pass++; + if (total_found == skipped && pass > 2) { + iput(reloc_inode); + reloc_inode = create_reloc_inode(info, block_group); + pass = 0; + } goto again; } @@ -5011,17 +5018,17 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { block_group = rb_entry(n, struct btrfs_block_group_cache, cache_node); - - spin_unlock(&info->block_group_cache_lock); - btrfs_remove_free_space_cache(block_group); - spin_lock(&info->block_group_cache_lock); - rb_erase(&block_group->cache_node, &info->block_group_cache_tree); + spin_unlock(&info->block_group_cache_lock); + + btrfs_remove_free_space_cache(block_group); down_write(&block_group->space_info->groups_sem); list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); kfree(block_group); + + spin_lock(&info->block_group_cache_lock); } spin_unlock(&info->block_group_cache_lock); return 0; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 65a0583027e..eb3c12e7bea 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2015,6 +2015,8 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur); } bdev = em->bdev; block_start = em->block_start; + if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) + block_start = EXTENT_MAP_HOLE; free_extent_map(em); em = NULL; @@ -2769,14 +2771,18 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock, struct inode *inode = mapping->host; u64 start = iblock << inode->i_blkbits; sector_t sector = 0; + size_t blksize = (1 << inode->i_blkbits); struct extent_map *em; - em = get_extent(inode, NULL, 0, start, (1 << inode->i_blkbits), 0); + lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, + GFP_NOFS); + em = get_extent(inode, NULL, 0, start, blksize, 0); + unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, + GFP_NOFS); if (!em || IS_ERR(em)) return 0; - if (em->block_start == EXTENT_MAP_INLINE || - em->block_start == EXTENT_MAP_HOLE) + if (em->block_start > EXTENT_MAP_LAST_BYTE) goto out; sector = (em->block_start + start - em->start) >> inode->i_blkbits; diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index e693e1b4ac4..accfedaeb51 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -12,6 +12,7 @@ #define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */ #define EXTENT_FLAG_COMPRESSED 1 #define EXTENT_FLAG_VACANCY 2 /* no file extent item found */ +#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */ struct extent_map { struct rb_node rb_node; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 1a0510ad030..238a8e215eb 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -381,7 +381,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, int keep; int slot; int bookend; - int 
found_type; + int found_type = 0; int found_extent; int found_inline; int recow; @@ -442,7 +442,8 @@ next_slot: extent); other_encoding = btrfs_file_extent_other_encoding(leaf, extent); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = btrfs_file_extent_disk_bytenr(leaf, extent); @@ -609,8 +610,7 @@ next_slot: */ btrfs_set_file_extent_ram_bytes(leaf, extent, ram_bytes); - btrfs_set_file_extent_type(leaf, extent, - BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_type(leaf, extent, found_type); btrfs_mark_buffer_dirty(path->nodes[0]); @@ -661,6 +661,243 @@ out: return ret; } +static int extent_mergeable(struct extent_buffer *leaf, int slot, + u64 objectid, u64 bytenr, u64 *start, u64 *end) +{ + struct btrfs_file_extent_item *fi; + struct btrfs_key key; + u64 extent_end; + + if (slot < 0 || slot >= btrfs_header_nritems(leaf)) + return 0; + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY) + return 0; + + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG || + btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr || + btrfs_file_extent_compression(leaf, fi) || + btrfs_file_extent_encryption(leaf, fi) || + btrfs_file_extent_other_encoding(leaf, fi)) + return 0; + + extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); + if ((*start && *start != key.offset) || (*end && *end != extent_end)) + return 0; + + *start = key.offset; + *end = extent_end; + return 1; +} + +/* + * Mark extent in the range start - end as written. + * + * This changes extent type from 'pre-allocated' to 'regular'. If only + * part of extent is marked as written, the extent will be split into + * two or three. 
+ */ +int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *inode, u64 start, u64 end) +{ + struct extent_buffer *leaf; + struct btrfs_path *path; + struct btrfs_file_extent_item *fi; + struct btrfs_key key; + u64 bytenr; + u64 num_bytes; + u64 extent_end; + u64 extent_offset; + u64 other_start; + u64 other_end; + u64 split = start; + u64 locked_end = end; + int extent_type; + int split_end = 1; + int ret; + + btrfs_drop_extent_cache(inode, start, end - 1, 0); + + path = btrfs_alloc_path(); + BUG_ON(!path); +again: + key.objectid = inode->i_ino; + key.type = BTRFS_EXTENT_DATA_KEY; + if (split == start) + key.offset = split; + else + key.offset = split - 1; + + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0 && path->slots[0] > 0) + path->slots[0]--; + + leaf = path->nodes[0]; + btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); + BUG_ON(key.objectid != inode->i_ino || + key.type != BTRFS_EXTENT_DATA_KEY); + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + extent_type = btrfs_file_extent_type(leaf, fi); + BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC); + extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); + BUG_ON(key.offset > start || extent_end < end); + + bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); + num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); + extent_offset = btrfs_file_extent_offset(leaf, fi); + + if (key.offset == start) + split = end; + + if (key.offset == start && extent_end == end) { + int del_nr = 0; + int del_slot = 0; + u64 leaf_owner = btrfs_header_owner(leaf); + u64 leaf_gen = btrfs_header_generation(leaf); + other_start = end; + other_end = 0; + if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, + bytenr, &other_start, &other_end)) { + extent_end = other_end; + del_slot = path->slots[0] + 1; + del_nr++; + ret = btrfs_free_extent(trans, root, bytenr, num_bytes, + leaf->start, leaf_owner, + leaf_gen, inode->i_ino, 0); + BUG_ON(ret); + } + other_start = 0; + other_end = start; + if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino, + bytenr, &other_start, &other_end)) { + key.offset = other_start; + del_slot = path->slots[0]; + del_nr++; + ret = btrfs_free_extent(trans, root, bytenr, num_bytes, + leaf->start, leaf_owner, + leaf_gen, inode->i_ino, 0); + BUG_ON(ret); + } + split_end = 0; + if (del_nr == 0) { + btrfs_set_file_extent_type(leaf, fi, + BTRFS_FILE_EXTENT_REG); + goto done; + } + + fi = btrfs_item_ptr(leaf, del_slot - 1, + struct btrfs_file_extent_item); + btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG); + btrfs_set_file_extent_num_bytes(leaf, fi, + extent_end - key.offset); + btrfs_mark_buffer_dirty(leaf); + + ret = btrfs_del_items(trans, root, path, del_slot, del_nr); + BUG_ON(ret); + goto done; + } else if (split == start) { + if (locked_end < extent_end) { + ret = try_lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, GFP_NOFS); + if (!ret) { + btrfs_release_path(root, path); + lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, GFP_NOFS); + locked_end = extent_end; + goto again; + } + locked_end = extent_end; + } + btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset); + extent_offset += split - key.offset; + } else { + BUG_ON(key.offset != start); + btrfs_set_file_extent_offset(leaf, fi, extent_offset + + split - key.offset); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split); + key.offset = split; + btrfs_set_item_key_safe(trans, root, path, 
&key); + extent_end = split; + } + + if (extent_end == end) { + split_end = 0; + extent_type = BTRFS_FILE_EXTENT_REG; + } + if (extent_end == end && split == start) { + other_start = end; + other_end = 0; + if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino, + bytenr, &other_start, &other_end)) { + path->slots[0]++; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + key.offset = split; + btrfs_set_item_key_safe(trans, root, path, &key); + btrfs_set_file_extent_offset(leaf, fi, extent_offset); + btrfs_set_file_extent_num_bytes(leaf, fi, + other_end - split); + goto done; + } + } + if (extent_end == end && split == end) { + other_start = 0; + other_end = start; + if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino, + bytenr, &other_start, &other_end)) { + path->slots[0]--; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - + other_start); + goto done; + } + } + + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + + key.offset = start; + ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi)); + BUG_ON(ret); + + leaf = path->nodes[0]; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_generation(leaf, fi, trans->transid); + btrfs_set_file_extent_type(leaf, fi, extent_type); + btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr); + btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes); + btrfs_set_file_extent_offset(leaf, fi, extent_offset); + btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset); + btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes); + btrfs_set_file_extent_compression(leaf, fi, 0); + btrfs_set_file_extent_encryption(leaf, fi, 0); + btrfs_set_file_extent_other_encoding(leaf, fi, 0); + + ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, + leaf->start, root->root_key.objectid, + trans->transid, inode->i_ino); + BUG_ON(ret); +done: + btrfs_mark_buffer_dirty(leaf); + btrfs_release_path(root, path); + if (split_end && split == start) { + split = end; + goto again; + } + if (locked_end > end) { + unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1, + GFP_NOFS); + } + btrfs_free_path(path); + return 0; +} + /* * this gets pages into the page cache and locks them down, it also properly * waits for data=ordered extents to finish before allowing the pages to be diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3e6f0568fdb..789c376157f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -587,7 +588,7 @@ free_pages_out: * blocks on disk */ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) + u64 start, u64 end, int *page_started, int force) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -602,6 +603,7 @@ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, u64 num_bytes; int extent_type; int ret; + int type; int nocow; int check_prev = 1; @@ -654,7 +656,8 @@ next_slot: struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); - if (extent_type == BTRFS_FILE_EXTENT_REG) { + if (extent_type == BTRFS_FILE_EXTENT_REG || + extent_type == BTRFS_FILE_EXTENT_PREALLOC) { struct btrfs_block_group_cache *block_group; disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); extent_end = 
found_key.offset + @@ -669,6 +672,8 @@ next_slot: goto out_check; if (disk_bytenr == 0) goto out_check; + if (extent_type == BTRFS_FILE_EXTENT_REG && !force) + goto out_check; if (btrfs_cross_ref_exist(trans, root, disk_bytenr)) goto out_check; block_group = btrfs_lookup_block_group(root->fs_info, @@ -709,10 +714,39 @@ out_check: disk_bytenr += cur_offset - found_key.offset; num_bytes = min(end + 1, extent_end) - cur_offset; + if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) { + struct extent_map *em; + struct extent_map_tree *em_tree; + em_tree = &BTRFS_I(inode)->extent_tree; + em = alloc_extent_map(GFP_NOFS); + em->start = cur_offset; + em->len = num_bytes; + em->block_len = num_bytes; + em->block_start = disk_bytenr; + em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); + while (1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, em->start, + em->start + em->len - 1, 0); + } + type = BTRFS_ORDERED_PREALLOC; + } else { + type = BTRFS_ORDERED_NOCOW; + } ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, - num_bytes, num_bytes, - BTRFS_ORDERED_NOCOW); + num_bytes, num_bytes, type); + BUG_ON(ret); + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, + cur_offset, cur_offset + num_bytes - 1, + locked_page, 0, 0, 0); cur_offset = extent_end; if (cur_offset > end) break; @@ -745,7 +779,10 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, if (btrfs_test_opt(root, NODATACOW) || btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started); + page_started, 0); + else if (btrfs_test_flag(inode, PREALLOC)) + ret = run_delalloc_nocow(inode, locked_page, start, end, + page_started, 1); else ret = cow_file_range(inode, locked_page, start, end, page_started); @@ -1006,6 +1043,63 @@ int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) return -EAGAIN; } +static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, + struct inode *inode, u64 file_pos, + u64 disk_bytenr, u64 disk_num_bytes, + u64 num_bytes, u64 ram_bytes, + u8 compression, u8 encryption, + u16 other_encoding, int extent_type) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_file_extent_item *fi; + struct btrfs_path *path; + struct extent_buffer *leaf; + struct btrfs_key ins; + u64 hint; + int ret; + + path = btrfs_alloc_path(); + BUG_ON(!path); + + ret = btrfs_drop_extents(trans, root, inode, file_pos, + file_pos + num_bytes, file_pos, &hint); + BUG_ON(ret); + + ins.objectid = inode->i_ino; + ins.offset = file_pos; + ins.type = BTRFS_EXTENT_DATA_KEY; + ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); + BUG_ON(ret); + leaf = path->nodes[0]; + fi = btrfs_item_ptr(leaf, path->slots[0], + struct btrfs_file_extent_item); + btrfs_set_file_extent_generation(leaf, fi, trans->transid); + btrfs_set_file_extent_type(leaf, fi, extent_type); + btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr); + btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes); + btrfs_set_file_extent_offset(leaf, fi, 0); + btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes); + btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes); + btrfs_set_file_extent_compression(leaf, fi, compression); + btrfs_set_file_extent_encryption(leaf, fi, encryption); + btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding); + 
btrfs_mark_buffer_dirty(leaf); + + inode_add_bytes(inode, num_bytes); + btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0); + + ins.objectid = disk_bytenr; + ins.offset = disk_num_bytes; + ins.type = BTRFS_EXTENT_ITEM_KEY; + ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, + root->root_key.objectid, + trans->transid, inode->i_ino, &ins); + BUG_ON(ret); + + btrfs_free_path(path); + return 0; +} + /* as ordered data IO finishes, this gets called so we can finish * an ordered extent if the range of bytes in the file it covers are * fully written. @@ -1016,12 +1110,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_trans_handle *trans; struct btrfs_ordered_extent *ordered_extent; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; - struct btrfs_file_extent_item *extent_item; - struct btrfs_path *path = NULL; - struct extent_buffer *leaf; - u64 alloc_hint = 0; - struct list_head list; - struct btrfs_key ins; + int compressed = 0; int ret; ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1); @@ -1035,67 +1124,30 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) goto nocow; - path = btrfs_alloc_path(); - BUG_ON(!path); - lock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, GFP_NOFS); - INIT_LIST_HEAD(&list); - - ret = btrfs_drop_extents(trans, root, inode, - ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len, - ordered_extent->file_offset, &alloc_hint); - BUG_ON(ret); - - ins.objectid = inode->i_ino; - ins.offset = ordered_extent->file_offset; - ins.type = BTRFS_EXTENT_DATA_KEY; - ret = btrfs_insert_empty_item(trans, root, path, &ins, - sizeof(*extent_item)); - BUG_ON(ret); - leaf = path->nodes[0]; - extent_item = btrfs_item_ptr(leaf, path->slots[0], - struct btrfs_file_extent_item); - btrfs_set_file_extent_generation(leaf, extent_item, trans->transid); - btrfs_set_file_extent_type(leaf, extent_item, BTRFS_FILE_EXTENT_REG); - btrfs_set_file_extent_disk_bytenr(leaf, extent_item, - ordered_extent->start); - btrfs_set_file_extent_disk_num_bytes(leaf, extent_item, - ordered_extent->disk_len); - btrfs_set_file_extent_offset(leaf, extent_item, 0); - if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) - btrfs_set_file_extent_compression(leaf, extent_item, 1); - else - btrfs_set_file_extent_compression(leaf, extent_item, 0); - btrfs_set_file_extent_encryption(leaf, extent_item, 0); - btrfs_set_file_extent_other_encoding(leaf, extent_item, 0); - - /* ram bytes = extent_num_bytes for now */ - btrfs_set_file_extent_num_bytes(leaf, extent_item, - ordered_extent->len); - btrfs_set_file_extent_ram_bytes(leaf, extent_item, - ordered_extent->len); - btrfs_mark_buffer_dirty(leaf); - - btrfs_drop_extent_cache(inode, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1, 0); - - ins.objectid = ordered_extent->start; - ins.offset = ordered_extent->disk_len; - ins.type = BTRFS_EXTENT_ITEM_KEY; - ret = btrfs_alloc_reserved_extent(trans, root, leaf->start, - root->root_key.objectid, - trans->transid, inode->i_ino, &ins); - BUG_ON(ret); - btrfs_release_path(root, path); - - inode_add_bytes(inode, ordered_extent->len); + compressed = 1; + if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { + BUG_ON(compressed); + ret = btrfs_mark_extent_written(trans, root, inode, + ordered_extent->file_offset, + 
ordered_extent->file_offset + + ordered_extent->len); + BUG_ON(ret); + } else { + ret = insert_reserved_file_extent(trans, inode, + ordered_extent->file_offset, + ordered_extent->start, + ordered_extent->disk_len, + ordered_extent->len, + ordered_extent->len, + compressed, 0, 0, + BTRFS_FILE_EXTENT_REG); + BUG_ON(ret); + } unlock_extent(io_tree, ordered_extent->file_offset, ordered_extent->file_offset + ordered_extent->len - 1, GFP_NOFS); @@ -1115,8 +1167,6 @@ nocow: btrfs_put_ordered_extent(ordered_extent); btrfs_end_transaction(trans, root); - if (path) - btrfs_free_path(path); return 0; } @@ -3488,7 +3538,8 @@ again: found_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; compressed = btrfs_file_extent_compression(leaf, item); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = extent_start + btrfs_file_extent_num_bytes(leaf, item); } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { @@ -3521,7 +3572,8 @@ again: goto not_found_em; } - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { em->start = extent_start; em->len = extent_end - extent_start; bytenr = btrfs_file_extent_disk_bytenr(leaf, item); @@ -3538,6 +3590,8 @@ again: bytenr += btrfs_file_extent_offset(leaf, item); em->block_start = bytenr; em->block_len = em->len; + if (found_type == BTRFS_FILE_EXTENT_PREALLOC) + set_bit(EXTENT_FLAG_PREALLOC, &em->flags); } goto insert; } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { @@ -3969,6 +4023,7 @@ int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry, if (error) return error; + atomic_inc(&inode->i_count); d_instantiate(dentry, inode); return 0; } @@ -4318,6 +4373,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_symlink_inode_operations; inode->i_mapping->a_ops = &btrfs_symlink_aops; inode->i_mapping->backing_dev_info = &root->fs_info->bdi; + inode_set_bytes(inode, name_len); btrfs_i_size_write(inode, name_len - 1); err = btrfs_update_inode(trans, root, inode); if (err) @@ -4335,6 +4391,130 @@ out_fail: return err; } +static int prealloc_file_range(struct inode *inode, u64 start, u64 end, + u64 alloc_hint, int mode) +{ + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_key ins; + u64 alloc_size; + u64 cur_offset = start; + u64 num_bytes = end - start; + int ret = 0; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + + while (num_bytes > 0) { + alloc_size = min(num_bytes, root->fs_info->max_extent); + ret = btrfs_reserve_extent(trans, root, alloc_size, + root->sectorsize, 0, alloc_hint, + (u64)-1, &ins, 1); + if (ret) { + WARN_ON(1); + goto out; + } + ret = insert_reserved_file_extent(trans, inode, + cur_offset, ins.objectid, + ins.offset, ins.offset, + ins.offset, 0, 0, 0, + BTRFS_FILE_EXTENT_PREALLOC); + BUG_ON(ret); + num_bytes -= ins.offset; + cur_offset += ins.offset; + alloc_hint = ins.objectid + ins.offset; + } +out: + if (cur_offset > start) { + inode->i_ctime = CURRENT_TIME; + btrfs_set_flag(inode, PREALLOC); + if (!(mode & FALLOC_FL_KEEP_SIZE) && + cur_offset > i_size_read(inode)) + btrfs_i_size_write(inode, cur_offset); + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + } + + btrfs_end_transaction(trans, root); + return ret; +} + +static long btrfs_fallocate(struct inode *inode, 
int mode, + loff_t offset, loff_t len) +{ + u64 cur_offset; + u64 last_byte; + u64 alloc_start; + u64 alloc_end; + u64 alloc_hint = 0; + u64 mask = BTRFS_I(inode)->root->sectorsize - 1; + struct extent_map *em; + int ret; + + alloc_start = offset & ~mask; + alloc_end = (offset + len + mask) & ~mask; + + mutex_lock(&inode->i_mutex); + if (alloc_start > inode->i_size) { + ret = btrfs_cont_expand(inode, alloc_start); + if (ret) + goto out; + } + + while (1) { + struct btrfs_ordered_extent *ordered; + lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, + alloc_end - 1, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, + alloc_end - 1); + if (ordered && + ordered->file_offset + ordered->len > alloc_start && + ordered->file_offset < alloc_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent(&BTRFS_I(inode)->io_tree, + alloc_start, alloc_end - 1, GFP_NOFS); + btrfs_wait_ordered_range(inode, alloc_start, + alloc_end - alloc_start); + } else { + if (ordered) + btrfs_put_ordered_extent(ordered); + break; + } + } + + cur_offset = alloc_start; + while (1) { + em = btrfs_get_extent(inode, NULL, 0, cur_offset, + alloc_end - cur_offset, 0); + BUG_ON(IS_ERR(em) || !em); + last_byte = min(extent_map_end(em), alloc_end); + last_byte = (last_byte + mask) & ~mask; + if (em->block_start == EXTENT_MAP_HOLE) { + ret = prealloc_file_range(inode, cur_offset, + last_byte, alloc_hint, mode); + if (ret < 0) { + free_extent_map(em); + break; + } + } + if (em->block_start <= EXTENT_MAP_LAST_BYTE) + alloc_hint = em->block_start; + free_extent_map(em); + + cur_offset = last_byte; + if (cur_offset >= alloc_end) { + ret = 0; + break; + } + } + unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1, + GFP_NOFS); +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + static int btrfs_set_page_dirty(struct page *page) { return __set_page_dirty_nobuffers(page); @@ -4421,6 +4601,7 @@ static struct inode_operations btrfs_file_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .permission = btrfs_permission, + .fallocate = btrfs_fallocate, }; static struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 7f915d47839..9ff2b4e0e92 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -724,7 +724,8 @@ long btrfs_ioctl_clone(struct file *file, unsigned long src_fd) extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(leaf, extent); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 ds = btrfs_file_extent_disk_bytenr(leaf, extent); u64 dl = btrfs_file_extent_disk_num_bytes(leaf, diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index e7317c8fda2..370bb428559 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -182,7 +182,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->len = len; entry->disk_len = disk_len; entry->inode = inode; - if (type == BTRFS_ORDERED_NOCOW || type == BTRFS_ORDERED_COMPRESSED) + if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); /* one ref for the tree */ @@ -339,7 +339,8 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only) ordered = list_entry(cur, struct btrfs_ordered_extent, root_extent_list); if (nocow_only && - !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { + 
!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) && + !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) { list_move(&ordered->root_extent_list, &root->fs_info->ordered_extents); cond_resched_lock(&root->fs_info->ordered_extent_lock); diff --git a/fs/btrfs/ordered-data.h index e6d9bc54c2b..260bf95dfe0 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -68,6 +68,8 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ +#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ + struct btrfs_ordered_extent { /* logical offset in the file */ u64 file_offset; @@ -132,7 +134,7 @@ int btrfs_remove_ordered_extent(struct inode *inode, int btrfs_dec_test_ordered_pending(struct inode *inode, u64 file_offset, u64 io_size); int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, - u64 start, u64 len, u64 disk_len, int tyep); + u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); diff --git a/fs/btrfs/tree-log.c index e0201c3a7dc..be4fc30a30e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -442,7 +442,8 @@ insert: fi = (struct btrfs_file_extent_item *)dst_ptr; extent_type = btrfs_file_extent_type(path->nodes[0], fi); - if (extent_type == BTRFS_FILE_EXTENT_REG) { + if (extent_type == BTRFS_FILE_EXTENT_REG || + extent_type == BTRFS_FILE_EXTENT_PREALLOC) { struct btrfs_key ins; ins.objectid = btrfs_file_extent_disk_bytenr( path->nodes[0], fi); @@ -538,7 +539,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(eb, item); - if (found_type == BTRFS_FILE_EXTENT_REG) + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) extent_end = start + btrfs_file_extent_num_bytes(eb, item); else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); @@ -562,7 +564,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, start, 0); - if (ret == 0 && found_type == BTRFS_FILE_EXTENT_REG) { + if (ret == 0 && + (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC)) { struct btrfs_file_extent_item cmp1; struct btrfs_file_extent_item cmp2; struct btrfs_file_extent_item *existing; @@ -2522,7 +2526,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_file_extent_item); found_type = btrfs_file_extent_type(src, extent); - if (found_type == BTRFS_FILE_EXTENT_REG) { + if (found_type == BTRFS_FILE_EXTENT_REG || + found_type == BTRFS_FILE_EXTENT_PREALLOC) { u64 ds = btrfs_file_extent_disk_bytenr(src, extent); u64 dl = btrfs_file_extent_disk_num_bytes(src, -- cgit v1.2.3-70-g09d2 From 70b99e6959a4c28ae1b314985eca731f3db72f1d Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 31 Oct 2008 12:46:39 -0400 Subject: Btrfs: Compression corner fixes Make sure we keep page->mapping NULL on the pages we're getting via alloc_page. It gets set so a few of the callbacks can do the right thing, but in general these pages don't have a mapping. Don't try to truncate compressed inline items in btrfs_drop_extents. The whole compressed item must be preserved. Don't try to create multipage inline compressed items. 
When we try to overwrite just the first page of the file, we would have to read in and recow all the pages after it in the same compressed inline items. For now, only create single page inline items. Make sure we lock pages in the correct order during delalloc. The search into the state tree for delalloc bytes can return bytes before the page we already have locked. Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 4 ++++ fs/btrfs/extent_io.c | 11 ++++++++++- fs/btrfs/file.c | 21 +++++++++++++++------ fs/btrfs/inode.c | 8 ++++++-- 4 files changed, 35 insertions(+), 9 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 9adaa79adad..354913177ba 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -220,10 +220,12 @@ static void end_compressed_bio_write(struct bio *bio, int err) */ inode = cb->inode; tree = &BTRFS_I(inode)->io_tree; + cb->compressed_pages[0]->mapping = cb->inode->i_mapping; tree->ops->writepage_end_io_hook(cb->compressed_pages[0], cb->start, cb->start + cb->len - 1, NULL, 1); + cb->compressed_pages[0]->mapping = NULL; end_compressed_writeback(inode, cb->start, cb->len); /* note, our inode could be gone now */ @@ -306,6 +308,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, else ret = 0; + page->mapping = NULL; if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { bio_get(bio); @@ -423,6 +426,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, else ret = 0; + page->mapping = NULL; if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { bio_get(comp_bio); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index eb3c12e7bea..9b37ce6e516 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1242,12 +1242,21 @@ again: delalloc_end = 0; found = find_delalloc_range(tree, &delalloc_start, &delalloc_end, max_bytes); - if (!found) { + if (!found || delalloc_end <= *start) { *start = delalloc_start; *end = delalloc_end; return found; } + /* + * start comes from the offset of locked_page. We have to lock + * pages in order, so we can't process delalloc bytes before + * locked_page + */ + if (delalloc_start < *start) { + delalloc_start = *start; + } + /* * make sure to limit the number of pages we try to lock down * if we're looping. 
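
The delalloc clamp in the extent_io.c hunk above encodes the lock-ordering rule from the commit message: pages are only ever locked in ascending index order, starting from the page writepage already holds. A minimal standalone sketch of that rule, with made-up names and values (trim_to_held is not a btrfs function):

#include <stdio.h>

/*
 * The state tree may report delalloc that begins below the page we
 * already hold. Trimming the range forward guarantees every page
 * locked afterwards has a higher index than the one held, which
 * rules out lock-order inversions between writeback threads.
 */
static unsigned long trim_to_held(unsigned long held_index,
				  unsigned long range_start)
{
	return range_start < held_index ? held_index : range_start;
}

int main(void)
{
	/* delalloc reported from page 2, but page 4 is already locked:
	 * handle pages 4 and up now; 2-3 are found on a later pass */
	printf("start locking at page %lu\n", trim_to_held(4, 2));
	return 0;
}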
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 238a8e215eb..0c8cc35a8b9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -368,8 +368,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, u64 search_start = start; u64 leaf_start; u64 ram_bytes = 0; - u8 compression = 0; - u8 encryption = 0; + u8 compression; + u8 encryption; u16 other_encoding = 0; u64 root_gen; u64 root_owner; @@ -415,6 +415,8 @@ next_slot: leaf_start = 0; root_gen = 0; root_owner = 0; + compression = 0; + encryption = 0; extent = NULL; leaf = path->nodes[0]; slot = path->slots[0]; @@ -546,8 +548,12 @@ next_slot: inline_limit - key.offset); inode_sub_bytes(inode, extent_end - inline_limit); - btrfs_truncate_item(trans, root, path, - new_size, 1); + btrfs_set_file_extent_ram_bytes(leaf, extent, + new_size); + if (!compression && !encryption) { + btrfs_truncate_item(trans, root, path, + new_size, 1); + } } } /* delete the entire extent */ @@ -567,8 +573,11 @@ next_slot: new_size = btrfs_file_extent_calc_inline_size( extent_end - end); inode_sub_bytes(inode, end - key.offset); - ret = btrfs_truncate_item(trans, root, path, - new_size, 0); + btrfs_set_file_extent_ram_bytes(leaf, extent, + new_size); + if (!compression && !encryption) + ret = btrfs_truncate_item(trans, root, path, + new_size, 0); BUG_ON(ret); } /* create bookend, splitting the extent in two */ diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 789c376157f..806708dd7e3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -239,6 +239,7 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, data_len = compressed_size; if (start > 0 || + actual_end >= PAGE_CACHE_SIZE || data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) || (!compressed_size && (actual_end & (root->sectorsize - 1)) == 0) || @@ -248,7 +249,7 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, } ret = btrfs_drop_extents(trans, root, inode, start, - aligned_end, aligned_end, &hint_byte); + aligned_end, start, &hint_byte); BUG_ON(ret); if (isize > actual_end) @@ -423,6 +424,7 @@ again: * free any pages it allocated and our page pointer array */ for (i = 0; i < nr_pages_ret; i++) { + WARN_ON(pages[i]->mapping); page_cache_release(pages[i]); } kfree(pages); @@ -572,8 +574,10 @@ free_pages_out_fail: extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, start, end, locked_page, 0, 0, 0); free_pages_out: - for (i = 0; i < nr_pages_ret; i++) + for (i = 0; i < nr_pages_ret; i++) { + WARN_ON(pages[i]->mapping); page_cache_release(pages[i]); + } if (pages) kfree(pages); -- cgit v1.2.3-70-g09d2 From 771ed689d2cd53439e28e095bc38fbe40a71429e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 6 Nov 2008 22:02:51 -0500 Subject: Btrfs: Optimize compressed writeback and reads When reading compressed extents, try to put pages into the page cache for any pages covered by the compressed extent that readpages didn't already preload. Add an async work queue to handle transformations at delayed allocation processing time. Right now this is just compression. The workflow is: 1) Find offsets in the file marked for delayed allocation 2) Lock the pages 3) Lock the state bits 4) Call the async delalloc code The async delalloc code clears the state lock bits and delalloc bits. It is important this happens before the range goes into the work queue because otherwise it might deadlock with other work queue items that try to lock those extent bits. The file pages are compressed, and if the compression doesn't work the pages are written back directly. 
An ordered work queue is used to make sure the inodes are written in the same order that pdflush or writepages sent them down. This changes extent_write_cache_pages to let the writepage function update the wbc nr_written count. Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 150 ++++++++++- fs/btrfs/ctree.h | 4 +- fs/btrfs/disk-io.c | 27 +- fs/btrfs/extent-tree.c | 6 +- fs/btrfs/extent_io.c | 140 +++++++++-- fs/btrfs/extent_io.h | 13 +- fs/btrfs/file.c | 53 ++-- fs/btrfs/inode.c | 643 +++++++++++++++++++++++++++++++++++++----------- fs/btrfs/ordered-data.c | 13 +- fs/btrfs/super.c | 4 + fs/btrfs/zlib.c | 3 +- 11 files changed, 849 insertions(+), 207 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 354913177ba..284f21025bc 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -145,9 +146,9 @@ static void end_compressed_bio_read(struct bio *bio, int err) } /* do io completion on the original bio */ - if (cb->errors) + if (cb->errors) { bio_io_error(cb->orig_bio); - else + } else bio_endio(cb->orig_bio, 0); /* finally free the cb struct */ @@ -333,6 +334,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, } bytes_left -= PAGE_CACHE_SIZE; first_byte += PAGE_CACHE_SIZE; + cond_resched(); } bio_get(bio); @@ -346,6 +348,130 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, return 0; } +static noinline int add_ra_bio_pages(struct inode *inode, + u64 compressed_end, + struct compressed_bio *cb) +{ + unsigned long end_index; + unsigned long page_index; + u64 last_offset; + u64 isize = i_size_read(inode); + int ret; + struct page *page; + unsigned long nr_pages = 0; + struct extent_map *em; + struct address_space *mapping = inode->i_mapping; + struct pagevec pvec; + struct extent_map_tree *em_tree; + struct extent_io_tree *tree; + u64 end; + int misses = 0; + + page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; + last_offset = (page_offset(page) + PAGE_CACHE_SIZE); + em_tree = &BTRFS_I(inode)->extent_tree; + tree = &BTRFS_I(inode)->io_tree; + + if (isize == 0) + return 0; + + end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + + pagevec_init(&pvec, 0); + while(last_offset < compressed_end) { + page_index = last_offset >> PAGE_CACHE_SHIFT; + + if (page_index > end_index) + break; + + rcu_read_lock(); + page = radix_tree_lookup(&mapping->page_tree, page_index); + rcu_read_unlock(); + if (page) { + misses++; + if (misses > 4) + break; + goto next; + } + + page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS); + if (!page) + break; + + page->index = page_index; + /* + * what we want to do here is call add_to_page_cache_lru, + * but that isn't exported, so we reproduce it here + */ + if (add_to_page_cache(page, mapping, + page->index, GFP_NOFS)) { + page_cache_release(page); + goto next; + } + + /* open coding of lru_cache_add, also not exported */ + page_cache_get(page); + if (!pagevec_add(&pvec, page)) + __pagevec_lru_add(&pvec); + + end = last_offset + PAGE_CACHE_SIZE - 1; + /* + * at this point, we have a locked page in the page cache + * for these bytes in the file. But, we have to make + * sure they map to this compressed extent on disk. 
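+ * If the extent map for these bytes no longer points back at this
+ * same compressed extent, the page is dropped and readahead stops
+ * here rather than decompressing data into pages that belong to a
+ * different extent.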
+ */ + set_page_extent_mapped(page); + lock_extent(tree, last_offset, end, GFP_NOFS); + spin_lock(&em_tree->lock); + em = lookup_extent_mapping(em_tree, last_offset, + PAGE_CACHE_SIZE); + spin_unlock(&em_tree->lock); + + if (!em || last_offset < em->start || + (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || + (em->block_start >> 9) != cb->orig_bio->bi_sector) { + free_extent_map(em); + unlock_extent(tree, last_offset, end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + break; + } + free_extent_map(em); + + if (page->index == end_index) { + char *userpage; + size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); + + if (zero_offset) { + int zeros; + zeros = PAGE_CACHE_SIZE - zero_offset; + userpage = kmap_atomic(page, KM_USER0); + memset(userpage + zero_offset, 0, zeros); + flush_dcache_page(page); + kunmap_atomic(userpage, KM_USER0); + } + } + + ret = bio_add_page(cb->orig_bio, page, + PAGE_CACHE_SIZE, 0); + + if (ret == PAGE_CACHE_SIZE) { + nr_pages++; + page_cache_release(page); + } else { + unlock_extent(tree, last_offset, end, GFP_NOFS); + unlock_page(page); + page_cache_release(page); + break; + } +next: + last_offset += PAGE_CACHE_SIZE; + } + if (pagevec_count(&pvec)) + __pagevec_lru_add(&pvec); + return 0; +} + /* * for a compressed read, the bio we get passed has all the inode pages * in it. We don't actually do IO on those pages but allocate new ones @@ -373,6 +499,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, struct block_device *bdev; struct bio *comp_bio; u64 cur_disk_byte = (u64)bio->bi_sector << 9; + u64 em_len; struct extent_map *em; int ret; @@ -393,6 +520,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->start = em->start; compressed_len = em->block_len; + em_len = em->len; free_extent_map(em); cb->len = uncompressed_len; @@ -411,6 +539,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, } cb->nr_pages = nr_pages; + add_ra_bio_pages(inode, cb->start + em_len, cb); + + if (!btrfs_test_opt(root, NODATASUM) && + !btrfs_test_flag(inode, NODATASUM)) { + btrfs_lookup_bio_sums(root, inode, cb->orig_bio); + } + + /* include any pages we added in add_ra-bio_pages */ + uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + cb->len = uncompressed_len; + comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; @@ -442,9 +581,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); atomic_inc(&cb->pending_bios); - bio->bi_private = cb; - bio->bi_end_io = end_compressed_bio_write; - bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + comp_bio->bi_private = cb; + comp_bio->bi_end_io = end_compressed_bio_read; + + bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); } cur_disk_byte += PAGE_CACHE_SIZE; } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 689df070c8e..c83cc5b2ded 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -625,8 +625,8 @@ struct btrfs_fs_info { struct btrfs_transaction *running_transaction; wait_queue_head_t transaction_throttle; wait_queue_head_t transaction_wait; - wait_queue_head_t async_submit_wait; + wait_queue_head_t async_submit_wait; wait_queue_head_t tree_log_wait; struct btrfs_super_block super_copy; @@ -653,6 +653,7 @@ struct btrfs_fs_info { atomic_t nr_async_submits; atomic_t async_submit_draining; atomic_t nr_async_bios; + atomic_t async_delalloc_pages; atomic_t tree_log_writers; atomic_t 
tree_log_commit; unsigned long tree_log_batch; @@ -677,6 +678,7 @@ struct btrfs_fs_info { * two */ struct btrfs_workers workers; + struct btrfs_workers delalloc_workers; struct btrfs_workers endio_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index e0a28f705a6..8efc123d222 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -539,6 +539,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, (atomic_read(&fs_info->nr_async_bios) < limit), HZ/10); } + + while(atomic_read(&fs_info->async_submit_draining) && + atomic_read(&fs_info->nr_async_submits)) { + wait_event(fs_info->async_submit_wait, + (atomic_read(&fs_info->nr_async_submits) == 0)); + } + return 0; } @@ -1437,6 +1444,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->space_info); btrfs_mapping_init(&fs_info->mapping_tree); atomic_set(&fs_info->nr_async_submits, 0); + atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->async_submit_draining, 0); atomic_set(&fs_info->nr_async_bios, 0); atomic_set(&fs_info->throttles, 0); @@ -1550,6 +1558,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->workers, "worker", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->delalloc_workers, "delalloc", + fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->submit_workers, "submit", min_t(u64, fs_devices->num_devices, fs_info->thread_pool_size)); @@ -1560,15 +1571,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ fs_info->submit_workers.idle_thresh = 64; - /* fs_info->workers is responsible for checksumming file data - * blocks and metadata. Using a larger idle thresh allows each - * worker thread to operate on things in roughly the order they - * were sent by the writeback daemons, improving overall locality - * of the IO going down the pipe. 
- */ - fs_info->workers.idle_thresh = 8; + fs_info->workers.idle_thresh = 16; fs_info->workers.ordered = 1; + fs_info->delalloc_workers.idle_thresh = 2; + fs_info->delalloc_workers.ordered = 1; + btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size); @@ -1584,6 +1592,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); + btrfs_start_workers(&fs_info->delalloc_workers, 1); btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, @@ -1732,6 +1741,7 @@ fail_tree_root: fail_sys_array: fail_sb_buffer: btrfs_stop_workers(&fs_info->fixup_workers); + btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_write_workers); @@ -1988,6 +1998,7 @@ int close_ctree(struct btrfs_root *root) truncate_inode_pages(fs_info->btree_inode->i_mapping, 0); btrfs_stop_workers(&fs_info->fixup_workers); + btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_write_workers); @@ -2062,7 +2073,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) struct extent_io_tree *tree; u64 num_dirty; u64 start = 0; - unsigned long thresh = 96 * 1024 * 1024; + unsigned long thresh = 32 * 1024 * 1024; tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree; if (current_is_pdflush() || current->flags & PF_MEMALLOC) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 8af39521eb7..ebd8275a193 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -768,7 +768,11 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, l = path->nodes[0]; btrfs_item_key_to_cpu(l, &key, path->slots[0]); - BUG_ON(key.objectid != bytenr); + if (key.objectid != bytenr) { + btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]); + printk("wanted %Lu found %Lu\n", bytenr, key.objectid); + BUG(); + } BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY); item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9b37ce6e516..bbe3bcfcf4a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -47,6 +47,11 @@ struct extent_page_data { struct bio *bio; struct extent_io_tree *tree; get_extent_t *get_extent; + + /* tells writepage not to lock the state bits for this range + * it still does the unlocking + */ + int extent_locked; }; int __init extent_io_init(void) @@ -1198,11 +1203,18 @@ static noinline int lock_delalloc_pages(struct inode *inode, * the caller is taking responsibility for * locked_page */ - if (pages[i] != locked_page) + if (pages[i] != locked_page) { lock_page(pages[i]); + if (pages[i]->mapping != inode->i_mapping) { + ret = -EAGAIN; + unlock_page(pages[i]); + page_cache_release(pages[i]); + goto done; + } + } page_cache_release(pages[i]); + pages_locked++; } - pages_locked += ret; nrpages -= ret; index += ret; cond_resched(); @@ -1262,8 +1274,7 @@ again: * if we're looping. 
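+ * (the clamp below limits a looping caller to a single page, so a
+ * retry always tries to lock less than the pass that failed)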
*/ if (delalloc_end + 1 - delalloc_start > max_bytes && loops) { - delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) & - ~((u64)PAGE_CACHE_SIZE - 1); + delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1; } /* step two, lock all the pages after the page that has start */ ret = lock_delalloc_pages(inode, locked_page, @@ -1306,7 +1317,10 @@ out_failed: int extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, - int clear_dirty, int set_writeback, + int unlock_pages, + int clear_unlock, + int clear_delalloc, int clear_dirty, + int set_writeback, int end_writeback) { int ret; @@ -1315,12 +1329,19 @@ int extent_clear_unlock_delalloc(struct inode *inode, unsigned long end_index = end >> PAGE_CACHE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; - int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC; + int clear_bits = 0; + if (clear_unlock) + clear_bits |= EXTENT_LOCKED; if (clear_dirty) clear_bits |= EXTENT_DIRTY; + if (clear_delalloc) + clear_bits |= EXTENT_DELALLOC; + clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS); + if (!(unlock_pages || clear_dirty || set_writeback || end_writeback)) + return 0; while(nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, @@ -1336,7 +1357,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, set_page_writeback(pages[i]); if (end_writeback) end_page_writeback(pages[i]); - unlock_page(pages[i]); + if (unlock_pages) + unlock_page(pages[i]); page_cache_release(pages[i]); } nr_pages -= ret; @@ -1741,9 +1763,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err) } } - if (uptodate) + if (uptodate) { set_extent_uptodate(tree, start, end, GFP_ATOMIC); + } unlock_extent(tree, start, end, GFP_ATOMIC); if (whole_page) { @@ -1925,6 +1948,7 @@ void set_page_extent_mapped(struct page *page) set_page_private(page, EXTENT_PAGE_PRIVATE); } } +EXPORT_SYMBOL(set_page_extent_mapped); void set_page_extent_head(struct page *page, unsigned long len) { @@ -2143,12 +2167,17 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, u64 delalloc_end; int page_started; int compressed; + unsigned long nr_written = 0; WARN_ON(!PageLocked(page)); pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - page->mapping->a_ops->invalidatepage(page, 0); + if (epd->extent_locked) { + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, + page_end, NULL, 1); + } unlock_page(page); return 0; } @@ -2169,27 +2198,33 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, delalloc_start = start; delalloc_end = 0; page_started = 0; - while(delalloc_end < page_end) { - nr_delalloc = find_lock_delalloc_range(inode, tree, + if (!epd->extent_locked) { + while(delalloc_end < page_end) { + nr_delalloc = find_lock_delalloc_range(inode, tree, page, &delalloc_start, &delalloc_end, 128 * 1024 * 1024); - if (nr_delalloc == 0) { + if (nr_delalloc == 0) { + delalloc_start = delalloc_end + 1; + continue; + } + tree->ops->fill_delalloc(inode, page, delalloc_start, + delalloc_end, &page_started, + &nr_written); delalloc_start = delalloc_end + 1; - continue; } - tree->ops->fill_delalloc(inode, page, delalloc_start, - delalloc_end, &page_started); - delalloc_start = delalloc_end + 1; - } - /* did the fill delalloc function already unlock and start the IO? 
*/ - if (page_started) { - return 0; + /* did the fill delalloc function already unlock and start + * the IO? + */ + if (page_started) { + ret = 0; + goto update_nr_written; + } } - lock_extent(tree, start, page_end, GFP_NOFS); + unlock_start = start; if (tree->ops && tree->ops->writepage_start_hook) { @@ -2199,10 +2234,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, unlock_extent(tree, start, page_end, GFP_NOFS); redirty_page_for_writepage(wbc, page); unlock_page(page); - return 0; + ret = 0; + goto update_nr_written; } } + nr_written++; + end = page_end; if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) { printk("found delalloc bits after lock_extent\n"); @@ -2333,6 +2371,12 @@ done: if (unlock_start <= page_end) unlock_extent(tree, unlock_start, page_end, GFP_NOFS); unlock_page(page); + +update_nr_written: + wbc->nr_to_write -= nr_written; + if (wbc->range_cyclic || (wbc->nr_to_write > 0 && + wbc->range_start == 0 && wbc->range_end == LLONG_MAX)) + page->mapping->writeback_index = page->index + nr_written; return 0; } @@ -2431,7 +2475,7 @@ retry: unlock_page(page); ret = 0; } - if (ret || (--(wbc->nr_to_write) <= 0)) + if (ret || wbc->nr_to_write <= 0) done = 1; if (wbc->nonblocking && bdi_write_congested(bdi)) { wbc->encountered_congestion = 1; @@ -2452,6 +2496,8 @@ retry: } if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) mapping->writeback_index = index; + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) + range_whole = 1; if (wbc->range_cont) wbc->range_start = index << PAGE_CACHE_SHIFT; @@ -2469,6 +2515,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, .bio = NULL, .tree = tree, .get_extent = get_extent, + .extent_locked = 0, }; struct writeback_control wbc_writepages = { .bdi = wbc->bdi, @@ -2491,6 +2538,52 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, } EXPORT_SYMBOL(extent_write_full_page); +int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, + u64 start, u64 end, get_extent_t *get_extent, + int mode) +{ + int ret = 0; + struct address_space *mapping = inode->i_mapping; + struct page *page; + unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + + struct extent_page_data epd = { + .bio = NULL, + .tree = tree, + .get_extent = get_extent, + .extent_locked = 1, + }; + struct writeback_control wbc_writepages = { + .bdi = inode->i_mapping->backing_dev_info, + .sync_mode = mode, + .older_than_this = NULL, + .nr_to_write = nr_pages * 2, + .range_start = start, + .range_end = end + 1, + }; + + while(start <= end) { + page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + if (clear_page_dirty_for_io(page)) + ret = __extent_writepage(page, &wbc_writepages, &epd); + else { + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, + start + PAGE_CACHE_SIZE - 1, + NULL, 1); + unlock_page(page); + } + page_cache_release(page); + start += PAGE_CACHE_SIZE; + } + + if (epd.bio) + submit_one_bio(WRITE, epd.bio, 0, 0); + return ret; +} +EXPORT_SYMBOL(extent_write_locked_range); + int extent_writepages(struct extent_io_tree *tree, struct address_space *mapping, @@ -2502,6 +2595,7 @@ int extent_writepages(struct extent_io_tree *tree, .bio = NULL, .tree = tree, .get_extent = get_extent, + .extent_locked = 0, }; ret = extent_write_cache_pages(tree, mapping, wbc, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 283110ec4ee..2d5f67065b6 100644 --- 
a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -35,7 +35,8 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw, unsigned long bio_flags); struct extent_io_ops { int (*fill_delalloc)(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started); + u64 start, u64 end, int *page_started, + unsigned long *nr_written); int (*writepage_start_hook)(struct page *page, u64 start, u64 end); int (*writepage_io_hook)(struct page *page, u64 start, u64 end); extent_submit_bio_hook_t *submit_bio_hook; @@ -172,6 +173,9 @@ int extent_invalidatepage(struct extent_io_tree *tree, int extent_write_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, struct writeback_control *wbc); +int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, + u64 start, u64 end, get_extent_t *get_extent, + int mode); int extent_writepages(struct extent_io_tree *tree, struct address_space *mapping, get_extent_t *get_extent, @@ -256,6 +260,9 @@ int extent_range_uptodate(struct extent_io_tree *tree, int extent_clear_unlock_delalloc(struct inode *inode, struct extent_io_tree *tree, u64 start, u64 end, struct page *locked_page, - int clear_dirty, int set_writeback, - int clear_writeback); + int unlock_page, + int clear_unlock, + int clear_delalloc, int clear_dirty, + int set_writeback, + int end_writeback); #endif diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0c8cc35a8b9..337221ecca2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -368,6 +368,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans, u64 search_start = start; u64 leaf_start; u64 ram_bytes = 0; + u64 orig_parent = 0; + u64 disk_bytenr = 0; u8 compression; u8 encryption; u16 other_encoding = 0; @@ -500,17 +502,31 @@ next_slot: keep = 1; } - if (bookend && found_extent && locked_end < extent_end) { - ret = try_lock_extent(&BTRFS_I(inode)->io_tree, - locked_end, extent_end - 1, GFP_NOFS); - if (!ret) { - btrfs_release_path(root, path); - lock_extent(&BTRFS_I(inode)->io_tree, - locked_end, extent_end - 1, GFP_NOFS); + if (bookend && found_extent) { + if (locked_end < extent_end) { + ret = try_lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, + GFP_NOFS); + if (!ret) { + btrfs_release_path(root, path); + lock_extent(&BTRFS_I(inode)->io_tree, + locked_end, extent_end - 1, + GFP_NOFS); + locked_end = extent_end; + continue; + } locked_end = extent_end; - continue; } - locked_end = extent_end; + orig_parent = path->nodes[0]->start; + disk_bytenr = le64_to_cpu(old.disk_bytenr); + if (disk_bytenr != 0) { + ret = btrfs_inc_extent_ref(trans, root, + disk_bytenr, + le64_to_cpu(old.disk_num_bytes), + orig_parent, root->root_key.objectid, + trans->transid, inode->i_ino); + BUG_ON(ret); + } } if (found_inline) { @@ -537,8 +553,12 @@ next_slot: inode_sub_bytes(inode, old_num - new_num); } - btrfs_set_file_extent_num_bytes(leaf, extent, - new_num); + if (!compression && !encryption) { + btrfs_set_file_extent_ram_bytes(leaf, + extent, new_num); + } + btrfs_set_file_extent_num_bytes(leaf, + extent, new_num); btrfs_mark_buffer_dirty(leaf); } else if (key.offset < inline_limit && (end > extent_end) && @@ -582,11 +602,11 @@ next_slot: } /* create bookend, splitting the extent in two */ if (bookend && found_extent) { - u64 disk_bytenr; struct btrfs_key ins; ins.objectid = inode->i_ino; ins.offset = end; btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY); + btrfs_release_path(root, path); ret = btrfs_insert_empty_item(trans, root, path, &ins, 
sizeof(*extent)); @@ -623,14 +643,13 @@ next_slot: btrfs_mark_buffer_dirty(path->nodes[0]); - disk_bytenr = le64_to_cpu(old.disk_bytenr); if (disk_bytenr != 0) { - ret = btrfs_inc_extent_ref(trans, root, - disk_bytenr, - le64_to_cpu(old.disk_num_bytes), - leaf->start, + ret = btrfs_update_extent_ref(trans, root, + disk_bytenr, orig_parent, + leaf->start, root->root_key.objectid, trans->transid, ins.objectid); + BUG_ON(ret); } btrfs_release_path(root, path); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3df0ffad976..e01c0d0310a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -86,6 +86,10 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = { static void btrfs_truncate(struct inode *inode); static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); +static noinline int cow_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written, int unlock); /* * a very lame attempt at stopping writes when the FS is 85% full. There @@ -262,35 +266,72 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans, return 0; } +struct async_extent { + u64 start; + u64 ram_size; + u64 compressed_size; + struct page **pages; + unsigned long nr_pages; + struct list_head list; +}; + +struct async_cow { + struct inode *inode; + struct btrfs_root *root; + struct page *locked_page; + u64 start; + u64 end; + struct list_head extents; + struct btrfs_work work; +}; + +static noinline int add_async_extent(struct async_cow *cow, + u64 start, u64 ram_size, + u64 compressed_size, + struct page **pages, + unsigned long nr_pages) +{ + struct async_extent *async_extent; + + async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS); + async_extent->start = start; + async_extent->ram_size = ram_size; + async_extent->compressed_size = compressed_size; + async_extent->pages = pages; + async_extent->nr_pages = nr_pages; + list_add_tail(&async_extent->list, &cow->extents); + return 0; +} + /* - * when extent_io.c finds a delayed allocation range in the file, - * the call backs end up in this code. The basic idea is to - * allocate extents on disk for the range, and create ordered data structs - * in ram to track those extents. + * we create compressed extents in two phases. The first + * phase compresses a range of pages that have already been + * locked (both pages and state bits are locked). * - * locked_page is the page that writepage had locked already. We use - * it to make sure we don't do extra locks or unlocks. + * This is done inside an ordered work queue, and the compression + * is spread across many cpus. The actual IO submission is step + * two, and the ordered work queue takes care of making sure that + * happens in the same order things were put onto the queue by + * writepages and friends. * - * *page_started is set to one if we unlock locked_page and do everything - * required to start IO on it. It may be clean and already done with - * IO when we return. + * If this code finds it can't get good compression, it puts an + * entry onto the work queue to write the uncompressed bytes. This + * makes sure that both compressed inodes and uncompressed inodes + * are written in the same order that pdflush sent them down. 
*/ -static int cow_file_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) +static noinline int compress_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, + struct async_cow *async_cow, + int *num_added) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; - u64 alloc_hint = 0; u64 num_bytes; - unsigned long ram_size; u64 orig_start; u64 disk_num_bytes; - u64 cur_alloc_size; u64 blocksize = root->sectorsize; u64 actual_end; - struct btrfs_key ins; - struct extent_map *em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; int ret = 0; struct page **pages = NULL; unsigned long nr_pages; @@ -298,22 +339,12 @@ static int cow_file_range(struct inode *inode, struct page *locked_page, unsigned long total_compressed = 0; unsigned long total_in = 0; unsigned long max_compressed = 128 * 1024; - unsigned long max_uncompressed = 256 * 1024; + unsigned long max_uncompressed = 128 * 1024; int i; - int ordered_type; int will_compress; - trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); - btrfs_set_trans_block_group(trans, inode); orig_start = start; - /* - * compression made this loop a bit ugly, but the basic idea is to - * compress some pages but keep the total size of the compressed - * extent relatively small. If compression is off, this goto target - * is never used. - */ again: will_compress = 0; nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; @@ -324,7 +355,13 @@ again: /* we want to make sure that amount of ram required to uncompress * an extent is reasonable, so we limit the total size in ram - * of a compressed extent to 256k + * of a compressed extent to 128k. This is a crucial number + * because it also controls how easily we can spread reads across + * cpus for decompression. + * + * We also want to make sure the amount of IO required to do + * a random read is reasonably small, so we limit the size of + * a compressed extent to 128k. */ total_compressed = min(total_compressed, max_uncompressed); num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -333,18 +370,16 @@ again: total_in = 0; ret = 0; - /* we do compression for mount -o compress and when the - * inode has not been flagged as nocompress + /* + * we do compression for mount -o compress and when the + * inode has not been flagged as nocompress. This flag can + * change at any time if we discover bad compression ratios. */ if (!btrfs_test_flag(inode, NOCOMPRESS) && btrfs_test_opt(root, COMPRESS)) { WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); - /* we want to make sure the amount of IO required to satisfy - * a random read is reasonably small, so we limit the size - * of a compressed extent to 128k - */ ret = btrfs_zlib_compress_pages(inode->i_mapping, start, total_compressed, pages, nr_pages, &nr_pages_ret, @@ -371,26 +406,34 @@ again: } } if (start == 0) { + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + /* lets try to make an inline extent */ - if (ret || total_in < (end - start + 1)) { + if (ret || total_in < (actual_end - start)) { /* we didn't compress the entire range, try - * to make an uncompressed inline extent. This - * is almost sure to fail, but maybe inline sizes - * will get bigger later + * to make an uncompressed inline extent. 
*/ ret = cow_file_range_inline(trans, root, inode, start, end, 0, NULL); } else { + /* try making a compressed inline extent */ ret = cow_file_range_inline(trans, root, inode, start, end, total_compressed, pages); } + btrfs_end_transaction(trans, root); if (ret == 0) { + /* + * inline extent creation worked, we don't need + * to create any more async work items. Unlock + * and free up our temp pages. + */ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, - start, end, NULL, - 1, 1, 1); - *page_started = 1; + start, end, NULL, 1, 0, + 0, 1, 1, 1); ret = 0; goto free_pages_out; } @@ -435,53 +478,280 @@ again: /* flag the file so we don't compress in the future */ btrfs_set_flag(inode, NOCOMPRESS); } + if (will_compress) { + *num_added += 1; - BUG_ON(disk_num_bytes > - btrfs_super_total_bytes(&root->fs_info->super_copy)); + /* the async work queues will take care of doing actual + * allocation on disk for these compressed pages, + * and will submit them to the elevator. + */ + add_async_extent(async_cow, start, num_bytes, + total_compressed, pages, nr_pages_ret); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + if (start + num_bytes < end) { + start += num_bytes; + pages = NULL; + cond_resched(); + goto again; + } + } else { + /* + * No compression, but we still need to write the pages in + * the file we've been given so far. redirty the locked + * page if it corresponds to our extent and set things up + * for the async work queue to run cow_file_range to do + * the normal delalloc dance + */ + if (page_offset(locked_page) >= start && + page_offset(locked_page) <= end) { + __set_page_dirty_nobuffers(locked_page); + /* unlocked later on in the async handlers */ + } + add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0); + *num_added += 1; + } - while(disk_num_bytes > 0) { - unsigned long min_bytes; +out: + return 0; + +free_pages_out: + for (i = 0; i < nr_pages_ret; i++) { + WARN_ON(pages[i]->mapping); + page_cache_release(pages[i]); + } + if (pages) + kfree(pages); + + goto out; +} + +/* + * phase two of compressed writeback. This is the ordered portion + * of the code, which only gets called in the order the work was + * queued. We walk all the async extents created by compress_file_range + * and send them down to the disk. + */ +static noinline int submit_compressed_extents(struct inode *inode, + struct async_cow *async_cow) +{ + struct async_extent *async_extent; + u64 alloc_hint = 0; + struct btrfs_trans_handle *trans; + struct btrfs_key ins; + struct extent_map *em; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_io_tree *io_tree; + int ret; + + if (list_empty(&async_cow->extents)) + return 0; + + trans = btrfs_join_transaction(root, 1); + + while(!list_empty(&async_cow->extents)) { + async_extent = list_entry(async_cow->extents.next, + struct async_extent, list); + list_del(&async_extent->list); + io_tree = &BTRFS_I(inode)->io_tree; + + /* did the compression code fall back to uncompressed IO? 
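+ * (async_extent->pages is NULL in that case; cow_file_range below
+ * then allocates ordinary uncompressed extents for the range)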
*/ + if (!async_extent->pages) { + int page_started = 0; + unsigned long nr_written = 0; + + lock_extent(io_tree, async_extent->start, + async_extent->start + async_extent->ram_size - 1, + GFP_NOFS); + + /* allocate blocks */ + cow_file_range(inode, async_cow->locked_page, + async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + &page_started, &nr_written, 0); + + /* + * if page_started, cow_file_range inserted an + * inline extent and took care of all the unlocking + * and IO for us. Otherwise, we need to submit + * all those pages down to the drive. + */ + if (!page_started) + extent_write_locked_range(io_tree, + inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + btrfs_get_extent, + WB_SYNC_ALL); + kfree(async_extent); + cond_resched(); + continue; + } + + lock_extent(io_tree, async_extent->start, + async_extent->start + async_extent->ram_size - 1, + GFP_NOFS); /* - * the max size of a compressed extent is pretty small, - * make the code a little less complex by forcing - * the allocator to find a whole compressed extent at once + * here we're doing allocation and writeback of the + * compressed pages */ - if (will_compress) - min_bytes = disk_num_bytes; - else - min_bytes = root->sectorsize; + btrfs_drop_extent_cache(inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, 0); + + ret = btrfs_reserve_extent(trans, root, + async_extent->compressed_size, + async_extent->compressed_size, + 0, alloc_hint, + (u64)-1, &ins, 1); + BUG_ON(ret); + em = alloc_extent_map(GFP_NOFS); + em->start = async_extent->start; + em->len = async_extent->ram_size; + + em->block_start = ins.objectid; + em->block_len = ins.offset; + em->bdev = root->fs_info->fs_devices->latest_bdev; + set_bit(EXTENT_FLAG_PINNED, &em->flags); + set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + + while(1) { + spin_lock(&em_tree->lock); + ret = add_extent_mapping(em_tree, em); + spin_unlock(&em_tree->lock); + if (ret != -EEXIST) { + free_extent_map(em); + break; + } + btrfs_drop_extent_cache(inode, async_extent->start, + async_extent->start + + async_extent->ram_size - 1, 0); + } + + ret = btrfs_add_ordered_extent(inode, async_extent->start, + ins.objectid, + async_extent->ram_size, + ins.offset, + BTRFS_ORDERED_COMPRESSED); + BUG_ON(ret); + + btrfs_end_transaction(trans, root); + + /* + * clear dirty, set writeback and unlock the pages. + */ + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + async_extent->start, + async_extent->start + + async_extent->ram_size - 1, + NULL, 1, 1, 0, 1, 1, 0); + + ret = btrfs_submit_compressed_write(inode, + async_extent->start, + async_extent->ram_size, + ins.objectid, + ins.offset, async_extent->pages, + async_extent->nr_pages); + + BUG_ON(ret); + trans = btrfs_join_transaction(root, 1); + alloc_hint = ins.objectid + ins.offset; + kfree(async_extent); + cond_resched(); + } + + btrfs_end_transaction(trans, root); + return 0; +} + +/* + * when extent_io.c finds a delayed allocation range in the file, + * the call backs end up in this code. The basic idea is to + * allocate extents on disk for the range, and create ordered data structs + * in ram to track those extents. + * + * locked_page is the page that writepage had locked already. We use + * it to make sure we don't do extra locks or unlocks. + * + * *page_started is set to one if we unlock locked_page and do everything + * required to start IO on it. It may be clean and already done with + * IO when we return. 
+ */ +static noinline int cow_file_range(struct inode *inode, + struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written, + int unlock) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + u64 alloc_hint = 0; + u64 num_bytes; + unsigned long ram_size; + u64 disk_num_bytes; + u64 cur_alloc_size; + u64 blocksize = root->sectorsize; + u64 actual_end; + struct btrfs_key ins; + struct extent_map *em; + struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + int ret = 0; + + trans = btrfs_join_transaction(root, 1); + BUG_ON(!trans); + btrfs_set_trans_block_group(trans, inode); + actual_end = min_t(u64, i_size_read(inode), end + 1); + + num_bytes = (end - start + blocksize) & ~(blocksize - 1); + num_bytes = max(blocksize, num_bytes); + disk_num_bytes = num_bytes; + ret = 0; + + if (start == 0) { + /* lets try to make an inline extent */ + ret = cow_file_range_inline(trans, root, inode, + start, end, 0, NULL); + if (ret == 0) { + extent_clear_unlock_delalloc(inode, + &BTRFS_I(inode)->io_tree, + start, end, NULL, 1, 1, + 1, 1, 1, 1); + *nr_written = *nr_written + + (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; + *page_started = 1; + ret = 0; + goto out; + } + } + + BUG_ON(disk_num_bytes > + btrfs_super_total_bytes(&root->fs_info->super_copy)); + + btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + + while(disk_num_bytes > 0) { cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, - min_bytes, 0, alloc_hint, + root->sectorsize, 0, alloc_hint, (u64)-1, &ins, 1); if (ret) { - WARN_ON(1); - goto free_pages_out_fail; + BUG(); } em = alloc_extent_map(GFP_NOFS); em->start = start; - if (will_compress) { - ram_size = num_bytes; - em->len = num_bytes; - } else { - /* ramsize == disk size */ - ram_size = ins.offset; - em->len = ins.offset; - } + ram_size = ins.offset; + em->len = ins.offset; em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - if (will_compress) - set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); - while(1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); @@ -495,10 +765,8 @@ again: } cur_alloc_size = ins.offset; - ordered_type = will_compress ? BTRFS_ORDERED_COMPRESSED : 0; ret = btrfs_add_ordered_extent(inode, start, ins.objectid, - ram_size, cur_alloc_size, - ordered_type); + ram_size, cur_alloc_size, 0); BUG_ON(ret); if (disk_num_bytes < cur_alloc_size) { @@ -506,82 +774,145 @@ again: cur_alloc_size); break; } - - if (will_compress) { - /* - * we're doing compression, we and we need to - * submit the compressed extents down to the device. - * - * We lock down all the file pages, clearing their - * dirty bits and setting them writeback. Everyone - * that wants to modify the page will wait on the - * ordered extent above. 
- * - * The writeback bits on the file pages are - * cleared when the compressed pages are on disk - */ - btrfs_end_transaction(trans, root); - - if (start <= page_offset(locked_page) && - page_offset(locked_page) < start + ram_size) { - *page_started = 1; - } - - extent_clear_unlock_delalloc(inode, - &BTRFS_I(inode)->io_tree, - start, - start + ram_size - 1, - NULL, 1, 1, 0); - - ret = btrfs_submit_compressed_write(inode, start, - ram_size, ins.objectid, - cur_alloc_size, pages, - nr_pages_ret); - - BUG_ON(ret); - trans = btrfs_join_transaction(root, 1); - if (start + ram_size < end) { - start += ram_size; - alloc_hint = ins.objectid + ins.offset; - /* pages will be freed at end_bio time */ - pages = NULL; - goto again; - } else { - /* we've written everything, time to go */ - break; - } - } /* we're not doing compressed IO, don't unlock the first * page (which the caller expects to stay locked), don't * clear any dirty bits and don't set any writeback bits */ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, start, start + ram_size - 1, - locked_page, 0, 0, 0); + locked_page, unlock, 1, + 1, 0, 0, 0); disk_num_bytes -= cur_alloc_size; num_bytes -= cur_alloc_size; alloc_hint = ins.objectid + ins.offset; start += cur_alloc_size; } - - ret = 0; out: + ret = 0; btrfs_end_transaction(trans, root); return ret; +} -free_pages_out_fail: - extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, - start, end, locked_page, 0, 0, 0); -free_pages_out: - for (i = 0; i < nr_pages_ret; i++) { - WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); +/* + * work queue call back to start compression on a file's pages + */ +static noinline void async_cow_start(struct btrfs_work *work) +{ + struct async_cow *async_cow; + int num_added = 0; + async_cow = container_of(work, struct async_cow, work); + + compress_file_range(async_cow->inode, async_cow->locked_page, + async_cow->start, async_cow->end, async_cow, + &num_added); + if (num_added == 0) + async_cow->inode = NULL; +} + +/* + * work queue call back to submit previously compressed pages + */ +static noinline void async_cow_submit(struct btrfs_work *work) +{ + struct async_cow *async_cow; + struct btrfs_root *root; + unsigned long nr_pages; + + async_cow = container_of(work, struct async_cow, work); + + root = async_cow->root; + nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + + atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); + + if (atomic_read(&root->fs_info->async_delalloc_pages) < + 5 * 1024 * 1024 && + waitqueue_active(&root->fs_info->async_submit_wait)) + wake_up(&root->fs_info->async_submit_wait); + + if (async_cow->inode) { + submit_compressed_extents(async_cow->inode, async_cow); } - if (pages) - kfree(pages); +} - goto out; +static noinline void async_cow_free(struct btrfs_work *work) +{ + struct async_cow *async_cow; + async_cow = container_of(work, struct async_cow, work); + kfree(async_cow); +} + +static int cow_file_range_async(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written) +{ + struct async_cow *async_cow; + struct btrfs_root *root = BTRFS_I(inode)->root; + unsigned long nr_pages; + u64 cur_end; + int limit = 10 * 1024 * 1024; + + if (!btrfs_test_opt(root, COMPRESS)) { + return cow_file_range(inode, locked_page, start, end, + page_started, nr_written, 1); + } + + clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | + EXTENT_DELALLOC, 1, 0, GFP_NOFS); + while(start < end) { +
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); + async_cow->inode = inode; + async_cow->root = root; + async_cow->locked_page = locked_page; + async_cow->start = start; + + if (btrfs_test_flag(inode, NOCOMPRESS)) + cur_end = end; + else + cur_end = min(end, start + 512 * 1024 - 1); + + async_cow->end = cur_end; + INIT_LIST_HEAD(&async_cow->extents); + + async_cow->work.func = async_cow_start; + async_cow->work.ordered_func = async_cow_submit; + async_cow->work.ordered_free = async_cow_free; + async_cow->work.flags = 0; + + while(atomic_read(&root->fs_info->async_submit_draining) && + atomic_read(&root->fs_info->async_delalloc_pages)) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) + == 0)); + } + + nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> + PAGE_CACHE_SHIFT; + atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); + + btrfs_queue_worker(&root->fs_info->delalloc_workers, + &async_cow->work); + + if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) < + limit)); + } + + while(atomic_read(&root->fs_info->async_submit_draining) && + atomic_read(&root->fs_info->async_delalloc_pages)) { + wait_event(root->fs_info->async_submit_wait, + (atomic_read(&root->fs_info->async_delalloc_pages) == + 0)); + } + + *nr_written += nr_pages; + start = cur_end + 1; + } + *page_started = 1; + return 0; } /* @@ -592,7 +923,8 @@ free_pages_out: * blocks on disk */ static int run_delalloc_nocow(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, int force) + u64 start, u64 end, int *page_started, int force, + unsigned long *nr_written) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -711,7 +1043,8 @@ out_check: btrfs_release_path(root, path); if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, - found_key.offset - 1, page_started); + found_key.offset - 1, page_started, + nr_written, 1); BUG_ON(ret); cow_start = (u64)-1; } @@ -748,9 +1081,10 @@ out_check: ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, num_bytes, num_bytes, type); BUG_ON(ret); + extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree, cur_offset, cur_offset + num_bytes - 1, - locked_page, 0, 0, 0); + locked_page, 1, 1, 1, 0, 0, 0); cur_offset = extent_end; if (cur_offset > end) break; @@ -761,7 +1095,7 @@ out_check: cow_start = cur_offset; if (cow_start != (u64)-1) { ret = cow_file_range(inode, locked_page, cow_start, end, - page_started); + page_started, nr_written, 1); BUG_ON(ret); } @@ -775,7 +1109,8 @@ out_check: * extent_io.c call back to do delayed allocation processing */ static int run_delalloc_range(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started) + u64 start, u64 end, int *page_started, + unsigned long *nr_written) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret; @@ -783,13 +1118,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, if (btrfs_test_opt(root, NODATACOW) || btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 0); + page_started, 0, nr_written); else if (btrfs_test_flag(inode, PREALLOC)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 1); + page_started, 1, nr_written); else - ret = cow_file_range(inode, locked_page, start, end, - page_started); + ret = 
cow_file_range_async(inode, locked_page, start, end, + page_started, nr_written); return ret; } @@ -861,6 +1196,9 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, u64 map_length; int ret; + if (bio_flags & EXTENT_BIO_COMPRESSED) + return 0; + length = bio->bi_size; map_tree = &root->fs_info->mapping_tree; map_length = length; @@ -925,12 +1263,12 @@ int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, btrfs_test_flag(inode, NODATASUM); if (!(rw & (1 << BIO_RW))) { - if (!skip_sum) - btrfs_lookup_bio_sums(root, inode, bio); if (bio_flags & EXTENT_BIO_COMPRESSED) return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); + else if (!skip_sum) + btrfs_lookup_bio_sums(root, inode, bio); goto mapit; } else if (!skip_sum) { /* we're doing a write, do the async checksumming */ @@ -966,6 +1304,9 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) { + if ((end & (PAGE_CACHE_SIZE - 1)) == 0) { + WARN_ON(1); + } return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -2105,6 +2446,7 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, int pending_del_nr = 0; int pending_del_slot = 0; int extent_type = -1; + int encoding; u64 mask = root->sectorsize - 1; if (root->ref_cows) @@ -2144,6 +2486,7 @@ search_again: leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); + encoding = 0; if (found_key.objectid != inode->i_ino) break; @@ -2156,6 +2499,10 @@ search_again: fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(leaf, fi); + encoding = btrfs_file_extent_compression(leaf, fi); + encoding |= btrfs_file_extent_encryption(leaf, fi); + encoding |= btrfs_file_extent_other_encoding(leaf, fi); + if (extent_type != BTRFS_FILE_EXTENT_INLINE) { item_end += btrfs_file_extent_num_bytes(leaf, fi); @@ -2200,7 +2547,7 @@ search_again: if (extent_type != BTRFS_FILE_EXTENT_INLINE) { u64 num_dec; extent_start = btrfs_file_extent_disk_bytenr(leaf, fi); - if (!del_item) { + if (!del_item && !encoding) { u64 orig_num_bytes = btrfs_file_extent_num_bytes(leaf, fi); extent_num_bytes = new_size - @@ -2436,7 +2783,14 @@ int btrfs_cont_expand(struct inode *inode, loff_t size) last_byte = min(extent_map_end(em), block_end); last_byte = (last_byte + mask) & ~mask; if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { + u64 hint_byte = 0; hole_size = last_byte - cur_offset; + err = btrfs_drop_extents(trans, root, inode, + cur_offset, + cur_offset + hole_size, + cur_offset, &hint_byte); + if (err) + break; err = btrfs_insert_file_extent(trans, root, inode->i_ino, cur_offset, 0, 0, hole_size, 0, hole_size, @@ -3785,6 +4139,7 @@ int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct extent_io_tree *tree; + tree = &BTRFS_I(mapping->host)->io_tree; return extent_writepages(tree, mapping, btrfs_get_extent, wbc); } @@ -4285,9 +4640,11 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) * ordered extents get created before we return */ atomic_inc(&root->fs_info->async_submit_draining); - while(atomic_read(&root->fs_info->nr_async_submits)) { + while(atomic_read(&root->fs_info->nr_async_submits) || + atomic_read(&root->fs_info->async_delalloc_pages)) { wait_event(root->fs_info->async_submit_wait, - (atomic_read(&root->fs_info->nr_async_submits) == 0)); + 
(atomic_read(&root->fs_info->nr_async_submits) == 0 && + atomic_read(&root->fs_info->async_delalloc_pages) == 0)); } atomic_dec(&root->fs_info->async_submit_draining); return 0; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 370bb428559..027ad6b3839 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -390,7 +390,7 @@ void btrfs_start_ordered_extent(struct inode *inode, * start IO on any dirty ones so the wait doesn't stall waiting * for pdflush to find them */ - btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE); + btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL); if (wait) { wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags)); @@ -421,6 +421,12 @@ again: */ btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE); + /* The compression code will leave pages locked but return from + * writepage without setting the page writeback. Starting again + * with WB_SYNC_ALL will end up waiting for the IO to actually start. + */ + btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL); + btrfs_wait_on_page_writeback_range(inode->i_mapping, start >> PAGE_CACHE_SHIFT, orig_end >> PAGE_CACHE_SHIFT); @@ -448,10 +454,7 @@ again: } if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end, EXTENT_ORDERED | EXTENT_DELALLOC, 0)) { - printk("inode %lu still ordered or delalloc after wait " - "%llu %llu\n", inode->i_ino, - (unsigned long long)start, - (unsigned long long)orig_end); + schedule_timeout(1); goto again; } return 0; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 431fdf144b5..ab9d5e89ed1 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -375,6 +375,10 @@ int btrfs_sync_fs(struct super_block *sb, int wait) filemap_flush(root->fs_info->btree_inode->i_mapping); return 0; } + + btrfs_start_delalloc_inodes(root); + btrfs_wait_ordered_extents(root, 0); + btrfs_clean_old_snapshots(root); trans = btrfs_start_transaction(root, 1); ret = btrfs_commit_transaction(trans, root); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index e99309180a1..ba2527d0873 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -423,8 +423,9 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, /* we didn't make progress in this inflate * call, we're done */ - if (ret != Z_STREAM_END) + if (ret != Z_STREAM_END) { ret = -1; + } break; } -- cgit v1.2.3-70-g09d2 From 3b7885bf96e03271a9fff41124c38ed7176616e2 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 6 Nov 2008 21:48:27 -0500 Subject: Btrfs: enforce metadata allocation clustering The allocator uses the last allocation as a starting point for metadata allocations, and tries to allocate in clusters of at least 256k. If the search for a free block fails to find the expected block, this patch forces a new cluster to be found in the free list. 
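The retry itself is a small piece of bookkeeping around the free-space search. A minimal userspace sketch of the idea (last_ptr, search_start and empty_cluster follow the hunk below; everything else is invented for illustration and is not the kernel code):

    #include <stdint.h>

    typedef uint64_t u64;

    /* If the search drifted away from the last-allocation hint, demand
     * a whole fresh cluster: grow the size requested from the free list
     * and clear the stale hint so the caller restarts its search. */
    static int force_new_cluster(u64 *last_ptr, u64 search_start,
                                 u64 *total_needed, u64 empty_cluster)
    {
        if (last_ptr && *last_ptr && search_start != *last_ptr) {
            *total_needed += empty_cluster; /* at least 256k for metadata */
            *last_ptr = 0;
            return 1;   /* re-search this block group or move to the next */
        }
        return 0;       /* hint still matches, keep the current search */
    }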
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 17 +++++++++++++++++ fs/btrfs/extent_io.c | 7 ------- 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ebd8275a193..b8c6541c33f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2195,6 +2195,23 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans, if (search_start + num_bytes > end) goto new_group; + if (last_ptr && *last_ptr && search_start != *last_ptr) { + total_needed += empty_cluster; + *last_ptr = 0; + /* + * if search_start is still in this block group + * then we just re-search this block group + */ + if (search_start >= start && + search_start < end) { + mutex_unlock(&block_group->alloc_mutex); + continue; + } + + /* else we go to the next block group */ + goto new_group; + } + if (exclude_nr > 0 && (search_start + num_bytes > exclude_start && search_start < exclude_start + exclude_nr)) { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index bbe3bcfcf4a..ad75a9cc360 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2494,13 +2494,6 @@ retry: index = 0; goto retry; } - if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) - mapping->writeback_index = index; - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) - range_whole = 1; - - if (wbc->range_cont) - wbc->range_start = index << PAGE_CACHE_SHIFT; return ret; } EXPORT_SYMBOL(extent_write_cache_pages); -- cgit v1.2.3-70-g09d2 From f2b1c41cf94d7f839fe9ede5f3ead92698a93fb3 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 10 Nov 2008 07:31:30 -0500 Subject: Btrfs: Make sure pages are dirty before doing delalloc for them This adds a PageDirty check to the writeback path that locks pages for delalloc. If a page wasn't dirty at this point, it is in the process of being truncated away. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ad75a9cc360..69ea09659d4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1205,7 +1205,8 @@ static noinline int lock_delalloc_pages(struct inode *inode, */ if (pages[i] != locked_page) { lock_page(pages[i]); - if (pages[i]->mapping != inode->i_mapping) { + if (!PageDirty(pages[i]) || + pages[i]->mapping != inode->i_mapping) { ret = -EAGAIN; unlock_page(pages[i]); page_cache_release(pages[i]); -- cgit v1.2.3-70-g09d2 From 39be25cd89450940b0e5f8a6aad71d1ec99b17bf Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 10 Nov 2008 11:50:50 -0500 Subject: Btrfs: Use invalidatepage when writepage finds a page outside of i_size With all the recent fixes to the delalloc locking, it is now safe again to use invalidatepage inside the writepage code for pages outside of i_size. This used to deadlock against some of the code to write locked ranges of pages, but all of that has been fixed. 
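The test for such a page is cheap. A userspace model of the end-of-file check (PAGE_SHIFT is pinned to 12 here purely for illustration; the kernel works in PAGE_CACHE_SIZE units):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1ULL << PAGE_SHIFT)

    /* A page is entirely outside i_size when its index lies past the
     * last page of the file, or when it is the last page but i_size
     * ends exactly on the preceding page boundary (no partial tail). */
    static bool page_past_eof(uint64_t page_index, uint64_t i_size)
    {
        uint64_t end_index = i_size >> PAGE_SHIFT;
        uint64_t pg_offset = i_size & (PAGE_SIZE - 1);

        return page_index > end_index ||
               (page_index == end_index && pg_offset == 0);
    }

A page that passes this test carries no data worth writing, so the hunk below invalidates and unlocks it instead of queuing IO.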
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 6 +----- fs/btrfs/file-item.c | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 69ea09659d4..af2d9a9300a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2174,11 +2174,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, pg_offset = i_size & (PAGE_CACHE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - if (epd->extent_locked) { - if (tree->ops && tree->ops->writepage_end_io_hook) - tree->ops->writepage_end_io_hook(page, start, - page_end, NULL, 1); - } + page->mapping->a_ops->invalidatepage(page, 0); unlock_page(page); return 0; } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f4d3fa71bc4..f7637883140 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -185,6 +185,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, "%llu\n", inode->i_ino, (unsigned long long)offset); item = NULL; + btrfs_release_path(root, path); goto found; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, -- cgit v1.2.3-70-g09d2 From b47eda8690a10f4fc01eb7b795078fa3fa57149f Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 10 Nov 2008 12:34:40 -0500 Subject: Btrfs: Turn off extent state leak debugging The extent_io.c code has a #define to find and cleanup extent state leaks on module unmount. This adds a very highly contended spinlock to a hot path for most FS operations. Turn it off by default. A later changeset will add a .config option for it. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index af2d9a9300a..5cc0082379c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -30,7 +30,7 @@ static struct kmem_cache *extent_buffer_cache; static LIST_HEAD(buffers); static LIST_HEAD(states); -#define LEAK_DEBUG 1 +#define LEAK_DEBUG 0 #ifdef LEAK_DEBUG static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED; #endif -- cgit v1.2.3-70-g09d2 From 5b050f04c8ce911c5b6831305a24d70eab95e732 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 11 Nov 2008 09:34:41 -0500 Subject: Btrfs: Fix compile warnings on 32 bit machines Simple casting here and there to fix things up. 
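The pattern behind most of these warnings: on 32-bit ABIs, unsigned long and size_t can be distinct types, and the kernel's type-checking min() complains when they are mixed, so min_t() names one common type explicitly. A userspace model of the macro (GNU C statement expressions assumed; this is a sketch, not the kernel's exact definition):

    #include <stddef.h>

    /* Cast both operands to one explicitly chosen type before
     * comparing, so mixed unsigned long / size_t arguments are fine
     * on 32-bit as well as 64-bit builds. */
    #define min_t(type, x, y) ({ type _x = (x); type _y = (y); \
                                 _x < _y ? _x : _y; })

    int main(void)
    {
        unsigned long nr_pages = 40;
        size_t batch = 16;          /* stands in for ARRAY_SIZE(pages) */
        unsigned long n = min_t(unsigned long, nr_pages, batch);
        return n == 16 ? 0 : 1;     /* exits 0: picked the smaller value */
    }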
Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 3 ++- fs/btrfs/extent_io.c | 11 +++++++---- fs/btrfs/inode.c | 4 ++-- fs/btrfs/zlib.c | 2 +- 4 files changed, 12 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b582c6fd80f..bfd1512cce0 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -174,7 +174,8 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start, while(nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, - min(nr_pages, ARRAY_SIZE(pages)), pages); + min_t(unsigned long, + nr_pages, ARRAY_SIZE(pages)), pages); if (ret == 0) { nr_pages -= 1; index += 1; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5cc0082379c..54d013c3bb8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1157,7 +1157,8 @@ static noinline int __unlock_for_delalloc(struct inode *inode, while(nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, - min(nr_pages, ARRAY_SIZE(pages)), pages); + min_t(unsigned long, nr_pages, + ARRAY_SIZE(pages)), pages); for (i = 0; i < ret; i++) { if (pages[i] != locked_page) unlock_page(pages[i]); @@ -1192,7 +1193,8 @@ static noinline int lock_delalloc_pages(struct inode *inode, nrpages = end_index - index + 1; while(nrpages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, - min(nrpages, ARRAY_SIZE(pages)), pages); + min_t(unsigned long, + nrpages, ARRAY_SIZE(pages)), pages); if (ret == 0) { ret = -EAGAIN; goto done; @@ -1346,7 +1348,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, while(nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, - min(nr_pages, ARRAY_SIZE(pages)), pages); + min_t(unsigned long, + nr_pages, ARRAY_SIZE(pages)), pages); for (i = 0; i < ret; i++) { if (pages[i] == locked_page) { page_cache_release(pages[i]); @@ -1896,7 +1899,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, int contig = 0; int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; - size_t page_size = min(size, PAGE_CACHE_SIZE); + size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); if (bio_ret && *bio_ret) { bio = *bio_ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5ca9c067237..2ed2deacde9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -184,7 +184,7 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, int i = 0; while(compressed_size > 0) { cpage = compressed_pages[i]; - cur_size = min(compressed_size, + cur_size = min_t(unsigned long, compressed_size, PAGE_CACHE_SIZE); kaddr = kmap(cpage); @@ -3812,7 +3812,7 @@ static noinline int uncompress_inline(struct btrfs_path *path, read_extent_buffer(leaf, tmp, ptr, inline_size); - max_size = min(PAGE_CACHE_SIZE, max_size); + max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); ret = btrfs_zlib_decompress(tmp, page, extent_offset, inline_size, max_size); if (ret) { diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index ba2527d0873..5b9f7002513 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -370,7 +370,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, data_in = kmap(pages_in[page_in_index]); workspace->inf_strm.next_in = data_in; - workspace->inf_strm.avail_in = min(srclen, PAGE_CACHE_SIZE); + workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); workspace->inf_strm.total_in = 0; workspace->inf_strm.total_out = 0; -- cgit v1.2.3-70-g09d2 From 
d2c3f4f695edac4d75c1b3eb01a1d16072de63bb Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 19 Nov 2008 12:44:22 -0500 Subject: Btrfs: Avoid writeback stalls While building large bios in writepages, btrfs may end up waiting for other page writeback to finish if WB_SYNC_ALL is used. While it is waiting, the bio it is building has a number of pages with the writeback bit set and they aren't getting to the disk any time soon. This lowers the latencies of writeback in general by sending down the bio being built before waiting for other pages. The bio submission code tries to limit the total number of async bios in flight by waiting when we're over a certain number of async bios. But, the waits are happening while writepages is building bios, and this can easily lead to stalls and other problems for people calling wait_on_page_writeback. The current fix is to let the congestion tests take care of waiting. sync() and others drain the current async requests to make sure that everything that was pending when the sync was started really gets to disk. The code would drain pending requests both before and after submitting a new request. But, if one of the requests is waiting for page writeback to finish, the draining waits might block that page writeback. This changes the draining code to only wait after submitting the bio being processed. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 12 +++--------- fs/btrfs/extent_io.c | 24 ++++++++++++++++++++---- fs/btrfs/inode.c | 7 ------- 3 files changed, 23 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0e8d31274c9..8d03e4a3c4e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -538,15 +538,9 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, async->work.flags = 0; async->bio_flags = bio_flags; - while(atomic_read(&fs_info->async_submit_draining) && - atomic_read(&fs_info->nr_async_submits)) { - wait_event(fs_info->async_submit_wait, - (atomic_read(&fs_info->nr_async_submits) == 0)); - } - atomic_inc(&fs_info->nr_async_submits); btrfs_queue_worker(&fs_info->workers, &async->work); - +#if 0 if (atomic_read(&fs_info->nr_async_submits) > limit) { wait_event_timeout(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) < limit), @@ -556,7 +550,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, (atomic_read(&fs_info->nr_async_bios) < limit), HZ/10); } - +#endif while(atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { wait_event(fs_info->async_submit_wait, @@ -1765,11 +1759,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_cleanup_reloc_trees(tree_root); BUG_ON(ret); +read_fs_root: location.objectid = BTRFS_FS_TREE_OBJECTID; location.type = BTRFS_ROOT_ITEM_KEY; location.offset = (u64)-1; -read_fs_root: fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); if (!fs_info->fs_root) goto fail_cleaner; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 54d013c3bb8..a0f3804efe4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2398,7 +2398,8 @@ update_nr_written: int extent_write_cache_pages(struct extent_io_tree *tree, struct address_space *mapping, struct writeback_control *wbc, - writepage_t writepage, void *data) + writepage_t writepage, void *data, + void (*flush_fn)(void *)) { struct backing_dev_info *bdi = mapping->backing_dev_info; int ret = 0; @@ -2460,8 +2461,10 @@ retry: continue; }
- if (wbc->sync_mode != WB_SYNC_NONE) + if (wbc->sync_mode != WB_SYNC_NONE) { + flush_fn(data); wait_on_page_writeback(page); + } if (PageWriteback(page) || !clear_page_dirty_for_io(page)) { @@ -2498,6 +2501,15 @@ retry: } EXPORT_SYMBOL(extent_write_cache_pages); +static noinline void flush_write_bio(void *data) +{ + struct extent_page_data *epd = data; + if (epd->bio) { + submit_one_bio(WRITE, epd->bio, 0, 0); + epd->bio = NULL; + } +} + int extent_write_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, struct writeback_control *wbc) @@ -2523,7 +2535,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_writepage(page, wbc, &epd); extent_write_cache_pages(tree, mapping, &wbc_writepages, - __extent_writepage, &epd); + __extent_writepage, &epd, flush_write_bio); if (epd.bio) { submit_one_bio(WRITE, epd.bio, 0, 0); } @@ -2592,7 +2604,8 @@ int extent_writepages(struct extent_io_tree *tree, }; ret = extent_write_cache_pages(tree, mapping, wbc, - __extent_writepage, &epd); + __extent_writepage, &epd, + flush_write_bio); if (epd.bio) { submit_one_bio(WRITE, epd.bio, 0, 0); } @@ -3087,6 +3100,9 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); + if (!set && !PageDirty(page)) + continue; + lock_page(page); if (i == 0) set_page_extent_head(page, eb->len); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6854bf41856..806caacff86 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -883,13 +883,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_cow->work.ordered_free = async_cow_free; async_cow->work.flags = 0; - while(atomic_read(&root->fs_info->async_submit_draining) && - atomic_read(&root->fs_info->async_delalloc_pages)) { - wait_event(root->fs_info->async_submit_wait, - (atomic_read(&root->fs_info->async_delalloc_pages) - == 0)); - } - nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT; atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); -- cgit v1.2.3-70-g09d2 From 15916de835a683bd8133a0d1ac0c982b795ab4ff Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 19 Nov 2008 21:17:22 -0500 Subject: Btrfs: Fixes for 2.6.28-rc API changes * open/close_bdev_excl -> open/close_bdev_exclusive * blkdev_issue_discard takes a GFP mask now * Fix blkdev_issue_discard usage now that it is enabled Signed-off-by: Chris Mason --- fs/btrfs/compat.h | 7 +++++++ fs/btrfs/compression.c | 4 ++-- fs/btrfs/extent-tree.c | 27 ++++++++++++++++++++------- fs/btrfs/extent_io.c | 4 ++-- fs/btrfs/version.sh | 34 +++++++++++++++++----------------- fs/btrfs/volumes.c | 41 +++++++++++++++++++++++------------------ fs/btrfs/volumes.h | 3 +++ 7 files changed, 74 insertions(+), 46 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h index cd6598b169d..dd1defdbfa3 100644 --- a/fs/btrfs/compat.h +++ b/fs/btrfs/compat.h @@ -21,4 +21,11 @@ static inline struct dentry *d_obtain_alias(struct inode *inode) } #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) +# define __pagevec_lru_add_file __pagevec_lru_add +# define open_bdev_exclusive open_bdev_excl +# define close_bdev_exclusive(bdev, mode) close_bdev_excl(bdev) +#endif + + #endif /* _COMPAT_H_ */ diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index bfd1512cce0..df05f513e1e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -419,7 +419,7 @@ static noinline int 
add_ra_bio_pages(struct inode *inode, /* open coding of lru_cache_add, also not exported */ page_cache_get(page); if (!pagevec_add(&pvec, page)) - __pagevec_lru_add(&pvec); + __pagevec_lru_add_file(&pvec); end = last_offset + PAGE_CACHE_SIZE - 1; /* @@ -475,7 +475,7 @@ next: last_offset += PAGE_CACHE_SIZE; } if (pagevec_count(&pvec)) - __pagevec_lru_add(&pvec); + __pagevec_lru_add_file(&pvec); return 0; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ee73efe7542..62d49705d14 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -28,6 +28,7 @@ #include "volumes.h" #include "locking.h" #include "ref-cache.h" +#include "compat.h" #define PENDING_EXTENT_INSERT 0 #define PENDING_EXTENT_DELETE 1 @@ -899,6 +900,17 @@ static int noinline remove_extent_backref(struct btrfs_trans_handle *trans, return ret; } +static void btrfs_issue_discard(struct block_device *bdev, + u64 start, u64 len) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) + blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL); +#else + blkdev_issue_discard(bdev, start >> 9, len >> 9); +#endif +} + + static int noinline free_extents(struct btrfs_trans_handle *trans, struct btrfs_root *extent_root, struct list_head *del_list) @@ -1108,6 +1120,7 @@ search: BUG_ON(ret); #ifdef BIO_RW_DISCARD + map_length = tmp->num_bytes; ret = btrfs_map_block(&info->mapping_tree, READ, tmp->bytenr, &map_length, &multi, 0); @@ -1115,16 +1128,16 @@ search: struct btrfs_bio_stripe *stripe; int i; - stripe = multi->stripe; + stripe = multi->stripes; if (map_length > tmp->num_bytes) map_length = tmp->num_bytes; for (i = 0; i < multi->num_stripes; i++, stripe++) - blkdev_issue_discard(stripe->dev->bdev, - stripe->physical >> 9, - map_length >> 9); + btrfs_issue_discard(stripe->dev->bdev, + stripe->physical, + map_length); kfree(multi); } #endif @@ -2498,9 +2511,9 @@ static int __free_extent(struct btrfs_trans_handle *trans, map_length = num_bytes; for (i = 0; i < multi->num_stripes; i++, stripe++) { - blkdev_issue_discard(stripe->dev->bdev, - stripe->physical >> 9, - map_length >> 9); + btrfs_issue_discard(stripe->dev->bdev, + stripe->physical, + map_length); } kfree(multi); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a0f3804efe4..3a65c10dce3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2639,14 +2639,14 @@ int extent_readpages(struct extent_io_tree *tree, /* open coding of lru_cache_add, also not exported */ page_cache_get(page); if (!pagevec_add(&pvec, page)) - __pagevec_lru_add(&pvec); + __pagevec_lru_add_file(&pvec); __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); } page_cache_release(page); } if (pagevec_count(&pvec)) - __pagevec_lru_add(&pvec); + __pagevec_lru_add_file(&pvec); BUG_ON(!list_empty(pages)); if (bio) submit_one_bio(READ, bio, 0, bio_flags); diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh index 0f57f24404d..1ca1952fd91 100644 --- a/fs/btrfs/version.sh +++ b/fs/btrfs/version.sh @@ -8,24 +8,24 @@ v="v0.16" -which hg > /dev/null -if [ -d .hg ] && [ $? == 0 ]; then - last=$(hg tags | grep -m1 -o '^v[0-9.]\+') - - # now check if the repo has commits since then... - if [[ $(hg id -t) == $last || \ - $(hg di -r "$last:." | awk '/^diff/{print $NF}' | sort -u) == .hgtags ]] - then - # check if it's dirty - if [[ $(hg id | cut -d' ' -f1) == *+ ]]; then - v=$last+ - else - v=$last +which git &> /dev/null +if [ $? == 0 ]; then + git branch >& /dev/null + if [ $? 
== 0 ]; then + if head=`git rev-parse --verify HEAD 2>/dev/null`; then + if tag=`git describe --tags 2>/dev/null`; then + v="$tag" + fi + + # Are there uncommitted changes? + git update-index --refresh --unmerged > /dev/null + if git diff-index --name-only HEAD | \ + grep -v "^scripts/package" \ + | read dummy; then + v="$v"-dirty + fi fi - else - # includes dirty flag - v=$last+$(hg id -i) - fi + fi fi echo "#ifndef __BUILD_VERSION" > .build-version.h diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ecf0633ab8c..c3ee63f92a5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -85,7 +85,7 @@ int btrfs_cleanup_fs_uuids(void) dev = list_entry(fs_devices->devices.next, struct btrfs_device, dev_list); if (dev->bdev) { - close_bdev_excl(dev->bdev); + close_bdev_exclusive(dev->bdev, dev->mode); fs_devices->open_devices--; } fs_devices->num_devices--; @@ -317,7 +317,7 @@ again: continue; if (device->bdev) { - close_bdev_excl(device->bdev); + close_bdev_exclusive(device->bdev, device->mode); device->bdev = NULL; fs_devices->open_devices--; } @@ -356,7 +356,7 @@ again: list_for_each(cur, &fs_devices->devices) { device = list_entry(cur, struct btrfs_device, dev_list); if (device->bdev) { - close_bdev_excl(device->bdev); + close_bdev_exclusive(device->bdev, device->mode); fs_devices->open_devices--; } if (device->writeable) { @@ -391,7 +391,8 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) return ret; } -int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, void *holder) +int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, + int flags, void *holder) { struct block_device *bdev; struct list_head *head = &fs_devices->devices; @@ -413,7 +414,7 @@ int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, void *holder) if (!device->name) continue; - bdev = open_bdev_excl(device->name, MS_RDONLY, holder); + bdev = open_bdev_exclusive(device->name, flags, holder); if (IS_ERR(bdev)) { printk("open %s failed\n", device->name); goto error; @@ -453,6 +454,8 @@ int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, void *holder) device->bdev = bdev; device->in_fs_metadata = 0; + device->mode = flags; + fs_devices->open_devices++; if (device->writeable) { fs_devices->rw_devices++; @@ -464,7 +467,7 @@ int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, void *holder) error_brelse: brelse(bh); error_close: - close_bdev_excl(bdev); + close_bdev_exclusive(bdev, MS_RDONLY); error: continue; } @@ -496,7 +499,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, ret = 0; } } else { - ret = __btrfs_open_devices(fs_devices, holder); + ret = __btrfs_open_devices(fs_devices, flags, holder); } mutex_unlock(&uuid_mutex); return ret; @@ -514,7 +517,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder, mutex_lock(&uuid_mutex); - bdev = open_bdev_excl(path, flags, holder); + bdev = open_bdev_exclusive(path, flags, holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); @@ -551,7 +554,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder, error_brelse: brelse(bh); error_close: - close_bdev_excl(bdev); + close_bdev_exclusive(bdev, flags); error: mutex_unlock(&uuid_mutex); return ret; @@ -1003,7 +1006,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto out; } } else { - bdev = open_bdev_excl(device_path, MS_RDONLY, + bdev = open_bdev_exclusive(device_path, MS_RDONLY, root->fs_info->bdev_holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); @@ -1073,10 +1076,10 @@ int btrfs_rm_device(struct btrfs_root 
*root, char *device_path) BUG_ON(device->writeable); brelse(bh); if (bdev) - close_bdev_excl(bdev); + close_bdev_exclusive(bdev, MS_RDONLY); if (device->bdev) { - close_bdev_excl(device->bdev); + close_bdev_exclusive(device->bdev, device->mode); device->bdev = NULL; device->fs_devices->open_devices--; } @@ -1112,11 +1115,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (device->bdev) { /* one close for the device struct or super_block */ - close_bdev_excl(device->bdev); + close_bdev_exclusive(device->bdev, device->mode); } if (bdev) { /* one close for us */ - close_bdev_excl(bdev); + close_bdev_exclusive(bdev, MS_RDONLY); } kfree(device->name); kfree(device); @@ -1127,7 +1130,7 @@ error_brelse: brelse(bh); error_close: if (bdev) - close_bdev_excl(bdev); + close_bdev_exclusive(bdev, MS_RDONLY); out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); @@ -1272,7 +1275,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) return -EINVAL; - bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder); + bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); if (!bdev) { return -EIO; } @@ -1331,6 +1334,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; + device->mode = 0; set_blocksize(device->bdev, 4096); if (seeding_dev) { @@ -1379,7 +1383,7 @@ out: mutex_unlock(&root->fs_info->volume_mutex); return ret; error: - close_bdev_excl(bdev); + close_bdev_exclusive(bdev, 0); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); @@ -2907,7 +2911,8 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid) goto out; } - ret = __btrfs_open_devices(fs_devices, root->fs_info->bdev_holder); + ret = __btrfs_open_devices(fs_devices, MS_RDONLY, + root->fs_info->bdev_holder); if (ret) goto out; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 1f6f25a5787..9b41e4d3984 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -42,6 +42,9 @@ struct btrfs_device { struct block_device *bdev; + /* the mode sent to open_bdev_exclusive */ + fmode_t mode; + char *name; /* the internal btrfs device id */ -- cgit v1.2.3-70-g09d2 From 0e6bd956ed238eb2f69386f251847fe3163532e1 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 20 Nov 2008 10:46:35 -0500 Subject: Btrfs: only flush down bios for writeback pages The btrfs write_cache_pages call has a flush function so that it submits the bio it has been building before it waits on any writeback pages. This adds a check so that flush only happens on writeback pages. 
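The rule is easy to model outside the kernel: in data-integrity mode, submit the partially built bio before waiting on a page, but only when that page is actually under writeback, because an idle page cannot be waiting on our own unsubmitted bio. A simplified sketch (every type and helper here is invented for illustration):

    #include <stdbool.h>

    struct page_state { bool writeback; };

    /* Stand-ins for submitting the bio being built (flush_fn in the
     * patch) and for wait_on_page_writeback(). */
    static void flush_pending_bio(void) { }
    static void wait_for_writeback(struct page_state *p) { p->writeback = false; }

    static void sync_page(struct page_state *p, bool sync_all)
    {
        if (sync_all) {
            if (p->writeback)           /* the check this patch adds */
                flush_pending_bio();
            wait_for_writeback(p);
        }
    }

    int main(void)
    {
        struct page_state p = { .writeback = true };
        sync_page(&p, true);
        return p.writeback ? 1 : 0;
    }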
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3a65c10dce3..87dba851793 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2462,7 +2462,8 @@ retry: } if (wbc->sync_mode != WB_SYNC_NONE) { - flush_fn(data); + if (PageWriteback(page)) + flush_fn(data); wait_on_page_writeback(page); } -- cgit v1.2.3-70-g09d2 From 641f5219f2ea0dfd1c024c91021ddc83f50bf1ce Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 2 Dec 2008 06:36:10 -0500 Subject: Btrfs: sparse lock verification annotations for wait_on_state Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 87dba851793..d79ccdbfdd9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -577,6 +577,8 @@ EXPORT_SYMBOL(clear_extent_bit); static int wait_on_state(struct extent_io_tree *tree, struct extent_state *state) + __releases(tree->lock) + __acquires(tree->lock) { DEFINE_WAIT(wait); prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); -- cgit v1.2.3-70-g09d2 From b2950863c61bc24cf0f63bc05947d9d50663c4c0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 2 Dec 2008 09:54:17 -0500 Subject: Btrfs: make things static and include the right headers Shut up various sparse warnings about symbols that should be either static or have their declarations in scope. Signed-off-by: Christoph Hellwig --- fs/btrfs/ctree.c | 2 +- fs/btrfs/disk-io.c | 16 ++++++++-------- fs/btrfs/extent-tree.c | 12 ++++++------ fs/btrfs/extent_io.c | 35 +++++++++++++++-------------------- fs/btrfs/free-space-cache.c | 6 ++++-- fs/btrfs/inode-item.c | 2 +- fs/btrfs/inode.c | 26 +++++++++++++------------- fs/btrfs/ioctl.c | 14 +++++++------- fs/btrfs/root-tree.c | 2 ++ fs/btrfs/super.c | 2 +- fs/btrfs/tree-log.c | 5 +++-- fs/btrfs/volumes.c | 12 ++++++------ fs/btrfs/zlib.c | 1 + 13 files changed, 68 insertions(+), 67 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 71ef0a2e2da..a83cbdf1d8c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -217,7 +217,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, * this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent * is used to finish the allocation. */ -int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, +static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fb04665e500..8a2bcc7024f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -93,9 +93,9 @@ struct async_submit_bio { * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ -struct extent_map *btree_get_extent(struct inode *inode, struct page *page, - size_t page_offset, u64 start, u64 len, - int create) +static struct extent_map *btree_get_extent(struct inode *inode, + struct page *page, size_t page_offset, u64 start, u64 len, + int create) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_map *em; @@ -295,7 +295,7 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror * checksum a dirty tree block before IO. 
This has extra checks to make * sure we only fill in the checksum field in the first page of a multi-page block */ -int csum_dirty_buffer(struct btrfs_root *root, struct page *page) +static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; u64 start = (u64)page->index << PAGE_CACHE_SHIFT; @@ -365,7 +365,7 @@ static int check_tree_block_fsid(struct btrfs_root *root, return ret; } -int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, +static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { struct extent_io_tree *tree; @@ -660,7 +660,7 @@ static int btree_writepages(struct address_space *mapping, return extent_writepages(tree, mapping, btree_get_extent, wbc); } -int btree_readpage(struct file *file, struct page *page) +static int btree_readpage(struct file *file, struct page *page) { struct extent_io_tree *tree; tree = &BTRFS_I(page->mapping->host)->io_tree; @@ -1200,7 +1200,7 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) } } -void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) +static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) { struct inode *inode; struct extent_map_tree *em_tree; @@ -1842,7 +1842,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) put_bh(bh); } -int write_all_supers(struct btrfs_root *root) +static int write_all_supers(struct btrfs_root *root) { struct list_head *cur; struct list_head *head = &root->fs_info->fs_devices->devices; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a970472eab1..d1563852938 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -74,7 +74,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) * this adds the block group to the fs_info rb tree for the block group * cache */ -int btrfs_add_block_group_cache(struct btrfs_fs_info *info, +static int btrfs_add_block_group_cache(struct btrfs_fs_info *info, struct btrfs_block_group_cache *block_group) { struct rb_node **p; @@ -289,7 +289,7 @@ err: /* * return the block group that starts at or after bytenr */ -struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct +static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr) { @@ -3445,7 +3445,7 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans, return 0; } -int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len, +static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len, u32 *refs) { int ret; @@ -5434,7 +5434,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) return flags; } -int __alloc_chunk_for_shrink(struct btrfs_root *root, +static int __alloc_chunk_for_shrink(struct btrfs_root *root, struct btrfs_block_group_cache *shrink_block_group, int force) { @@ -5703,8 +5703,8 @@ out: return ret; } -int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path, - struct btrfs_key *key) +static int find_first_block_group(struct btrfs_root *root, + struct btrfs_path *path, struct btrfs_key *key) { int ret = 0; struct btrfs_key found_key; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d79ccdbfdd9..c3dfe2a0ec8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -112,7 +112,7 @@ void extent_io_tree_init(struct extent_io_tree *tree, } EXPORT_SYMBOL(extent_io_tree_init); -struct 
extent_state *alloc_extent_state(gfp_t mask) +static struct extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; #ifdef LEAK_DEBUG @@ -136,7 +136,7 @@ struct extent_state *alloc_extent_state(gfp_t mask) } EXPORT_SYMBOL(alloc_extent_state); -void free_extent_state(struct extent_state *state) +static void free_extent_state(struct extent_state *state) { if (!state) return; @@ -662,7 +662,7 @@ static void set_state_bits(struct extent_io_tree *tree, * [start, end] is inclusive * This takes the tree lock. */ -int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, +static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int exclusive, u64 *failed_start, gfp_t mask) { struct extent_state *state; @@ -879,12 +879,11 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(set_extent_new); -int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, +static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask); } -EXPORT_SYMBOL(clear_extent_new); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) @@ -894,27 +893,24 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, } EXPORT_SYMBOL(set_extent_uptodate); -int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, +static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask); } -EXPORT_SYMBOL(clear_extent_uptodate); -int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, +static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return set_extent_bit(tree, start, end, EXTENT_WRITEBACK, 0, NULL, mask); } -EXPORT_SYMBOL(set_extent_writeback); -int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, +static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask); } -EXPORT_SYMBOL(clear_extent_writeback); int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) { @@ -994,7 +990,7 @@ EXPORT_SYMBOL(set_range_dirty); /* * helper function to set both pages and extents in the tree writeback */ -int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) +static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) { unsigned long index = start >> PAGE_CACHE_SHIFT; unsigned long end_index = end >> PAGE_CACHE_SHIFT; @@ -1010,7 +1006,6 @@ int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) set_extent_writeback(tree, start, end, GFP_NOFS); return 0; } -EXPORT_SYMBOL(set_range_writeback); /* * find the first offset in the io tree with 'bits' set. zero is @@ -1432,11 +1427,13 @@ out: spin_unlock_irq(&tree->lock); return total_bytes; } + +#if 0 /* * helper function to lock both pages and extents in the tree. * pages must be locked first. */ -int lock_range(struct extent_io_tree *tree, u64 start, u64 end) +static int lock_range(struct extent_io_tree *tree, u64 start, u64 end) { unsigned long index = start >> PAGE_CACHE_SHIFT; unsigned long end_index = end >> PAGE_CACHE_SHIFT; @@ -1473,12 +1470,11 @@ failed: } return err; } -EXPORT_SYMBOL(lock_range); /* * helper function to unlock both pages and extents in the tree. 
*/ -int unlock_range(struct extent_io_tree *tree, u64 start, u64 end) +static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end) { unsigned long index = start >> PAGE_CACHE_SHIFT; unsigned long end_index = end >> PAGE_CACHE_SHIFT; @@ -1493,7 +1489,7 @@ int unlock_range(struct extent_io_tree *tree, u64 start, u64 end) unlock_extent(tree, start, end, GFP_NOFS); return 0; } -EXPORT_SYMBOL(unlock_range); +#endif /* * set the private field for a given byte offset in the tree. If there isn't @@ -1956,7 +1952,7 @@ void set_page_extent_mapped(struct page *page) } EXPORT_SYMBOL(set_page_extent_mapped); -void set_page_extent_head(struct page *page, unsigned long len) +static void set_page_extent_head(struct page *page, unsigned long len) { set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); } @@ -2397,7 +2393,7 @@ update_nr_written: * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. */ -int extent_write_cache_pages(struct extent_io_tree *tree, +static int extent_write_cache_pages(struct extent_io_tree *tree, struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data, @@ -2502,7 +2498,6 @@ retry: } return ret; } -EXPORT_SYMBOL(extent_write_cache_pages); static noinline void flush_write_bio(void *data) { diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f4926c0f3c8..09462adfbe3 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -443,7 +443,8 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) mutex_unlock(&block_group->alloc_mutex); } -struct btrfs_free_space *btrfs_find_free_space_offset(struct +#if 0 +static struct btrfs_free_space *btrfs_find_free_space_offset(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) @@ -458,7 +459,7 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct return ret; } -struct btrfs_free_space *btrfs_find_free_space_bytes(struct +static struct btrfs_free_space *btrfs_find_free_space_bytes(struct btrfs_block_group_cache *block_group, u64 offset, u64 bytes) @@ -472,6 +473,7 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct return ret; } +#endif struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache *block_group, u64 offset, diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index d93451c66ba..3d46fa1f29a 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -20,7 +20,7 @@ #include "disk-io.h" #include "transaction.h" -int find_name_in_backref(struct btrfs_path *path, const char * name, +static int find_name_in_backref(struct btrfs_path *path, const char *name, int name_len, struct btrfs_inode_ref **ref_ret) { struct extent_buffer *leaf; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b3d4078b69a..bd58ba655a4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1130,7 +1130,7 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, * bytes in this file, and to maintain the list of inodes that * have pending delalloc work to be done. 
*/ -int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, +static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { unsigned long flags; @@ -1151,7 +1151,7 @@ int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end, /* * extent_io.c clear_bit_hook, see set_bit_hook for why */ -int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, +static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, unsigned long old, unsigned long bits) { if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { @@ -1215,7 +1215,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, * At IO completion time the cums attached on the ordered extent record * are inserted into the btree */ -int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, +static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -1234,7 +1234,7 @@ int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, * At IO completion time the cums attached on the ordered extent record * are inserted into the btree */ -int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, +static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -1245,7 +1245,7 @@ int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, * extent_io.c submission hook. This does the right thing for csum calculation on write, * or reading the csums from the tree before a read */ -int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, +static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; @@ -1313,7 +1313,7 @@ struct btrfs_writepage_fixup { struct btrfs_work work; }; -void btrfs_writepage_fixup_worker(struct btrfs_work *work) +static void btrfs_writepage_fixup_worker(struct btrfs_work *work) { struct btrfs_writepage_fixup *fixup; struct btrfs_ordered_extent *ordered; @@ -1372,7 +1372,7 @@ out_page: * to fix it up. The async helper will wait for ordered extents, set * the delalloc bit and make it safe to write the page. 
*/ -int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) +static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) { struct inode *inode = page->mapping->host; struct btrfs_writepage_fixup *fixup; @@ -1526,7 +1526,7 @@ nocow: return 0; } -int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, +static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { return btrfs_finish_ordered_io(page->mapping->host, start, end); @@ -1548,7 +1548,7 @@ struct io_failure_record { int last_mirror; }; -int btrfs_io_failed_hook(struct bio *failed_bio, +static int btrfs_io_failed_hook(struct bio *failed_bio, struct page *page, u64 start, u64 end, struct extent_state *state) { @@ -1642,7 +1642,7 @@ int btrfs_io_failed_hook(struct bio *failed_bio, * each time an IO finishes, we do a fast check in the IO failure tree * to see if we need to process or clean up an io_failure_record */ -int btrfs_clean_io_failures(struct inode *inode, u64 start) +static int btrfs_clean_io_failures(struct inode *inode, u64 start) { u64 private; u64 private_failure; @@ -1675,7 +1675,7 @@ int btrfs_clean_io_failures(struct inode *inode, u64 start) * if there's a match, we allow the bio to finish. If not, we go through * the io_failure_record routines to find good copies */ -int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, +static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state) { size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); @@ -4362,8 +4362,8 @@ out: * Invalidate a single dcache entry at the root of the filesystem. * Needed after creation of snapshot or subvolume. */ -void btrfs_invalidate_dcache_root(struct inode *dir, char *name, - int namelen) +static void btrfs_invalidate_dcache_root(struct inode *dir, + char *name, int namelen) { struct dentry *alias, *entry; struct qstr qstr; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 35f650e183e..cc7c5161e26 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -354,7 +354,7 @@ out_unlock: } -int btrfs_defrag_file(struct file *file) +static int btrfs_defrag_file(struct file *file) { struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -649,7 +649,7 @@ static int btrfs_ioctl_defrag(struct file *file) return 0; } -long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg) +static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg) { struct btrfs_ioctl_vol_args *vol_args; int ret; @@ -671,7 +671,7 @@ out: return ret; } -long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) +static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) { struct btrfs_ioctl_vol_args *vol_args; int ret; @@ -696,8 +696,8 @@ out: return ret; } -long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, u64 off, - u64 olen, u64 destoff) +static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, + u64 off, u64 olen, u64 destoff) { struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -1035,7 +1035,7 @@ out_fput: return ret; } -long btrfs_ioctl_clone_range(struct file *file, unsigned long argptr) +static long btrfs_ioctl_clone_range(struct file *file, unsigned long argptr) { struct btrfs_ioctl_clone_range_args args; @@ -1051,7 +1051,7 @@ long btrfs_ioctl_clone_range(struct file *file, unsigned long argptr) * basically own the machine, and have a very in 
depth understanding * of all the possible deadlocks and enospc problems. */ -long btrfs_ioctl_trans_start(struct file *file) +static long btrfs_ioctl_trans_start(struct file *file) { struct inode *inode = fdentry(file)->d_inode; struct btrfs_root *root = BTRFS_I(inode)->root; diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index dbe20d4c6ea..f99335a999d 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -276,6 +276,7 @@ out: return ret; } +#if 0 /* this will get used when snapshot deletion is implemented */ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, struct btrfs_root *tree_root, u64 root_id, u8 type, u64 ref_id) @@ -299,6 +300,7 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, btrfs_free_path(path); return ret; } +#endif int btrfs_find_root_ref(struct btrfs_root *tree_root, struct btrfs_path *path, diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 1975ea273dc..93a21c77064 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -647,7 +647,7 @@ static int btrfs_interface_init(void) return misc_register(&btrfs_misc); } -void btrfs_interface_exit(void) +static void btrfs_interface_exit(void) { if (misc_deregister(&btrfs_misc) < 0) printk("misc_deregister failed for control device"); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index be4fc30a30e..4fcfc8b1189 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -23,6 +23,7 @@ #include "locking.h" #include "print-tree.h" #include "compat.h" +#include "tree-log.h" /* magic values for the inode_only field in btrfs_log_inode: * @@ -78,7 +79,7 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans, * tree of log tree roots. This must be called with a tree log transaction * running (see start_log_trans). */ -int btrfs_add_log_tree(struct btrfs_trans_handle *trans, +static int btrfs_add_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_key key; @@ -1934,7 +1935,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, return ret; } -int wait_log_commit(struct btrfs_root *log) +static int wait_log_commit(struct btrfs_root *log) { DEFINE_WAIT(wait); u64 transid = log->fs_info->tree_log_transid; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 724ead54529..769f2c5d9e9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -238,7 +238,7 @@ done: return 0; } -void pending_bios_fn(struct btrfs_work *work) +static void pending_bios_fn(struct btrfs_work *work) { struct btrfs_device *device; @@ -686,7 +686,7 @@ error: return ret; } -int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, +static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 start) { @@ -1393,7 +1393,7 @@ error: goto out; } -int noinline btrfs_update_device(struct btrfs_trans_handle *trans, +static int noinline btrfs_update_device(struct btrfs_trans_handle *trans, struct btrfs_device *device) { int ret; @@ -1497,7 +1497,7 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, return 0; } -int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 +static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 chunk_offset) { struct btrfs_super_block *super_copy = &root->fs_info->super_copy; @@ -1543,7 +1543,7 @@ int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 return ret; } -int btrfs_relocate_chunk(struct btrfs_root *root, +static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset) { @@ -1884,7 
+1884,7 @@ done: return ret; } -int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, +static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_key *key, struct btrfs_chunk *chunk, int item_size) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 5b9f7002513..c4617cde6c7 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -30,6 +30,7 @@ #include #include #include +#include "compression.h" /* Plan: call deflate() with avail_in == *sourcelen, avail_out = *dstlen - 12 and flush == Z_FINISH.
-- cgit v1.2.3-70-g09d2

From d20f7043fa65659136c1a7c3c456eeeb5c6f431f Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Mon, 8 Dec 2008 16:58:54 -0500
Subject: Btrfs: move data checksumming into a dedicated tree

Btrfs stores checksums for each data block. Until now, they have been
stored in the subvolume trees, indexed by the inode that is referencing
the data block. This means that when we read the inode, we've probably
read in at least some checksums as well. But, this has a few problems:

* The checksums are indexed by logical offset in the file. When
compression is on, this means we have to do the expensive checksumming
on the uncompressed data. It would be faster if we could checksum the
compressed data instead.

* If we implement encryption, we'll be checksumming the plain text and
storing that on disk. This is significantly less secure.

* For either compression or encryption, we have to get the plain text
back before we can verify the checksum as correct. This makes the raid
layer balancing and extent moving much more expensive.

* It makes the front end caching code more complex, as we have to touch
the subvolume and inodes as we cache extents.

* There is potentially one copy of the checksum in each subvolume
referencing an extent.

The solution used here is to store the extent checksums in a dedicated
tree. This allows us to index the checksums by physical extent start and
length. It means:

* The checksum is against the data stored on disk, after any compression
or encryption is done.

* The checksum is stored in a central location, and can be verified
without following back references, or reading inodes. This makes
compression significantly faster by reducing the amount of data that
needs to be checksummed. It will also allow much faster raid management
code in general.

The checksums are indexed by a key with a fixed objectid (a magic value
in ctree.h) and offset set to the starting byte of the extent. This
allows us to copy the checksum items into the fsync log tree directly
(or any other tree), without having to invent a second format for them.
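To make the new indexing concrete, here is a minimal, hypothetical sketch of
how a lookup key for a checksum item is built under this scheme. The two
constants mirror BTRFS_EXTENT_CSUM_OBJECTID and BTRFS_EXTENT_CSUM_KEY from the
patch below; the struct and helper are simplified stand-ins for illustration,
not code from the patch.

    /*
     * Sketch only: a checksum item is addressed purely by where the data
     * lives on disk. Every csum item shares one magic objectid, so the
     * key's offset (the extent's starting disk bytenr) does all the work.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define EXTENT_CSUM_OBJECTID ((uint64_t)-10) /* BTRFS_EXTENT_CSUM_OBJECTID */
    #define EXTENT_CSUM_KEY 128                  /* BTRFS_EXTENT_CSUM_KEY */

    struct csum_key {            /* simplified stand-in for struct btrfs_key */
            uint64_t objectid;
            uint8_t type;
            uint64_t offset;
    };

    static void csum_key_for_bytenr(struct csum_key *key, uint64_t disk_bytenr)
    {
            key->objectid = EXTENT_CSUM_OBJECTID; /* fixed for all csum items */
            key->type = EXTENT_CSUM_KEY;
            key->offset = disk_bytenr;            /* start byte on disk */
    }

    int main(void)
    {
            struct csum_key key;

            csum_key_for_bytenr(&key, 12582912); /* any extent start bytenr */
            printf("objectid=%llu type=%u offset=%llu\n",
                   (unsigned long long)key.objectid, key.type,
                   (unsigned long long)key.offset);
            return 0;
    }

Because the key no longer mentions an inode, the same item can be copied
verbatim into the fsync log tree, which is what the tree-log changes below
rely on.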
Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 124 ++++++++++++++++++++++++++++---- fs/btrfs/ctree.h | 30 +++++--- fs/btrfs/disk-io.c | 45 ++++++++++-- fs/btrfs/extent_io.c | 5 ++ fs/btrfs/file-item.c | 185 ++++++++++++++++++------------------------------ fs/btrfs/inode.c | 45 ++++++------ fs/btrfs/ioctl.c | 55 +------------- fs/btrfs/ordered-data.c | 7 +- fs/btrfs/ordered-data.h | 10 +-- fs/btrfs/tree-log.c | 121 +++++++++++++++++++++++++++---- fs/btrfs/volumes.c | 1 + 11 files changed, 387 insertions(+), 241 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 4febe2eb0b8..ad727413730 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -69,11 +69,27 @@ struct compressed_bio { /* IO errors */ int errors; + int mirror_num; /* for reads, this is the bio we are copying the data into */ struct bio *orig_bio; + + /* + * the start of a variable length array of checksums only + * used by reads + */ + u32 sums; }; +static inline int compressed_bio_size(struct btrfs_root *root, + unsigned long disk_size) +{ + u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); + return sizeof(struct compressed_bio) + + ((disk_size + root->sectorsize - 1) / root->sectorsize) * + csum_size; +} + static struct bio *compressed_bio_alloc(struct block_device *bdev, u64 first_byte, gfp_t gfp_flags) { @@ -96,6 +112,47 @@ static struct bio *compressed_bio_alloc(struct block_device *bdev, return bio; } +static int check_compressed_csum(struct inode *inode, + struct compressed_bio *cb, + u64 disk_start) +{ + int ret; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct page *page; + unsigned long i; + char *kaddr; + u32 csum; + u32 *cb_sum = &cb->sums; + + if (btrfs_test_opt(root, NODATASUM) || + btrfs_test_flag(inode, NODATASUM)) + return 0; + + for (i = 0; i < cb->nr_pages; i++) { + page = cb->compressed_pages[i]; + csum = ~(u32)0; + + kaddr = kmap_atomic(page, KM_USER0); + csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE); + btrfs_csum_final(csum, (char *)&csum); + kunmap_atomic(kaddr, KM_USER0); + + if (csum != *cb_sum) { + printk("btrfs csum failed ino %lu extent %llu csum %u " + "wanted %u mirror %d\n", inode->i_ino, + (unsigned long long)disk_start, + csum, *cb_sum, cb->mirror_num); + ret = -EIO; + goto fail; + } + cb_sum++; + + } + ret = 0; +fail: + return ret; +} + /* when we finish reading compressed pages from the disk, we * decompress them and then run the bio end_io routines on the * decompressed pages (in the inode address space). @@ -124,16 +181,21 @@ static void end_compressed_bio_read(struct bio *bio, int err) if (!atomic_dec_and_test(&cb->pending_bios)) goto out; + inode = cb->inode; + ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); + if (ret) + goto csum_failed; + /* ok, we're the last bio for this extent, lets start * the decompression. 
*/ - inode = cb->inode; tree = &BTRFS_I(inode)->io_tree; ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, cb->start, cb->orig_bio->bi_io_vec, cb->orig_bio->bi_vcnt, cb->compressed_len); +csum_failed: if (ret) cb->errors = 1; @@ -148,8 +210,21 @@ static void end_compressed_bio_read(struct bio *bio, int err) /* do io completion on the original bio */ if (cb->errors) { bio_io_error(cb->orig_bio); - } else + } else { + int bio_index = 0; + struct bio_vec *bvec = cb->orig_bio->bi_io_vec; + + /* + * we have verified the checksum already, set page + * checked so the end_io handlers know about it + */ + while(bio_index < cb->orig_bio->bi_vcnt) { + SetPageChecked(bvec->bv_page); + bvec++; + bio_index++; + } bio_endio(cb->orig_bio, 0); + } /* finally free the cb struct */ kfree(cb->compressed_pages); @@ -277,12 +352,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, int ret; WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); - cb = kmalloc(sizeof(*cb), GFP_NOFS); + cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; cb->start = start; cb->len = len; + cb->mirror_num = 0; cb->compressed_pages = compressed_pages; cb->compressed_len = compressed_len; cb->orig_bio = NULL; @@ -290,9 +366,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; - ret = btrfs_csum_file_bytes(root, inode, start, len); - BUG_ON(ret); - bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; @@ -325,6 +398,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); + ret = btrfs_csum_one_bio(root, inode, bio, start, 1); + BUG_ON(ret); + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); BUG_ON(ret); @@ -348,6 +424,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); + ret = btrfs_csum_one_bio(root, inode, bio, start, 1); + BUG_ON(ret); + ret = btrfs_map_bio(root, WRITE, bio, 0, 1); BUG_ON(ret); @@ -510,6 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_start; struct extent_map *em; int ret; + u32 *sums; tree = &BTRFS_I(inode)->io_tree; em_tree = &BTRFS_I(inode)->extent_tree; @@ -521,15 +601,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, PAGE_CACHE_SIZE); spin_unlock(&em_tree->lock); - cb = kmalloc(sizeof(*cb), GFP_NOFS); + compressed_len = em->block_len; + cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; + cb->mirror_num = mirror_num; + sums = &cb->sums; cb->start = em->orig_start; - compressed_len = em->block_len; em_len = em->len; em_start = em->start; + free_extent_map(em); em = NULL; @@ -551,11 +634,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, add_ra_bio_pages(inode, em_start + em_len, cb); - if (!btrfs_test_opt(root, NODATASUM) && - !btrfs_test_flag(inode, NODATASUM)) { - btrfs_lookup_bio_sums(root, inode, cb->orig_bio); - } - /* include any pages we added in add_ra-bio_pages */ uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; cb->len = uncompressed_len; @@ -568,6 +646,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, for (page_index = 0; page_index < nr_pages; page_index++) { page = cb->compressed_pages[page_index]; 
page->mapping = inode->i_mapping; + page->index = em_start >> PAGE_CACHE_SHIFT; + if (comp_bio->bi_size) ret = tree->ops->merge_bio_hook(page, 0, PAGE_CACHE_SIZE, @@ -591,7 +671,16 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, */ atomic_inc(&cb->pending_bios); - ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); + if (!btrfs_test_opt(root, NODATASUM) && + !btrfs_test_flag(inode, NODATASUM)) { + btrfs_lookup_bio_sums(root, inode, comp_bio, + sums); + } + sums += (comp_bio->bi_size + root->sectorsize - 1) / + root->sectorsize; + + ret = btrfs_map_bio(root, READ, comp_bio, + mirror_num, 0); BUG_ON(ret); bio_put(comp_bio); @@ -610,7 +699,12 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); - ret = btrfs_map_bio(root, READ, comp_bio, 0, 0); + if (!btrfs_test_opt(root, NODATASUM) && + !btrfs_test_flag(inode, NODATASUM)) { + btrfs_lookup_bio_sums(root, inode, comp_bio, sums); + } + + ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); BUG_ON(ret); bio_put(comp_bio); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 96f2ec7ad5b..242b961ae6d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -73,6 +73,9 @@ struct btrfs_ordered_sum; /* directory objectid inside the root tree */ #define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL +/* holds checksums of all the data extents */ +#define BTRFS_CSUM_TREE_OBJECTID 7ULL + /* orhpan objectid for tracking unlinked/truncated files */ #define BTRFS_ORPHAN_OBJECTID -5ULL @@ -84,6 +87,13 @@ struct btrfs_ordered_sum; #define BTRFS_TREE_RELOC_OBJECTID -8ULL #define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL +/* + * extent checksums all have this objectid + * this allows them to share the logging tree + * for fsyncs + */ +#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL + /* dummy objectid represents multiple objectids */ #define BTRFS_MULTIPLE_OBJECTIDS -255ULL @@ -634,6 +644,7 @@ struct btrfs_fs_info { struct btrfs_root *chunk_root; struct btrfs_root *dev_root; struct btrfs_root *fs_root; + struct btrfs_root *csum_root; /* the log root tree is a directory of all the other log roots */ struct btrfs_root *log_root_tree; @@ -716,6 +727,7 @@ struct btrfs_fs_info { struct btrfs_workers workers; struct btrfs_workers delalloc_workers; struct btrfs_workers endio_workers; + struct btrfs_workers endio_meta_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; /* @@ -858,13 +870,12 @@ struct btrfs_root { * extent data is for file data */ #define BTRFS_EXTENT_DATA_KEY 108 + /* - * csum items have the checksums for data in the extents + * extent csums are stored in a separate tree and hold csums for + * an entire extent on disk. */ -#define BTRFS_CSUM_ITEM_KEY 120 - - -/* reserve 21-31 for other file/dir stuff */ +#define BTRFS_EXTENT_CSUM_KEY 128 /* * root items point to tree roots. 
There are typically in the root @@ -1917,7 +1928,7 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root /* file-item.c */ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, - struct bio *bio); + struct bio *bio, u32 *dst); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, @@ -1929,17 +1940,16 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path, u64 objectid, u64 bytenr, int mod); int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_ordered_sum *sums); int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, - struct bio *bio); + struct bio *bio, u64 file_start, int contig); int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, u64 start, unsigned long len); struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, - int cow); + u64 bytenr, int cow); int btrfs_csum_truncate(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 isize); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3eb7c2576fe..61dc3b2c834 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -445,11 +445,18 @@ static void end_workqueue_bio(struct bio *bio, int err) end_io_wq->error = err; end_io_wq->work.func = end_workqueue_fn; end_io_wq->work.flags = 0; - if (bio->bi_rw & (1 << BIO_RW)) + + if (bio->bi_rw & (1 << BIO_RW)) { btrfs_queue_worker(&fs_info->endio_write_workers, &end_io_wq->work); - else - btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work); + } else { + if (end_io_wq->metadata) + btrfs_queue_worker(&fs_info->endio_meta_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_workers, + &end_io_wq->work); + } } int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, @@ -1208,6 +1215,9 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) info = (struct btrfs_fs_info *)bdi->unplug_io_data; list_for_each(cur, &info->fs_devices->devices) { device = list_entry(cur, struct btrfs_device, dev_list); + if (!device->bdev) + continue; + bdi = blk_get_backing_dev_info(device->bdev); if (bdi->unplug_io_fn) { bdi->unplug_io_fn(bdi, page); @@ -1344,7 +1354,7 @@ static void end_workqueue_fn(struct btrfs_work *work) * blocksize <= pagesize, it is basically a noop */ if (end_io_wq->metadata && !bio_ready_for_csum(bio)) { - btrfs_queue_worker(&fs_info->endio_workers, + btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); return; } @@ -1454,6 +1464,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct buffer_head *bh; struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); + struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), + GFP_NOFS); struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info), @@ -1470,7 +1482,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, struct btrfs_super_block *disk_super; if (!extent_root || !tree_root || !fs_info || - !chunk_root || !dev_root) { + !chunk_root || !dev_root || !csum_root) { err = -ENOMEM; goto fail; } @@ -1487,6 +1499,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; 
fs_info->extent_root = extent_root; + fs_info->csum_root = csum_root; fs_info->chunk_root = chunk_root; fs_info->dev_root = dev_root; fs_info->fs_devices = fs_devices; @@ -1652,6 +1665,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1); btrfs_init_workers(&fs_info->endio_workers, "endio", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", + fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size); @@ -1667,6 +1682,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->delalloc_workers, 1); btrfs_start_workers(&fs_info->fixup_workers, 1); btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_meta_workers, + fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, fs_info->thread_pool_size); @@ -1751,6 +1768,13 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (ret) goto fail_extent_root; + ret = find_and_setup_root(tree_root, fs_info, + BTRFS_CSUM_TREE_OBJECTID, csum_root); + if (ret) + goto fail_extent_root; + + csum_root->track_dirty = 1; + btrfs_read_block_groups(extent_root); fs_info->generation = generation + 1; @@ -1761,7 +1785,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (!fs_info->cleaner_kthread) - goto fail_extent_root; + goto fail_csum_root; fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, @@ -1825,6 +1849,8 @@ fail_cleaner: filemap_write_and_wait(fs_info->btree_inode->i_mapping); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); +fail_csum_root: + free_extent_buffer(csum_root->node); fail_extent_root: free_extent_buffer(extent_root->node); fail_tree_root: @@ -1838,6 +1864,7 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: @@ -1853,6 +1880,7 @@ fail: kfree(fs_info); kfree(chunk_root); kfree(dev_root); + kfree(csum_root); return ERR_PTR(err); } @@ -2131,6 +2159,9 @@ int close_ctree(struct btrfs_root *root) if (root->fs_info->dev_root->node); free_extent_buffer(root->fs_info->dev_root->node); + if (root->fs_info->csum_root->node); + free_extent_buffer(root->fs_info->csum_root->node); + btrfs_free_block_groups(root->fs_info); del_fs_roots(fs_info); @@ -2141,6 +2172,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->delalloc_workers); btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); + btrfs_stop_workers(&fs_info->endio_meta_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); @@ -2163,6 +2195,7 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->tree_root); kfree(fs_info->chunk_root); kfree(fs_info->dev_root); + kfree(fs_info->csum_root); return 0; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c3dfe2a0ec8..7449ecf32c5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1732,6 +1732,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err) int whole_page; int ret; + if (err) + uptodate = 0; + do { struct page *page = bvec->bv_page; tree = 
&BTRFS_I(page->mapping->host)->io_tree; @@ -1761,6 +1764,8 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (ret == 0) { uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + if (err) + uptodate = 0; continue; } } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 234ed441736..a3ad2ce0011 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -74,8 +74,7 @@ out: struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, - u64 objectid, u64 offset, - int cow) + u64 bytenr, int cow) { int ret; struct btrfs_key file_key; @@ -87,9 +86,9 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, btrfs_super_csum_size(&root->fs_info->super_copy); int csums_in_item; - file_key.objectid = objectid; - file_key.offset = offset; - btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY); + file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; + file_key.offset = bytenr; + btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow); if (ret < 0) goto fail; @@ -100,11 +99,10 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, goto fail; path->slots[0]--; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (btrfs_key_type(&found_key) != BTRFS_CSUM_ITEM_KEY || - found_key.objectid != objectid) { + if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY) goto fail; - } - csum_offset = (offset - found_key.offset) >> + + csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); csums_in_item /= csum_size; @@ -143,7 +141,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, } int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, - struct bio *bio) + struct bio *bio, u32 *dst) { u32 sum; struct bio_vec *bvec = bio->bi_io_vec; @@ -151,6 +149,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, u64 offset; u64 item_start_offset = 0; u64 item_last_offset = 0; + u64 disk_bytenr; u32 diff; u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); @@ -165,21 +164,22 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, WARN_ON(bio->bi_vcnt <= 0); + disk_bytenr = (u64)bio->bi_sector << 9; while(bio_index < bio->bi_vcnt) { offset = page_offset(bvec->bv_page) + bvec->bv_offset; - ret = btrfs_find_ordered_sum(inode, offset, &sum); + ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum); if (ret == 0) goto found; - if (!item || offset < item_start_offset || - offset >= item_last_offset) { + if (!item || disk_bytenr < item_start_offset || + disk_bytenr >= item_last_offset) { struct btrfs_key found_key; u32 item_size; if (item) btrfs_release_path(root, path); - item = btrfs_lookup_csum(NULL, root, path, - inode->i_ino, offset, 0); + item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, + path, disk_bytenr, 0); if (IS_ERR(item)) { ret = PTR_ERR(item); if (ret == -ENOENT || ret == -EFBIG) @@ -208,7 +208,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, * this byte range must be able to fit inside * a single leaf so it will also fit inside a u32 */ - diff = offset - item_start_offset; + diff = disk_bytenr - item_start_offset; diff = diff / root->sectorsize; diff = diff * csum_size; @@ -216,7 +216,11 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, ((unsigned long)item) + diff, csum_size); 
found: - set_state_private(io_tree, offset, sum); + if (dst) + *dst++ = sum; + else + set_state_private(io_tree, offset, sum); + disk_bytenr += bvec->bv_len; bio_index++; bvec++; } @@ -224,75 +228,8 @@ found: return 0; } -int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, - u64 start, unsigned long len) -{ - struct btrfs_ordered_sum *sums; - struct btrfs_sector_sum *sector_sum; - struct btrfs_ordered_extent *ordered; - char *data; - struct page *page; - unsigned long total_bytes = 0; - unsigned long this_sum_bytes = 0; - - sums = kzalloc(btrfs_ordered_sum_size(root, len), GFP_NOFS); - if (!sums) - return -ENOMEM; - - sector_sum = sums->sums; - sums->file_offset = start; - sums->len = len; - INIT_LIST_HEAD(&sums->list); - ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset); - BUG_ON(!ordered); - - while(len > 0) { - if (start >= ordered->file_offset + ordered->len || - start < ordered->file_offset) { - sums->len = this_sum_bytes; - this_sum_bytes = 0; - btrfs_add_ordered_sum(inode, ordered, sums); - btrfs_put_ordered_extent(ordered); - - sums = kzalloc(btrfs_ordered_sum_size(root, len), - GFP_NOFS); - BUG_ON(!sums); - sector_sum = sums->sums; - sums->len = len; - sums->file_offset = start; - ordered = btrfs_lookup_ordered_extent(inode, - sums->file_offset); - BUG_ON(!ordered); - } - - page = find_get_page(inode->i_mapping, - start >> PAGE_CACHE_SHIFT); - - data = kmap_atomic(page, KM_USER0); - sector_sum->sum = ~(u32)0; - sector_sum->sum = btrfs_csum_data(root, data, sector_sum->sum, - PAGE_CACHE_SIZE); - kunmap_atomic(data, KM_USER0); - btrfs_csum_final(sector_sum->sum, - (char *)§or_sum->sum); - sector_sum->offset = page_offset(page); - page_cache_release(page); - - sector_sum++; - total_bytes += PAGE_CACHE_SIZE; - this_sum_bytes += PAGE_CACHE_SIZE; - start += PAGE_CACHE_SIZE; - - WARN_ON(len < PAGE_CACHE_SIZE); - len -= PAGE_CACHE_SIZE; - } - btrfs_add_ordered_sum(inode, ordered, sums); - btrfs_put_ordered_extent(ordered); - return 0; -} - int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, - struct bio *bio) + struct bio *bio, u64 file_start, int contig) { struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; @@ -303,6 +240,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, unsigned long total_bytes = 0; unsigned long this_sum_bytes = 0; u64 offset; + u64 disk_bytenr; WARN_ON(bio->bi_vcnt <= 0); sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); @@ -310,16 +248,25 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, return -ENOMEM; sector_sum = sums->sums; - sums->file_offset = page_offset(bvec->bv_page) + bvec->bv_offset; + disk_bytenr = (u64)bio->bi_sector << 9; sums->len = bio->bi_size; INIT_LIST_HEAD(&sums->list); - ordered = btrfs_lookup_ordered_extent(inode, sums->file_offset); + + if (contig) + offset = file_start; + else + offset = page_offset(bvec->bv_page) + bvec->bv_offset; + + ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); + sums->bytenr = ordered->start; while(bio_index < bio->bi_vcnt) { - offset = page_offset(bvec->bv_page) + bvec->bv_offset; - if (offset >= ordered->file_offset + ordered->len || - offset < ordered->file_offset) { + if (!contig) + offset = page_offset(bvec->bv_page) + bvec->bv_offset; + + if (!contig && (offset >= ordered->file_offset + ordered->len || + offset < ordered->file_offset)) { unsigned long bytes_left; sums->len = this_sum_bytes; this_sum_bytes = 0; @@ -333,10 +280,9 @@ int 
btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, BUG_ON(!sums); sector_sum = sums->sums; sums->len = bytes_left; - sums->file_offset = offset; - ordered = btrfs_lookup_ordered_extent(inode, - sums->file_offset); + ordered = btrfs_lookup_ordered_extent(inode, offset); BUG_ON(!ordered); + sums->bytenr = ordered->start; } data = kmap_atomic(bvec->bv_page, KM_USER0); @@ -348,13 +294,14 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, kunmap_atomic(data, KM_USER0); btrfs_csum_final(sector_sum->sum, (char *)§or_sum->sum); - sector_sum->offset = page_offset(bvec->bv_page) + - bvec->bv_offset; + sector_sum->bytenr = disk_bytenr; sector_sum++; bio_index++; total_bytes += bvec->bv_len; this_sum_bytes += bvec->bv_len; + disk_bytenr += bvec->bv_len; + offset += bvec->bv_len; bvec++; } this_sum_bytes = 0; @@ -364,11 +311,10 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, } int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, struct btrfs_ordered_sum *sums) { - u64 objectid = inode->i_ino; - u64 offset; + u64 bytenr; int ret; struct btrfs_key file_key; struct btrfs_key found_key; @@ -396,13 +342,12 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, again: next_offset = (u64)-1; found_next = 0; - offset = sector_sum->offset; - file_key.objectid = objectid; - file_key.offset = offset; - btrfs_set_key_type(&file_key, BTRFS_CSUM_ITEM_KEY); + file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; + file_key.offset = sector_sum->bytenr; + bytenr = sector_sum->bytenr; + btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY); - mutex_lock(&BTRFS_I(inode)->csum_mutex); - item = btrfs_lookup_csum(trans, root, path, objectid, offset, 1); + item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1); if (!IS_ERR(item)) { leaf = path->nodes[0]; ret = 0; @@ -432,8 +377,8 @@ again: slot = 0; } btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); - if (found_key.objectid != objectid || - found_key.type != BTRFS_CSUM_ITEM_KEY) { + if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || + found_key.type != BTRFS_EXTENT_CSUM_KEY) { found_next = 1; goto insert; } @@ -460,10 +405,10 @@ again: path->slots[0]--; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - csum_offset = (offset - found_key.offset) >> + csum_offset = (bytenr - found_key.offset) >> root->fs_info->sb->s_blocksize_bits; - if (btrfs_key_type(&found_key) != BTRFS_CSUM_ITEM_KEY || - found_key.objectid != objectid || + if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY || + found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) { goto insert; } @@ -482,8 +427,18 @@ insert: btrfs_release_path(root, path); csum_offset = 0; if (found_next) { - u64 tmp = min((u64)i_size_read(inode), next_offset); - tmp -= offset & ~((u64)root->sectorsize -1); + u64 tmp = total_bytes + root->sectorsize; + u64 next_sector = sector_sum->bytenr; + struct btrfs_sector_sum *next = sector_sum + 1; + + while(tmp < sums->len) { + if (next_sector + root->sectorsize != next->bytenr) + break; + tmp += root->sectorsize; + next_sector = next->bytenr; + next++; + } + tmp = min(tmp, next_offset - file_key.offset); tmp >>= root->fs_info->sb->s_blocksize_bits; tmp = max((u64)1, tmp); tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size)); @@ -510,7 +465,6 @@ found: item_end = (struct btrfs_csum_item *)((unsigned char *)item_end + btrfs_item_size_nr(leaf, 
path->slots[0])); eb_token = NULL; - mutex_unlock(&BTRFS_I(inode)->csum_mutex); cond_resched(); next_sector: @@ -541,9 +495,9 @@ next_sector: if (total_bytes < sums->len) { item = (struct btrfs_csum_item *)((char *)item + csum_size); - if (item < item_end && offset + PAGE_CACHE_SIZE == - sector_sum->offset) { - offset = sector_sum->offset; + if (item < item_end && bytenr + PAGE_CACHE_SIZE == + sector_sum->bytenr) { + bytenr = sector_sum->bytenr; goto next_sector; } } @@ -562,7 +516,6 @@ out: return ret; fail_unlock: - mutex_unlock(&BTRFS_I(inode)->csum_mutex); goto out; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 09efc9473a3..c03d847b8c4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1221,7 +1221,7 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; - ret = btrfs_csum_one_bio(root, inode, bio); + ret = btrfs_csum_one_bio(root, inode, bio, 0, 0); BUG_ON(ret); return 0; } @@ -1259,12 +1259,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, btrfs_test_flag(inode, NODATASUM); if (!(rw & (1 << BIO_RW))) { - - if (bio_flags & EXTENT_BIO_COMPRESSED) + if (bio_flags & EXTENT_BIO_COMPRESSED) { return btrfs_submit_compressed_read(inode, bio, mirror_num, bio_flags); - else if (!skip_sum) - btrfs_lookup_bio_sums(root, inode, bio); + } else if (!skip_sum) + btrfs_lookup_bio_sums(root, inode, bio, NULL); goto mapit; } else if (!skip_sum) { /* we're doing a write, do the async checksumming */ @@ -1292,8 +1291,8 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, btrfs_set_trans_block_group(trans, inode); list_for_each(cur, list) { sum = list_entry(cur, struct btrfs_ordered_sum, list); - btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root, - inode, sum); + btrfs_csum_file_blocks(trans, + BTRFS_I(inode)->root->fs_info->csum_root, sum); } return 0; } @@ -1545,6 +1544,7 @@ struct io_failure_record { u64 start; u64 len; u64 logical; + unsigned long bio_flags; int last_mirror; }; @@ -1563,7 +1563,6 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, int ret; int rw; u64 logical; - unsigned long bio_flags = 0; ret = get_state_private(failure_tree, start, &private); if (ret) { @@ -1573,6 +1572,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, failrec->start = start; failrec->len = end - start + 1; failrec->last_mirror = 0; + failrec->bio_flags = 0; spin_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, failrec->len); @@ -1588,8 +1588,10 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, } logical = start - em->start; logical = em->block_start + logical; - if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) - bio_flags = EXTENT_BIO_COMPRESSED; + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { + logical = em->block_start; + failrec->bio_flags = EXTENT_BIO_COMPRESSED; + } failrec->logical = logical; free_extent_map(em); set_extent_bits(failure_tree, start, end, EXTENT_LOCKED | @@ -1626,6 +1628,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, bio->bi_sector = failrec->logical >> 9; bio->bi_bdev = failed_bio->bi_bdev; bio->bi_size = 0; + bio_add_page(bio, page, failrec->len, start - page_offset(page)); if (failed_bio->bi_rw & (1 << BIO_RW)) rw = WRITE; @@ -1634,7 +1637,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio, failrec->last_mirror, - bio_flags); + failrec->bio_flags); return 0; } @@ -1688,9 +1691,14 @@ static int 
btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, u32 csum = ~(u32)0; unsigned long flags; + if (PageChecked(page)) { + ClearPageChecked(page); + goto good; + } if (btrfs_test_opt(root, NODATASUM) || btrfs_test_flag(inode, NODATASUM)) return 0; + if (state && state->start == start) { private = state->private; ret = 0; @@ -1709,7 +1717,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); - +good: /* if the io failure tree for this inode is non-empty, * check to see if we've recovered from a failed IO */ @@ -2243,6 +2251,7 @@ fail: return err; } +#if 0 /* * when truncating bytes in a file, it is possible to avoid reading * the leaves that contain only checksum items. This can be the @@ -2410,6 +2419,8 @@ out: return ret; } +#endif + /* * this can truncate away extent items, csum items and directory items. * It starts at a high offset and removes keys until it can't find @@ -2459,9 +2470,6 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, btrfs_init_path(path); - ret = drop_csum_leaves(trans, root, path, inode, new_size); - BUG_ON(ret); - search_again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) { @@ -2509,16 +2517,11 @@ search_again: } item_end--; } - if (found_type == BTRFS_CSUM_ITEM_KEY) { - ret = btrfs_csum_truncate(trans, root, path, - new_size); - BUG_ON(ret); - } if (item_end < new_size) { if (found_type == BTRFS_DIR_ITEM_KEY) { found_type = BTRFS_INODE_ITEM_KEY; } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { - found_type = BTRFS_CSUM_ITEM_KEY; + found_type = BTRFS_EXTENT_DATA_KEY; } else if (found_type == BTRFS_EXTENT_DATA_KEY) { found_type = BTRFS_XATTR_ITEM_KEY; } else if (found_type == BTRFS_XATTR_ITEM_KEY) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b4da53d55c8..6228b69c2b9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -714,8 +714,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, u64 len = olen; u64 bs = root->fs_info->sb->s_blocksize; u64 hint_byte; - u16 csum_size = - btrfs_super_csum_size(&root->fs_info->super_copy); + /* * TODO: * - split compressed inline extents. 
annoying: we need to @@ -833,7 +832,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); - if (btrfs_key_type(&key) > BTRFS_CSUM_ITEM_KEY || + if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || key.objectid != src->i_ino) break; @@ -958,56 +957,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_mark_buffer_dirty(leaf); } - if (btrfs_key_type(&key) == BTRFS_CSUM_ITEM_KEY) { - u32 size; - struct btrfs_key new_key; - u64 coverslen; - int coff, clen; - - size = btrfs_item_size_nr(leaf, slot); - coverslen = (size / csum_size) << - root->fs_info->sb->s_blocksize_bits; - printk("csums for %llu~%llu\n", - key.offset, coverslen); - if (key.offset + coverslen < off || - key.offset >= off+len) - goto next; - - read_extent_buffer(leaf, buf, - btrfs_item_ptr_offset(leaf, slot), - size); - btrfs_release_path(root, path); - - coff = 0; - if (off > key.offset) - coff = ((off - key.offset) >> - root->fs_info->sb->s_blocksize_bits) * - csum_size; - clen = size - coff; - if (key.offset + coverslen > off+len) - clen -= ((key.offset+coverslen-off-len) >> - root->fs_info->sb->s_blocksize_bits) * - csum_size; - printk(" will dup %d~%d of %d\n", - coff, clen, size); - - memcpy(&new_key, &key, sizeof(new_key)); - new_key.objectid = inode->i_ino; - new_key.offset = key.offset + destoff - off; - - ret = btrfs_insert_empty_item(trans, root, path, - &new_key, clen); - if (ret) - goto out; - - leaf = path->nodes[0]; - slot = path->slots[0]; - write_extent_buffer(leaf, buf + coff, - btrfs_item_ptr_offset(leaf, slot), - clen); - btrfs_mark_buffer_dirty(leaf); - } - next: btrfs_release_path(root, path); key.offset++; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 027ad6b3839..d9e232227da 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -610,7 +610,8 @@ out: * try to find a checksum. This is used because we allow pages to * be reclaimed before their checksum is actually put into the btree */ -int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) +int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, + u32 *sum) { struct btrfs_ordered_sum *ordered_sum; struct btrfs_sector_sum *sector_sums; @@ -629,11 +630,11 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum) mutex_lock(&tree->mutex); list_for_each_prev(cur, &ordered->list) { ordered_sum = list_entry(cur, struct btrfs_ordered_sum, list); - if (offset >= ordered_sum->file_offset) { + if (disk_bytenr >= ordered_sum->bytenr) { num_sectors = ordered_sum->len / sectorsize; sector_sums = ordered_sum->sums; for (i = 0; i < num_sectors; i++) { - if (sector_sums[i].offset == offset) { + if (sector_sums[i].bytenr == disk_bytenr) { *sum = sector_sums[i].sum; ret = 0; goto out; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 260bf95dfe0..ab66d5e8d6d 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -33,15 +33,17 @@ struct btrfs_ordered_inode_tree { * the ordered extent are on disk */ struct btrfs_sector_sum { - u64 offset; + /* bytenr on disk */ + u64 bytenr; u32 sum; }; struct btrfs_ordered_sum { - u64 file_offset; + /* bytenr is the start of this extent on disk */ + u64 bytenr; + /* * this is the length in bytes covered by the sums array below. - * But, the sums array may not be contiguous in the file. 
*/ unsigned long len; struct list_head list; @@ -147,7 +149,7 @@ struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); int btrfs_ordered_update_i_size(struct inode *inode, struct btrfs_ordered_extent *ordered); -int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u32 *sum); +int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum); int btrfs_wait_on_page_writeback_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c766649ad45..08469ec0585 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -934,24 +934,17 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, unsigned long file_bytes; struct btrfs_ordered_sum *sums; struct btrfs_sector_sum *sector_sum; - struct inode *inode; unsigned long ptr; file_bytes = (item_size / csum_size) * root->sectorsize; - inode = read_one_inode(root, key->objectid); - if (!inode) { - return -EIO; - } - sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS); if (!sums) { - iput(inode); return -ENOMEM; } INIT_LIST_HEAD(&sums->list); sums->len = file_bytes; - sums->file_offset = key->offset; + sums->bytenr = key->offset; /* * copy all the sums into the ordered sum struct @@ -960,7 +953,7 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, cur_offset = key->offset; ptr = btrfs_item_ptr_offset(eb, slot); while(item_size > 0) { - sector_sum->offset = cur_offset; + sector_sum->bytenr = cur_offset; read_extent_buffer(eb, §or_sum->sum, ptr, csum_size); sector_sum++; item_size -= csum_size; @@ -969,11 +962,9 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, } /* let btrfs_csum_file_blocks add them into the file */ - ret = btrfs_csum_file_blocks(trans, root, inode, sums); + ret = btrfs_csum_file_blocks(trans, root->fs_info->csum_root, sums); BUG_ON(ret); kfree(sums); - iput(inode); - return 0; } /* @@ -1670,7 +1661,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, ret = replay_one_extent(wc->trans, root, path, eb, i, &key); BUG_ON(ret); - } else if (key.type == BTRFS_CSUM_ITEM_KEY) { + } else if (key.type == BTRFS_EXTENT_CSUM_KEY) { ret = replay_one_csum(wc->trans, root, path, eb, i, &key); BUG_ON(ret); @@ -2466,6 +2457,85 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, return 0; } +static noinline int copy_extent_csums(struct btrfs_trans_handle *trans, + struct list_head *list, + struct btrfs_root *root, + u64 disk_bytenr, u64 len) +{ + struct btrfs_ordered_sum *sums; + struct btrfs_sector_sum *sector_sum; + int ret; + struct btrfs_path *path; + struct btrfs_csum_item *item = NULL; + u64 end = disk_bytenr + len; + u64 item_start_offset = 0; + u64 item_last_offset = 0; + u32 diff; + u32 sum; + u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy); + + sums = kzalloc(btrfs_ordered_sum_size(root, len), GFP_NOFS); + + sector_sum = sums->sums; + sums->bytenr = disk_bytenr; + sums->len = len; + list_add_tail(&sums->list, list); + + path = btrfs_alloc_path(); + while(disk_bytenr < end) { + if (!item || disk_bytenr < item_start_offset || + disk_bytenr >= item_last_offset) { + struct btrfs_key found_key; + u32 item_size; + + if (item) + btrfs_release_path(root, path); + item = btrfs_lookup_csum(NULL, root, path, + disk_bytenr, 0); + if (IS_ERR(item)) { + ret = PTR_ERR(item); + if (ret == -ENOENT || 
ret == -EFBIG) + ret = 0; + sum = 0; + printk("log no csum found for byte %llu\n", + (unsigned long long)disk_bytenr); + item = NULL; + btrfs_release_path(root, path); + goto found; + } + btrfs_item_key_to_cpu(path->nodes[0], &found_key, + path->slots[0]); + + item_start_offset = found_key.offset; + item_size = btrfs_item_size_nr(path->nodes[0], + path->slots[0]); + item_last_offset = item_start_offset + + (item_size / csum_size) * + root->sectorsize; + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_csum_item); + } + /* + * this byte range must be able to fit inside + * a single leaf so it will also fit inside a u32 + */ + diff = disk_bytenr - item_start_offset; + diff = diff / root->sectorsize; + diff = diff * csum_size; + + read_extent_buffer(path->nodes[0], &sum, + ((unsigned long)item) + diff, + csum_size); +found: + sector_sum->bytenr = disk_bytenr; + sector_sum->sum = sum; + disk_bytenr += root->sectorsize; + sector_sum++; + } + btrfs_free_path(path); + return 0; +} + static noinline int copy_items(struct btrfs_trans_handle *trans, struct btrfs_root *log, struct btrfs_path *dst_path, @@ -2481,6 +2551,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, u32 *ins_sizes; char *ins_data; int i; + struct list_head ordered_sums; + + INIT_LIST_HEAD(&ordered_sums); ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); @@ -2535,6 +2608,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, extent); u64 dl = btrfs_file_extent_disk_num_bytes(src, extent); + u64 cs = btrfs_file_extent_offset(src, extent); + u64 cl = btrfs_file_extent_num_bytes(src, + extent);; /* ds == 0 is a hole */ if (ds != 0) { ret = btrfs_inc_extent_ref(trans, log, @@ -2544,6 +2620,11 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, trans->transid, ins_keys[i].objectid); BUG_ON(ret); + ret = copy_extent_csums(trans, + &ordered_sums, + log->fs_info->csum_root, + ds + cs, cl); + BUG_ON(ret); } } } @@ -2553,6 +2634,20 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(dst_path->nodes[0]); btrfs_release_path(log, dst_path); kfree(ins_data); + + /* + * we have to do this after the loop above to avoid changing the + * log tree while trying to change the log tree. + */ + while(!list_empty(&ordered_sums)) { + struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, + struct btrfs_ordered_sum, + list); + ret = btrfs_csum_file_blocks(trans, log, sums); + BUG_ON(ret); + list_del(&sums->list); + kfree(sums); + } return 0; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6c523b3360f..2049d179ccd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2771,6 +2771,7 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root, device->work.func = pending_bios_fn; fs_devices->num_devices++; spin_lock_init(&device->io_lock); + INIT_LIST_HEAD(&device->dev_alloc_list); memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); return device; }
-- cgit v1.2.3-70-g09d2

From 934d375bacf9ea8a37fbfff5f3cf1c093f324095 Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Mon, 8 Dec 2008 16:43:10 -0500
Subject: Btrfs: Use map_private_extent_buffer during generic_bin_search

It is possible that generic_bin_search will be called on a tree block
that has not been locked. This happens because cache_block_group skips
locking on the tree blocks. Since the tree block isn't locked, we aren't
allowed to change the extent_buffer->map_token field.
Using map_private_extent_buffer avoids any changes to the internal extent
buffer fields. Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 5 ++++- fs/btrfs/extent_io.c | 1 + fs/btrfs/volumes.c | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index a83cbdf1d8c..19c0dd33b1e 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -813,7 +813,8 @@ static noinline int generic_bin_search(struct extent_buffer *eb, unmap_extent_buffer(eb, map_token, KM_USER0); map_token = NULL; } - err = map_extent_buffer(eb, offset, + + err = map_private_extent_buffer(eb, offset, sizeof(struct btrfs_disk_key), &map_token, &kaddr, &map_start, &map_len, KM_USER0); @@ -3585,6 +3586,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, int level; int ret = 1; + WARN_ON(!path->keep_locks); again: cur = btrfs_lock_root_node(root); level = btrfs_header_level(cur); @@ -3708,6 +3710,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, int slot; struct extent_buffer *c; + WARN_ON(!path->keep_locks); while(level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7449ecf32c5..607f5ff2791 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3471,6 +3471,7 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start, unmap_extent_buffer(eb, eb->map_token, km); eb->map_token = NULL; save = 1; + WARN_ON(!mutex_is_locked(&eb->mutex)); } err = map_private_extent_buffer(eb, start, min_len, token, map, map_start, map_len, km); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a79b3cc09e9..825364fae69 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2594,12 +2594,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, stripe_nr = stripe_nr * map->num_stripes + i; } bytenr = chunk_start + stripe_nr * map->stripe_len; + WARN_ON(nr >= map->num_stripes); for (j = 0; j < nr; j++) { if (buf[j] == bytenr) break; } - if (j == nr) + if (j == nr) { + WARN_ON(nr >= map->num_stripes); buf[nr++] = bytenr; + } } for (i = 0; i > nr; i++) {
-- cgit v1.2.3-70-g09d2

From cad321ad529400c6ab24c501a67c3be720a0744c Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Wed, 17 Dec 2008 14:51:42 -0500
Subject: Btrfs: shift all end_io work to thread pools

bio_end_io for reads without checksumming on and btree writes were
happening without using async thread pools. This means the extent_io.c
code had to use spin_lock_irq and friends on the rb tree locks for
extent state. There were some irq safe vs unsafe lock inversions between
the delalloc lock and the extent state locks. This patch gets rid of
them by moving all end_io code into the thread pools.

To avoid contention and deadlocks between the data end_io processing
and the metadata end_io processing, yet another thread pool is added to
finish off metadata writes.
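For orientation, the routing that end_workqueue_bio implements in the diff
below can be summarized in a short sketch. The four pool names match the
patch; the enum and helper here are hypothetical simplifications, not btrfs
code.

    /*
     * Sketch of the end_io dispatch this patch introduces: every
     * completed bio is finished in one of four worker pools, chosen by
     * write-vs-read and metadata-vs-data. The fourth pool
     * (endio_meta_write_workers) is the one added by this patch.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum endio_pool {
            ENDIO_WORKERS,            /* data reads */
            ENDIO_META_WORKERS,       /* metadata reads */
            ENDIO_WRITE_WORKERS,      /* data writes */
            ENDIO_META_WRITE_WORKERS, /* metadata writes (new) */
    };

    static enum endio_pool route_end_io(bool is_write, bool is_metadata)
    {
            if (is_write)
                    return is_metadata ? ENDIO_META_WRITE_WORKERS
                                       : ENDIO_WRITE_WORKERS;
            return is_metadata ? ENDIO_META_WORKERS : ENDIO_WORKERS;
    }

    int main(void)
    {
            printf("btree write -> pool %d\n", route_end_io(true, true));
            printf("data read   -> pool %d\n", route_end_io(false, false));
            return 0;
    }

With every completion pushed to a worker pool, the extent state tree is no
longer touched from interrupt context, which is what lets the extent_io.c
hunks below drop the spin_lock_irq variants in favor of plain spin_lock.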
Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 39 ++++++++++++++++++++++++++------------- fs/btrfs/extent_io.c | 51 ++++++++++++++++++++++++--------------------------- fs/btrfs/inode.c | 12 ++++++------ 4 files changed, 57 insertions(+), 46 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b89999de456..ccea0648e10 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -758,6 +758,7 @@ struct btrfs_fs_info { struct btrfs_workers delalloc_workers; struct btrfs_workers endio_workers; struct btrfs_workers endio_meta_workers; + struct btrfs_workers endio_meta_write_workers; struct btrfs_workers endio_write_workers; struct btrfs_workers submit_workers; /* diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 541a8279ac7..04f8d7080b1 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -447,8 +447,12 @@ static void end_workqueue_bio(struct bio *bio, int err) end_io_wq->work.flags = 0; if (bio->bi_rw & (1 << BIO_RW)) { - btrfs_queue_worker(&fs_info->endio_write_workers, - &end_io_wq->work); + if (end_io_wq->metadata) + btrfs_queue_worker(&fs_info->endio_meta_write_workers, + &end_io_wq->work); + else + btrfs_queue_worker(&fs_info->endio_write_workers, + &end_io_wq->work); } else { if (end_io_wq->metadata) btrfs_queue_worker(&fs_info->endio_meta_workers, @@ -624,23 +628,24 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) { - /* - * kthread helpers are used to submit writes so that checksumming - * can happen in parallel across all CPUs - */ + int ret; + + ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, + bio, 1); + BUG_ON(ret); + if (!(rw & (1 << BIO_RW))) { - int ret; /* * called for a read, do the setup so that checksum validation * can happen in the async kernel threads */ - ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info, - bio, 1); - BUG_ON(ret); - return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 0); } + /* + * kthread helpers are used to submit writes so that checksumming + * can happen in parallel across all CPUs + */ return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode, rw, bio, mirror_num, 0, __btree_submit_bio_start, @@ -1350,12 +1355,13 @@ static void end_workqueue_fn(struct btrfs_work *work) bio = end_io_wq->bio; fs_info = end_io_wq->info; - /* metadata bios are special because the whole tree block must + /* metadata bio reads are special because the whole tree block must * be checksummed at once. This makes sure the entire block is in * ram and up to date before trying to verify things. 
For * blocksize <= pagesize, it is basically a noop */ - if (end_io_wq->metadata && !bio_ready_for_csum(bio)) { + if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata && + !bio_ready_for_csum(bio)) { btrfs_queue_worker(&fs_info->endio_meta_workers, &end_io_wq->work); return; @@ -1668,6 +1674,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta", fs_info->thread_pool_size); + btrfs_init_workers(&fs_info->endio_meta_write_workers, + "endio-meta-write", fs_info->thread_pool_size); btrfs_init_workers(&fs_info->endio_write_workers, "endio-write", fs_info->thread_pool_size); @@ -1677,6 +1685,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, */ fs_info->endio_workers.idle_thresh = 4; fs_info->endio_write_workers.idle_thresh = 64; + fs_info->endio_meta_write_workers.idle_thresh = 64; btrfs_start_workers(&fs_info->workers, 1); btrfs_start_workers(&fs_info->submit_workers, 1); @@ -1685,6 +1694,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_meta_workers, fs_info->thread_pool_size); + btrfs_start_workers(&fs_info->endio_meta_write_workers, + fs_info->thread_pool_size); btrfs_start_workers(&fs_info->endio_write_workers, fs_info->thread_pool_size); @@ -1866,6 +1877,7 @@ fail_sb_buffer: btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_meta_workers); + btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); fail_iput: @@ -2253,6 +2265,7 @@ int close_ctree(struct btrfs_root *root) btrfs_stop_workers(&fs_info->workers); btrfs_stop_workers(&fs_info->endio_workers); btrfs_stop_workers(&fs_info->endio_meta_workers); + btrfs_stop_workers(&fs_info->endio_meta_write_workers); btrfs_stop_workers(&fs_info->endio_write_workers); btrfs_stop_workers(&fs_info->submit_workers); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 607f5ff2791..25ce2d18e5b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -477,7 +477,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state *state; struct extent_state *prealloc = NULL; struct rb_node *node; - unsigned long flags; int err; int set = 0; @@ -488,7 +487,7 @@ again: return -ENOMEM; } - spin_lock_irqsave(&tree->lock, flags); + spin_lock(&tree->lock); /* * this search will find the extents that end after * our range starts @@ -559,7 +558,7 @@ again: goto search_again; out: - spin_unlock_irqrestore(&tree->lock, flags); + spin_unlock(&tree->lock); if (prealloc) free_extent_state(prealloc); @@ -568,7 +567,7 @@ out: search_again: if (start > end) goto out; - spin_unlock_irqrestore(&tree->lock, flags); + spin_unlock(&tree->lock); if (mask & __GFP_WAIT) cond_resched(); goto again; @@ -582,9 +581,9 @@ static int wait_on_state(struct extent_io_tree *tree, { DEFINE_WAIT(wait); prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); schedule(); - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); finish_wait(&state->wq, &wait); return 0; } @@ -599,7 +598,7 @@ int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits) struct extent_state *state; struct rb_node *node; - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); again: while (1) { /* @@ -628,13 
+627,13 @@ again: break; if (need_resched()) { - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); cond_resched(); - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); } } out: - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); return 0; } EXPORT_SYMBOL(wait_extent_bit); @@ -668,7 +667,6 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int b struct extent_state *state; struct extent_state *prealloc = NULL; struct rb_node *node; - unsigned long flags; int err = 0; int set; u64 last_start; @@ -680,7 +678,7 @@ again: return -ENOMEM; } - spin_lock_irqsave(&tree->lock, flags); + spin_lock(&tree->lock); /* * this search will find all the extents that end after * our range starts. @@ -800,7 +798,7 @@ again: goto search_again; out: - spin_unlock_irqrestore(&tree->lock, flags); + spin_unlock(&tree->lock); if (prealloc) free_extent_state(prealloc); @@ -809,7 +807,7 @@ out: search_again: if (start > end) goto out; - spin_unlock_irqrestore(&tree->lock, flags); + spin_unlock(&tree->lock); if (mask & __GFP_WAIT) cond_resched(); goto again; @@ -1021,7 +1019,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, struct extent_state *state; int ret = 1; - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); /* * this search will find all the extents that end after * our range starts. @@ -1044,7 +1042,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, break; } out: - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); return ret; } EXPORT_SYMBOL(find_first_extent_bit); @@ -1097,7 +1095,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree, u64 found = 0; u64 total_bytes = 0; - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); /* * this search will find all the extents that end after @@ -1134,7 +1132,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree, break; } out: - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); return found; } @@ -1391,7 +1389,7 @@ u64 count_range_bits(struct extent_io_tree *tree, return 0; } - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); if (cur_start == 0 && bits == EXTENT_DIRTY) { total_bytes = tree->dirty_bytes; goto out; @@ -1424,7 +1422,7 @@ u64 count_range_bits(struct extent_io_tree *tree, break; } out: - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); return total_bytes; } @@ -1501,7 +1499,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) struct extent_state *state; int ret = 0; - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); /* * this search will find all the extents that end after * our range starts. @@ -1518,7 +1516,7 @@ int set_state_private(struct extent_io_tree *tree, u64 start, u64 private) } state->private = private; out: - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); return ret; } @@ -1528,7 +1526,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) struct extent_state *state; int ret = 0; - spin_lock_irq(&tree->lock); + spin_lock(&tree->lock); /* * this search will find all the extents that end after * our range starts. 
@@ -1545,7 +1543,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private) } *private = state->private; out: - spin_unlock_irq(&tree->lock); + spin_unlock(&tree->lock); return ret; } @@ -1561,9 +1559,8 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state *state = NULL; struct rb_node *node; int bitset = 0; - unsigned long flags; - spin_lock_irqsave(&tree->lock, flags); + spin_lock(&tree->lock); node = tree_search(tree, start); while (node && start <= end) { state = rb_entry(node, struct extent_state, rb_node); @@ -1594,7 +1591,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, break; } } - spin_unlock_irqrestore(&tree->lock, flags); + spin_unlock(&tree->lock); return bitset; } EXPORT_SYMBOL(test_range_bit); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 0577e77e661..068bad46338 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1282,8 +1282,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, } /* - * extent_io.c submission hook. This does the right thing for csum calculation on write, - * or reading the csums from the tree before a read + * extent_io.c submission hook. This does the right thing for csum calculation + * on write, or reading the csums from the tree before a read */ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int mirror_num, unsigned long bio_flags) @@ -1292,11 +1292,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, int ret = 0; int skip_sum; + skip_sum = btrfs_test_flag(inode, NODATASUM); + ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); BUG_ON(ret); - skip_sum = btrfs_test_flag(inode, NODATASUM); - if (!(rw & (1 << BIO_RW))) { if (bio_flags & EXTENT_BIO_COMPRESSED) { return btrfs_submit_compressed_read(inode, bio, @@ -1648,13 +1648,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, failrec->logical, failrec->len); failrec->last_mirror++; if (!state) { - spin_lock_irq(&BTRFS_I(inode)->io_tree.lock); + spin_lock(&BTRFS_I(inode)->io_tree.lock); state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, failrec->start, EXTENT_LOCKED); if (state && state->start != failrec->start) state = NULL; - spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock); + spin_unlock(&BTRFS_I(inode)->io_tree.lock); } if (!state || failrec->last_mirror > num_copies) { set_state_private(failure_tree, failrec->start, 0); -- cgit v1.2.3-70-g09d2 From c584482b47f47b051cdc1d5236b99ad18f1b1cfb Mon Sep 17 00:00:00 2001 From: Liu Hui Date: Mon, 5 Jan 2009 15:49:55 -0500 Subject: Btrfs: Fix typo in clear_state_cb In clear_state_cb, we should check 'tree->ops->clear_bit_hook' instead of 'tree->ops->set_bit_hook'. 
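The fix below keys the guard on the hook that is actually invoked; with the old check, an extent_io_ops table that defined set_bit_hook but not clear_bit_hook would have called a NULL clear_bit_hook, while one defining only clear_bit_hook would never have it run. A sketch of the corrected dispatch:

	if (tree->ops && tree->ops->clear_bit_hook) {
		tree->ops->clear_bit_hook(tree->mapping->host, state->start,
					  state->end, state->state, bits);
	}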
Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 25ce2d18e5b..0bf7684207a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -340,7 +340,7 @@ static void clear_state_cb(struct extent_io_tree *tree, struct extent_state *state, unsigned long bits) { - if (tree->ops && tree->ops->set_bit_hook) { + if (tree->ops && tree->ops->clear_bit_hook) { tree->ops->clear_bit_hook(tree->mapping->host, state->start, state->end, state->state, bits); } -- cgit v1.2.3-70-g09d2 From d397712bcc6a759a560fd247e6053ecae091f958 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 5 Jan 2009 21:25:51 -0500 Subject: Btrfs: Fix checkpatch.pl warnings There were many, most are fixed now. struct-funcs.c generates some warnings but these are bogus. Signed-off-by: Chris Mason --- fs/btrfs/acl.c | 5 +- fs/btrfs/async-thread.c | 6 +- fs/btrfs/compat.h | 4 +- fs/btrfs/compression.c | 12 +-- fs/btrfs/ctree.c | 217 ++++++++++++++++-------------------------- fs/btrfs/ctree.h | 30 +++--- fs/btrfs/dir-item.c | 2 +- fs/btrfs/disk-io.c | 187 +++++++++++++------------------------ fs/btrfs/export.c | 8 +- fs/btrfs/extent-tree.c | 223 +++++++++++++++++++++++--------------------- fs/btrfs/extent_io.c | 213 ++++++++++++++++++++---------------------- fs/btrfs/extent_map.c | 14 +-- fs/btrfs/file-item.c | 18 ++-- fs/btrfs/file.c | 49 +++++----- fs/btrfs/free-space-cache.c | 37 ++++---- fs/btrfs/inode-map.c | 1 - fs/btrfs/inode.c | 173 +++++++++++++++++----------------- fs/btrfs/ioctl.c | 37 ++++---- fs/btrfs/locking.c | 5 +- fs/btrfs/ordered-data.c | 34 +++---- fs/btrfs/print-tree.c | 73 +++++++++------ fs/btrfs/ref-cache.c | 12 +-- fs/btrfs/root-tree.c | 17 ++-- fs/btrfs/struct-funcs.c | 4 +- fs/btrfs/super.c | 25 +++-- fs/btrfs/sysfs.c | 6 +- fs/btrfs/transaction.c | 45 ++++----- fs/btrfs/transaction.h | 6 +- fs/btrfs/tree-defrag.c | 9 +- fs/btrfs/tree-log.c | 70 +++++++------- fs/btrfs/volumes.c | 78 ++++++++-------- fs/btrfs/xattr.c | 3 +- fs/btrfs/zlib.c | 45 ++++----- 33 files changed, 770 insertions(+), 898 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 867eaf1f8ef..1d53b62dbba 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -161,8 +161,7 @@ static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) ret = __btrfs_setxattr(inode, name, value, size, 0); out: - if (value) - kfree(value); + kfree(value); if (!ret) btrfs_update_cached_acl(inode, p_acl, acl); @@ -213,7 +212,7 @@ static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name, } static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name, - const void *value, size_t size, int flags) + const void *value, size_t size, int flags) { return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size); } diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 4229450b759..8e2fec05dbe 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c @@ -104,7 +104,7 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers, spin_lock_irqsave(&workers->lock, flags); - while(!list_empty(&workers->order_list)) { + while (!list_empty(&workers->order_list)) { work = list_entry(workers->order_list.next, struct btrfs_work, order_list); @@ -143,7 +143,7 @@ static int worker_loop(void *arg) struct btrfs_work *work; do { spin_lock_irq(&worker->lock); - while(!list_empty(&worker->pending)) 
{ + while (!list_empty(&worker->pending)) { cur = worker->pending.next; work = list_entry(cur, struct btrfs_work, list); list_del(&work->list); @@ -188,7 +188,7 @@ int btrfs_stop_workers(struct btrfs_workers *workers) struct btrfs_worker_thread *worker; list_splice_init(&workers->idle_list, &workers->worker_list); - while(!list_empty(&workers->worker_list)) { + while (!list_empty(&workers->worker_list)) { cur = workers->worker_list.next; worker = list_entry(cur, struct btrfs_worker_thread, worker_list); diff --git a/fs/btrfs/compat.h b/fs/btrfs/compat.h index 75e4426d6fb..594d60bdd3c 100644 --- a/fs/btrfs/compat.h +++ b/fs/btrfs/compat.h @@ -4,7 +4,7 @@ #define btrfs_drop_nlink(inode) drop_nlink(inode) #define btrfs_inc_nlink(inode) inc_nlink(inode) -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27) +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27) static inline struct dentry *d_obtain_alias(struct inode *inode) { struct dentry *d; @@ -21,7 +21,7 @@ static inline struct dentry *d_obtain_alias(struct inode *inode) } #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) # define __pagevec_lru_add_file __pagevec_lru_add # define open_bdev_exclusive open_bdev_excl # define close_bdev_exclusive(bdev, mode) close_bdev_excl(bdev) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 2436163d543..ee848d8585d 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -137,7 +137,8 @@ static int check_compressed_csum(struct inode *inode, kunmap_atomic(kaddr, KM_USER0); if (csum != *cb_sum) { - printk("btrfs csum failed ino %lu extent %llu csum %u " + printk(KERN_INFO "btrfs csum failed ino %lu " + "extent %llu csum %u " "wanted %u mirror %d\n", inode->i_ino, (unsigned long long)disk_start, csum, *cb_sum, cb->mirror_num); @@ -217,7 +218,7 @@ csum_failed: * we have verified the checksum already, set page * checked so the end_io handlers know about it */ - while(bio_index < cb->orig_bio->bi_vcnt) { + while (bio_index < cb->orig_bio->bi_vcnt) { SetPageChecked(bvec->bv_page); bvec++; bio_index++; @@ -246,7 +247,7 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start, int i; int ret; - while(nr_pages > 0) { + while (nr_pages > 0) { ret = find_get_pages_contig(inode->i_mapping, index, min_t(unsigned long, nr_pages, ARRAY_SIZE(pages)), pages); @@ -463,7 +464,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; pagevec_init(&pvec, 0); - while(last_offset < compressed_end) { + while (last_offset < compressed_end) { page_index = last_offset >> PAGE_CACHE_SHIFT; if (page_index > end_index) @@ -697,9 +698,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0); BUG_ON(ret); - if (!btrfs_test_flag(inode, NODATASUM)) { + if (!btrfs_test_flag(inode, NODATASUM)) btrfs_lookup_bio_sums(root, inode, comp_bio, sums); - } ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); BUG_ON(ret); diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 7fad2e3ad6f..9e46c077681 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -67,7 +67,7 @@ void btrfs_free_path(struct btrfs_path *p) * * It is safe to call this on paths that no locks or extent buffers held. 
*/ -void noinline btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) +noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) { int i; @@ -112,7 +112,7 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; - while(1) { + while (1) { eb = btrfs_root_node(root); btrfs_tree_lock(eb); @@ -202,22 +202,22 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans, } /* - * does the dirty work in cow of a single block. The parent block - * (if supplied) is updated to point to the new cow copy. The new - * buffer is marked dirty and returned locked. If you modify the block - * it needs to be marked dirty again. + * does the dirty work in cow of a single block. The parent block (if + * supplied) is updated to point to the new cow copy. The new buffer is marked + * dirty and returned locked. If you modify the block it needs to be marked + * dirty again. * * search_start -- an allocation hint for the new block * - * empty_size -- a hint that you plan on doing more cow. This is the size in bytes - * the allocator should try to find free next to the block it returns. This is - * just a hint and may be ignored by the allocator. + * empty_size -- a hint that you plan on doing more cow. This is the size in + * bytes the allocator should try to find free next to the block it returns. + * This is just a hint and may be ignored by the allocator. * * prealloc_dest -- if you have already reserved a destination for the cow, - * this uses that block instead of allocating a new one. btrfs_alloc_reserved_extent - * is used to finish the allocation. + * this uses that block instead of allocating a new one. + * btrfs_alloc_reserved_extent is used to finish the allocation. */ -static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, +static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, @@ -366,7 +366,7 @@ static int noinline __btrfs_cow_block(struct btrfs_trans_handle *trans, * This version of it has extra checks so that a block isn't cow'd more than * once per transaction, as long as it hasn't been written yet */ -int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, +noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, struct extent_buffer *parent, int parent_slot, struct extent_buffer **cow_ret, u64 prealloc_dest) @@ -375,13 +375,16 @@ int noinline btrfs_cow_block(struct btrfs_trans_handle *trans, int ret; if (trans->transaction != root->fs_info->running_transaction) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, + printk(KERN_CRIT "trans %llu running %llu\n", + (unsigned long long)trans->transid, + (unsigned long long) root->fs_info->running_transaction->transid); WARN_ON(1); } if (trans->transid != root->fs_info->generation) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, - root->fs_info->generation); + printk(KERN_CRIT "trans %llu running %llu\n", + (unsigned long long)trans->transid, + (unsigned long long)root->fs_info->generation); WARN_ON(1); } @@ -489,16 +492,10 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, if (cache_only && parent_level != 1) return 0; - if (trans->transaction != root->fs_info->running_transaction) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, - root->fs_info->running_transaction->transid); + if (trans->transaction != 
root->fs_info->running_transaction) WARN_ON(1); - } - if (trans->transid != root->fs_info->generation) { - printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid, - root->fs_info->generation); + if (trans->transid != root->fs_info->generation) WARN_ON(1); - } parent_nritems = btrfs_header_nritems(parent); blocksize = btrfs_level_size(root, parent_level - 1); @@ -681,51 +678,18 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, BUG_ON(btrfs_node_blockptr(parent, parent_slot) != btrfs_header_bytenr(leaf)); } -#if 0 - for (i = 0; nritems > 1 && i < nritems - 2; i++) { - btrfs_item_key_to_cpu(leaf, &cpukey, i + 1); - btrfs_item_key(leaf, &leaf_key, i); - if (comp_keys(&leaf_key, &cpukey) >= 0) { - btrfs_print_leaf(root, leaf); - printk("slot %d offset bad key\n", i); - BUG_ON(1); - } - if (btrfs_item_offset_nr(leaf, i) != - btrfs_item_end_nr(leaf, i + 1)) { - btrfs_print_leaf(root, leaf); - printk("slot %d offset bad\n", i); - BUG_ON(1); - } - if (i == 0) { - if (btrfs_item_offset_nr(leaf, i) + - btrfs_item_size_nr(leaf, i) != - BTRFS_LEAF_DATA_SIZE(root)) { - btrfs_print_leaf(root, leaf); - printk("slot %d first offset bad\n", i); - BUG_ON(1); - } - } - } - if (nritems > 0) { - if (btrfs_item_size_nr(leaf, nritems - 1) > 4096) { - btrfs_print_leaf(root, leaf); - printk("slot %d bad size \n", nritems - 1); - BUG_ON(1); - } - } -#endif if (slot != 0 && slot < nritems - 1) { btrfs_item_key(leaf, &leaf_key, slot); btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1); if (comp_keys(&leaf_key, &cpukey) <= 0) { btrfs_print_leaf(root, leaf); - printk("slot %d offset bad key\n", slot); + printk(KERN_CRIT "slot %d offset bad key\n", slot); BUG_ON(1); } if (btrfs_item_offset_nr(leaf, slot - 1) != btrfs_item_end_nr(leaf, slot)) { btrfs_print_leaf(root, leaf); - printk("slot %d offset bad\n", slot); + printk(KERN_CRIT "slot %d offset bad\n", slot); BUG_ON(1); } } @@ -736,7 +700,7 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, if (btrfs_item_offset_nr(leaf, slot) != btrfs_item_end_nr(leaf, slot + 1)) { btrfs_print_leaf(root, leaf); - printk("slot %d offset bad\n", slot); + printk(KERN_CRIT "slot %d offset bad\n", slot); BUG_ON(1); } } @@ -745,30 +709,10 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, return 0; } -static int noinline check_block(struct btrfs_root *root, +static noinline int check_block(struct btrfs_root *root, struct btrfs_path *path, int level) { - u64 found_start; return 0; - if (btrfs_header_level(path->nodes[level]) != level) - printk("warning: bad level %Lu wanted %d found %d\n", - path->nodes[level]->start, level, - btrfs_header_level(path->nodes[level])); - found_start = btrfs_header_bytenr(path->nodes[level]); - if (found_start != path->nodes[level]->start) { - printk("warning: bad bytentr %Lu found %Lu\n", - path->nodes[level]->start, found_start); - } -#if 0 - struct extent_buffer *buf = path->nodes[level]; - - if (memcmp_extent_buffer(buf, root->fs_info->fsid, - (unsigned long)btrfs_header_fsid(buf), - BTRFS_FSID_SIZE)) { - printk("warning bad block %Lu\n", buf->start); - return 1; - } -#endif if (level == 0) return check_leaf(root, path, level); return check_node(root, path, level); @@ -802,7 +746,7 @@ static noinline int generic_bin_search(struct extent_buffer *eb, unsigned long map_len = 0; int err; - while(low < high) { + while (low < high) { mid = (low + high) / 2; offset = p + mid * item_size; @@ -1130,7 +1074,7 @@ enospc: * when they are completely full. 
This is also done top down, so we * have to be pessimistic. */ -static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, +static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -1296,7 +1240,7 @@ static noinline void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; - while(1) { + while (1) { if (direction < 0) { if (nr == 0) break; @@ -1322,7 +1266,8 @@ static noinline void reada_for_search(struct btrfs_root *root, nscan++; if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32)) break; - if(nread > (256 * 1024) || nscan > 128) + + if (nread > (256 * 1024) || nscan > 128) break; if (search < lowest_read) @@ -1333,17 +1278,17 @@ static noinline void reada_for_search(struct btrfs_root *root, } /* - * when we walk down the tree, it is usually safe to unlock the higher layers in - * the tree. The exceptions are when our path goes through slot 0, because operations - * on the tree might require changing key pointers higher up in the tree. + * when we walk down the tree, it is usually safe to unlock the higher layers + * in the tree. The exceptions are when our path goes through slot 0, because + * operations on the tree might require changing key pointers higher up in the + * tree. * - * callers might also have set path->keep_locks, which tells this code to - * keep the lock if the path points to the last slot in the block. This is - * part of walking through the tree, and selecting the next slot in the higher - * block. + * callers might also have set path->keep_locks, which tells this code to keep + * the lock if the path points to the last slot in the block. This is part of + * walking through the tree, and selecting the next slot in the higher block. * - * lowest_unlock sets the lowest level in the tree we're allowed to unlock. - * so if lowest_unlock is 1, level 0 won't be unlocked + * lowest_unlock sets the lowest level in the tree we're allowed to unlock. 
so + * if lowest_unlock is 1, level 0 won't be unlocked */ static noinline void unlock_up(struct btrfs_path *path, int level, int lowest_unlock) @@ -1832,9 +1777,8 @@ static int push_node_left(struct btrfs_trans_handle *trans, if (!empty && src_nritems <= 8) return 1; - if (push_items <= 0) { + if (push_items <= 0) return 1; - } if (empty) { push_items = min(src_nritems, push_items); @@ -1854,7 +1798,7 @@ static int push_node_left(struct btrfs_trans_handle *trans, copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(dst_nritems), btrfs_node_key_ptr_offset(0), - push_items * sizeof(struct btrfs_key_ptr)); + push_items * sizeof(struct btrfs_key_ptr)); if (push_items < src_nritems) { memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), @@ -1899,19 +1843,16 @@ static int balance_node_right(struct btrfs_trans_handle *trans, src_nritems = btrfs_header_nritems(src); dst_nritems = btrfs_header_nritems(dst); push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; - if (push_items <= 0) { + if (push_items <= 0) return 1; - } - if (src_nritems < 4) { + if (src_nritems < 4) return 1; - } max_push = src_nritems / 2 + 1; /* don't try to empty the node */ - if (max_push >= src_nritems) { + if (max_push >= src_nritems) return 1; - } if (max_push < push_items) push_items = max_push; @@ -1924,7 +1865,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(0), btrfs_node_key_ptr_offset(src_nritems - push_items), - push_items * sizeof(struct btrfs_key_ptr)); + push_items * sizeof(struct btrfs_key_ptr)); btrfs_set_header_nritems(src, src_nritems - push_items); btrfs_set_header_nritems(dst, dst_nritems + push_items); @@ -1945,7 +1886,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, * * returns zero on success or < 0 on failure. */ -static int noinline insert_new_root(struct btrfs_trans_handle *trans, +static noinline int insert_new_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int level) { @@ -2176,14 +2117,15 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr) * the start of the leaf data. 
IOW, how much room * the leaf has left for both items and data */ -int noinline btrfs_leaf_free_space(struct btrfs_root *root, +noinline int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf) { int nritems = btrfs_header_nritems(leaf); int ret; ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems); if (ret < 0) { - printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n", + printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, " + "used %d nritems %d\n", ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root), leaf_space_used(leaf, 0, nritems), nritems); } @@ -2219,9 +2161,9 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root int ret; slot = path->slots[1]; - if (!path->nodes[1]) { + if (!path->nodes[1]) return 1; - } + upper = path->nodes[1]; if (slot >= btrfs_header_nritems(upper) - 1) return 1; @@ -2418,9 +2360,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root return 1; right_nritems = btrfs_header_nritems(right); - if (right_nritems == 0) { + if (right_nritems == 0) return 1; - } WARN_ON(!btrfs_tree_locked(path->nodes[1])); @@ -2502,7 +2443,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root push_items * sizeof(struct btrfs_item)); push_space = BTRFS_LEAF_DATA_SIZE(root) - - btrfs_item_offset_nr(right, push_items -1); + btrfs_item_offset_nr(right, push_items - 1); copy_extent_buffer(left, right, btrfs_leaf_data(left) + leaf_data_end(root, left) - push_space, @@ -2537,7 +2478,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root /* fixup right node */ if (push_items > right_nritems) { - printk("push items %d nr %u\n", push_items, right_nritems); + printk(KERN_CRIT "push items %d nr %u\n", push_items, + right_nritems); WARN_ON(1); } @@ -2640,9 +2582,8 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, /* first try to make some room by pushing left and right */ if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) { wret = push_leaf_right(trans, root, path, data_size, 0); - if (wret < 0) { + if (wret < 0) return wret; - } if (wret) { wret = push_leaf_left(trans, root, path, data_size, 0); if (wret < 0) @@ -2665,7 +2606,7 @@ again: l = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(l); - mid = (nritems + 1)/ 2; + mid = (nritems + 1) / 2; right = btrfs_alloc_free_block(trans, root, root->leafsize, path->nodes[1]->start, @@ -2734,7 +2675,7 @@ again: path->slots[0] = 0; if (path->slots[1] == 0) { wret = fixup_low_keys(trans, root, - path, &disk_key, 1); + path, &disk_key, 1); if (wret) ret = wret; } @@ -3033,8 +2974,8 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, BTRFS_FILE_EXTENT_INLINE) { ptr = btrfs_item_ptr_offset(leaf, slot); memmove_extent_buffer(leaf, ptr, - (unsigned long)fi, - offsetof(struct btrfs_file_extent_item, + (unsigned long)fi, + offsetof(struct btrfs_file_extent_item, disk_bytenr)); } } @@ -3096,7 +3037,8 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, BUG_ON(slot < 0); if (slot >= nritems) { btrfs_print_leaf(root, leaf); - printk("slot %d too large, nritems %d\n", slot, nritems); + printk(KERN_CRIT "slot %d too large, nritems %d\n", + slot, nritems); BUG_ON(1); } @@ -3218,7 +3160,7 @@ int btrfs_insert_some_items(struct btrfs_trans_handle *trans, if (old_data < data_end) { btrfs_print_leaf(root, leaf); - printk("slot %d old_data %d data_end %d\n", + printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); 
BUG_ON(1); } @@ -3317,9 +3259,8 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, unsigned int data_end; struct btrfs_disk_key disk_key; - for (i = 0; i < nr; i++) { + for (i = 0; i < nr; i++) total_data += data_size[i]; - } total_size = total_data + (nr * sizeof(struct btrfs_item)); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); @@ -3336,7 +3277,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, if (btrfs_leaf_free_space(root, leaf) < total_size) { btrfs_print_leaf(root, leaf); - printk("not enough freespace need %u have %d\n", + printk(KERN_CRIT "not enough freespace need %u have %d\n", total_size, btrfs_leaf_free_space(root, leaf)); BUG(); } @@ -3349,7 +3290,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, if (old_data < data_end) { btrfs_print_leaf(root, leaf); - printk("slot %d old_data %d data_end %d\n", + printk(KERN_CRIT "slot %d old_data %d data_end %d\n", slot, old_data, data_end); BUG_ON(1); } @@ -3457,7 +3398,7 @@ static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, int wret; nritems = btrfs_header_nritems(parent); - if (slot != nritems -1) { + if (slot != nritems - 1) { memmove_extent_buffer(parent, btrfs_node_key_ptr_offset(slot), btrfs_node_key_ptr_offset(slot + 1), @@ -3614,7 +3555,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (btrfs_header_nritems(leaf) == 0) { path->slots[1] = slot; - ret = btrfs_del_leaf(trans, root, path, leaf->start); + ret = btrfs_del_leaf(trans, root, path, + leaf->start); BUG_ON(ret); free_extent_buffer(leaf); } else { @@ -3717,7 +3659,7 @@ again: ret = 1; goto out; } - while(1) { + while (1) { nritems = btrfs_header_nritems(cur); level = btrfs_header_level(cur); sret = bin_search(cur, min_key, level, &slot); @@ -3738,7 +3680,7 @@ again: * min_trans parameters. If it isn't in cache or is too * old, skip to the next one. 
*/ - while(slot < nritems) { + while (slot < nritems) { u64 blockptr; u64 gen; struct extent_buffer *tmp; @@ -3830,7 +3772,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, struct extent_buffer *c; WARN_ON(!path->keep_locks); - while(level < BTRFS_MAX_LEVEL) { + while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; @@ -3839,9 +3781,8 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, next: if (slot >= btrfs_header_nritems(c)) { level++; - if (level == BTRFS_MAX_LEVEL) { + if (level == BTRFS_MAX_LEVEL) return 1; - } continue; } if (level == 0) @@ -3889,9 +3830,8 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) int ret; nritems = btrfs_header_nritems(path->nodes[0]); - if (nritems == 0) { + if (nritems == 0) return 1; - } btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); @@ -3915,7 +3855,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) goto done; } - while(level < BTRFS_MAX_LEVEL) { + while (level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) return 1; @@ -3923,9 +3863,8 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) c = path->nodes[level]; if (slot >= btrfs_header_nritems(c)) { level++; - if (level == BTRFS_MAX_LEVEL) { + if (level == BTRFS_MAX_LEVEL) return 1; - } continue; } @@ -3946,7 +3885,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) break; } path->slots[level] = slot; - while(1) { + while (1) { level--; c = path->nodes[level]; if (path->locks[level]) @@ -3986,7 +3925,7 @@ int btrfs_previous_item(struct btrfs_root *root, u32 nritems; int ret; - while(1) { + while (1) { if (path->slots[0] == 0) { ret = btrfs_prev_leaf(root, path); if (ret != 0) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ccea0648e10..eee060f8811 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -126,7 +126,6 @@ struct btrfs_ordered_sum; static int btrfs_csum_sizes[] = { 4, 0 }; /* four bytes for CRC32 */ -//#define BTRFS_CRC32_SIZE 4 #define BTRFS_EMPTY_DIR_SIZE 0 #define BTRFS_FT_UNKNOWN 0 @@ -283,8 +282,8 @@ struct btrfs_header { } __attribute__ ((__packed__)); #define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \ - sizeof(struct btrfs_header)) / \ - sizeof(struct btrfs_key_ptr)) + sizeof(struct btrfs_header)) / \ + sizeof(struct btrfs_key_ptr)) #define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header)) #define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->leafsize)) #define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \ @@ -1512,7 +1511,7 @@ static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb) static inline int btrfs_is_leaf(struct extent_buffer *eb) { - return (btrfs_header_level(eb) == 0); + return btrfs_header_level(eb) == 0; } /* struct btrfs_root_item */ @@ -1597,8 +1596,8 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) /* struct btrfs_file_extent_item */ BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); -static inline unsigned long btrfs_file_extent_inline_start(struct - btrfs_file_extent_item *e) +static inline unsigned long +btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) { unsigned long offset = (unsigned long)e; offset += offsetof(struct btrfs_file_extent_item, disk_bytenr); @@ -1660,20 +1659,20 @@ static inline int btrfs_set_root_name(struct btrfs_root *root, const char *name, int len) { /* if we already have a name just free it */ - if (root->name) - kfree(root->name); + 
kfree(root->name); root->name = kmalloc(len+1, GFP_KERNEL); if (!root->name) return -ENOMEM; memcpy(root->name, name, len); - root->name[len] ='\0'; + root->name[len] = '\0'; return 0; } -static inline u32 btrfs_level_size(struct btrfs_root *root, int level) { +static inline u32 btrfs_level_size(struct btrfs_root *root, int level) +{ if (level == 0) return root->leafsize; return root->nodesize; @@ -1707,9 +1706,9 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, int btrfs_extent_post_op(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy); -struct btrfs_block_group_cache *btrfs_lookup_block_group(struct - btrfs_fs_info *info, - u64 bytenr); +struct btrfs_block_group_cache *btrfs_lookup_block_group( + struct btrfs_fs_info *info, + u64 bytenr); u64 btrfs_find_block_group(struct btrfs_root *root, u64 search_start, u64 search_hint, int owner); struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans, @@ -1908,8 +1907,9 @@ int btrfs_search_root(struct btrfs_root *root, u64 search_start, int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid, struct btrfs_root *latest_root); /* dir-item.c */ -int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root - *root, const char *name, int name_len, u64 dir, +int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, + struct btrfs_root *root, const char *name, + int name_len, u64 dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index 5040b71f190..926a0b287a7 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -333,7 +333,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root, leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); total_len = btrfs_item_size_nr(leaf, path->slots[0]); - while(cur < total_len) { + while (cur < total_len) { this_len = sizeof(*dir_item) + btrfs_dir_name_len(leaf, dir_item) + btrfs_dir_data_len(leaf, dir_item); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index dae25e78a6b..81a313874ae 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -23,7 +23,7 @@ #include #include #include -#include // for block_sync_page +#include #include #include #include @@ -40,19 +40,6 @@ #include "ref-cache.h" #include "tree-log.h" -#if 0 -static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf) -{ - if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) { - printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n", - (unsigned long long)extent_buffer_blocknr(buf), - (unsigned long long)btrfs_header_blocknr(buf)); - return 1; - } - return 0; -} -#endif - static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); @@ -128,23 +115,13 @@ static struct extent_map *btree_get_extent(struct inode *inode, u64 failed_start = em->start; u64 failed_len = em->len; - printk("failed to insert %Lu %Lu -> %Lu into tree\n", - em->start, em->len, em->block_start); free_extent_map(em); em = lookup_extent_mapping(em_tree, start, len); if (em) { - printk("after failing, found %Lu %Lu %Lu\n", - em->start, em->len, em->block_start); ret = 0; } else { em = lookup_extent_mapping(em_tree, failed_start, failed_len); - if (em) { - printk("double failure lookup gives us " - "%Lu %Lu -> %Lu\n", em->start, - 
em->len, em->block_start); - free_extent_map(em); - } ret = -EIO; } } else if (ret) { @@ -191,15 +168,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, unsigned long inline_result; len = buf->len - offset; - while(len > 0) { + while (len > 0) { err = map_private_extent_buffer(buf, offset, 32, &map_token, &kaddr, &map_start, &map_len, KM_USER0); - if (err) { - printk("failed to map extent buffer! %lu\n", - offset); + if (err) return 1; - } cur_len = min(len, map_len - (offset - map_start)); crc = btrfs_csum_data(root, kaddr + offset - map_start, crc, cur_len); @@ -218,15 +192,14 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, btrfs_csum_final(crc, result); if (verify) { - /* FIXME, this is not good */ if (memcmp_extent_buffer(buf, result, 0, csum_size)) { u32 val; u32 found = 0; memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - printk("btrfs: %s checksum verify failed on %llu " - "wanted %X found %X level %d\n", + printk(KERN_INFO "btrfs: %s checksum verify failed " + "on %llu wanted %X found %X level %d\n", root->fs_info->sb->s_id, buf->start, val, found, btrfs_header_level(buf)); if (result != (char *)&inline_result) @@ -293,7 +266,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) return ret; -printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num); + num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) @@ -307,9 +280,10 @@ printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror } /* - * checksum a dirty tree block before IO. This has extra checks to make - * sure we only fill in the checksum field in the first page of a multi-page block + * checksum a dirty tree block before IO. 
This has extra checks to make sure + * we only fill in the checksum field in the first page of a multi-page block */ + static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) { struct extent_io_tree *tree; @@ -327,28 +301,22 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) if (!page->private) goto out; len = page->private >> 2; - if (len == 0) { - WARN_ON(1); - } + WARN_ON(len == 0); + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n", - start, found_start, len); WARN_ON(1); goto err; } if (eb->first_page != page) { - printk("bad first page %lu %lu\n", eb->first_page->index, - page->index); WARN_ON(1); goto err; } if (!PageUptodate(page)) { - printk("csum not up to date page %lu\n", page->index); WARN_ON(1); goto err; } @@ -396,29 +364,30 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, goto out; if (!page->private) goto out; + len = page->private >> 2; - if (len == 0) { - WARN_ON(1); - } + WARN_ON(len == 0); + eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); found_start = btrfs_header_bytenr(eb); if (found_start != start) { - printk("bad tree block start %llu %llu\n", + printk(KERN_INFO "btrfs bad tree block start %llu %llu\n", (unsigned long long)found_start, (unsigned long long)eb->start); ret = -EIO; goto err; } if (eb->first_page != page) { - printk("bad first page %lu %lu\n", eb->first_page->index, - page->index); + printk(KERN_INFO "btrfs bad first page %lu %lu\n", + eb->first_page->index, page->index); WARN_ON(1); ret = -EIO; goto err; } if (check_tree_block_fsid(root, eb)) { - printk("bad fsid on block %Lu\n", eb->start); + printk(KERN_INFO "btrfs bad fsid on block %llu\n", + (unsigned long long)eb->start); ret = -EIO; goto err; } @@ -578,7 +547,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, HZ/10); } #endif - while(atomic_read(&fs_info->async_submit_draining) && + while (atomic_read(&fs_info->async_submit_draining) && atomic_read(&fs_info->nr_async_submits)) { wait_event(fs_info->async_submit_wait, (atomic_read(&fs_info->nr_async_submits) == 0)); @@ -594,7 +563,7 @@ static int btree_csum_one_bio(struct bio *bio) struct btrfs_root *root; WARN_ON(bio->bi_vcnt <= 0); - while(bio_index < bio->bi_vcnt) { + while (bio_index < bio->bi_vcnt) { root = BTRFS_I(bvec->bv_page->mapping->host)->root; csum_dirty_buffer(root, bvec->bv_page); bio_index++; @@ -680,9 +649,8 @@ static int btree_writepages(struct address_space *mapping, num_dirty = count_range_bits(tree, &start, (u64)-1, thresh, EXTENT_DIRTY); - if (num_dirty < thresh) { + if (num_dirty < thresh) return 0; - } } return extent_writepages(tree, mapping, btree_get_extent, wbc); } @@ -701,15 +669,14 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags) int ret; if (PageWriteback(page) || PageDirty(page)) - return 0; + return 0; tree = &BTRFS_I(page->mapping->host)->io_tree; map = &BTRFS_I(page->mapping->host)->extent_tree; ret = try_release_extent_state(map, tree, page, gfp_flags); - if (!ret) { + if (!ret) return 0; - } ret = try_release_extent_buffer(tree, page); if (ret == 1) { @@ -728,8 +695,8 @@ static void btree_invalidatepage(struct page *page, unsigned long offset) extent_invalidatepage(tree, page, offset); btree_releasepage(page, GFP_NOFS); if 
(PagePrivate(page)) { - printk("warning page private not zero on page %Lu\n", - page_offset(page)); + printk(KERN_WARNING "btrfs warning page private not zero " + "on page %llu\n", (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); page_cache_release(page); @@ -813,7 +780,7 @@ int btrfs_write_tree_block(struct extent_buffer *buf) int btrfs_wait_tree_block_writeback(struct extent_buffer *buf) { return btrfs_wait_on_page_writeback_range(buf->first_page->mapping, - buf->start, buf->start + buf->len -1); + buf->start, buf->start + buf->len - 1); } struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, @@ -832,11 +799,10 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); - if (ret == 0) { + if (ret == 0) buf->flags |= EXTENT_UPTODATE; - } else { + else WARN_ON(1); - } return buf; } @@ -944,7 +910,7 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, if (!log_root_tree) return 0; - while(1) { + while (1) { ret = find_first_extent_bit(&log_root_tree->dirty_log_pages, 0, &start, &end, EXTENT_DIRTY); if (ret) @@ -1165,24 +1131,6 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, root->in_sysfs = 1; return root; } -#if 0 -static int add_hasher(struct btrfs_fs_info *info, char *type) { - struct btrfs_hasher *hasher; - - hasher = kmalloc(sizeof(*hasher), GFP_NOFS); - if (!hasher) - return -ENOMEM; - hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC); - if (!hasher->hash_tfm) { - kfree(hasher); - return -EINVAL; - } - spin_lock(&info->hash_lock); - list_add(&hasher->list, &info->hashers); - spin_unlock(&info->hash_lock); - return 0; -} -#endif static int btrfs_congested_fn(void *congested_data, int bdi_bits) { @@ -1226,9 +1174,8 @@ static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page) continue; bdi = blk_get_backing_dev_info(device->bdev); - if (bdi->unplug_io_fn) { + if (bdi->unplug_io_fn) bdi->unplug_io_fn(bdi, page); - } } } @@ -1420,8 +1367,9 @@ static int transaction_kthread(void *arg) mutex_lock(&root->fs_info->transaction_kthread_mutex); if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) { - printk("btrfs: total reference cache size %Lu\n", - root->fs_info->total_ref_cache_size); + printk(KERN_INFO "btrfs: total reference cache " + "size %llu\n", + root->fs_info->total_ref_cache_size); } mutex_lock(&root->fs_info->trans_mutex); @@ -1592,14 +1540,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, atomic_set(&fs_info->tree_log_writers, 0); fs_info->tree_log_transid = 0; -#if 0 - ret = add_hasher(fs_info, "crc32c"); - if (ret) { - printk("btrfs: failed hash setup, modprobe cryptomgr?\n"); - err = -ENOMEM; - goto fail_iput; - } -#endif __setup_root(4096, 4096, 4096, 4096, tree_root, fs_info, BTRFS_ROOT_TREE_OBJECTID); @@ -1720,7 +1660,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, sizeof(disk_super->magic))) { - printk("btrfs: valid FS not found on %s\n", sb->s_id); + printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } @@ -1728,8 +1668,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk("btrfs: failed to read the system array on %s\n", - sb->s_id); + printk(KERN_WARNING "btrfs: failed to read the system " + "array on %s\n", sb->s_id); goto fail_sys_array; } @@ 
-1746,14 +1686,15 @@ struct btrfs_root *open_ctree(struct super_block *sb,
	BUG_ON(!chunk_root->node);
	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
-	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
-	   BTRFS_UUID_SIZE);
+		 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
+		 BTRFS_UUID_SIZE);
	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_chunk_tree(chunk_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
-		printk("btrfs: failed to read chunk tree on %s\n", sb->s_id);
+		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
+			sb->s_id);
		goto fail_chunk_root;
	}
@@ -1812,7 +1753,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
		u64 bytenr = btrfs_super_log_root(disk_super);
		if (fs_devices->rw_devices == 0) {
-			printk("Btrfs log replay required on RO media\n");
+			printk(KERN_WARNING "Btrfs log replay required "
+			       "on RO media\n");
			err = -EIO;
			goto fail_trans_kthread;
		}
@@ -2097,7 +2039,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
			total_errors++;
	}
	if (total_errors > max_errors) {
-		printk("btrfs: %d errors while writing supers\n", total_errors);
+		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+		       total_errors);
		BUG();
	}
@@ -2114,7 +2057,8 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
			total_errors++;
	}
	if (total_errors > max_errors) {
-		printk("btrfs: %d errors while writing supers\n", total_errors);
+		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
+		       total_errors);
		BUG();
	}
	return 0;
@@ -2137,16 +2081,11 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
		down_write(&root->anon_super.s_umount);
		kill_anon_super(&root->anon_super);
	}
-#if 0
-	if (root->in_sysfs)
-		btrfs_sysfs_del_root(root);
-#endif
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
-	if (root->name)
-		kfree(root->name);
+	kfree(root->name);
	kfree(root);
	return 0;
}
@@ -2157,7 +2096,7 @@ static int del_fs_roots(struct btrfs_fs_info *fs_info)
	struct btrfs_root *gang[8];
	int i;
-	while(1) {
+	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
@@ -2228,18 +2167,17 @@ int close_ctree(struct btrfs_root *root)
	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
-		if (ret) {
-			printk("btrfs: commit super returns %d\n", ret);
-		}
+		if (ret)
+			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}
	if (fs_info->delalloc_bytes) {
-		printk("btrfs: at unmount delalloc count %Lu\n",
+		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
-		printk("btrfs: at umount reference cache size %Lu\n",
-		       fs_info->total_ref_cache_size);
+		printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
+		       (unsigned long long)fs_info->total_ref_cache_size);
	}
	if (fs_info->extent_root->node)
@@ -2248,13 +2186,13 @@ int close_ctree(struct btrfs_root *root)
	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);
-	if (root->fs_info->chunk_root->node);
+	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);
-	if (root->fs_info->dev_root->node);
+	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);
-	if (root->fs_info->csum_root->node);
+	if (root->fs_info->csum_root->node)
		free_extent_buffer(root->fs_info->csum_root->node);
	btrfs_free_block_groups(root->fs_info);
@@ -2273,7 +2211,7 @@ int close_ctree(struct btrfs_root *root)
	btrfs_stop_workers(&fs_info->submit_workers);
#if 0
-	while(!list_empty(&fs_info->hashers)) {
+	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    hashers);
@@ -2324,9 +2262,11 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
	WARN_ON(!btrfs_tree_locked(buf));
	if (transid != root->fs_info->generation) {
-		printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
+		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
+		       "found %llu running %llu\n",
			(unsigned long long)buf->start,
-			transid, root->fs_info->generation);
+			(unsigned long long)transid,
+			(unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
@@ -2361,9 +2301,8 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;
	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
-	if (ret == 0) {
+	if (ret == 0)
		buf->flags |= EXTENT_UPTODATE;
-	}
	return ret;
}
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 48b82cd7583..85315d2c90d 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -7,9 +7,11 @@
 #include "export.h"
 #include "compat.h"
-#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4)
-#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4)
-#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4)
+#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, \
+						 parent_objectid) / 4)
+#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, \
+					     parent_root_objectid) / 4)
+#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
 static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
			   int connectable)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 780c1eeb829..ec43fa526d7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -49,10 +49,10 @@ struct pending_extent_op {
	int del;
};
-static int finish_current_insert(struct btrfs_trans_handle *trans, struct
-				 btrfs_root *extent_root, int all);
-static int del_pending_extents(struct btrfs_trans_handle *trans, struct
-			       btrfs_root *extent_root, int all);
+static int finish_current_insert(struct btrfs_trans_handle *trans,
+				 struct btrfs_root *extent_root, int all);
+static int del_pending_extents(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root, int all);
 static int pin_down_bytes(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   u64 bytenr, u64 num_bytes, int is_data);
@@ -247,7 +247,7 @@ static int cache_block_group(struct btrfs_root *root,
	if (ret < 0)
		goto err;
-	while(1) {
+	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
@@ -292,9 +292,8 @@ err:
/*
 * return the block group that starts at or after bytenr
 */
-static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
-						       btrfs_fs_info *info,
-						       u64 bytenr)
+static struct btrfs_block_group_cache *
+btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
@@ -306,9 +305,9 @@ static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
/*
 * return the block group that contains teh given bytenr
 */
-struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
-							 btrfs_fs_info *info,
-							 u64 bytenr)
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+						 struct btrfs_fs_info *info,
+						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;
@@ -492,7 +491,7 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
 * to the key objectid.
 */
-static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
+static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
@@ -537,7 +536,7 @@ out:
 * updates all the backrefs that are pending on update_list for the
 * extent_root
 */
-static int noinline update_backrefs(struct btrfs_trans_handle *trans,
+static noinline int update_backrefs(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct list_head *update_list)
@@ -573,9 +572,11 @@ loop:
		    btrfs_ref_generation(leaf, ref) != op->orig_generation ||
		    (ref_objectid != op->level &&
		     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
-			printk(KERN_ERR "couldn't find %Lu, parent %Lu, root %Lu, "
-			       "owner %u\n", op->bytenr, op->orig_parent,
-			       ref_root, op->level);
+			printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
+			       "root %llu, owner %u\n",
+			       (unsigned long long)op->bytenr,
+			       (unsigned long long)op->orig_parent,
+			       (unsigned long long)ref_root, op->level);
			btrfs_print_leaf(extent_root, leaf);
			BUG();
		}
@@ -620,7 +621,7 @@ out:
	return 0;
}
-static int noinline insert_extents(struct btrfs_trans_handle *trans,
+static noinline int insert_extents(struct btrfs_trans_handle *trans,
				   struct btrfs_root *extent_root,
				   struct btrfs_path *path,
				   struct list_head *insert_list, int nr)
@@ -781,7 +782,7 @@ static int noinline insert_extents(struct btrfs_trans_handle *trans,
	return ret;
}
-static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
+static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
@@ -840,7 +841,7 @@ out:
	return ret;
}
-static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
+static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path)
{
@@ -868,7 +869,7 @@ static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
#else
	blkdev_issue_discard(bdev, start >> 9, len >> 9);
@@ -908,7 +909,7 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
#endif
}
-static int noinline free_extents(struct btrfs_trans_handle *trans,
+static noinline int free_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root,
				 struct list_head *del_list)
{
@@ -937,10 +938,11 @@ search:
					extent_root->root_key.objectid,
					op->orig_generation, op->level, 1);
		if (ret) {
-			printk("Unable to find backref byte nr %Lu root %Lu gen %Lu "
-			       "owner %u\n", op->bytenr,
-			       extent_root->root_key.objectid, op->orig_generation,
-			       op->level);
+			printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
+			       "root %llu gen %llu owner %u\n",
+			       (unsigned long long)op->bytenr,
+			       (unsigned long long)extent_root->root_key.objectid,
+			       (unsigned long long)op->orig_generation, op->level);
			btrfs_print_leaf(extent_root, path->nodes[0]);
			WARN_ON(1);
			goto out;
@@ -1282,7 +1284,9 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	if (key.objectid != bytenr) {
		btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
-		printk("wanted %Lu found %Lu\n", bytenr, key.objectid);
+		printk(KERN_ERR "btrfs wanted %llu found %llu\n",
+		       (unsigned long long)bytenr,
+		       (unsigned long long)key.objectid);
		BUG();
	}
	BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
@@ -1353,7 +1357,8 @@ int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
		goto out;
	if (ret != 0) {
		btrfs_print_leaf(root, path->nodes[0]);
-		printk("failed to find block number %Lu\n", bytenr);
+		printk(KERN_INFO "btrfs failed to find block number %llu\n",
+		       (unsigned long long)bytenr);
		BUG();
	}
	l = path->nodes[0];
@@ -1738,7 +1743,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
	if (!path)
		return -ENOMEM;
-	while(1) {
+	while (1) {
		cache = NULL;
		spin_lock(&root->fs_info->block_group_cache_lock);
		for (n = rb_first(&root->fs_info->block_group_cache_tree);
@@ -1921,10 +1926,8 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
	spin_unlock(&space_info->lock);
	ret = btrfs_alloc_chunk(trans, extent_root, flags);
-	if (ret) {
-printk("space info full %Lu\n", flags);
+	if (ret)
		space_info->full = 1;
-	}
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
@@ -1941,7 +1944,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
	u64 old_val;
	u64 byte_in_group;
-	while(total) {
+	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -1;
@@ -2089,7 +2092,7 @@ int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
	int ret;
	mutex_lock(&root->fs_info->pinned_mutex);
-	while(1) {
+	while (1) {
		ret = find_first_extent_bit(pinned_extents, last,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
@@ -2110,7 +2113,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
	int ret;
	mutex_lock(&root->fs_info->pinned_mutex);
-	while(1) {
+	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
@@ -2400,7 +2403,7 @@ static int __free_extent(struct btrfs_trans_handle *trans,
		if (ret == 0) {
			struct btrfs_key found_key;
			extent_slot = path->slots[0];
-			while(extent_slot > 0) {
+			while (extent_slot > 0) {
				extent_slot--;
				btrfs_item_key_to_cpu(path->nodes[0], &found_key,
						      extent_slot);
@@ -2422,8 +2425,8 @@ static int __free_extent(struct btrfs_trans_handle *trans,
						&key, path, -1, 1);
			if (ret) {
				printk(KERN_ERR "umm, got %d back from search"
-				       ", was looking for %Lu\n", ret,
-				       bytenr);
+				       ", was looking for %llu\n", ret,
+				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			}
			BUG_ON(ret);
@@ -2432,9 +2435,12 @@ static int __free_extent(struct btrfs_trans_handle *trans,
	} else {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
-		printk("Unable to find ref byte nr %Lu root %Lu "
-		       "gen %Lu owner %Lu\n", bytenr,
-		       root_objectid, ref_generation, owner_objectid);
+		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
+		       "root %llu gen %llu owner %llu\n",
+		       (unsigned long long)bytenr,
+		       (unsigned long long)root_objectid,
+		       (unsigned long long)ref_generation,
+		       (unsigned long long)owner_objectid);
	}
	leaf = path->nodes[0];
@@ -2517,8 +2523,8 @@ static int __free_extent(struct btrfs_trans_handle *trans,
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
-static int del_pending_extents(struct btrfs_trans_handle *trans, struct
-			       btrfs_root *extent_root, int all)
+static int del_pending_extents(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *extent_root, int all)
{
	int ret;
	int err = 0;
@@ -2539,7 +2545,7 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct
again:
	mutex_lock(&info->extent_ins_mutex);
-	while(1) {
+	while (1) {
		ret = find_first_extent_bit(pending_del, search, &start, &end,
					    EXTENT_WRITEBACK);
		if (ret) {
@@ -2753,7 +2759,7 @@ static u64 stripe_align(struct btrfs_root *root, u64 val)
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
-static int noinline find_free_extent(struct btrfs_trans_handle *trans,
+static noinline int find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
@@ -2762,7 +2768,7 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
				     int data)
{
	int ret = 0;
-	struct btrfs_root * root = orig_root->fs_info->extent_root;
+	struct btrfs_root *root = orig_root->fs_info->extent_root;
	u64 total_needed = num_bytes;
	u64 *last_ptr = NULL;
	u64 last_wanted = 0;
@@ -2995,8 +3001,10 @@ loop_check:
		*last_ptr = ins->objectid + ins->offset;
		ret = 0;
	} else if (!ret) {
-		printk(KERN_ERR "we were searching for %Lu bytes, num_bytes %Lu,"
-		       " loop %d, allowed_alloc %d\n", total_needed, num_bytes,
+		printk(KERN_ERR "btrfs searching for %llu bytes, "
+		       "num_bytes %llu, loop %d, allowed_alloc %d\n",
+		       (unsigned long long)total_needed,
+		       (unsigned long long)num_bytes,
		       loop, allowed_chunk_alloc);
		ret = -ENOSPC;
	}
@@ -3012,19 +3020,22 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
	struct btrfs_block_group_cache *cache;
	struct list_head *l;
-	printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
-	       info->total_bytes - info->bytes_used - info->bytes_pinned -
-	       info->bytes_reserved, (info->full) ? "" : "not ");
+	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
+	       (unsigned long long)(info->total_bytes - info->bytes_used -
+				    info->bytes_pinned - info->bytes_reserved),
+	       (info->full) ? "" : "not ");
	down_read(&info->groups_sem);
	list_for_each(l, &info->block_groups) {
		cache = list_entry(l, struct btrfs_block_group_cache, list);
		spin_lock(&cache->lock);
-		printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
-		       "%Lu pinned %Lu reserved\n",
-		       cache->key.objectid, cache->key.offset,
-		       btrfs_block_group_used(&cache->item),
-		       cache->pinned, cache->reserved);
+		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
+		       "%llu pinned %llu reserved\n",
+		       (unsigned long long)cache->key.objectid,
+		       (unsigned long long)cache->key.offset,
+		       (unsigned long long)btrfs_block_group_used(&cache->item),
+		       (unsigned long long)cache->pinned,
+		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
@@ -3045,15 +3056,15 @@ static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
-				info->data_alloc_profile;
+			info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
-				info->system_alloc_profile;
+			info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
-				info->metadata_alloc_profile;
+			info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}
again:
@@ -3092,8 +3103,9 @@ again:
		struct btrfs_space_info *sinfo;
		sinfo = __find_space_info(root->fs_info, data);
-		printk("allocation failed flags %Lu, wanted %Lu\n",
-		       data, num_bytes);
+		printk(KERN_ERR "btrfs allocation failed flags %llu, "
+		       "wanted %llu\n", (unsigned long long)data,
+		       (unsigned long long)num_bytes);
		dump_space_info(sinfo, num_bytes);
		BUG();
	}
@@ -3108,7 +3120,8 @@ int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
-		printk(KERN_ERR "Unable to find block group for %Lu\n", start);
+		printk(KERN_ERR "Unable to find block group for %llu\n",
+		       (unsigned long long)start);
		return -ENOSPC;
	}
@@ -3235,10 +3248,12 @@ static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
	}
update_block:
-	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
+	ret = update_block_group(trans, root, ins->objectid,
+				 ins->offset, 1, 0);
	if (ret) {
-		printk("update block group failed for %Lu %Lu\n",
-		       ins->objectid, ins->offset);
+		printk(KERN_ERR "btrfs update block group failed for %llu "
+		       "%llu\n", (unsigned long long)ins->objectid,
+		       (unsigned long long)ins->offset);
		BUG();
	}
out:
@@ -3420,7 +3435,7 @@ int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
	return 0;
}
-static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
+static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_leaf_ref *ref)
{
@@ -3445,15 +3460,15 @@ static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
	return 0;
}
-static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
-				     u32 *refs)
+static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
+				     u64 len, u32 *refs)
{
	int ret;
	ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
	BUG_ON(ret);
-#if 0 // some debugging code in case we see problems here
+#if 0 /* some debugging code in case we see problems here */
	/* if the refs count is one, it won't get increased again.  But
	 * if the ref count is > 1, someone may be decreasing it at
	 * the same time we are.
@@ -3474,8 +3489,8 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len
		free_extent_buffer(eb);
	}
	if (*refs == 1) {
-		printk("block %llu went down to one during drop_snap\n",
-		       (unsigned long long)start);
+		printk(KERN_ERR "btrfs block %llu went down to one "
+		       "during drop_snap\n", (unsigned long long)start);
	}
@@ -3489,7 +3504,7 @@ static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
-static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
@@ -3516,7 +3531,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
	/*
	 * walk down to the last node level and free all the leaves
	 */
-	while(*level >= 0) {
+	while (*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];
@@ -3576,10 +3591,6 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
				*level = 0;
				break;
			}
-			if (printk_ratelimit()) {
-				printk("leaf ref miss for bytenr %llu\n",
-				       (unsigned long long)bytenr);
-			}
		}
		next = btrfs_find_tree_block(root, bytenr, blocksize);
		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
@@ -3641,7 +3652,7 @@ out:
 * walk_down_tree.  The main difference is that it checks reference
 * counts while tree blocks are locked.
 */
-static int noinline walk_down_subtree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path, int *level)
{
@@ -3730,7 +3741,7 @@ out:
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
-static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 int *level, int max_level)
@@ -3839,7 +3850,7 @@ int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			}
		}
	}
-	while(1) {
+	while (1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
@@ -3920,7 +3931,7 @@ static unsigned long calc_ra(unsigned long start, unsigned long last,
	return min(last, start + nr - 1);
}
-static int noinline relocate_inode_pages(struct inode *inode, u64 start,
+static noinline int relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
@@ -4011,7 +4022,7 @@ out_unlock:
	return ret;
}
-static int noinline relocate_data_extent(struct inode *reloc_inode,
+static noinline int relocate_data_extent(struct inode *reloc_inode,
					 struct btrfs_key *extent_key,
					 u64 offset)
{
@@ -4087,7 +4098,7 @@ static int is_cowonly_root(u64 root_objectid)
	return 0;
}
-static int noinline __next_ref_path(struct btrfs_trans_handle *trans,
+static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_ref_path *ref_path,
				    int first_time)
@@ -4119,11 +4130,10 @@ walk_down:
		if (level < ref_path->lowest_level)
			break;
-		if (level >= 0) {
+		if (level >= 0)
			bytenr = ref_path->nodes[level];
-		} else {
+		else
			bytenr = ref_path->extent_start;
-		}
		BUG_ON(bytenr == 0);
		parent = ref_path->nodes[level + 1];
@@ -4170,11 +4180,12 @@ walk_up:
	level = ref_path->current_level;
	while (level < BTRFS_MAX_LEVEL - 1) {
		u64 ref_objectid;
-		if (level >= 0) {
+
+		if (level >= 0)
			bytenr = ref_path->nodes[level];
-		} else {
+		else
			bytenr = ref_path->extent_start;
-		}
+
		BUG_ON(bytenr == 0);
		key.objectid = bytenr;
@@ -4299,7 +4310,7 @@ static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
	return __next_ref_path(trans, extent_root, ref_path, 0);
}
-static int noinline get_new_locations(struct inode *reloc_inode,
+static noinline int get_new_locations(struct inode *reloc_inode,
				      struct btrfs_key *extent_key,
				      u64 offset, int no_fragment,
				      struct disk_extent **extents,
@@ -4420,7 +4431,7 @@ out:
	return ret;
}
-static int noinline replace_one_extent(struct btrfs_trans_handle *trans,
+static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
@@ -4778,7 +4789,7 @@ int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
	return 0;
}
-static int noinline invalidate_extent_cache(struct btrfs_root *root,
+static noinline int invalidate_extent_cache(struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    struct btrfs_block_group_cache *group,
					    struct btrfs_root *target_root)
@@ -4826,7 +4837,7 @@ static int noinline invalidate_extent_cache(struct btrfs_root *root,
	return 0;
}
-static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans,
+static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    struct btrfs_block_group_cache *group,
@@ -5035,7 +5046,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
	return 0;
}
-static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
+static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
@@ -5102,7 +5113,7 @@ static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
 * tree blocks are shared between reloc trees, so they are also shared
 * between subvols.
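 * (once a shared block has been COWed through one reloc tree, the
 * other reloc trees can reuse the new copy, so each shared block only
 * has to be relocated once no matter how many snapshots reference it)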
 */
-static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
+static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
@@ -5199,7 +5210,7 @@ static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
	return 0;
}
-static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
+static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
@@ -5217,7 +5228,7 @@ static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
	return 0;
}
-static int noinline del_extent_zero(struct btrfs_trans_handle *trans,
+static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
@@ -5233,7 +5244,7 @@ out:
	return ret;
}
-static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
+static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;
@@ -5248,7 +5259,7 @@ static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}
-static int noinline relocate_one_extent(struct btrfs_root *extent_root,
+static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
@@ -5276,8 +5287,8 @@ static int noinline relocate_one_extent(struct btrfs_root *extent_root,
	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
-	       ret = -ENOMEM;
-	       goto out;
+		ret = -ENOMEM;
+		goto out;
	}
	for (loops = 0; ; loops++) {
@@ -5497,7 +5508,7 @@ out:
	return ret;
}
-static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info,
+static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
						 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
@@ -5617,7 +5628,7 @@ int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
	block_group = btrfs_lookup_block_group(info, group_start);
	BUG_ON(!block_group);
-	printk("btrfs relocating block group %llu flags %llu\n",
+	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)block_group->key.objectid,
	       (unsigned long long)block_group->flags);
@@ -5649,7 +5660,7 @@ again:
	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
	mutex_unlock(&root->fs_info->cleaner_mutex);
-	while(1) {
+	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
@@ -5712,7 +5723,7 @@ next:
	}
	if (total_found > 0) {
-		printk("btrfs found %llu extents in pass %d\n",
+		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
		       (unsigned long long)total_found, pass);
		pass++;
		if (total_found == skipped && pass > 2) {
@@ -5754,7 +5765,7 @@ static int find_first_block_group(struct btrfs_root *root,
	if (ret < 0)
		goto out;
-	while(1) {
+	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
@@ -5825,7 +5836,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
	if (!path)
		return -ENOMEM;
-	while(1) {
+	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0) {
			ret = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0bf7684207a..39edb551dca 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -32,7 +32,7 @@ static LIST_HEAD(states);
#define LEAK_DEBUG 0
#ifdef LEAK_DEBUG
-static spinlock_t leak_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(leak_lock);
#endif
#define BUFFER_LRU_MAX 64
@@ -81,7 +81,11 @@ void extent_io_exit(void)
	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
-		printk("state leak: start %Lu end %Lu state %lu in tree %p refs %d\n", state->start, state->end, state->state, state->tree, atomic_read(&state->refs));
+		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
+		       "state %lu in tree %p refs %d\n",
+		       (unsigned long long)state->start,
+		       (unsigned long long)state->end,
+		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
@@ -89,7 +93,9 @@ void extent_io_exit(void)
	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
-		printk("buffer leak start %Lu len %lu refs %d\n", eb->start, eb->len, atomic_read(&eb->refs));
+		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
+		       "refs %d\n", (unsigned long long)eb->start,
+		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
@@ -158,11 +164,11 @@ EXPORT_SYMBOL(free_extent_state);
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
	struct tree_entry *entry;
-	while(*p) {
+	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);
@@ -185,13 +191,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;
-	while(n) {
+	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;
@@ -200,14 +206,13 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
-		else {
+		else
			return n;
-		}
	}
	if (prev_ret) {
		orig_prev = prev;
-		while(prev && offset > prev_entry->end) {
+		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
@@ -217,7 +222,7 @@ static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
-		while(prev && offset < prev_entry->start) {
+		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
@@ -233,9 +238,8 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
	struct rb_node *ret;
	ret = __etree_search(tree, offset, &prev, NULL);
-	if (!ret) {
+	if (!ret)
		return prev;
-	}
	return ret;
}
@@ -243,11 +247,11 @@ static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
					  u64 offset, struct rb_node *node)
{
	struct rb_root *root = &tree->buffer;
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
	struct extent_buffer *eb;
-	while(*p) {
+	while (*p) {
		parent = *p;
		eb = rb_entry(parent, struct extent_buffer, rb_node);
@@ -268,10 +272,10 @@ static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
					   u64 offset)
{
	struct rb_root *root = &tree->buffer;
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
	struct extent_buffer *eb;
-	while(n) {
+	while (n) {
		eb = rb_entry(n, struct extent_buffer, rb_node);
		if (offset < eb->start)
			n = n->rb_left;
@@ -363,7 +367,9 @@ static int insert_state(struct extent_io_tree *tree,
	struct rb_node *node;
	if (end < start) {
-		printk("end < start %Lu %Lu\n", end, start);
+		printk(KERN_ERR "btrfs end < start %llu %llu\n",
+		       (unsigned long long)end,
+		       (unsigned long long)start);
		WARN_ON(1);
	}
	if (bits & EXTENT_DIRTY)
@@ -376,7 +382,10 @@ static int insert_state(struct extent_io_tree *tree,
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
-		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
+		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
+		       "%llu %llu\n", (unsigned long long)found->start,
+		       (unsigned long long)found->end,
+		       (unsigned long long)start, (unsigned long long)end);
		free_extent_state(state);
		return -EEXIST;
	}
@@ -412,7 +421,6 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
-		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
@@ -661,8 +669,9 @@ static void set_state_bits(struct extent_io_tree *tree,
 * [start, end] is inclusive
 * This takes the tree lock.
 */
-static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
-			  int exclusive, u64 *failed_start, gfp_t mask)
+static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+			  int bits, int exclusive, u64 *failed_start,
+			  gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
@@ -763,7 +772,7 @@ again:
		if (end < last_start)
			this_end = end;
		else
-			this_end = last_start -1;
+			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
@@ -891,8 +900,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
}
EXPORT_SYMBOL(set_extent_uptodate);
-static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-				 gfp_t mask)
+static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
+				 u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
@@ -904,8 +913,8 @@ static int set_extent_writeback(struct extent_io_tree *tree, u64 start,
			      u64 end, 0, NULL, mask);
}
-static int clear_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
-				  gfp_t mask)
+static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
+				  u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
@@ -1025,11 +1034,10 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
	 * our range starts.
	 */
	node = tree_search(tree, start);
-	if (!node) {
+	if (!node)
		goto out;
-	}
-	while(1) {
+	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
@@ -1062,15 +1070,14 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
	 * our range starts.
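	 *
	 * (tree_search returns the first extent that ends at or after the
	 * passed-in offset, so the walk below only ever needs rb_next; a
	 * hypothetical caller sketch, names as in this file, with the
	 * tree lock held:
	 *
	 *	state = find_first_extent_bit_state(tree, start, EXTENT_DIRTY);
	 *	if (state)
	 *		start = state->end + 1;
	 *
	 * is the usual pattern for scanning forward through the tree.)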
	 */
	node = tree_search(tree, start);
-	if (!node) {
+	if (!node)
		goto out;
-	}
-	while(1) {
+	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
-		if (state->end >= start && (state->state & bits)) {
+		if (state->end >= start && (state->state & bits))
			return state;
-		}
+
		node = rb_next(node);
		if (!node)
			break;
@@ -1108,7 +1115,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
		goto out;
	}
-	while(1) {
+	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
@@ -1150,7 +1157,7 @@ static noinline int __unlock_for_delalloc(struct inode *inode,
	if (index == locked_page->index && end_index == index)
		return 0;
-	while(nr_pages > 0) {
+	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
@@ -1186,7 +1193,7 @@ static noinline int lock_delalloc_pages(struct inode *inode,
	/* skip the page at the start index */
	nrpages = end_index - index + 1;
-	while(nrpages > 0) {
+	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nrpages,
				     ARRAY_SIZE(pages)), pages);
@@ -1263,17 +1270,16 @@ again:
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
-	if (delalloc_start < *start) {
+	if (delalloc_start < *start)
		delalloc_start = *start;
-	}
	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
-	if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
+	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
-	}
+
	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
@@ -1341,7 +1347,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
		return 0;
-	while(nr_pages > 0) {
+	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
@@ -1384,7 +1390,6 @@ u64 count_range_bits(struct extent_io_tree *tree,
	int found = 0;
	if (search_end <= cur_start) {
-		printk("search_end %Lu start %Lu\n", search_end, cur_start);
		WARN_ON(1);
		return 0;
	}
@@ -1399,11 +1404,10 @@ u64 count_range_bits(struct extent_io_tree *tree,
	 * our range starts.
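	 *
	 * (each extent_state whose bits match contributes the bytes that
	 * fall inside [cur_start, search_end] to the running total)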
	 */
	node = tree_search(tree, cur_start);
-	if (!node) {
+	if (!node)
		goto out;
-	}
-	while(1) {
+	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
@@ -1927,19 +1931,15 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
	nr = bio_get_nr_vecs(bdev);
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
-	if (!bio) {
-		printk("failed to allocate bio nr %d\n", nr);
-	}
	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
-	if (bio_ret) {
+	if (bio_ret)
		*bio_ret = bio;
-	} else {
+	else
		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
-	}
	return ret;
}
@@ -2028,13 +2028,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
			break;
		}
		extent_offset = cur - em->start;
-		if (extent_map_end(em) <= cur) {
-printk("bad mapping em [%Lu %Lu] cur %Lu\n", em->start, extent_map_end(em), cur);
-		}
		BUG_ON(extent_map_end(em) <= cur);
-		if (end < cur) {
-printk("2bad mapping end %Lu cur %Lu\n", end, cur);
-		}
		BUG_ON(end < cur);
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
@@ -2199,7 +2193,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
	delalloc_end = 0;
	page_started = 0;
	if (!epd->extent_locked) {
-		while(delalloc_end < page_end) {
+		while (delalloc_end < page_end) {
			nr_delalloc = find_lock_delalloc_range(inode, tree,
						       page,
						       &delalloc_start,
@@ -2242,9 +2236,8 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
		nr_written++;
	end = page_end;
-	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
-		printk("found delalloc bits after lock_extent\n");
-	}
+	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
+		printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
@@ -2297,7 +2290,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
-			unlock_extent(tree, unlock_start, cur + iosize -1,
+			unlock_extent(tree, unlock_start, cur + iosize - 1,
				      GFP_NOFS);
	/*
@@ -2344,9 +2337,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
		set_range_writeback(tree, cur, cur + iosize - 1);
		if (!PageWriteback(page)) {
-			printk("warning page %lu not writeback, "
-			       "cur %llu end %llu\n", page->index,
-			       (unsigned long long)cur,
+			printk(KERN_ERR "btrfs warning page %lu not "
+			       "writeback, cur %llu end %llu\n",
+			       page->index, (unsigned long long)cur,
			       (unsigned long long)end);
		}
@@ -2430,8 +2423,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			      PAGECACHE_TAG_DIRTY,
-			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+			      PAGECACHE_TAG_DIRTY, min(end - index,
+				  (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;
		scanned = 1;
@@ -2536,9 +2529,8 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
	extent_write_cache_pages(tree, mapping, &wbc_writepages,
				 __extent_writepage, &epd, flush_write_bio);
-	if (epd.bio) {
+	if (epd.bio)
		submit_one_bio(WRITE, epd.bio, 0, 0);
-	}
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);
@@ -2568,7 +2560,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
		.range_end = end + 1,
	};
-	while(start <= end) {
+	while (start <= end) {
		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
		if (clear_page_dirty_for_io(page))
			ret = __extent_writepage(page, &wbc_writepages, &epd);
@@ -2606,9 +2598,8 @@ int extent_writepages(struct extent_io_tree *tree,
	ret = extent_write_cache_pages(tree, mapping, wbc,
				       __extent_writepage, &epd,
				       flush_write_bio);
-	if (epd.bio) {
+	if (epd.bio)
		submit_one_bio(WRITE, epd.bio, 0, 0);
-	}
	return ret;
}
EXPORT_SYMBOL(extent_writepages);
@@ -2666,7 +2657,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
-	start += (offset + blocksize -1) & ~(blocksize - 1);
+	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;
@@ -2727,12 +2718,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
	orig_block_start = block_start;
	lock_extent(tree, page_start, page_end, GFP_NOFS);
-	while(block_start <= block_end) {
+	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end - block_start + 1, 1);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR(em) || !em)
			goto err;
-		}
+
		cur_end = min(block_end, extent_map_end(em) - 1);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
@@ -3170,7 +3161,7 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		set_extent_dirty(tree, page_offset(page),
-				 page_offset(page) + PAGE_CACHE_SIZE -1,
+				 page_offset(page) + PAGE_CACHE_SIZE - 1,
				 GFP_NOFS);
		unlock_page(page);
	}
@@ -3235,7 +3226,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
	ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
	if (ret)
		return 1;
-	while(start <= end) {
+	while (start <= end) {
		index = start >> PAGE_CACHE_SHIFT;
		page = find_get_page(tree->mapping, index);
		uptodate = PageUptodate(page);
@@ -3321,16 +3312,12 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
			lock_page(page);
		}
		locked_pages++;
-		if (!PageUptodate(page)) {
+		if (!PageUptodate(page))
			all_uptodate = 0;
-		}
	}
	if (all_uptodate) {
		if (start_i == 0)
			eb->flags |= EXTENT_UPTODATE;
-		if (ret) {
-			printk("all up to date but ret is %d\n", ret);
-		}
		goto unlock_exit;
	}
@@ -3345,10 +3332,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
			err = __extent_read_full_page(tree, page,
						      get_extent, &bio,
						      mirror_num, &bio_flags);
-			if (err) {
+			if (err)
				ret = err;
-				printk("err %d from __extent_read_full_page\n", ret);
-			}
		} else {
			unlock_page(page);
		}
@@ -3357,26 +3342,23 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
	if (bio)
		submit_one_bio(READ, bio, mirror_num, bio_flags);
-	if (ret || !wait) {
-		if (ret)
-			printk("ret %d wait %d returning\n", ret, wait);
+	if (ret || !wait)
		return ret;
-	}
+
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
-		if (!PageUptodate(page)) {
-			printk("page not uptodate after wait_on_page_locked\n");
+		if (!PageUptodate(page))
			ret = -EIO;
-		}
	}
+
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
unlock_exit:
	i = start_i;
-	while(locked_pages > 0) {
+	while (locked_pages > 0) {
		page = extent_buffer_page(eb, i);
		i++;
		unlock_page(page);
@@ -3403,7 +3385,7 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
-	while(len > 0) {
+	while (len > 0) {
		page = extent_buffer_page(eb, i);
		cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -3442,8 +3424,11 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
		offset = 0;
		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
	}
+
	if (start + min_len > eb->len) {
-printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
+		printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
+		       "wanted %lu %lu\n", (unsigned long long)eb->start,
+		       eb->len, start, min_len);
		WARN_ON(1);
	}
@@ -3506,7 +3491,7 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
-	while(len > 0) {
+	while (len > 0) {
		page = extent_buffer_page(eb, i);
		cur = min(len, (PAGE_CACHE_SIZE - offset));
@@ -3542,7 +3527,7 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
-	while(len > 0) {
+	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));
@@ -3574,7 +3559,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
-	while(len > 0) {
+	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));
@@ -3607,7 +3592,7 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
	offset = (start_offset + dst_offset) &
		 ((unsigned long)PAGE_CACHE_SIZE - 1);
-	while(len > 0) {
+	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));
@@ -3674,17 +3659,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
	unsigned long src_i;
	if (src_offset + len > dst->len) {
-		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
-		       src_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		       "len %lu dst len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
-		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
-		       dst_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		       "len %lu dst len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
-	while(len > 0) {
+	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
				  ((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
@@ -3722,20 +3707,20 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
	unsigned long src_i;
	if (src_offset + len > dst->len) {
-		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
-		       src_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
+		       "len %lu len %lu\n", src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
-		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
-		       dst_offset, len, dst->len);
+		printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
+		       "len %lu len %lu\n", dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
-	while(len > 0) {
+	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index fd3ebfb8c3c..4a83e33ada3 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -89,11 +89,11 @@ EXPORT_SYMBOL(free_extent_map);
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
	struct extent_map *entry;
-	while(*p) {
+	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);
@@ -122,13 +122,13 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
-	struct rb_node * n = root->rb_node;
+	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;
-	while(n) {
+	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;
@@ -145,7 +145,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
	if (prev_ret) {
		orig_prev = prev;
-		while(prev && offset >= extent_map_end(prev_entry)) {
+		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
@@ -155,7 +155,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
-		while(prev && offset < prev_entry->start) {
+		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index cc6e0b6de94..b11abfad81a 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -24,7 +24,7 @@
 #include "transaction.h"
 #include "print-tree.h"
-#define MAX_CSUM_ITEMS(r,size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
+#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
@@ -166,7 +166,7 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
	WARN_ON(bio->bi_vcnt <= 0);
	disk_bytenr = (u64)bio->bi_sector << 9;
-	while(bio_index < bio->bi_vcnt) {
+	while (bio_index < bio->bi_vcnt) {
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		ret = btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum);
		if (ret == 0)
@@ -192,8 +192,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
						offset + bvec->bv_len - 1,
						EXTENT_NODATASUM, GFP_NOFS);
			} else {
-				printk("no csum found for inode %lu "
-				       "start %llu\n", inode->i_ino,
+				printk(KERN_INFO "btrfs no csum found "
+				       "for inode %lu start %llu\n",
+				       inode->i_ino,
				       (unsigned long long)offset);
			}
			item = NULL;
@@ -373,7 +374,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
	BUG_ON(!ordered);
	sums->bytenr = ordered->start;
-	while(bio_index < bio->bi_vcnt) {
+	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
@@ -507,7 +508,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
	path = btrfs_alloc_path();
-	while(1) {
+	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;
@@ -715,9 +716,8 @@ again:
		goto csum;
	diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
-	if (diff != csum_size) {
+	if (diff != csum_size)
		goto insert;
-	}
	ret = btrfs_extend_item(trans, root, path, diff);
	BUG_ON(ret);
@@ -732,7 +732,7 @@ insert:
		u64 next_sector = sector_sum->bytenr;
		struct btrfs_sector_sum *next = sector_sum + 1;
-		while(tmp < sums->len) {
+		while (tmp < sums->len) {
			if (next_sector + root->sectorsize != next->bytenr)
				break;
			tmp += root->sectorsize;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 5908521922f..0e3a13a4565 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -44,10 +44,10 @@
 /* simple helper to fault in pages and copy.  This should go away
  * and be replaced with calls into generic code.
  */
-static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
+static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
-					 const char __user * buf)
+					 const char __user *buf)
{
	long page_fault = 0;
	int i;
@@ -78,7 +78,7 @@ static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
/*
 * unlocks pages after btrfs_file_write is done with them
 */
-static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
+static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
@@ -103,7 +103,7 @@ static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
-static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
+static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
@@ -137,9 +137,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;
-	if ((end_of_last_block & 4095) == 0) {
-		printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
-	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	/* check for reserved extents on each page, we don't want
@@ -185,7 +182,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
		len = (u64)-1;
		testend = 0;
	}
-	while(1) {
+	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
@@ -295,7 +292,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
-	while(1) {
+	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
@@ -314,8 +311,10 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
-			printk("inode %lu found offset %Lu expected %Lu\n",
-			       inode->i_ino, found_key.offset, last_offset);
+			printk(KERN_ERR "inode %lu found offset %llu "
+			       "expected %llu\n", inode->i_ino,
+			       (unsigned long long)found_key.offset,
+			       (unsigned long long)last_offset);
			err = 1;
			goto out;
		}
@@ -331,7 +330,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
-				~((u64)root->sectorsize -1 );
+				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
@@ -339,8 +338,9 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
-		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
-		       last_offset, inode->i_size);
+		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
+		       inode->i_ino, (unsigned long long)last_offset,
+		       (unsigned long long)inode->i_size);
		err = 1;
	}
@@ -362,7 +362,7 @@ out:
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
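 *
 * For example, dropping the range [4k, 12k) from a file whose only
 * extent covers [0, 16k) truncates that extent to [0, 4k) and splits
 * off [12k, 16k); an extent that sits entirely inside the dropped
 * range is deleted from the tree.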
 */
-int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
+noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
@@ -398,7 +398,7 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
-	while(1) {
+	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
@@ -649,16 +649,15 @@ next_slot:
			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr, orig_parent,
-						leaf->start,
+						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);
				BUG_ON(ret);
			}
			btrfs_release_path(root, path);
-			if (disk_bytenr != 0) {
+			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
-			}
		}
		if (found_extent && !keep) {
@@ -944,7 +943,7 @@ done:
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
-static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
+static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
@@ -979,7 +978,8 @@ again:
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
-		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1);
+		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
@@ -1085,7 +1085,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
		}
	}
-	while(count > 0) {
+	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
@@ -1178,7 +1178,7 @@ out_nolock:
	return num_written ? num_written : err;
}
-int btrfs_release_file(struct inode * inode, struct file * filp)
+int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
@@ -1237,9 +1237,8 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
	}
	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
-	if (ret < 0) {
+	if (ret < 0)
		goto out;
-	}
	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 2e69b9c3043..d1e5f0e84c5 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -213,10 +213,13 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
		info->offset = offset;
		info->bytes += bytes;
	} else if (right_info && right_info->offset != offset+bytes) {
-		printk(KERN_ERR "adding space in the middle of an existing "
-		       "free space area. existing: offset=%Lu, bytes=%Lu. "
-		       "new: offset=%Lu, bytes=%Lu\n", right_info->offset,
-		       right_info->bytes, offset, bytes);
+		printk(KERN_ERR "btrfs adding space in the middle of an "
+		       "existing free space area. existing: "
+		       "offset=%llu, bytes=%llu. new: offset=%llu, "
+		       "bytes=%llu\n", (unsigned long long)right_info->offset,
+		       (unsigned long long)right_info->bytes,
+		       (unsigned long long)offset,
+		       (unsigned long long)bytes);
		BUG();
	}
@@ -225,11 +228,14 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
		if (unlikely((left_info->offset + left_info->bytes) !=
			     offset)) {
-			printk(KERN_ERR "free space to the left of new free "
-			       "space isn't quite right. existing: offset=%Lu,"
-			       " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n",
-			       left_info->offset, left_info->bytes, offset,
-			       bytes);
+			printk(KERN_ERR "btrfs free space to the left "
+			       "of new free space isn't "
+			       "quite right. existing: offset=%llu, "
+			       "bytes=%llu. new: offset=%llu, bytes=%llu\n",
+			       (unsigned long long)left_info->offset,
+			       (unsigned long long)left_info->bytes,
+			       (unsigned long long)offset,
+			       (unsigned long long)bytes);
			BUG();
		}
@@ -265,8 +271,7 @@ out:
		BUG();
	}
-	if (alloc_info)
-		kfree(alloc_info);
+	kfree(alloc_info);
	return ret;
}
@@ -283,9 +288,11 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
	if (info && info->offset == offset) {
		if (info->bytes < bytes) {
-			printk(KERN_ERR "Found free space at %Lu, size %Lu,"
-			       "trying to use %Lu\n",
-			       info->offset, info->bytes, bytes);
+			printk(KERN_ERR "Found free space at %llu, size %llu,"
+			       "trying to use %llu\n",
+			       (unsigned long long)info->offset,
+			       (unsigned long long)info->bytes,
+			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out;
		}
@@ -401,8 +408,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
-		//printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset,
-		//       info->bytes);
	}
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 80038c5ef7c..2aa79873eb4 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -129,7 +129,6 @@ int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
		last_ino = key.objectid + 1;
		path->slots[0]++;
	}
-	// FIXME -ENOSPC
	BUG_ON(1);
found:
	btrfs_release_path(root, path);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 068bad46338..1b35ea63b6c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -124,7 +124,7 @@ int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
 * the btree.
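 * Inline extents store the file data in the btree leaf itself, so
 * this path only applies when the (possibly compressed) data is small
 * enough to fit inside a single leaf.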
The caller should have done a btrfs_drop_extents so that * no overlapping inline items exist in the btree */ -static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, +static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, size_t size, size_t compressed_size, struct page **compressed_pages) @@ -148,7 +148,8 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, cur_size = compressed_size; } - path = btrfs_alloc_path(); if (!path) + path = btrfs_alloc_path(); + if (!path) return -ENOMEM; btrfs_set_trans_block_group(trans, inode); @@ -165,7 +166,6 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); if (ret) { err = ret; - printk("got bad ret %d\n", ret); goto fail; } leaf = path->nodes[0]; @@ -181,7 +181,7 @@ static int noinline insert_inline_extent(struct btrfs_trans_handle *trans, if (use_compress) { struct page *cpage; int i = 0; - while(compressed_size > 0) { + while (compressed_size > 0) { cpage = compressed_pages[i]; cur_size = min_t(unsigned long, compressed_size, PAGE_CACHE_SIZE); @@ -519,8 +519,7 @@ free_pages_out: WARN_ON(pages[i]->mapping); page_cache_release(pages[i]); } - if (pages) - kfree(pages); + kfree(pages); goto out; } @@ -549,7 +548,7 @@ static noinline int submit_compressed_extents(struct inode *inode, trans = btrfs_join_transaction(root, 1); - while(!list_empty(&async_cow->extents)) { + while (!list_empty(&async_cow->extents)) { async_extent = list_entry(async_cow->extents.next, struct async_extent, list); list_del(&async_extent->list); @@ -562,8 +561,8 @@ static noinline int submit_compressed_extents(struct inode *inode, unsigned long nr_written = 0; lock_extent(io_tree, async_extent->start, - async_extent->start + async_extent->ram_size - 1, - GFP_NOFS); + async_extent->start + + async_extent->ram_size - 1, GFP_NOFS); /* allocate blocks */ cow_file_range(inode, async_cow->locked_page, @@ -581,7 +580,7 @@ static noinline int submit_compressed_extents(struct inode *inode, if (!page_started) extent_write_locked_range(io_tree, inode, async_extent->start, - async_extent->start + + async_extent->start + async_extent->ram_size - 1, btrfs_get_extent, WB_SYNC_ALL); @@ -618,7 +617,7 @@ static noinline int submit_compressed_extents(struct inode *inode, set_bit(EXTENT_FLAG_PINNED, &em->flags); set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); - while(1) { + while (1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); spin_unlock(&em_tree->lock); @@ -651,11 +650,11 @@ static noinline int submit_compressed_extents(struct inode *inode, NULL, 1, 1, 0, 1, 1, 0); ret = btrfs_submit_compressed_write(inode, - async_extent->start, - async_extent->ram_size, - ins.objectid, - ins.offset, async_extent->pages, - async_extent->nr_pages); + async_extent->start, + async_extent->ram_size, + ins.objectid, + ins.offset, async_extent->pages, + async_extent->nr_pages); BUG_ON(ret); trans = btrfs_join_transaction(root, 1); @@ -735,14 +734,13 @@ static noinline int cow_file_range(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); - while(disk_num_bytes > 0) { + while (disk_num_bytes > 0) { cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent); ret = btrfs_reserve_extent(trans, root, cur_alloc_size, root->sectorsize, 0, alloc_hint, (u64)-1, &ins, 1); - if (ret) { - BUG(); - } + BUG_ON(ret); + em = alloc_extent_map(GFP_NOFS); em->start = start; em->orig_start = em->start; @@ -755,7 +753,7 @@ 
static noinline int cow_file_range(struct inode *inode, em->bdev = root->fs_info->fs_devices->latest_bdev; set_bit(EXTENT_FLAG_PINNED, &em->flags); - while(1) { + while (1) { spin_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); spin_unlock(&em_tree->lock); @@ -779,11 +777,9 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(ret); } - if (disk_num_bytes < cur_alloc_size) { - printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes, - cur_alloc_size); + if (disk_num_bytes < cur_alloc_size) break; - } + /* we're not doing compressed IO, don't unlock the first * page (which the caller expects to stay locked), don't * clear any dirty bits and don't set any writeback bits @@ -842,9 +838,8 @@ static noinline void async_cow_submit(struct btrfs_work *work) waitqueue_active(&root->fs_info->async_submit_wait)) wake_up(&root->fs_info->async_submit_wait); - if (async_cow->inode) { + if (async_cow->inode) submit_compressed_extents(async_cow->inode, async_cow); - } } static noinline void async_cow_free(struct btrfs_work *work) @@ -871,7 +866,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC, 1, 0, GFP_NOFS); - while(start < end) { + while (start < end) { async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); async_cow->inode = inode; async_cow->root = root; @@ -904,7 +899,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, limit)); } - while(atomic_read(&root->fs_info->async_submit_draining) && + while (atomic_read(&root->fs_info->async_submit_draining) && atomic_read(&root->fs_info->async_delalloc_pages)) { wait_event(root->fs_info->async_submit_wait, (atomic_read(&root->fs_info->async_delalloc_pages) == @@ -918,7 +913,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, return 0; } -static int noinline csum_exist_in_range(struct btrfs_root *root, +static noinline int csum_exist_in_range(struct btrfs_root *root, u64 bytenr, u64 num_bytes) { int ret; @@ -1146,13 +1141,13 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, if (btrfs_test_flag(inode, NODATACOW)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 1, nr_written); + page_started, 1, nr_written); else if (btrfs_test_flag(inode, PREALLOC)) ret = run_delalloc_nocow(inode, locked_page, start, end, - page_started, 0, nr_written); + page_started, 0, nr_written); else ret = cow_file_range_async(inode, locked_page, start, end, - page_started, nr_written); + page_started, nr_written); return ret; } @@ -1200,8 +1195,11 @@ static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end, spin_lock(&root->fs_info->delalloc_lock); if (end - start + 1 > root->fs_info->delalloc_bytes) { - printk("warning: delalloc account %Lu %Lu\n", - end - start + 1, root->fs_info->delalloc_bytes); + printk(KERN_INFO "btrfs warning: delalloc account " + "%llu %llu\n", + (unsigned long long)end - start + 1, + (unsigned long long) + root->fs_info->delalloc_bytes); root->fs_info->delalloc_bytes = 0; BTRFS_I(inode)->delalloc_bytes = 0; } else { @@ -1241,9 +1239,8 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, ret = btrfs_map_block(map_tree, READ, logical, &map_length, NULL, 0); - if (map_length < length + size) { + if (map_length < length + size) return 1; - } return 0; } @@ -1255,8 +1252,9 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset, * At IO completion time the cums 
attached on the ordered extent record * are inserted into the btree */ -static int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio, - int mirror_num, unsigned long bio_flags) +static int __btrfs_submit_bio_start(struct inode *inode, int rw, + struct bio *bio, int mirror_num, + unsigned long bio_flags) { struct btrfs_root *root = BTRFS_I(inode)->root; int ret = 0; @@ -1341,9 +1339,8 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) { - if ((end & (PAGE_CACHE_SIZE - 1)) == 0) { + if ((end & (PAGE_CACHE_SIZE - 1)) == 0) WARN_ON(1); - } return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS); } @@ -1755,14 +1752,14 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } local_irq_save(flags); kaddr = kmap_atomic(page, KM_IRQ0); - if (ret) { + if (ret) goto zeroit; - } + csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1); btrfs_csum_final(csum, (char *)&csum); - if (csum != private) { + if (csum != private) goto zeroit; - } + kunmap_atomic(kaddr, KM_IRQ0); local_irq_restore(flags); good: @@ -1773,9 +1770,10 @@ good: return 0; zeroit: - printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n", - page->mapping->host->i_ino, (unsigned long long)start, csum, - private); + printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " + "private %llu\n", page->mapping->host->i_ino, + (unsigned long long)start, csum, + (unsigned long long)private); memset(kaddr + offset, 1, end - start + 1); flush_dcache_page(page); kunmap_atomic(kaddr, KM_IRQ0); @@ -2097,9 +2095,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, /* * copy everything in the in-memory inode into the btree. 
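The readpage end_io hook above is the read-side half of the data checksum scheme: recompute a checksum over the page contents, compare it against the value stashed earlier in the extent state (the "private" word), and poison the page on mismatch so stale data is never handed back. A rough userspace sketch of that shape, with a toy checksum standing in for the crc32c that btrfs_csum_data() actually computes:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* toy checksum; the real code uses crc32c via btrfs_csum_data() */
	static uint32_t toy_csum(const void *buf, size_t len)
	{
		const uint8_t *p = buf;
		uint32_t sum = ~0u;

		while (len--)
			sum = (sum << 1) ^ *p++;
		return sum;
	}

	int main(void)
	{
		char page[64] = "some file data";
		uint32_t stored = toy_csum(page, sizeof(page)); /* write side */

		/* read side: recompute, compare, poison on mismatch */
		if (toy_csum(page, sizeof(page)) != stored) {
			memset(page, 1, sizeof(page));
			printf("csum mismatch, page poisoned\n");
			return 1;
		}
		printf("csum ok\n");
		return 0;
	}
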
*/ -int noinline btrfs_update_inode(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct inode *inode) +noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_root *root, struct inode *inode) { struct btrfs_inode_item *inode_item; struct btrfs_path *path; @@ -2174,7 +2171,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, inode->i_ino, dir->i_ino, &index); if (ret) { - printk("failed to delete reference to %.*s, " + printk(KERN_INFO "btrfs failed to delete reference to %.*s, " "inode %lu parent %lu\n", name_len, name, inode->i_ino, dir->i_ino); goto err; @@ -2280,9 +2277,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) /* now the directory is empty */ err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, dentry->d_name.name, dentry->d_name.len); - if (!err) { + if (!err) btrfs_i_size_write(inode, 0); - } fail_trans: nr = trans->blocks_used; @@ -2516,9 +2512,9 @@ noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, search_again: ret = btrfs_search_slot(trans, root, &key, path, -1, 1); - if (ret < 0) { + if (ret < 0) goto error; - } + if (ret > 0) { /* there are no items in the tree for us to truncate, we're * done @@ -2530,7 +2526,7 @@ search_again: path->slots[0]--; } - while(1) { + while (1) { fi = NULL; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); @@ -2562,19 +2558,18 @@ search_again: item_end--; } if (item_end < new_size) { - if (found_type == BTRFS_DIR_ITEM_KEY) { + if (found_type == BTRFS_DIR_ITEM_KEY) found_type = BTRFS_INODE_ITEM_KEY; - } else if (found_type == BTRFS_EXTENT_ITEM_KEY) { + else if (found_type == BTRFS_EXTENT_ITEM_KEY) found_type = BTRFS_EXTENT_DATA_KEY; - } else if (found_type == BTRFS_EXTENT_DATA_KEY) { + else if (found_type == BTRFS_EXTENT_DATA_KEY) found_type = BTRFS_XATTR_ITEM_KEY; - } else if (found_type == BTRFS_XATTR_ITEM_KEY) { + else if (found_type == BTRFS_XATTR_ITEM_KEY) found_type = BTRFS_INODE_REF_KEY; - } else if (found_type) { + else if (found_type) found_type--; - } else { + else break; - } btrfs_set_key_type(&key, found_type); goto next; } @@ -2656,7 +2651,7 @@ delete: pending_del_nr++; pending_del_slot = path->slots[0]; } else { - printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot); + BUG(); } } else { break; @@ -2938,9 +2933,10 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, namelen, 0); if (IS_ERR(di)) ret = PTR_ERR(di); - if (!di || IS_ERR(di)) { + + if (!di || IS_ERR(di)) goto out_err; - } + btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); out: btrfs_free_path(path); @@ -3020,8 +3016,8 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) static int btrfs_find_actor(struct inode *inode, void *opaque) { struct btrfs_iget_args *args = opaque; - return (args->ino == inode->i_ino && - args->root == BTRFS_I(inode)->root); + return args->ino == inode->i_ino && + args->root == BTRFS_I(inode)->root; } struct inode *btrfs_ilookup(struct super_block *s, u64 objectid, @@ -3085,7 +3081,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) { - struct inode * inode; + struct inode *inode; struct btrfs_inode *bi = BTRFS_I(dir); struct btrfs_root *root = bi->root; struct btrfs_root *sub_root = root; @@ -3385,9 +3381,8 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) if (BTRFS_I(dir)->index_cnt == 
(u64)-1) { ret = btrfs_set_inode_index_count(dir); - if (ret) { + if (ret) return ret; - } } *index = BTRFS_I(dir)->index_cnt; @@ -3879,12 +3874,13 @@ static noinline int uncompress_inline(struct btrfs_path *path, /* * a bit scary, this does extent mapping from logical file offset to the disk. - * the ugly parts come from merging extents from the disk with the - * in-ram representation. This gets more complex because of the data=ordered code, + * the ugly parts come from merging extents from the disk with the in-ram + * representation. This gets more complex because of the data=ordered code, * where the in-ram extents might be locked pending data=ordered completion. * * This also copies inline extents directly into the page. */ + struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) @@ -4081,7 +4077,7 @@ again: extent_map_end(em) - 1, GFP_NOFS); goto insert; } else { - printk("unkknown found_type %d\n", found_type); + printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); WARN_ON(1); } not_found: @@ -4093,7 +4089,11 @@ not_found_em: insert: btrfs_release_path(root, path); if (em->start > start || extent_map_end(em) <= start) { - printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len); + printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " + "[%llu %llu]\n", (unsigned long long)em->start, + (unsigned long long)em->len, + (unsigned long long)start, + (unsigned long long)len); err = -EIO; goto out; } @@ -4130,8 +4130,6 @@ insert: } } else { err = -EIO; - printk("failing to insert %Lu %Lu\n", - start, len); free_extent_map(em); em = NULL; } @@ -4147,9 +4145,8 @@ out: btrfs_free_path(path); if (trans) { ret = btrfs_end_transaction(trans, root); - if (!err) { + if (!err) err = ret; - } } if (err) { free_extent_map(em); @@ -4482,13 +4479,15 @@ void btrfs_destroy_inode(struct inode *inode) } spin_unlock(&BTRFS_I(inode)->root->list_lock); - while(1) { + while (1) { ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); if (!ordered) break; else { - printk("found ordered extent %Lu %Lu\n", - ordered->file_offset, ordered->len); + printk(KERN_ERR "btrfs found ordered " + "extent %llu %llu on inode cleanup\n", + (unsigned long long)ordered->file_offset, + (unsigned long long)ordered->len); btrfs_remove_ordered_extent(inode, ordered); btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered); @@ -4572,8 +4571,8 @@ static int btrfs_getattr(struct vfsmount *mnt, return 0; } -static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry, - struct inode * new_dir,struct dentry *new_dentry) +static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) { struct btrfs_trans_handle *trans; struct btrfs_root *root = BTRFS_I(old_dir)->root; @@ -4663,7 +4662,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) return -EROFS; spin_lock(&root->fs_info->delalloc_lock); - while(!list_empty(head)) { + while (!list_empty(head)) { binode = list_entry(head->next, struct btrfs_inode, delalloc_inodes); inode = igrab(&binode->vfs_inode); @@ -4684,7 +4683,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root) * ordered extents get created before we return */ atomic_inc(&root->fs_info->async_submit_draining); - while(atomic_read(&root->fs_info->nr_async_submits) || + while (atomic_read(&root->fs_info->nr_async_submits) || atomic_read(&root->fs_info->async_delalloc_pages)) { 
wait_event(root->fs_info->async_submit_wait, (atomic_read(&root->fs_info->nr_async_submits) == 0 && diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ba484aac1b9..c2aa33e3feb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -311,7 +311,7 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name, * to see if is references the subvolume where we are * placing this new snapshot. */ - while(1) { + while (1) { if (!test || dir == snap_src->fs_info->sb->s_root || test == snap_src->fs_info->sb->s_root || @@ -319,7 +319,8 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name, break; } if (S_ISLNK(test->d_inode->i_mode)) { - printk("Symlink in snapshot path, failed\n"); + printk(KERN_INFO "Btrfs symlink in snapshot " + "path, failed\n"); error = -EMLINK; btrfs_free_path(path); goto out_drop_write; @@ -329,7 +330,8 @@ static noinline int btrfs_mksubvol(struct path *parent, char *name, ret = btrfs_find_root_ref(snap_src->fs_info->tree_root, path, test_oid, parent_oid); if (ret == 0) { - printk("Snapshot creation failed, looping\n"); + printk(KERN_INFO "Btrfs snapshot creation " + "failed, looping\n"); error = -EMLINK; btrfs_free_path(path); goto out_drop_write; @@ -617,7 +619,8 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, src_inode = src_file->f_path.dentry->d_inode; if (src_inode->i_sb != file->f_path.dentry->d_inode->i_sb) { - printk("btrfs: Snapshot src from another FS\n"); + printk(KERN_INFO "btrfs: Snapshot src from " + "another FS\n"); ret = -EINVAL; fput(src_file); goto out; @@ -810,9 +813,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ((off + len) & (bs-1))) goto out_unlock; - printk("final src extent is %llu~%llu\n", off, len); - printk("final dst extent is %llu~%llu\n", destoff, len); - /* do any pending delalloc/csum calc on src, one way or another, and lock file content */ while (1) { @@ -883,10 +883,13 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, comp = btrfs_file_extent_compression(leaf, extent); type = btrfs_file_extent_type(leaf, extent); if (type == BTRFS_FILE_EXTENT_REG) { - disko = btrfs_file_extent_disk_bytenr(leaf, extent); - diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); + disko = btrfs_file_extent_disk_bytenr(leaf, + extent); + diskl = btrfs_file_extent_disk_num_bytes(leaf, + extent); datao = btrfs_file_extent_offset(leaf, extent); - datal = btrfs_file_extent_num_bytes(leaf, extent); + datal = btrfs_file_extent_num_bytes(leaf, + extent); } else if (type == BTRFS_FILE_EXTENT_INLINE) { /* take upper bound, may be compressed */ datal = btrfs_file_extent_ram_bytes(leaf, @@ -916,8 +919,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); - printk(" orig disk %llu~%llu data %llu~%llu\n", - disko, diskl, datao, datal); if (off > key.offset) { datao += off - key.offset; @@ -929,8 +930,6 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, /* disko == 0 means it's a hole */ if (!disko) datao = 0; - printk(" final disk %llu~%llu data %llu~%llu\n", - disko, diskl, datao, datal); btrfs_set_file_extent_offset(leaf, extent, datao); @@ -952,12 +951,11 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, skip = off - key.offset; new_key.offset += skip; } + if (key.offset + datal > off+len) trim = key.offset + datal - (off+len); - printk("len %lld skip %lld trim %lld\n", - datal, skip, trim); + if (comp && (skip || trim)) { - printk("btrfs 
clone_range can't split compressed inline extents yet\n"); ret = -EINVAL; goto out; } @@ -969,7 +967,8 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, goto out; if (skip) { - u32 start = btrfs_file_extent_calc_inline_size(0); + u32 start = + btrfs_file_extent_calc_inline_size(0); memmove(buf+start, buf+start+skip, datal); } @@ -985,7 +984,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_mark_buffer_dirty(leaf); } - next: +next: btrfs_release_path(root, path); key.offset++; } diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index e30aa6e2958..39bae7761db 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -31,9 +31,10 @@ * difference in almost every workload, but spinning for the right amount of * time needs some help. * - * In general, we want to spin as long as the lock holder is doing btree searches, - * and we should give up if they are in more expensive code. + * In general, we want to spin as long as the lock holder is doing btree + * searches, and we should give up if they are in more expensive code. */ + int btrfs_tree_lock(struct extent_buffer *eb) { int i; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index d9e232227da..a2094017027 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -39,11 +39,11 @@ static u64 entry_end(struct btrfs_ordered_extent *entry) static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, struct rb_node *node) { - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; struct btrfs_ordered_extent *entry; - while(*p) { + while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); @@ -67,13 +67,13 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset, static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, struct rb_node **prev_ret) { - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *test; struct btrfs_ordered_extent *entry; struct btrfs_ordered_extent *prev_entry = NULL; - while(n) { + while (n) { entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); prev = n; prev_entry = entry; @@ -88,7 +88,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, if (!prev_ret) return NULL; - while(prev && file_offset >= entry_end(prev_entry)) { + while (prev && file_offset >= entry_end(prev_entry)) { test = rb_next(prev); if (!test) break; @@ -102,7 +102,7 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset, if (prev) prev_entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node); - while(prev && file_offset < entry_end(prev_entry)) { + while (prev && file_offset < entry_end(prev_entry)) { test = rb_prev(prev); if (!test) break; @@ -193,10 +193,8 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, node = tree_insert(&tree->tree, file_offset, &entry->rb_node); - if (node) { - printk("warning dup entry from add_ordered_extent\n"); - BUG(); - } + BUG_ON(node); + set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset, entry_end(entry) - 1, GFP_NOFS); @@ -282,7 +280,7 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry) struct btrfs_ordered_sum *sum; if (atomic_dec_and_test(&entry->refs)) { - while(!list_empty(&entry->list)) { + while (!list_empty(&entry->list)) { cur = entry->list.next; sum = list_entry(cur, struct btrfs_ordered_sum, list); 
list_del(&sum->list); @@ -432,11 +430,10 @@ again: orig_end >> PAGE_CACHE_SHIFT); end = orig_end; - while(1) { + while (1) { ordered = btrfs_lookup_first_ordered_extent(inode, end); - if (!ordered) { + if (!ordered) break; - } if (ordered->file_offset > orig_end) { btrfs_put_ordered_extent(ordered); break; @@ -492,7 +489,7 @@ out: * if none is found */ struct btrfs_ordered_extent * -btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset) +btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -553,7 +550,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, * yet */ node = &ordered->rb_node; - while(1) { + while (1) { node = rb_prev(node); if (!node) break; @@ -581,9 +578,8 @@ int btrfs_ordered_update_i_size(struct inode *inode, * between our ordered extent and the next one. */ test = rb_entry(node, struct btrfs_ordered_extent, rb_node); - if (test->file_offset > entry_end(ordered)) { + if (test->file_offset > entry_end(ordered)) i_size_test = test->file_offset; - } } else { i_size_test = i_size_read(inode); } diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 64725c13aa1..5f8f218c100 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -24,13 +24,14 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) { int num_stripes = btrfs_chunk_num_stripes(eb, chunk); int i; - printk("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n", + printk(KERN_INFO "\t\tchunk length %llu owner %llu type %llu " + "num_stripes %d\n", (unsigned long long)btrfs_chunk_length(eb, chunk), (unsigned long long)btrfs_chunk_owner(eb, chunk), (unsigned long long)btrfs_chunk_type(eb, chunk), num_stripes); for (i = 0 ; i < num_stripes ; i++) { - printk("\t\t\tstripe %d devid %llu offset %llu\n", i, + printk(KERN_INFO "\t\t\tstripe %d devid %llu offset %llu\n", i, (unsigned long long)btrfs_stripe_devid_nr(eb, chunk, i), (unsigned long long)btrfs_stripe_offset_nr(eb, chunk, i)); } @@ -38,8 +39,8 @@ static void print_chunk(struct extent_buffer *eb, struct btrfs_chunk *chunk) static void print_dev_item(struct extent_buffer *eb, struct btrfs_dev_item *dev_item) { - printk("\t\tdev item devid %llu " - "total_bytes %llu bytes used %Lu\n", + printk(KERN_INFO "\t\tdev item devid %llu " + "total_bytes %llu bytes used %llu\n", (unsigned long long)btrfs_device_id(eb, dev_item), (unsigned long long)btrfs_device_total_bytes(eb, dev_item), (unsigned long long)btrfs_device_bytes_used(eb, dev_item)); @@ -61,14 +62,15 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) struct btrfs_dev_extent *dev_extent; u32 type; - printk("leaf %llu total ptrs %d free space %d\n", + printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n", (unsigned long long)btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l)); for (i = 0 ; i < nr ; i++) { item = btrfs_item_nr(l, i); btrfs_item_key_to_cpu(l, &key, i); type = btrfs_key_type(&key); - printk("\titem %d key (%llu %x %llu) itemoff %d itemsize %d\n", + printk(KERN_INFO "\titem %d key (%llu %x %llu) itemoff %d " + "itemsize %d\n", i, (unsigned long long)key.objectid, type, (unsigned long long)key.offset, @@ -76,33 +78,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) switch (type) { case BTRFS_INODE_ITEM_KEY: ii = btrfs_item_ptr(l, i, struct btrfs_inode_item); - printk("\t\tinode generation %llu size %llu mode %o\n", - (unsigned long long)btrfs_inode_generation(l, ii), + printk(KERN_INFO 
"\t\tinode generation %llu size %llu " + "mode %o\n", + (unsigned long long) + btrfs_inode_generation(l, ii), (unsigned long long)btrfs_inode_size(l, ii), btrfs_inode_mode(l, ii)); break; case BTRFS_DIR_ITEM_KEY: di = btrfs_item_ptr(l, i, struct btrfs_dir_item); btrfs_dir_item_key_to_cpu(l, di, &found_key); - printk("\t\tdir oid %llu type %u\n", + printk(KERN_INFO "\t\tdir oid %llu type %u\n", (unsigned long long)found_key.objectid, btrfs_dir_type(l, di)); break; case BTRFS_ROOT_ITEM_KEY: ri = btrfs_item_ptr(l, i, struct btrfs_root_item); - printk("\t\troot data bytenr %llu refs %u\n", - (unsigned long long)btrfs_disk_root_bytenr(l, ri), + printk(KERN_INFO "\t\troot data bytenr %llu refs %u\n", + (unsigned long long) + btrfs_disk_root_bytenr(l, ri), btrfs_disk_root_refs(l, ri)); break; case BTRFS_EXTENT_ITEM_KEY: ei = btrfs_item_ptr(l, i, struct btrfs_extent_item); - printk("\t\textent data refs %u\n", + printk(KERN_INFO "\t\textent data refs %u\n", btrfs_extent_refs(l, ei)); break; case BTRFS_EXTENT_REF_KEY: ref = btrfs_item_ptr(l, i, struct btrfs_extent_ref); - printk("\t\textent back ref root %llu gen %llu " - "owner %llu num_refs %lu\n", + printk(KERN_INFO "\t\textent back ref root %llu " + "gen %llu owner %llu num_refs %lu\n", (unsigned long long)btrfs_ref_root(l, ref), (unsigned long long)btrfs_ref_generation(l, ref), (unsigned long long)btrfs_ref_objectid(l, ref), @@ -114,26 +119,36 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) struct btrfs_file_extent_item); if (btrfs_file_extent_type(l, fi) == BTRFS_FILE_EXTENT_INLINE) { - printk("\t\tinline extent data size %u\n", - btrfs_file_extent_inline_len(l, fi)); + printk(KERN_INFO "\t\tinline extent data " + "size %u\n", + btrfs_file_extent_inline_len(l, fi)); break; } - printk("\t\textent data disk bytenr %llu nr %llu\n", - (unsigned long long)btrfs_file_extent_disk_bytenr(l, fi), - (unsigned long long)btrfs_file_extent_disk_num_bytes(l, fi)); - printk("\t\textent data offset %llu nr %llu ram %llu\n", - (unsigned long long)btrfs_file_extent_offset(l, fi), - (unsigned long long)btrfs_file_extent_num_bytes(l, fi), - (unsigned long long)btrfs_file_extent_ram_bytes(l, fi)); + printk(KERN_INFO "\t\textent data disk bytenr %llu " + "nr %llu\n", + (unsigned long long) + btrfs_file_extent_disk_bytenr(l, fi), + (unsigned long long) + btrfs_file_extent_disk_num_bytes(l, fi)); + printk(KERN_INFO "\t\textent data offset %llu " + "nr %llu ram %llu\n", + (unsigned long long) + btrfs_file_extent_offset(l, fi), + (unsigned long long) + btrfs_file_extent_num_bytes(l, fi), + (unsigned long long) + btrfs_file_extent_ram_bytes(l, fi)); break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); - printk("\t\tblock group used %llu\n", - (unsigned long long)btrfs_disk_block_group_used(l, bi)); + printk(KERN_INFO "\t\tblock group used %llu\n", + (unsigned long long) + btrfs_disk_block_group_used(l, bi)); break; case BTRFS_CHUNK_ITEM_KEY: - print_chunk(l, btrfs_item_ptr(l, i, struct btrfs_chunk)); + print_chunk(l, btrfs_item_ptr(l, i, + struct btrfs_chunk)); break; case BTRFS_DEV_ITEM_KEY: print_dev_item(l, btrfs_item_ptr(l, i, @@ -142,7 +157,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) case BTRFS_DEV_EXTENT_KEY: dev_extent = btrfs_item_ptr(l, i, struct btrfs_dev_extent); - printk("\t\tdev extent chunk_tree %llu\n" + printk(KERN_INFO "\t\tdev extent chunk_tree %llu\n" "\t\tchunk objectid %llu chunk offset %llu " "length %llu\n", (unsigned long long) @@ -171,13 
+186,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c) btrfs_print_leaf(root, c); return; } - printk("node %llu level %d total ptrs %d free spc %u\n", + printk(KERN_INFO "node %llu level %d total ptrs %d free spc %u\n", (unsigned long long)btrfs_header_bytenr(c), btrfs_header_level(c), nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr); for (i = 0; i < nr; i++) { btrfs_node_key_to_cpu(c, &key, i); - printk("\tkey %d (%llu %u %llu) block %llu\n", + printk(KERN_INFO "\tkey %d (%llu %u %llu) block %llu\n", i, (unsigned long long)key.objectid, key.type, diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index a50ebb67055..6f0acc4c9ea 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c @@ -74,11 +74,11 @@ void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, struct rb_node *node) { - struct rb_node ** p = &root->rb_node; - struct rb_node * parent = NULL; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; struct btrfs_leaf_ref *entry; - while(*p) { + while (*p) { parent = *p; entry = rb_entry(parent, struct btrfs_leaf_ref, rb_node); @@ -98,10 +98,10 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) { - struct rb_node * n = root->rb_node; + struct rb_node *n = root->rb_node; struct btrfs_leaf_ref *entry; - while(n) { + while (n) { entry = rb_entry(n, struct btrfs_leaf_ref, rb_node); WARN_ON(!entry->in_tree); @@ -127,7 +127,7 @@ int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, return 0; spin_lock(&tree->lock); - while(!list_empty(&tree->list)) { + while (!list_empty(&tree->list)) { ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); BUG_ON(ref->tree != tree); if (ref->root_gen > max_root_gen) diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index f99335a999d..b48650de447 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -132,8 +132,9 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root if (ret != 0) { btrfs_print_leaf(root, path->nodes[0]); - printk("unable to update root key %Lu %u %Lu\n", - key->objectid, key->type, key->offset); + printk(KERN_CRIT "unable to update root key %llu %u %llu\n", + (unsigned long long)key->objectid, key->type, + (unsigned long long)key->offset); BUG_ON(1); } @@ -159,9 +160,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root /* * at mount time we want to find all the old transaction snapshots that were in - * the process of being deleted if we crashed. This is any root item with an offset - * lower than the latest root. They need to be queued for deletion to finish - * what was happening when we crashed. + * the process of being deleted if we crashed. This is any root item with an + * offset lower than the latest root. They need to be queued for deletion to + * finish what was happening when we crashed. 
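The tree_insert() helpers being reindented here (ordered-data.c above, ref-cache.c in this hunk) all share the classic rbtree descend-and-link walk: follow a child-slot pointer down until it is empty, link the new node there, and report a collision by returning the occupant. A simplified, self-contained sketch of that walk, with a plain binary tree, no rb_insert_color() rebalancing, and names invented for illustration:

	#include <stdio.h>
	#include <stddef.h>

	/* toy node, standing in for an rb_node embedded in a tree entry */
	struct node {
		unsigned long long start;
		struct node *left, *right;
	};

	/*
	 * Walk the child slot (*p) down until it is empty, then hang the
	 * new node there; on collision return the existing node, the way
	 * the kernel helpers return the conflicting rb_node.
	 */
	static struct node *tree_insert(struct node **p, struct node *n)
	{
		while (*p) {
			if (n->start < (*p)->start)
				p = &(*p)->left;
			else if (n->start > (*p)->start)
				p = &(*p)->right;
			else
				return *p;	/* duplicate offset */
		}
		*p = n;
		return NULL;
	}

	int main(void)
	{
		struct node *root = NULL;
		struct node a = { 10, NULL, NULL }, b = { 5, NULL, NULL };

		tree_insert(&root, &a);
		tree_insert(&root, &b);
		printf("root=%llu left=%llu\n", root->start, root->left->start);
		return 0;
	}
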
*/ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid, struct btrfs_root *latest) @@ -188,7 +189,7 @@ again: ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - while(1) { + while (1) { leaf = path->nodes[0]; nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; @@ -258,11 +259,7 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, ret = btrfs_search_slot(trans, root, key, path, -1, 1); if (ret < 0) goto out; - if (ret) { -btrfs_print_leaf(root, path->nodes[0]); -printk("failed to del %Lu %u %Lu\n", key->objectid, key->type, key->offset); - } BUG_ON(ret != 0); leaf = path->nodes[0]; ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item); diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index 8d7f568009c..c0f7ecaf1e7 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -66,7 +66,7 @@ u##bits btrfs_##name(struct extent_buffer *eb, \ unsigned long map_len; \ u##bits res; \ err = map_extent_buffer(eb, offset, \ - sizeof(((type *)0)->member), \ + sizeof(((type *)0)->member), \ &map_token, &kaddr, \ &map_start, &map_len, KM_USER1); \ if (err) { \ @@ -103,7 +103,7 @@ void btrfs_set_##name(struct extent_buffer *eb, \ unsigned long map_start; \ unsigned long map_len; \ err = map_extent_buffer(eb, offset, \ - sizeof(((type *)0)->member), \ + sizeof(((type *)0)->member), \ &map_token, &kaddr, \ &map_start, &map_len, KM_USER1); \ if (err) { \ diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index ccdcb7bb7ad..b4c101d9322 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -55,18 +55,12 @@ static struct super_operations btrfs_super_ops; -static void btrfs_put_super (struct super_block * sb) +static void btrfs_put_super(struct super_block *sb) { struct btrfs_root *root = btrfs_sb(sb); int ret; ret = close_ctree(root); - if (ret) { - printk("close ctree returns %d\n", ret); - } -#if 0 - btrfs_sysfs_del_super(root->fs_info); -#endif sb->s_fs_info = NULL; } @@ -299,12 +293,12 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, return error; } -static int btrfs_fill_super(struct super_block * sb, +static int btrfs_fill_super(struct super_block *sb, struct btrfs_fs_devices *fs_devices, - void * data, int silent) + void *data, int silent) { - struct inode * inode; - struct dentry * root_dentry; + struct inode *inode; + struct dentry *root_dentry; struct btrfs_super_block *disk_super; struct btrfs_root *tree_root; struct btrfs_inode *bi; @@ -479,8 +473,10 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, root = dget(s->s_root); else { mutex_lock(&s->s_root->d_inode->i_mutex); - root = lookup_one_len(subvol_name, s->s_root, strlen(subvol_name)); + root = lookup_one_len(subvol_name, s->s_root, + strlen(subvol_name)); mutex_unlock(&s->s_root->d_inode->i_mutex); + if (IS_ERR(root)) { up_write(&s->s_umount); deactivate_super(s); @@ -557,8 +553,9 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_bavail = buf->f_bfree; buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_type = BTRFS_SUPER_MAGIC; + /* We treat it as constant endianness (it doesn't matter _which_) - because we want the fsid to come out the same whether mounted + because we want the fsid to come out the same whether mounted on a big-endian or little-endian host */ buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]); buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]); @@ -658,7 +655,7 @@ static int btrfs_interface_init(void) static void 
btrfs_interface_exit(void) { if (misc_deregister(&btrfs_misc) < 0) - printk("misc_deregister failed for control device"); + printk(KERN_INFO "misc_deregister failed for control device"); } static int __init init_btrfs_fs(void) diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 04087c02084..a240b6fa81d 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -67,7 +67,8 @@ struct btrfs_root_attr { }; #define ROOT_ATTR(name, mode, show, store) \ -static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, show, store) +static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \ + show, store) ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL); ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL); @@ -86,7 +87,8 @@ struct btrfs_super_attr { }; #define SUPER_ATTR(name, mode, show, store) \ -static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, show, store) +static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \ + show, store) SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL); SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 4e7b56e9d3a..56ab1f5ea11 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -28,9 +28,6 @@ #include "ref-cache.h" #include "tree-log.h" -extern struct kmem_cache *btrfs_trans_handle_cachep; -extern struct kmem_cache *btrfs_transaction_cachep; - #define BTRFS_ROOT_TRANS_TAG 0 static noinline void put_transaction(struct btrfs_transaction *transaction) @@ -85,10 +82,10 @@ static noinline int join_transaction(struct btrfs_root *root) } /* - * this does all the record keeping required to make sure that a - * reference counted root is properly recorded in a given transaction. - * This is required to make sure the old root from before we joined the transaction - * is deleted when the transaction commits + * this does all the record keeping required to make sure that a reference + * counted root is properly recorded in a given transaction. This is required + * to make sure the old root from before we joined the transaction is deleted + * when the transaction commits */ noinline int btrfs_record_root_in_trans(struct btrfs_root *root) { @@ -144,7 +141,7 @@ static void wait_current_trans(struct btrfs_root *root) if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); cur_trans->use_count++; - while(1) { + while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); if (cur_trans->blocked) { @@ -213,7 +210,7 @@ static noinline int wait_for_commit(struct btrfs_root *root, { DEFINE_WAIT(wait); mutex_lock(&root->fs_info->trans_mutex); - while(!commit->commit_done) { + while (!commit->commit_done) { prepare_to_wait(&commit->commit_wait, &wait, TASK_UNINTERRUPTIBLE); if (commit->commit_done) @@ -228,8 +225,8 @@ static noinline int wait_for_commit(struct btrfs_root *root, } /* - * rate limit against the drop_snapshot code. This helps to slow down new operations - * if the drop_snapshot code isn't able to keep up. + * rate limit against the drop_snapshot code. This helps to slow down new + * operations if the drop_snapshot code isn't able to keep up. 
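wait_current_trans() and wait_for_commit() above both use the kernel's open-coded wait loop: prepare_to_wait(), re-check the condition, schedule(), check again after waking. As a loose userspace analogy only (POSIX condition variables; every name here is invented for illustration, none of it is btrfs code), the same discipline of re-testing the predicate after every wakeup looks like this:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static bool blocked = true;	/* stands in for cur_trans->blocked */

	/* committer side: unblock and wake all waiters */
	static void *commit_thread(void *arg)
	{
		(void)arg;
		sleep(1);
		pthread_mutex_lock(&lock);
		blocked = false;
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, commit_thread, NULL);

		/*
		 * waiter side: always re-check the condition after waking,
		 * exactly like the prepare_to_wait()/schedule() loop
		 */
		pthread_mutex_lock(&lock);
		while (blocked)
			pthread_cond_wait(&cond, &lock);
		pthread_mutex_unlock(&lock);

		printf("transaction no longer blocked\n");
		pthread_join(t, NULL);
		return 0;
	}
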
*/ static void throttle_on_drops(struct btrfs_root *root) { @@ -332,12 +329,12 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, u64 end; unsigned long index; - while(1) { + while (1) { ret = find_first_extent_bit(dirty_pages, start, &start, &end, EXTENT_DIRTY); if (ret) break; - while(start <= end) { + while (start <= end) { cond_resched(); index = start >> PAGE_CACHE_SHIFT; @@ -368,14 +365,14 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root, page_cache_release(page); } } - while(1) { + while (1) { ret = find_first_extent_bit(dirty_pages, 0, &start, &end, EXTENT_DIRTY); if (ret) break; clear_extent_dirty(dirty_pages, start, end, GFP_NOFS); - while(start <= end) { + while (start <= end) { index = start >> PAGE_CACHE_SHIFT; start = (u64)(index + 1) << PAGE_CACHE_SHIFT; page = find_get_page(btree_inode->i_mapping, index); @@ -431,7 +428,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, btrfs_write_dirty_block_groups(trans, root); btrfs_extent_post_op(trans, root); - while(1) { + while (1) { old_root_bytenr = btrfs_root_bytenr(&root->root_item); if (old_root_bytenr == root->node->start) break; @@ -472,7 +469,7 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, btrfs_extent_post_op(trans, fs_info->tree_root); - while(!list_empty(&fs_info->dirty_cowonly_roots)) { + while (!list_empty(&fs_info->dirty_cowonly_roots)) { next = fs_info->dirty_cowonly_roots.next; list_del_init(next); root = list_entry(next, struct btrfs_root, dirty_list); @@ -521,7 +518,7 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans, int err = 0; u32 refs; - while(1) { + while (1) { ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0, ARRAY_SIZE(gang), BTRFS_ROOT_TRANS_TAG); @@ -653,7 +650,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, int ret = 0; int err; - while(!list_empty(list)) { + while (!list_empty(list)) { struct btrfs_root *root; dirty = list_entry(list->prev, struct btrfs_dirty_root, list); @@ -663,13 +660,12 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root, root = dirty->latest_root; atomic_inc(&root->fs_info->throttles); - while(1) { + while (1) { trans = btrfs_start_transaction(tree_root, 1); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, dirty->root); - if (ret != -EAGAIN) { + if (ret != -EAGAIN) break; - } mutex_unlock(&root->fs_info->drop_mutex); err = btrfs_update_root(trans, @@ -874,7 +870,7 @@ static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans, struct list_head *head = &trans->transaction->pending_snapshots; int ret; - while(!list_empty(head)) { + while (!list_empty(head)) { pending = list_entry(head->next, struct btrfs_pending_snapshot, list); ret = finish_pending_snapshot(fs_info, pending); @@ -1076,9 +1072,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, kmem_cache_free(btrfs_trans_handle_cachep, trans); - if (root->fs_info->closing) { + if (root->fs_info->closing) drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots); - } return ret; } diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index ffe7f639732..ea292117f88 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -66,9 +66,9 @@ static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans, trans->block_group = BTRFS_I(inode)->block_group; } -static inline void btrfs_update_inode_block_group(struct - btrfs_trans_handle *trans, - struct inode *inode) +static inline void 
btrfs_update_inode_block_group( + struct btrfs_trans_handle *trans, + struct inode *inode) { BTRFS_I(inode)->block_group = trans->block_group; } diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index a6a3956cedf..3e8358c3616 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c @@ -23,10 +23,11 @@ #include "transaction.h" #include "locking.h" -/* defrag all the leaves in a given btree. If cache_only == 1, don't read things - * from disk, otherwise read all the leaves and try to get key order to +/* defrag all the leaves in a given btree. If cache_only == 1, don't read + * things from disk, otherwise read all the leaves and try to get key order to * better reflect disk order */ + int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, struct btrfs_root *root, int cache_only) { @@ -65,9 +66,9 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, level = btrfs_header_level(root->node); orig_level = level; - if (level == 0) { + if (level == 0) goto out; - } + if (root->defrag_progress.objectid == 0) { struct extent_buffer *root_node; u32 nritems; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index b1c2921f5be..3a72a1b6c24 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -829,7 +829,7 @@ conflict_again: */ ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); - while(ptr < ptr_end) { + while (ptr < ptr_end) { victim_ref = (struct btrfs_inode_ref *)ptr; victim_name_len = btrfs_inode_ref_name_len(leaf, victim_ref); @@ -938,9 +938,8 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, file_bytes = (item_size / csum_size) * root->sectorsize; sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS); - if (!sums) { + if (!sums) return -ENOMEM; - } INIT_LIST_HEAD(&sums->list); sums->len = file_bytes; @@ -952,7 +951,7 @@ static noinline int replay_one_csum(struct btrfs_trans_handle *trans, sector_sum = sums->sums; cur_offset = key->offset; ptr = btrfs_item_ptr_offset(eb, slot); - while(item_size > 0) { + while (item_size > 0) { sector_sum->bytenr = cur_offset; read_extent_buffer(eb, &sector_sum->sum, ptr, csum_size); sector_sum++; @@ -995,7 +994,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); - while(1) { + while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) break; @@ -1012,7 +1011,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], path->slots[0]); - while(ptr < ptr_end) { + while (ptr < ptr_end) { struct btrfs_inode_ref *ref; ref = (struct btrfs_inode_ref *)ptr; @@ -1048,7 +1047,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = (u64)-1; - while(1) { + while (1) { ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret < 0) break; @@ -1206,8 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, if (key->type == BTRFS_DIR_ITEM_KEY) { dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, name, name_len, 1); - } - else if (key->type == BTRFS_DIR_INDEX_KEY) { + } else if (key->type == BTRFS_DIR_INDEX_KEY) { dst_di = btrfs_lookup_dir_index_item(trans, root, path, key->objectid, key->offset, name, @@ -1282,7 +1280,7 @@ static noinline int replay_one_dir_item(struct
btrfs_trans_handle *trans, ptr = btrfs_item_ptr_offset(eb, slot); ptr_end = ptr + item_size; - while(ptr < ptr_end) { + while (ptr < ptr_end) { di = (struct btrfs_dir_item *)ptr; name_len = btrfs_dir_name_len(eb, di); ret = replay_one_name(trans, root, path, eb, di, key); @@ -1408,7 +1406,7 @@ again: item_size = btrfs_item_size_nr(eb, slot); ptr = btrfs_item_ptr_offset(eb, slot); ptr_end = ptr + item_size; - while(ptr < ptr_end) { + while (ptr < ptr_end) { di = (struct btrfs_dir_item *)ptr; name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); @@ -1513,14 +1511,14 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, again: range_start = 0; range_end = 0; - while(1) { + while (1) { ret = find_dir_range(log, path, dirid, key_type, &range_start, &range_end); if (ret != 0) break; dir_key.offset = range_start; - while(1) { + while (1) { int nritems; ret = btrfs_search_slot(NULL, root, &dir_key, path, 0, 0); @@ -1676,7 +1674,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, return 0; } -static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans, +static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, struct walk_control *wc) @@ -1694,7 +1692,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans, WARN_ON(*level < 0); WARN_ON(*level >= BTRFS_MAX_LEVEL); - while(*level > 0) { + while (*level > 0) { WARN_ON(*level < 0); WARN_ON(*level >= BTRFS_MAX_LEVEL); cur = path->nodes[*level]; @@ -1753,11 +1751,11 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans, WARN_ON(*level < 0); WARN_ON(*level >= BTRFS_MAX_LEVEL); - if (path->nodes[*level] == root->node) { + if (path->nodes[*level] == root->node) parent = path->nodes[*level]; - } else { + else parent = path->nodes[*level + 1]; - } + bytenr = path->nodes[*level]->start; blocksize = btrfs_level_size(root, *level); @@ -1790,7 +1788,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans, return 0; } -static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans, +static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, int *level, struct walk_control *wc) @@ -1801,7 +1799,7 @@ static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans, int slot; int ret; - for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { + for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { slot = path->slots[i]; if (slot < btrfs_header_nritems(path->nodes[i]) - 1) { struct extent_buffer *node; @@ -1875,7 +1873,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, extent_buffer_get(log->node); path->slots[level] = 0; - while(1) { + while (1) { wret = walk_down_log_tree(trans, log, path, &level, wc); if (wret > 0) break; @@ -1941,7 +1939,7 @@ static int wait_log_commit(struct btrfs_root *log) schedule(); finish_wait(&log->fs_info->tree_log_wait, &wait); mutex_lock(&log->fs_info->tree_log_mutex); - } while(transid == log->fs_info->tree_log_transid && + } while (transid == log->fs_info->tree_log_transid && atomic_read(&log->fs_info->tree_log_commit)); return 0; } @@ -1965,13 +1963,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, } atomic_set(&log->fs_info->tree_log_commit, 1); - while(1) { + while (1) { batch = log->fs_info->tree_log_batch; mutex_unlock(&log->fs_info->tree_log_mutex); schedule_timeout_uninterruptible(1); 
mutex_lock(&log->fs_info->tree_log_mutex); - while(atomic_read(&log->fs_info->tree_log_writers)) { + while (atomic_read(&log->fs_info->tree_log_writers)) { DEFINE_WAIT(wait); prepare_to_wait(&log->fs_info->tree_log_wait, &wait, TASK_UNINTERRUPTIBLE); @@ -2030,7 +2028,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) ret = walk_log_tree(trans, log, &wc); BUG_ON(ret); - while(1) { + while (1) { ret = find_first_extent_bit(&log->dirty_log_pages, 0, &start, &end, EXTENT_DIRTY); if (ret) @@ -2287,9 +2285,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, struct btrfs_key tmp; btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); - if (key_type == tmp.type) { + if (key_type == tmp.type) first_offset = max(min_offset, tmp.offset) + 1; - } } goto done; } @@ -2319,7 +2316,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, * we have a block from this transaction, log every item in it * from our directory */ - while(1) { + while (1) { struct btrfs_key tmp; src = path->nodes[0]; nritems = btrfs_header_nritems(src); @@ -2396,7 +2393,7 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans, again: min_key = 0; max_key = 0; - while(1) { + while (1) { ret = log_dir_items(trans, root, inode, path, dst_path, key_type, min_key, &max_key); @@ -2432,7 +2429,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, key.type = max_key_type; key.offset = (u64)-1; - while(1) { + while (1) { ret = btrfs_search_slot(trans, log, &key, path, -1, 1); if (ret != 1) @@ -2481,7 +2478,7 @@ static noinline int copy_extent_csums(struct btrfs_trans_handle *trans, list_add_tail(&sums->list, list); path = btrfs_alloc_path(); - while(disk_bytenr < end) { + while (disk_bytenr < end) { if (!item || disk_bytenr < item_start_offset || disk_bytenr >= item_last_offset) { struct btrfs_key found_key; @@ -2496,7 +2493,8 @@ static noinline int copy_extent_csums(struct btrfs_trans_handle *trans, if (ret == -ENOENT || ret == -EFBIG) ret = 0; sum = 0; - printk("log no csum found for byte %llu\n", + printk(KERN_INFO "log no csum found for " + "byte %llu\n", (unsigned long long)disk_bytenr); item = NULL; btrfs_release_path(root, path); @@ -2643,7 +2641,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, * we have to do this after the loop above to avoid changing the * log tree while trying to change the log tree. 
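One idiom recurs across these hunks, including the ordered_sums loop just below: lists are drained with while (!list_empty(head)), recovering the containing object via list_entry() and unlinking it before use. A self-contained userspace sketch of the idiom; the list primitives are re-derived here for illustration, not taken from the kernel headers:

	#include <stdio.h>
	#include <stddef.h>

	/* minimal intrusive list, standing in for the kernel's list_head */
	struct list_head { struct list_head *next, *prev; };

	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	struct sum { int value; struct list_head list; };

	/* pop the first entry, unlink it, process it, until empty */
	static void drain(struct list_head *head)
	{
		while (!list_empty(head)) {
			struct sum *s = list_entry(head->next, struct sum, list);

			list_del(&s->list);
			printf("processed %d\n", s->value);
		}
	}

	int main(void)
	{
		struct list_head head = { &head, &head };
		struct sum a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };

		/* hand-link head -> a -> b -> head */
		head.next = &a.list;   a.list.prev = &head;
		a.list.next = &b.list; b.list.prev = &a.list;
		b.list.next = &head;   head.prev = &b.list;

		drain(&head);
		return 0;
	}
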
*/ - while(!list_empty(&ordered_sums)) { + while (!list_empty(&ordered_sums)) { struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, struct btrfs_ordered_sum, list); @@ -2736,7 +2734,7 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans, BUG_ON(ret); path->keep_locks = 1; - while(1) { + while (1) { ins_nr = 0; ret = btrfs_search_forward(root, &min_key, &max_key, path, 0, trans->transid); @@ -2848,7 +2846,7 @@ int btrfs_log_dentry(struct btrfs_trans_handle *trans, start_log_trans(trans, root); sb = dentry->d_inode->i_sb; - while(1) { + while (1) { ret = __btrfs_log_inode(trans, root, dentry->d_inode, inode_only); BUG_ON(ret); @@ -2919,7 +2917,7 @@ again: key.offset = (u64)-1; btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); - while(1) { + while (1) { ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); if (ret < 0) break; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 6672adcec9f..b187b537888 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -140,7 +140,7 @@ static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid) * the list if the block device is congested. This way, multiple devices * can make progress from a single worker thread. */ -static int noinline run_scheduled_bios(struct btrfs_device *device) +static noinline int run_scheduled_bios(struct btrfs_device *device) { struct bio *pending; struct backing_dev_info *bdi; @@ -187,7 +187,7 @@ loop: } spin_unlock(&device->io_lock); - while(pending) { + while (pending) { cur = pending; pending = pending->bi_next; cur->bi_next = NULL; @@ -458,7 +458,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, bdev = open_bdev_exclusive(device->name, flags, holder); if (IS_ERR(bdev)) { - printk("open %s failed\n", device->name); + printk(KERN_INFO "open %s failed\n", device->name); goto error; } set_blocksize(bdev, 4096); @@ -570,14 +570,15 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, devid = le64_to_cpu(disk_super->dev_item.devid); transid = btrfs_super_generation(disk_super); if (disk_super->label[0]) - printk("device label %s ", disk_super->label); + printk(KERN_INFO "device label %s ", disk_super->label); else { /* FIXME, make a readl uuid parser */ - printk("device fsid %llx-%llx ", + printk(KERN_INFO "device fsid %llx-%llx ", *(unsigned long long *)disk_super->fsid, *(unsigned long long *)(disk_super->fsid + 8)); } - printk("devid %Lu transid %Lu %s\n", devid, transid, path); + printk(KERN_INFO "devid %llu transid %llu %s\n", + (unsigned long long)devid, (unsigned long long)transid, path); ret = device_list_add(path, disk_super, devid, fs_devices_ret); brelse(bh); @@ -683,9 +684,8 @@ no_more_items: goto check_pending; } } - if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) { + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) goto next; - } start_found = 1; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); @@ -1001,14 +1001,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && root->fs_info->fs_devices->rw_devices <= 4) { - printk("btrfs: unable to go below four devices on raid10\n"); + printk(KERN_ERR "btrfs: unable to go below four devices " + "on raid10\n"); ret = -EINVAL; goto out; } if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && root->fs_info->fs_devices->rw_devices <= 2) { - printk("btrfs: unable to go below two devices on raid1\n"); + printk(KERN_ERR "btrfs: unable to go below two " + "devices on raid1\n"); ret = -EINVAL; goto out; } @@ -1031,7 +1033,8 @@ int 
btrfs_rm_device(struct btrfs_root *root, char *device_path)
 		bh = NULL;
 		disk_super = NULL;
 		if (!device) {
-			printk("btrfs: no missing devices found to remove\n");
+			printk(KERN_ERR "btrfs: no missing devices found to "
+			       "remove\n");
 			goto out;
 		}
 	} else {
@@ -1060,7 +1063,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	}
 
 	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
-		printk("btrfs: unable to remove the only writeable device\n");
+		printk(KERN_ERR "btrfs: unable to remove the only writeable "
+		       "device\n");
 		ret = -EINVAL;
 		goto error_brelse;
 	}
@@ -1286,9 +1290,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		return -EINVAL;
 
 	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
-	if (!bdev) {
+	if (!bdev)
 		return -EIO;
-	}
 
 	if (root->fs_info->fs_devices->seeding) {
 		seeding_dev = 1;
@@ -1401,8 +1404,8 @@ error:
 	goto out;
 }
 
-static int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
-					struct btrfs_device *device)
+static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+					struct btrfs_device *device)
 {
 	int ret;
 	struct btrfs_path *path;
@@ -1563,7 +1566,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 	int ret;
 	int i;
 
-	printk("btrfs relocating chunk %llu\n",
+	printk(KERN_INFO "btrfs relocating chunk %llu\n",
 	       (unsigned long long)chunk_offset);
 	root = root->fs_info->chunk_root;
 	extent_root = root->fs_info->extent_root;
@@ -1748,7 +1751,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
 	key.offset = (u64)-1;
 	key.type = BTRFS_CHUNK_ITEM_KEY;
 
-	while(1) {
+	while (1) {
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0)
 			goto error;
@@ -1916,7 +1919,7 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
 					int num_stripes, int sub_stripes)
 {
 	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
@@ -2041,7 +2044,7 @@ again:
 	min_free += 1024 * 1024;
 
 	INIT_LIST_HEAD(&private_devs);
-	while(index < num_stripes) {
+	while (index < num_stripes) {
 		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
 		BUG_ON(!device->writeable);
 		if (device->total_bytes > device->bytes_used)
@@ -2242,7 +2245,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
-static int noinline init_first_rw_device(struct btrfs_trans_handle *trans,
+static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
 					 struct btrfs_root *root,
 					 struct btrfs_device *device)
 {
@@ -2338,7 +2341,7 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 {
 	struct extent_map *em;
 
-	while(1) {
+	while (1) {
 		spin_lock(&tree->map_tree.lock);
 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
 		if (em)
@@ -2413,9 +2416,8 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 	int max_errors = 0;
 	struct btrfs_multi_bio *multi = NULL;
 
-	if (multi_ret && !(rw & (1 << BIO_RW))) {
+	if (multi_ret && !(rw & (1 << BIO_RW)))
 		stripes_allocated = 1;
-	}
 again:
 	if (multi_ret) {
 		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
@@ -2434,7 +2436,9 @@ again:
 		return 0;
 
 	if (!em) {
-		printk("unable to find logical %Lu len %Lu\n", logical, *length);
+		printk(KERN_CRIT "unable to find logical %llu len %llu\n",
+		       (unsigned long long)logical,
+		       (unsigned long long)*length);
 		BUG();
 	}
 
@@ -2541,9 +2545,8 @@ again:
 			device = map->stripes[stripe_index].dev;
 			if (device->bdev) {
 				bdi = blk_get_backing_dev_info(device->bdev);
-				if (bdi->unplug_io_fn) {
+				if (bdi->unplug_io_fn)
 					bdi->unplug_io_fn(bdi, unplug_page);
-				}
 			}
 		} else {
 			multi->stripes[i].physical =
@@ -2717,7 +2720,7 @@ struct async_sched {
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-static int noinline schedule_bio(struct btrfs_root *root,
+static noinline int schedule_bio(struct btrfs_root *root,
 				 struct btrfs_device *device,
 				 int rw, struct bio *bio)
 {
@@ -2785,8 +2788,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	total_devs = multi->num_stripes;
 
 	if (map_length < length) {
-		printk("mapping failed logical %Lu bio len %Lu "
-		       "len %Lu\n", logical, length, map_length);
+		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
+		       "len %llu\n", (unsigned long long)logical,
+		       (unsigned long long)length,
+		       (unsigned long long)map_length);
 		BUG();
 	}
 	multi->end_io = first_bio->bi_end_io;
@@ -2794,7 +2799,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	multi->orig_bio = first_bio;
 	atomic_set(&multi->stripes_pending, multi->num_stripes);
 
-	while(dev_nr < total_devs) {
+	while (dev_nr < total_devs) {
 		if (total_devs > 1) {
 			if (dev_nr < total_devs - 1) {
 				bio = bio_clone(first_bio, GFP_NOFS);
@@ -3058,7 +3063,8 @@ static int read_one_dev(struct btrfs_root *root,
 			return -EIO;
 
 		if (!device) {
-			printk("warning devid %Lu missing\n", devid);
+			printk(KERN_WARNING "warning devid %llu missing\n",
+			       (unsigned long long)devid);
 			device = add_missing_dev(root, devid, dev_uuid);
 			if (!device)
 				return -ENOMEM;
@@ -3078,12 +3084,6 @@ static int read_one_dev(struct btrfs_root *root,
 	if (device->writeable)
 		device->fs_devices->total_rw_bytes += device->total_bytes;
 	ret = 0;
-#if 0
-	ret = btrfs_open_device(device);
-	if (ret) {
-		kfree(device);
-	}
-#endif
 	return ret;
 }
 
@@ -3174,7 +3174,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
 	key.type = 0;
 again:
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-	while(1) {
+	while (1) {
 		leaf = path->nodes[0];
 		slot = path->slots[0];
 		if (slot >= btrfs_header_nritems(leaf)) {
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 4146f0710e6..7f332e27089 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -264,7 +264,8 @@ struct xattr_handler *btrfs_xattr_handlers[] = {
  */
 static bool btrfs_is_valid_xattr(const char *name)
 {
-	return !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) ||
+	return !strncmp(name, XATTR_SECURITY_PREFIX,
+			XATTR_SECURITY_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
 	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index c4617cde6c7..ecfbce836d3 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -152,7 +152,7 @@ static int free_workspace(struct workspace *workspace)
 static void free_workspaces(void)
 {
 	struct workspace *workspace;
-	while(!list_empty(&idle_workspace)) {
+	while (!list_empty(&idle_workspace)) {
 		workspace = list_entry(idle_workspace.next, struct workspace,
 				       list);
 		list_del(&workspace->list);
@@ -397,12 +397,10 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 		ret = -1;
 		goto out;
 	}
-	while(workspace->inf_strm.total_in < srclen) {
+	while (workspace->inf_strm.total_in < srclen) {
 		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
-		if (ret != Z_OK && ret != Z_STREAM_END) {
+		if (ret != Z_OK && ret != Z_STREAM_END)
 			break;
-		}
-
 		/*
 		 * buf start is the byte offset we're of the start of
 		 * our workspace buffer
@@ -424,16 +422,14 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 			/* we didn't make progress in this inflate
 			 * call, we're done
 			 */
-			if (ret != Z_STREAM_END) {
+			if (ret != Z_STREAM_END)
 				ret = -1;
-			}
 			break;
 		}
 
 		/* we haven't yet hit data corresponding to this page */
-		if (total_out <= start_byte) {
+		if (total_out <= start_byte)
 			goto next;
-		}
 
 		/*
 		 * the start of the data we care about is offset into
@@ -448,7 +444,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 			current_buf_start = buf_start;
 
 		/* copy bytes from the working buffer into the pages */
-		while(working_bytes > 0) {
+		while (working_bytes > 0) {
 			bytes = min(PAGE_CACHE_SIZE - pg_offset,
 				    PAGE_CACHE_SIZE - buf_offset);
 			bytes = min(bytes, working_bytes);
@@ -471,6 +467,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 					ret = 0;
 					goto done;
 				}
+
 				page_out = bvec[page_out_index].bv_page;
 				pg_offset = 0;
 				page_bytes_left = PAGE_CACHE_SIZE;
@@ -480,9 +477,8 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
 				 * make sure our new page is covered by this
 				 * working buffer
 				 */
-				if (total_out <= start_byte) {
+				if (total_out <= start_byte)
 					goto next;
-				}
 
 				/* the next page in the biovec might not
 				 * be adjacent to the last page, but it
@@ -517,11 +513,10 @@ next:
 						 PAGE_CACHE_SIZE);
 		}
 	}
-	if (ret != Z_STREAM_END) {
+	if (ret != Z_STREAM_END)
 		ret = -1;
-	} else {
+	else
 		ret = 0;
-	}
 done:
 	zlib_inflateEnd(&workspace->inf_strm);
 	if (data_in)
@@ -579,16 +574,15 @@ int btrfs_zlib_decompress(unsigned char *data_in,
 		goto out;
 	}
 
-	while(bytes_left > 0) {
+	while (bytes_left > 0) {
 		unsigned long buf_start;
 		unsigned long buf_offset;
 		unsigned long bytes;
 		unsigned long pg_offset = 0;
 
 		ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH);
-		if (ret != Z_OK && ret != Z_STREAM_END) {
+		if (ret != Z_OK && ret != Z_STREAM_END)
 			break;
-		}
 
 		buf_start = total_out;
 		total_out = workspace->inf_strm.total_out;
@@ -598,15 +592,13 @@ int btrfs_zlib_decompress(unsigned char *data_in,
 			break;
 		}
 
-		if (total_out <= start_byte) {
+		if (total_out <= start_byte)
 			goto next;
-		}
 
-		if (total_out > start_byte && buf_start < start_byte) {
+		if (total_out > start_byte && buf_start < start_byte)
 			buf_offset = start_byte - buf_start;
-		} else {
+		else
 			buf_offset = 0;
-		}
 
 		bytes = min(PAGE_CACHE_SIZE - pg_offset,
 			    PAGE_CACHE_SIZE - buf_offset);
@@ -622,11 +614,12 @@ next:
 		workspace->inf_strm.next_out = workspace->buf;
 		workspace->inf_strm.avail_out = PAGE_CACHE_SIZE;
 	}
-	if (ret != Z_STREAM_END && bytes_left != 0) {
+
+	if (ret != Z_STREAM_END && bytes_left != 0)
 		ret = -1;
-	} else {
+	else
 		ret = 0;
-	}
+
 	zlib_inflateEnd(&workspace->inf_strm);
out:
 	free_workspace(workspace);
-- 
cgit v1.2.3-70-g09d2

From 43b774ba1378dc5e952aadb829048bbe90ba33e4 Mon Sep 17 00:00:00 2001
From: Chris Mason
Date: Mon, 5 Jan 2009 22:05:48 -0500
Subject: Btrfs: drop EXPORT symbols from extent_io.c

They should stay out until this is turned into generic code.
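The patch above is a mechanical style cleanup: every printk() gains an explicit KERN_* severity, u64 values are printed with %llu plus an (unsigned long long) cast instead of the nonstandard %Lu, "while(1)" gains the checkpatch-mandated space, and "noinline" moves between the storage class and the return type. A minimal sketch of the resulting printk convention follows; it is illustrative only, and example_report() is a hypothetical helper, not code from the patch. The cast matters because the typedef behind u64 differs between 32-bit and 64-bit architectures, so %llu is only portable once the argument is explicitly unsigned long long.

/*
 * Illustrative sketch only -- not part of either patch.
 * example_report() is a hypothetical name.
 */
#include <linux/kernel.h>
#include <linux/types.h>

static void example_report(u64 logical, u64 length)
{
	/* explicit severity, and u64 cast so %llu is portable */
	printk(KERN_INFO "btrfs: chunk at %llu, len %llu\n",
	       (unsigned long long)logical,
	       (unsigned long long)length);
}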
Signed-off-by: Chris Mason
---
 fs/btrfs/extent_io.c | 56 ----------------------------------------------------
 1 file changed, 56 deletions(-)

(limited to 'fs/btrfs/extent_io.c')

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 39edb551dca..e086d407f1f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -116,7 +116,6 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 	spin_lock_init(&tree->buffer_lock);
 	tree->mapping = mapping;
 }
-EXPORT_SYMBOL(extent_io_tree_init);
 
 static struct extent_state *alloc_extent_state(gfp_t mask)
 {
@@ -140,7 +139,6 @@ static struct extent_state *alloc_extent_state(gfp_t mask)
 	init_waitqueue_head(&state->wq);
 	return state;
 }
-EXPORT_SYMBOL(alloc_extent_state);
 
 static void free_extent_state(struct extent_state *state)
 {
@@ -159,7 +157,6 @@ static void free_extent_state(struct extent_state *state)
 		kmem_cache_free(extent_state_cache, state);
 	}
 }
-EXPORT_SYMBOL(free_extent_state);
 
 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
 				   struct rb_node *node)
@@ -580,7 +577,6 @@ search_again:
 		cond_resched();
 	goto again;
 }
-EXPORT_SYMBOL(clear_extent_bit);
 
 static int wait_on_state(struct extent_io_tree *tree,
 			 struct extent_state *state)
@@ -644,7 +640,6 @@ out:
 	spin_unlock(&tree->lock);
 	return 0;
 }
-EXPORT_SYMBOL(wait_extent_bit);
 
 static void set_state_bits(struct extent_io_tree *tree,
 			   struct extent_state *state,
@@ -821,7 +816,6 @@ search_again:
 		cond_resched();
 	goto again;
 }
-EXPORT_SYMBOL(set_extent_bit);
 
 /* wrappers around set/clear extent bit */
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -830,14 +824,12 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
 			      mask);
 }
-EXPORT_SYMBOL(set_extent_dirty);
 
 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask)
 {
 	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL,
 			      mask);
 }
-EXPORT_SYMBOL(set_extent_ordered);
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		    int bits, gfp_t mask)
@@ -845,14 +837,12 @@ int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	return set_extent_bit(tree, start, end, bits, 0, NULL,
 			      mask);
 }
-EXPORT_SYMBOL(set_extent_bits);
 
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		      int bits, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
 }
-EXPORT_SYMBOL(clear_extent_bits);
 
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 			gfp_t mask)
@@ -861,7 +851,6 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
 			      mask);
 }
-EXPORT_SYMBOL(set_extent_delalloc);
 
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask)
@@ -869,14 +858,12 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 	return clear_extent_bit(tree, start, end,
 				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
 }
-EXPORT_SYMBOL(clear_extent_dirty);
 
 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
 			 gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
 }
-EXPORT_SYMBOL(clear_extent_ordered);
 
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 		   gfp_t mask)
@@ -884,7 +871,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
 			      mask);
 }
-EXPORT_SYMBOL(set_extent_new);
 
 static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 			    gfp_t mask)
@@ -898,7 +884,6 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
 			      mask);
 }
-EXPORT_SYMBOL(set_extent_uptodate);
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
 				 u64 end, gfp_t mask)
@@ -923,7 +908,6 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
 	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
 }
-EXPORT_SYMBOL(wait_on_extent_writeback);
 
 /*
  * either insert or lock state struct between start and end use mask to tell
@@ -946,7 +930,6 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 	}
 	return err;
 }
-EXPORT_SYMBOL(lock_extent);
 
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 		    gfp_t mask)
@@ -964,14 +947,12 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 	}
 	return 1;
 }
-EXPORT_SYMBOL(try_lock_extent);
 
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 		  gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
 }
-EXPORT_SYMBOL(unlock_extent);
 
 /*
  * helper function to set pages and extents in the tree dirty
@@ -992,7 +973,6 @@ int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
 	set_extent_dirty(tree, start, end, GFP_NOFS);
 	return 0;
 }
-EXPORT_SYMBOL(set_range_dirty);
 
 /*
  * helper function to set both pages and extents in the tree writeback
@@ -1053,7 +1033,6 @@ out:
 	spin_unlock(&tree->lock);
 	return ret;
 }
-EXPORT_SYMBOL(find_first_extent_bit);
 
 /* find the first state struct with 'bits' set after 'start', and
  * return it.  tree->lock must be held.  NULL will returned if
@@ -1085,7 +1064,6 @@ struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
 out:
 	return NULL;
 }
-EXPORT_SYMBOL(find_first_extent_bit_state);
 
 /*
  * find a contiguous range of bytes in the file marked as delalloc, not
@@ -1372,7 +1350,6 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(extent_clear_unlock_delalloc);
 
 /*
  * count the number of bytes in the tree that have a given bit(s)
@@ -1598,7 +1575,6 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	spin_unlock(&tree->lock);
 	return bitset;
 }
-EXPORT_SYMBOL(test_range_bit);
 
 /*
  * helper function to set a given page up to date if all the
@@ -1952,7 +1928,6 @@ void set_page_extent_mapped(struct page *page)
 		set_page_private(page, EXTENT_PAGE_PRIVATE);
 	}
 }
-EXPORT_SYMBOL(set_page_extent_mapped);
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
@@ -2128,7 +2103,6 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 		submit_one_bio(READ, bio, 0, bio_flags);
 	return ret;
 }
-EXPORT_SYMBOL(extent_read_full_page);
 
 /*
  * the writepage semantics are similar to regular writepage.  extent
@@ -2533,7 +2507,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 		submit_one_bio(WRITE, epd.bio, 0, 0);
 	return ret;
 }
-EXPORT_SYMBOL(extent_write_full_page);
 
 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 			      u64 start, u64 end, get_extent_t *get_extent,
@@ -2579,8 +2552,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 		submit_one_bio(WRITE, epd.bio, 0, 0);
 	return ret;
 }
-EXPORT_SYMBOL(extent_write_locked_range);
-
 
 int extent_writepages(struct extent_io_tree *tree,
 		      struct address_space *mapping,
@@ -2602,7 +2573,6 @@ int extent_writepages(struct extent_io_tree *tree,
 		submit_one_bio(WRITE, epd.bio, 0, 0);
 	return ret;
 }
-EXPORT_SYMBOL(extent_writepages);
 
 int extent_readpages(struct extent_io_tree *tree,
 		     struct address_space *mapping,
@@ -2643,7 +2613,6 @@ int extent_readpages(struct extent_io_tree *tree,
 		submit_one_bio(READ, bio, 0, bio_flags);
 	return 0;
 }
-EXPORT_SYMBOL(extent_readpages);
 
 /*
  * basic invalidatepage code, this waits on any locked or writeback
@@ -2668,7 +2637,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 			 1, 1, GFP_NOFS);
 	return 0;
 }
-EXPORT_SYMBOL(extent_invalidatepage);
 
 /*
  * simple commit_write call, set_range_dirty is used to mark both
@@ -2689,7 +2657,6 @@ int extent_commit_write(struct extent_io_tree *tree,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(extent_commit_write);
 
 int extent_prepare_write(struct extent_io_tree *tree,
 			 struct inode *inode, struct page *page,
@@ -2787,7 +2754,6 @@ err:
 	/* FIXME, zero out newly allocated blocks on error */
 	return err;
 }
-EXPORT_SYMBOL(extent_prepare_write);
 
 /*
  * a helper for releasepage, this tests for areas of the page that
@@ -2813,7 +2779,6 @@ int try_release_extent_state(struct extent_map_tree *map,
 	}
 	return ret;
 }
-EXPORT_SYMBOL(try_release_extent_state);
 
 /*
  * a helper for releasepage.  As long as there are no locked extents
@@ -2863,7 +2828,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	}
 	return try_release_extent_state(map, tree, page, mask);
 }
-EXPORT_SYMBOL(try_release_extent_mapping);
 
 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
 		     get_extent_t *get_extent)
@@ -3042,7 +3006,6 @@ free_eb:
 	__free_extent_buffer(eb);
 	return exists;
 }
-EXPORT_SYMBOL(alloc_extent_buffer);
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					 u64 start, unsigned long len,
@@ -3061,7 +3024,6 @@ struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 
 	return eb;
 }
-EXPORT_SYMBOL(find_extent_buffer);
 
 void free_extent_buffer(struct extent_buffer *eb)
 {
@@ -3073,7 +3035,6 @@ void free_extent_buffer(struct extent_buffer *eb)
 
 	WARN_ON(1);
 }
-EXPORT_SYMBOL(free_extent_buffer);
 
 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 			      struct extent_buffer *eb)
@@ -3128,7 +3089,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(clear_extent_buffer_dirty);
 
 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
 				    struct extent_buffer *eb)
@@ -3136,7 +3096,6 @@ int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
 	return wait_on_extent_writeback(tree, eb->start,
 					eb->start + eb->len - 1);
 }
-EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
 
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			    struct extent_buffer *eb)
@@ -3167,7 +3126,6 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(set_extent_buffer_dirty);
 
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 				 struct extent_buffer *eb)
@@ -3212,7 +3170,6 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(set_extent_buffer_uptodate);
 
 int extent_range_uptodate(struct extent_io_tree *tree,
 			  u64 start, u64 end)
@@ -3267,7 +3224,6 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 	}
 	return pg_uptodate;
 }
-EXPORT_SYMBOL(extent_buffer_uptodate);
 
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb,
@@ -3366,7 +3322,6 @@ unlock_exit:
 	}
 	return ret;
 }
-EXPORT_SYMBOL(read_extent_buffer_pages);
 
 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 			unsigned long start,
@@ -3399,7 +3354,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 		i++;
 	}
 }
-EXPORT_SYMBOL(read_extent_buffer);
 
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 			      unsigned long min_len, char **token, char **map,
@@ -3439,7 +3393,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	*map_len = PAGE_CACHE_SIZE - offset;
 	return 0;
 }
-EXPORT_SYMBOL(map_private_extent_buffer);
 
 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		      unsigned long min_len,
@@ -3465,13 +3418,11 @@ int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	}
 	return err;
 }
-EXPORT_SYMBOL(map_extent_buffer);
 
 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
 {
 	kunmap_atomic(token, km);
 }
-EXPORT_SYMBOL(unmap_extent_buffer);
 
 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 			 unsigned long start,
@@ -3509,7 +3460,6 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 	}
 	return ret;
 }
-EXPORT_SYMBOL(memcmp_extent_buffer);
 
 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 			 unsigned long start, unsigned long len)
@@ -3542,7 +3492,6 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 		i++;
 	}
 }
-EXPORT_SYMBOL(write_extent_buffer);
 
 void memset_extent_buffer(struct extent_buffer *eb, char c,
 			  unsigned long start, unsigned long len)
@@ -3573,7 +3522,6 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 		i++;
 	}
 }
-EXPORT_SYMBOL(memset_extent_buffer);
 
 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 			unsigned long dst_offset, unsigned long src_offset,
@@ -3608,7 +3556,6 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 		i++;
 	}
 }
-EXPORT_SYMBOL(copy_extent_buffer);
 
 static void move_pages(struct page *dst_page, struct page *src_page,
 		       unsigned long dst_off, unsigned long src_off,
@@ -3692,7 +3639,6 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		len -= cur;
 	}
 }
-EXPORT_SYMBOL(memcpy_extent_buffer);
 
 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 			   unsigned long src_offset, unsigned long len)
@@ -3741,7 +3687,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		len -= cur;
 	}
 }
-EXPORT_SYMBOL(memmove_extent_buffer);
 
 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 {
@@ -3770,4 +3715,3 @@ out:
 	spin_unlock(&tree->buffer_lock);
 	return ret;
 }
-EXPORT_SYMBOL(try_release_extent_buffer);
-- 
cgit v1.2.3-70-g09d2
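The commit above removes every EXPORT_SYMBOL() marker from extent_io.c. EXPORT_SYMBOL() is what makes a built-in kernel symbol callable from loadable modules; dropping the markers leaves the helpers usable only by code linked into the same image, which is appropriate while they remain btrfs-internal rather than generic infrastructure. A minimal sketch of what each removal means follows; it is illustrative only, and my_extent_helper is a hypothetical name, not a function from extent_io.c.

/*
 * Illustrative sketch only -- not part of the commit.
 * my_extent_helper is a hypothetical name.
 */
#include <linux/module.h>

int my_extent_helper(void)
{
	return 0;
}
/*
 * Before the commit, a line like the one below followed each helper,
 * making it visible to loadable modules:
 *
 *	EXPORT_SYMBOL(my_extent_helper);
 *
 * With it removed, only callers built into the same image can link
 * against the symbol.
 */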